]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
Merge remote-tracking branch 'remotes/spice/tags/pull-spice-20150529-1' into staging
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
8167ee88 19 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
2c0262af
FB
20 */
21#include <stdarg.h>
22#include <stdlib.h>
23#include <stdio.h>
24#include <string.h>
25#include <inttypes.h>
26
27#include "cpu.h"
ccd38087 28#include "internals.h"
76cad711 29#include "disas/disas.h"
57fec1fe 30#include "tcg-op.h"
1de7afc9 31#include "qemu/log.h"
534df156 32#include "qemu/bitops.h"
1d854765 33#include "arm_ldst.h"
1497c961 34
2ef6175a
RH
35#include "exec/helper-proto.h"
36#include "exec/helper-gen.h"
2c0262af 37
a7e30d84
LV
38#include "trace-tcg.h"
39
40
/* Architecture-feature convenience tests; these consult the feature
 * bits cached in the DisasContext ("s" must be in scope at use site).
 */
#define ENABLE_ARCH_4T    arm_dc_feature(s, ARM_FEATURE_V4T)
#define ENABLE_ARCH_5     arm_dc_feature(s, ARM_FEATURE_V5)
/* currently all emulated v5 cores are also v5TE, so don't bother */
#define ENABLE_ARCH_5TE   arm_dc_feature(s, ARM_FEATURE_V5)
#define ENABLE_ARCH_5J    0
#define ENABLE_ARCH_6     arm_dc_feature(s, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K    arm_dc_feature(s, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2   arm_dc_feature(s, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7     arm_dc_feature(s, ARM_FEATURE_V7)
#define ENABLE_ARCH_8     arm_dc_feature(s, ARM_FEATURE_V8)

/* Branch to the translator's illegal-opcode path unless the required
 * architecture feature is present.
 */
#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while (0)
f570c61e 54#include "translate.h"
e12ce78d
PM
55static uint32_t gen_opc_condexec_bits[OPC_BUF_SIZE];
56
b5ff1b31
FB
57#if defined(CONFIG_USER_ONLY)
58#define IS_USER(s) 1
59#else
60#define IS_USER(s) (s->user)
61#endif
62
3407ad0e 63TCGv_ptr cpu_env;
ad69471c 64/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 65static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
155c3eac 66static TCGv_i32 cpu_R[16];
66c374de 67static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
03d05e2d
PM
68static TCGv_i64 cpu_exclusive_addr;
69static TCGv_i64 cpu_exclusive_val;
426f5abc 70#ifdef CONFIG_USER_ONLY
03d05e2d 71static TCGv_i64 cpu_exclusive_test;
426f5abc
PB
72static TCGv_i32 cpu_exclusive_info;
73#endif
ad69471c 74
b26eefb6 75/* FIXME: These should be removed. */
39d5492a 76static TCGv_i32 cpu_F0s, cpu_F1s;
a7812ae4 77static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 78
022c62cb 79#include "exec/gen-icount.h"
2e70f6ef 80
155c3eac
FN
81static const char *regnames[] =
82 { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
83 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "pc" };
84
b26eefb6
PB
85/* initialize TCG globals. */
86void arm_translate_init(void)
87{
155c3eac
FN
88 int i;
89
a7812ae4
PB
90 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
91
155c3eac
FN
92 for (i = 0; i < 16; i++) {
93 cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 94 offsetof(CPUARMState, regs[i]),
155c3eac
FN
95 regnames[i]);
96 }
66c374de
AJ
97 cpu_CF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, CF), "CF");
98 cpu_NF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, NF), "NF");
99 cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
100 cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
101
03d05e2d 102 cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 103 offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
03d05e2d 104 cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 105 offsetof(CPUARMState, exclusive_val), "exclusive_val");
426f5abc 106#ifdef CONFIG_USER_ONLY
03d05e2d 107 cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
0ecb72a5 108 offsetof(CPUARMState, exclusive_test), "exclusive_test");
426f5abc 109 cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
0ecb72a5 110 offsetof(CPUARMState, exclusive_info), "exclusive_info");
426f5abc 111#endif
155c3eac 112
14ade10f 113 a64_translate_init();
b26eefb6
PB
114}
115
579d21cc
PM
116static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
117{
118 /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
119 * insns:
120 * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
121 * otherwise, access as if at PL0.
122 */
123 switch (s->mmu_idx) {
124 case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
125 case ARMMMUIdx_S12NSE0:
126 case ARMMMUIdx_S12NSE1:
127 return ARMMMUIdx_S12NSE0;
128 case ARMMMUIdx_S1E3:
129 case ARMMMUIdx_S1SE0:
130 case ARMMMUIdx_S1SE1:
131 return ARMMMUIdx_S1SE0;
132 case ARMMMUIdx_S2NS:
133 default:
134 g_assert_not_reached();
135 }
136}
137
39d5492a 138static inline TCGv_i32 load_cpu_offset(int offset)
d9ba4830 139{
39d5492a 140 TCGv_i32 tmp = tcg_temp_new_i32();
d9ba4830
PB
141 tcg_gen_ld_i32(tmp, cpu_env, offset);
142 return tmp;
143}
144
0ecb72a5 145#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
d9ba4830 146
39d5492a 147static inline void store_cpu_offset(TCGv_i32 var, int offset)
d9ba4830
PB
148{
149 tcg_gen_st_i32(var, cpu_env, offset);
7d1b0095 150 tcg_temp_free_i32(var);
d9ba4830
PB
151}
152
153#define store_cpu_field(var, name) \
0ecb72a5 154 store_cpu_offset(var, offsetof(CPUARMState, name))
d9ba4830 155
b26eefb6 156/* Set a variable to the value of a CPU register. */
39d5492a 157static void load_reg_var(DisasContext *s, TCGv_i32 var, int reg)
b26eefb6
PB
158{
159 if (reg == 15) {
160 uint32_t addr;
b90372ad 161 /* normally, since we updated PC, we need only to add one insn */
b26eefb6
PB
162 if (s->thumb)
163 addr = (long)s->pc + 2;
164 else
165 addr = (long)s->pc + 4;
166 tcg_gen_movi_i32(var, addr);
167 } else {
155c3eac 168 tcg_gen_mov_i32(var, cpu_R[reg]);
b26eefb6
PB
169 }
170}
171
172/* Create a new temporary and set it to the value of a CPU register. */
39d5492a 173static inline TCGv_i32 load_reg(DisasContext *s, int reg)
b26eefb6 174{
39d5492a 175 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
176 load_reg_var(s, tmp, reg);
177 return tmp;
178}
179
180/* Set a CPU register. The source must be a temporary and will be
181 marked as dead. */
39d5492a 182static void store_reg(DisasContext *s, int reg, TCGv_i32 var)
b26eefb6
PB
183{
184 if (reg == 15) {
185 tcg_gen_andi_i32(var, var, ~1);
186 s->is_jmp = DISAS_JUMP;
187 }
155c3eac 188 tcg_gen_mov_i32(cpu_R[reg], var);
7d1b0095 189 tcg_temp_free_i32(var);
b26eefb6
PB
190}
191
/* Value extensions. */
#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)

/* Dual-halfword extensions go via helpers. */
#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
39d5492a 202static inline void gen_set_cpsr(TCGv_i32 var, uint32_t mask)
b75263d6 203{
39d5492a 204 TCGv_i32 tmp_mask = tcg_const_i32(mask);
1ce94f81 205 gen_helper_cpsr_write(cpu_env, var, tmp_mask);
b75263d6
JR
206 tcg_temp_free_i32(tmp_mask);
207}
d9ba4830
PB
208/* Set NZCV flags from the high 4 bits of var. */
209#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
210
d4a2dc67 211static void gen_exception_internal(int excp)
d9ba4830 212{
d4a2dc67
PM
213 TCGv_i32 tcg_excp = tcg_const_i32(excp);
214
215 assert(excp_is_internal(excp));
216 gen_helper_exception_internal(cpu_env, tcg_excp);
217 tcg_temp_free_i32(tcg_excp);
218}
219
220static void gen_exception(int excp, uint32_t syndrome)
221{
222 TCGv_i32 tcg_excp = tcg_const_i32(excp);
223 TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
224
225 gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
226 tcg_temp_free_i32(tcg_syn);
227 tcg_temp_free_i32(tcg_excp);
d9ba4830
PB
228}
229
50225ad0
PM
230static void gen_ss_advance(DisasContext *s)
231{
232 /* If the singlestep state is Active-not-pending, advance to
233 * Active-pending.
234 */
235 if (s->ss_active) {
236 s->pstate_ss = 0;
237 gen_helper_clear_pstate_ss(cpu_env);
238 }
239}
240
241static void gen_step_complete_exception(DisasContext *s)
242{
243 /* We just completed step of an insn. Move from Active-not-pending
244 * to Active-pending, and then also take the swstep exception.
245 * This corresponds to making the (IMPDEF) choice to prioritize
246 * swstep exceptions over asynchronous exceptions taken to an exception
247 * level where debug is disabled. This choice has the advantage that
248 * we do not need to maintain internal state corresponding to the
249 * ISV/EX syndrome bits between completion of the step and generation
250 * of the exception, and our syndrome information is always correct.
251 */
252 gen_ss_advance(s);
253 gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
254 s->is_jmp = DISAS_EXC;
255}
256
39d5492a 257static void gen_smul_dual(TCGv_i32 a, TCGv_i32 b)
3670669c 258{
39d5492a
PM
259 TCGv_i32 tmp1 = tcg_temp_new_i32();
260 TCGv_i32 tmp2 = tcg_temp_new_i32();
22478e79
AZ
261 tcg_gen_ext16s_i32(tmp1, a);
262 tcg_gen_ext16s_i32(tmp2, b);
3670669c 263 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
7d1b0095 264 tcg_temp_free_i32(tmp2);
3670669c
PB
265 tcg_gen_sari_i32(a, a, 16);
266 tcg_gen_sari_i32(b, b, 16);
267 tcg_gen_mul_i32(b, b, a);
268 tcg_gen_mov_i32(a, tmp1);
7d1b0095 269 tcg_temp_free_i32(tmp1);
3670669c
PB
270}
271
272/* Byteswap each halfword. */
39d5492a 273static void gen_rev16(TCGv_i32 var)
3670669c 274{
39d5492a 275 TCGv_i32 tmp = tcg_temp_new_i32();
3670669c
PB
276 tcg_gen_shri_i32(tmp, var, 8);
277 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
278 tcg_gen_shli_i32(var, var, 8);
279 tcg_gen_andi_i32(var, var, 0xff00ff00);
280 tcg_gen_or_i32(var, var, tmp);
7d1b0095 281 tcg_temp_free_i32(tmp);
3670669c
PB
282}
283
284/* Byteswap low halfword and sign extend. */
39d5492a 285static void gen_revsh(TCGv_i32 var)
3670669c 286{
1a855029
AJ
287 tcg_gen_ext16u_i32(var, var);
288 tcg_gen_bswap16_i32(var, var);
289 tcg_gen_ext16s_i32(var, var);
3670669c
PB
290}
291
292/* Unsigned bitfield extract. */
39d5492a 293static void gen_ubfx(TCGv_i32 var, int shift, uint32_t mask)
3670669c
PB
294{
295 if (shift)
296 tcg_gen_shri_i32(var, var, shift);
297 tcg_gen_andi_i32(var, var, mask);
298}
299
300/* Signed bitfield extract. */
39d5492a 301static void gen_sbfx(TCGv_i32 var, int shift, int width)
3670669c
PB
302{
303 uint32_t signbit;
304
305 if (shift)
306 tcg_gen_sari_i32(var, var, shift);
307 if (shift + width < 32) {
308 signbit = 1u << (width - 1);
309 tcg_gen_andi_i32(var, var, (1u << width) - 1);
310 tcg_gen_xori_i32(var, var, signbit);
311 tcg_gen_subi_i32(var, var, signbit);
312 }
313}
314
838fa72d 315/* Return (b << 32) + a. Mark inputs as dead */
39d5492a 316static TCGv_i64 gen_addq_msw(TCGv_i64 a, TCGv_i32 b)
3670669c 317{
838fa72d
AJ
318 TCGv_i64 tmp64 = tcg_temp_new_i64();
319
320 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 321 tcg_temp_free_i32(b);
838fa72d
AJ
322 tcg_gen_shli_i64(tmp64, tmp64, 32);
323 tcg_gen_add_i64(a, tmp64, a);
324
325 tcg_temp_free_i64(tmp64);
326 return a;
327}
328
329/* Return (b << 32) - a. Mark inputs as dead. */
39d5492a 330static TCGv_i64 gen_subq_msw(TCGv_i64 a, TCGv_i32 b)
838fa72d
AJ
331{
332 TCGv_i64 tmp64 = tcg_temp_new_i64();
333
334 tcg_gen_extu_i32_i64(tmp64, b);
7d1b0095 335 tcg_temp_free_i32(b);
838fa72d
AJ
336 tcg_gen_shli_i64(tmp64, tmp64, 32);
337 tcg_gen_sub_i64(a, tmp64, a);
338
339 tcg_temp_free_i64(tmp64);
340 return a;
3670669c
PB
341}
342
5e3f878a 343/* 32x32->64 multiply. Marks inputs as dead. */
39d5492a 344static TCGv_i64 gen_mulu_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 345{
39d5492a
PM
346 TCGv_i32 lo = tcg_temp_new_i32();
347 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 348 TCGv_i64 ret;
5e3f878a 349
831d7fe8 350 tcg_gen_mulu2_i32(lo, hi, a, b);
7d1b0095 351 tcg_temp_free_i32(a);
7d1b0095 352 tcg_temp_free_i32(b);
831d7fe8
RH
353
354 ret = tcg_temp_new_i64();
355 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
356 tcg_temp_free_i32(lo);
357 tcg_temp_free_i32(hi);
831d7fe8
RH
358
359 return ret;
5e3f878a
PB
360}
361
39d5492a 362static TCGv_i64 gen_muls_i64_i32(TCGv_i32 a, TCGv_i32 b)
5e3f878a 363{
39d5492a
PM
364 TCGv_i32 lo = tcg_temp_new_i32();
365 TCGv_i32 hi = tcg_temp_new_i32();
831d7fe8 366 TCGv_i64 ret;
5e3f878a 367
831d7fe8 368 tcg_gen_muls2_i32(lo, hi, a, b);
7d1b0095 369 tcg_temp_free_i32(a);
7d1b0095 370 tcg_temp_free_i32(b);
831d7fe8
RH
371
372 ret = tcg_temp_new_i64();
373 tcg_gen_concat_i32_i64(ret, lo, hi);
39d5492a
PM
374 tcg_temp_free_i32(lo);
375 tcg_temp_free_i32(hi);
831d7fe8
RH
376
377 return ret;
5e3f878a
PB
378}
379
8f01245e 380/* Swap low and high halfwords. */
39d5492a 381static void gen_swap_half(TCGv_i32 var)
8f01245e 382{
39d5492a 383 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e
PB
384 tcg_gen_shri_i32(tmp, var, 16);
385 tcg_gen_shli_i32(var, var, 16);
386 tcg_gen_or_i32(var, var, tmp);
7d1b0095 387 tcg_temp_free_i32(tmp);
8f01245e
PB
388}
389
b26eefb6
PB
390/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
391 tmp = (t0 ^ t1) & 0x8000;
392 t0 &= ~0x8000;
393 t1 &= ~0x8000;
394 t0 = (t0 + t1) ^ tmp;
395 */
396
39d5492a 397static void gen_add16(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 398{
39d5492a 399 TCGv_i32 tmp = tcg_temp_new_i32();
b26eefb6
PB
400 tcg_gen_xor_i32(tmp, t0, t1);
401 tcg_gen_andi_i32(tmp, tmp, 0x8000);
402 tcg_gen_andi_i32(t0, t0, ~0x8000);
403 tcg_gen_andi_i32(t1, t1, ~0x8000);
404 tcg_gen_add_i32(t0, t0, t1);
405 tcg_gen_xor_i32(t0, t0, tmp);
7d1b0095
PM
406 tcg_temp_free_i32(tmp);
407 tcg_temp_free_i32(t1);
b26eefb6
PB
408}
409
410/* Set CF to the top bit of var. */
39d5492a 411static void gen_set_CF_bit31(TCGv_i32 var)
b26eefb6 412{
66c374de 413 tcg_gen_shri_i32(cpu_CF, var, 31);
b26eefb6
PB
414}
415
416/* Set N and Z flags from var. */
39d5492a 417static inline void gen_logic_CC(TCGv_i32 var)
b26eefb6 418{
66c374de
AJ
419 tcg_gen_mov_i32(cpu_NF, var);
420 tcg_gen_mov_i32(cpu_ZF, var);
b26eefb6
PB
421}
422
423/* T0 += T1 + CF. */
39d5492a 424static void gen_adc(TCGv_i32 t0, TCGv_i32 t1)
b26eefb6 425{
396e467c 426 tcg_gen_add_i32(t0, t0, t1);
66c374de 427 tcg_gen_add_i32(t0, t0, cpu_CF);
b26eefb6
PB
428}
429
e9bb4aa9 430/* dest = T0 + T1 + CF. */
39d5492a 431static void gen_add_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
e9bb4aa9 432{
e9bb4aa9 433 tcg_gen_add_i32(dest, t0, t1);
66c374de 434 tcg_gen_add_i32(dest, dest, cpu_CF);
e9bb4aa9
JR
435}
436
3670669c 437/* dest = T0 - T1 + CF - 1. */
39d5492a 438static void gen_sub_carry(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
3670669c 439{
3670669c 440 tcg_gen_sub_i32(dest, t0, t1);
66c374de 441 tcg_gen_add_i32(dest, dest, cpu_CF);
3670669c 442 tcg_gen_subi_i32(dest, dest, 1);
3670669c
PB
443}
444
72485ec4 445/* dest = T0 + T1. Compute C, N, V and Z flags */
39d5492a 446static void gen_add_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 447{
39d5492a 448 TCGv_i32 tmp = tcg_temp_new_i32();
e3482cb8
RH
449 tcg_gen_movi_i32(tmp, 0);
450 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, t1, tmp);
72485ec4 451 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
72485ec4 452 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
72485ec4
AJ
453 tcg_gen_xor_i32(tmp, t0, t1);
454 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
455 tcg_temp_free_i32(tmp);
456 tcg_gen_mov_i32(dest, cpu_NF);
457}
458
49b4c31e 459/* dest = T0 + T1 + CF. Compute C, N, V and Z flags */
39d5492a 460static void gen_adc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
49b4c31e 461{
39d5492a 462 TCGv_i32 tmp = tcg_temp_new_i32();
49b4c31e
RH
463 if (TCG_TARGET_HAS_add2_i32) {
464 tcg_gen_movi_i32(tmp, 0);
465 tcg_gen_add2_i32(cpu_NF, cpu_CF, t0, tmp, cpu_CF, tmp);
8c3ac601 466 tcg_gen_add2_i32(cpu_NF, cpu_CF, cpu_NF, cpu_CF, t1, tmp);
49b4c31e
RH
467 } else {
468 TCGv_i64 q0 = tcg_temp_new_i64();
469 TCGv_i64 q1 = tcg_temp_new_i64();
470 tcg_gen_extu_i32_i64(q0, t0);
471 tcg_gen_extu_i32_i64(q1, t1);
472 tcg_gen_add_i64(q0, q0, q1);
473 tcg_gen_extu_i32_i64(q1, cpu_CF);
474 tcg_gen_add_i64(q0, q0, q1);
475 tcg_gen_extr_i64_i32(cpu_NF, cpu_CF, q0);
476 tcg_temp_free_i64(q0);
477 tcg_temp_free_i64(q1);
478 }
479 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
480 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
481 tcg_gen_xor_i32(tmp, t0, t1);
482 tcg_gen_andc_i32(cpu_VF, cpu_VF, tmp);
483 tcg_temp_free_i32(tmp);
484 tcg_gen_mov_i32(dest, cpu_NF);
485}
486
72485ec4 487/* dest = T0 - T1. Compute C, N, V and Z flags */
39d5492a 488static void gen_sub_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
72485ec4 489{
39d5492a 490 TCGv_i32 tmp;
72485ec4
AJ
491 tcg_gen_sub_i32(cpu_NF, t0, t1);
492 tcg_gen_mov_i32(cpu_ZF, cpu_NF);
493 tcg_gen_setcond_i32(TCG_COND_GEU, cpu_CF, t0, t1);
494 tcg_gen_xor_i32(cpu_VF, cpu_NF, t0);
495 tmp = tcg_temp_new_i32();
496 tcg_gen_xor_i32(tmp, t0, t1);
497 tcg_gen_and_i32(cpu_VF, cpu_VF, tmp);
498 tcg_temp_free_i32(tmp);
499 tcg_gen_mov_i32(dest, cpu_NF);
500}
501
e77f0832 502/* dest = T0 + ~T1 + CF. Compute C, N, V and Z flags */
39d5492a 503static void gen_sbc_CC(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
2de68a49 504{
39d5492a 505 TCGv_i32 tmp = tcg_temp_new_i32();
e77f0832
RH
506 tcg_gen_not_i32(tmp, t1);
507 gen_adc_CC(dest, t0, tmp);
39d5492a 508 tcg_temp_free_i32(tmp);
2de68a49
RH
509}
510
365af80e 511#define GEN_SHIFT(name) \
39d5492a 512static void gen_##name(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1) \
365af80e 513{ \
39d5492a 514 TCGv_i32 tmp1, tmp2, tmp3; \
365af80e
AJ
515 tmp1 = tcg_temp_new_i32(); \
516 tcg_gen_andi_i32(tmp1, t1, 0xff); \
517 tmp2 = tcg_const_i32(0); \
518 tmp3 = tcg_const_i32(0x1f); \
519 tcg_gen_movcond_i32(TCG_COND_GTU, tmp2, tmp1, tmp3, tmp2, t0); \
520 tcg_temp_free_i32(tmp3); \
521 tcg_gen_andi_i32(tmp1, tmp1, 0x1f); \
522 tcg_gen_##name##_i32(dest, tmp2, tmp1); \
523 tcg_temp_free_i32(tmp2); \
524 tcg_temp_free_i32(tmp1); \
525}
526GEN_SHIFT(shl)
527GEN_SHIFT(shr)
528#undef GEN_SHIFT
529
39d5492a 530static void gen_sar(TCGv_i32 dest, TCGv_i32 t0, TCGv_i32 t1)
365af80e 531{
39d5492a 532 TCGv_i32 tmp1, tmp2;
365af80e
AJ
533 tmp1 = tcg_temp_new_i32();
534 tcg_gen_andi_i32(tmp1, t1, 0xff);
535 tmp2 = tcg_const_i32(0x1f);
536 tcg_gen_movcond_i32(TCG_COND_GTU, tmp1, tmp1, tmp2, tmp2, tmp1);
537 tcg_temp_free_i32(tmp2);
538 tcg_gen_sar_i32(dest, t0, tmp1);
539 tcg_temp_free_i32(tmp1);
540}
541
39d5492a 542static void tcg_gen_abs_i32(TCGv_i32 dest, TCGv_i32 src)
36c91fd1 543{
39d5492a
PM
544 TCGv_i32 c0 = tcg_const_i32(0);
545 TCGv_i32 tmp = tcg_temp_new_i32();
36c91fd1
PM
546 tcg_gen_neg_i32(tmp, src);
547 tcg_gen_movcond_i32(TCG_COND_GT, dest, src, c0, src, tmp);
548 tcg_temp_free_i32(c0);
549 tcg_temp_free_i32(tmp);
550}
ad69471c 551
39d5492a 552static void shifter_out_im(TCGv_i32 var, int shift)
b26eefb6 553{
9a119ff6 554 if (shift == 0) {
66c374de 555 tcg_gen_andi_i32(cpu_CF, var, 1);
b26eefb6 556 } else {
66c374de
AJ
557 tcg_gen_shri_i32(cpu_CF, var, shift);
558 if (shift != 31) {
559 tcg_gen_andi_i32(cpu_CF, cpu_CF, 1);
560 }
9a119ff6 561 }
9a119ff6 562}
b26eefb6 563
9a119ff6 564/* Shift by immediate. Includes special handling for shift == 0. */
39d5492a
PM
565static inline void gen_arm_shift_im(TCGv_i32 var, int shiftop,
566 int shift, int flags)
9a119ff6
PB
567{
568 switch (shiftop) {
569 case 0: /* LSL */
570 if (shift != 0) {
571 if (flags)
572 shifter_out_im(var, 32 - shift);
573 tcg_gen_shli_i32(var, var, shift);
574 }
575 break;
576 case 1: /* LSR */
577 if (shift == 0) {
578 if (flags) {
66c374de 579 tcg_gen_shri_i32(cpu_CF, var, 31);
9a119ff6
PB
580 }
581 tcg_gen_movi_i32(var, 0);
582 } else {
583 if (flags)
584 shifter_out_im(var, shift - 1);
585 tcg_gen_shri_i32(var, var, shift);
586 }
587 break;
588 case 2: /* ASR */
589 if (shift == 0)
590 shift = 32;
591 if (flags)
592 shifter_out_im(var, shift - 1);
593 if (shift == 32)
594 shift = 31;
595 tcg_gen_sari_i32(var, var, shift);
596 break;
597 case 3: /* ROR/RRX */
598 if (shift != 0) {
599 if (flags)
600 shifter_out_im(var, shift - 1);
f669df27 601 tcg_gen_rotri_i32(var, var, shift); break;
9a119ff6 602 } else {
39d5492a 603 TCGv_i32 tmp = tcg_temp_new_i32();
b6348f29 604 tcg_gen_shli_i32(tmp, cpu_CF, 31);
9a119ff6
PB
605 if (flags)
606 shifter_out_im(var, 0);
607 tcg_gen_shri_i32(var, var, 1);
b26eefb6 608 tcg_gen_or_i32(var, var, tmp);
7d1b0095 609 tcg_temp_free_i32(tmp);
b26eefb6
PB
610 }
611 }
612};
613
39d5492a
PM
614static inline void gen_arm_shift_reg(TCGv_i32 var, int shiftop,
615 TCGv_i32 shift, int flags)
8984bd2e
PB
616{
617 if (flags) {
618 switch (shiftop) {
9ef39277
BS
619 case 0: gen_helper_shl_cc(var, cpu_env, var, shift); break;
620 case 1: gen_helper_shr_cc(var, cpu_env, var, shift); break;
621 case 2: gen_helper_sar_cc(var, cpu_env, var, shift); break;
622 case 3: gen_helper_ror_cc(var, cpu_env, var, shift); break;
8984bd2e
PB
623 }
624 } else {
625 switch (shiftop) {
365af80e
AJ
626 case 0:
627 gen_shl(var, var, shift);
628 break;
629 case 1:
630 gen_shr(var, var, shift);
631 break;
632 case 2:
633 gen_sar(var, var, shift);
634 break;
f669df27
AJ
635 case 3: tcg_gen_andi_i32(shift, shift, 0x1f);
636 tcg_gen_rotr_i32(var, var, shift); break;
8984bd2e
PB
637 }
638 }
7d1b0095 639 tcg_temp_free_i32(shift);
8984bd2e
PB
640}
641
6ddbc6e4
PB
642#define PAS_OP(pfx) \
643 switch (op2) { \
644 case 0: gen_pas_helper(glue(pfx,add16)); break; \
645 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
646 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
647 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
648 case 4: gen_pas_helper(glue(pfx,add8)); break; \
649 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
650 }
39d5492a 651static void gen_arm_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 652{
a7812ae4 653 TCGv_ptr tmp;
6ddbc6e4
PB
654
655 switch (op1) {
656#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
657 case 1:
a7812ae4 658 tmp = tcg_temp_new_ptr();
0ecb72a5 659 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 660 PAS_OP(s)
b75263d6 661 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
662 break;
663 case 5:
a7812ae4 664 tmp = tcg_temp_new_ptr();
0ecb72a5 665 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 666 PAS_OP(u)
b75263d6 667 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
668 break;
669#undef gen_pas_helper
670#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
671 case 2:
672 PAS_OP(q);
673 break;
674 case 3:
675 PAS_OP(sh);
676 break;
677 case 6:
678 PAS_OP(uq);
679 break;
680 case 7:
681 PAS_OP(uh);
682 break;
683#undef gen_pas_helper
684 }
685}
9ee6e8bb
PB
686#undef PAS_OP
687
6ddbc6e4
PB
688/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
689#define PAS_OP(pfx) \
ed89a2f1 690 switch (op1) { \
6ddbc6e4
PB
691 case 0: gen_pas_helper(glue(pfx,add8)); break; \
692 case 1: gen_pas_helper(glue(pfx,add16)); break; \
693 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
694 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
695 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
696 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
697 }
39d5492a 698static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv_i32 a, TCGv_i32 b)
6ddbc6e4 699{
a7812ae4 700 TCGv_ptr tmp;
6ddbc6e4 701
ed89a2f1 702 switch (op2) {
6ddbc6e4
PB
703#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
704 case 0:
a7812ae4 705 tmp = tcg_temp_new_ptr();
0ecb72a5 706 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 707 PAS_OP(s)
b75263d6 708 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
709 break;
710 case 4:
a7812ae4 711 tmp = tcg_temp_new_ptr();
0ecb72a5 712 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 713 PAS_OP(u)
b75263d6 714 tcg_temp_free_ptr(tmp);
6ddbc6e4
PB
715 break;
716#undef gen_pas_helper
717#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
718 case 1:
719 PAS_OP(q);
720 break;
721 case 2:
722 PAS_OP(sh);
723 break;
724 case 5:
725 PAS_OP(uq);
726 break;
727 case 6:
728 PAS_OP(uh);
729 break;
730#undef gen_pas_helper
731 }
732}
9ee6e8bb
PB
733#undef PAS_OP
734
39fb730a
AG
735/*
736 * generate a conditional branch based on ARM condition code cc.
737 * This is common between ARM and Aarch64 targets.
738 */
42a268c2 739void arm_gen_test_cc(int cc, TCGLabel *label)
d9ba4830 740{
39d5492a 741 TCGv_i32 tmp;
42a268c2 742 TCGLabel *inv;
d9ba4830 743
d9ba4830
PB
744 switch (cc) {
745 case 0: /* eq: Z */
66c374de 746 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
747 break;
748 case 1: /* ne: !Z */
66c374de 749 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
750 break;
751 case 2: /* cs: C */
66c374de 752 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_CF, 0, label);
d9ba4830
PB
753 break;
754 case 3: /* cc: !C */
66c374de 755 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
d9ba4830
PB
756 break;
757 case 4: /* mi: N */
66c374de 758 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_NF, 0, label);
d9ba4830
PB
759 break;
760 case 5: /* pl: !N */
66c374de 761 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_NF, 0, label);
d9ba4830
PB
762 break;
763 case 6: /* vs: V */
66c374de 764 tcg_gen_brcondi_i32(TCG_COND_LT, cpu_VF, 0, label);
d9ba4830
PB
765 break;
766 case 7: /* vc: !V */
66c374de 767 tcg_gen_brcondi_i32(TCG_COND_GE, cpu_VF, 0, label);
d9ba4830
PB
768 break;
769 case 8: /* hi: C && !Z */
770 inv = gen_new_label();
66c374de
AJ
771 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, inv);
772 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_ZF, 0, label);
d9ba4830
PB
773 gen_set_label(inv);
774 break;
775 case 9: /* ls: !C || Z */
66c374de
AJ
776 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_CF, 0, label);
777 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
d9ba4830
PB
778 break;
779 case 10: /* ge: N == V -> N ^ V == 0 */
66c374de
AJ
780 tmp = tcg_temp_new_i32();
781 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 782 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 783 tcg_temp_free_i32(tmp);
d9ba4830
PB
784 break;
785 case 11: /* lt: N != V -> N ^ V != 0 */
66c374de
AJ
786 tmp = tcg_temp_new_i32();
787 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 788 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 789 tcg_temp_free_i32(tmp);
d9ba4830
PB
790 break;
791 case 12: /* gt: !Z && N == V */
792 inv = gen_new_label();
66c374de
AJ
793 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, inv);
794 tmp = tcg_temp_new_i32();
795 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 796 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
66c374de 797 tcg_temp_free_i32(tmp);
d9ba4830
PB
798 gen_set_label(inv);
799 break;
800 case 13: /* le: Z || N != V */
66c374de
AJ
801 tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_ZF, 0, label);
802 tmp = tcg_temp_new_i32();
803 tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
cb63669a 804 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
66c374de 805 tcg_temp_free_i32(tmp);
d9ba4830
PB
806 break;
807 default:
808 fprintf(stderr, "Bad condition code 0x%x\n", cc);
809 abort();
810 }
d9ba4830 811}
2c0262af 812
/* For data-processing op 'dp', nonzero when the S bit means "set NZ
 * from the result" (logical ops) rather than full arithmetic flags.
 */
static const uint8_t table_logic_cc[16] = {
    1, /* and */
    1, /* xor */
    0, /* sub */
    0, /* rsb */
    0, /* add */
    0, /* adc */
    0, /* sbc */
    0, /* rsc */
    1, /* andl */
    1, /* xorl */
    0, /* cmp */
    0, /* cmn */
    1, /* orr */
    1, /* mov */
    1, /* bic */
    1, /* mvn */
};
d9ba4830
PB
832/* Set PC and Thumb state from an immediate address. */
833static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 834{
39d5492a 835 TCGv_i32 tmp;
99c475ab 836
b26eefb6 837 s->is_jmp = DISAS_UPDATE;
d9ba4830 838 if (s->thumb != (addr & 1)) {
7d1b0095 839 tmp = tcg_temp_new_i32();
d9ba4830 840 tcg_gen_movi_i32(tmp, addr & 1);
0ecb72a5 841 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
7d1b0095 842 tcg_temp_free_i32(tmp);
d9ba4830 843 }
155c3eac 844 tcg_gen_movi_i32(cpu_R[15], addr & ~1);
d9ba4830
PB
845}
846
847/* Set PC and Thumb state from var. var is marked as dead. */
39d5492a 848static inline void gen_bx(DisasContext *s, TCGv_i32 var)
d9ba4830 849{
d9ba4830 850 s->is_jmp = DISAS_UPDATE;
155c3eac
FN
851 tcg_gen_andi_i32(cpu_R[15], var, ~1);
852 tcg_gen_andi_i32(var, var, 1);
853 store_cpu_field(var, thumb);
d9ba4830
PB
854}
855
21aeb343
JR
856/* Variant of store_reg which uses branch&exchange logic when storing
857 to r15 in ARM architecture v7 and above. The source must be a temporary
858 and will be marked as dead. */
7dcc1f89 859static inline void store_reg_bx(DisasContext *s, int reg, TCGv_i32 var)
21aeb343
JR
860{
861 if (reg == 15 && ENABLE_ARCH_7) {
862 gen_bx(s, var);
863 } else {
864 store_reg(s, reg, var);
865 }
866}
867
be5e7a76
DES
868/* Variant of store_reg which uses branch&exchange logic when storing
869 * to r15 in ARM architecture v5T and above. This is used for storing
870 * the results of a LDR/LDM/POP into r15, and corresponds to the cases
871 * in the ARM ARM which use the LoadWritePC() pseudocode function. */
7dcc1f89 872static inline void store_reg_from_load(DisasContext *s, int reg, TCGv_i32 var)
be5e7a76
DES
873{
874 if (reg == 15 && ENABLE_ARCH_5) {
875 gen_bx(s, var);
876 } else {
877 store_reg(s, reg, var);
878 }
879}
880
08307563
PM
881/* Abstractions of "generate code to do a guest load/store for
882 * AArch32", where a vaddr is always 32 bits (and is zero
883 * extended if we're a 64 bit core) and data is also
884 * 32 bits unless specifically doing a 64 bit access.
885 * These functions work like tcg_gen_qemu_{ld,st}* except
09f78135 886 * that the address argument is TCGv_i32 rather than TCGv.
08307563
PM
887 */
888#if TARGET_LONG_BITS == 32
889
09f78135
RH
890#define DO_GEN_LD(SUFF, OPC) \
891static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 892{ \
09f78135 893 tcg_gen_qemu_ld_i32(val, addr, index, OPC); \
08307563
PM
894}
895
09f78135
RH
896#define DO_GEN_ST(SUFF, OPC) \
897static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563 898{ \
09f78135 899 tcg_gen_qemu_st_i32(val, addr, index, OPC); \
08307563
PM
900}
901
902static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
903{
09f78135 904 tcg_gen_qemu_ld_i64(val, addr, index, MO_TEQ);
08307563
PM
905}
906
907static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
908{
09f78135 909 tcg_gen_qemu_st_i64(val, addr, index, MO_TEQ);
08307563
PM
910}
911
912#else
913
09f78135
RH
914#define DO_GEN_LD(SUFF, OPC) \
915static inline void gen_aa32_ld##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
916{ \
917 TCGv addr64 = tcg_temp_new(); \
08307563 918 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 919 tcg_gen_qemu_ld_i32(val, addr64, index, OPC); \
08307563 920 tcg_temp_free(addr64); \
08307563
PM
921}
922
09f78135
RH
923#define DO_GEN_ST(SUFF, OPC) \
924static inline void gen_aa32_st##SUFF(TCGv_i32 val, TCGv_i32 addr, int index) \
08307563
PM
925{ \
926 TCGv addr64 = tcg_temp_new(); \
08307563 927 tcg_gen_extu_i32_i64(addr64, addr); \
09f78135 928 tcg_gen_qemu_st_i32(val, addr64, index, OPC); \
08307563 929 tcg_temp_free(addr64); \
08307563
PM
930}
931
932static inline void gen_aa32_ld64(TCGv_i64 val, TCGv_i32 addr, int index)
933{
934 TCGv addr64 = tcg_temp_new();
935 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 936 tcg_gen_qemu_ld_i64(val, addr64, index, MO_TEQ);
08307563
PM
937 tcg_temp_free(addr64);
938}
939
940static inline void gen_aa32_st64(TCGv_i64 val, TCGv_i32 addr, int index)
941{
942 TCGv addr64 = tcg_temp_new();
943 tcg_gen_extu_i32_i64(addr64, addr);
09f78135 944 tcg_gen_qemu_st_i64(val, addr64, index, MO_TEQ);
08307563
PM
945 tcg_temp_free(addr64);
946}
947
948#endif
949
09f78135
RH
950DO_GEN_LD(8s, MO_SB)
951DO_GEN_LD(8u, MO_UB)
952DO_GEN_LD(16s, MO_TESW)
953DO_GEN_LD(16u, MO_TEUW)
954DO_GEN_LD(32u, MO_TEUL)
955DO_GEN_ST(8, MO_UB)
956DO_GEN_ST(16, MO_TEUW)
957DO_GEN_ST(32, MO_TEUL)
08307563 958
eaed129d 959static inline void gen_set_pc_im(DisasContext *s, target_ulong val)
5e3f878a 960{
40f860cd 961 tcg_gen_movi_i32(cpu_R[15], val);
5e3f878a
PB
962}
963
37e6456e
PM
964static inline void gen_hvc(DisasContext *s, int imm16)
965{
966 /* The pre HVC helper handles cases when HVC gets trapped
967 * as an undefined insn by runtime configuration (ie before
968 * the insn really executes).
969 */
970 gen_set_pc_im(s, s->pc - 4);
971 gen_helper_pre_hvc(cpu_env);
972 /* Otherwise we will treat this as a real exception which
973 * happens after execution of the insn. (The distinction matters
974 * for the PC value reported to the exception handler and also
975 * for single stepping.)
976 */
977 s->svc_imm = imm16;
978 gen_set_pc_im(s, s->pc);
979 s->is_jmp = DISAS_HVC;
980}
981
982static inline void gen_smc(DisasContext *s)
983{
984 /* As with HVC, we may take an exception either before or after
985 * the insn executes.
986 */
987 TCGv_i32 tmp;
988
989 gen_set_pc_im(s, s->pc - 4);
990 tmp = tcg_const_i32(syn_aa32_smc());
991 gen_helper_pre_smc(cpu_env, tmp);
992 tcg_temp_free_i32(tmp);
993 gen_set_pc_im(s, s->pc);
994 s->is_jmp = DISAS_SMC;
995}
996
d4a2dc67
PM
997static inline void
998gen_set_condexec (DisasContext *s)
999{
1000 if (s->condexec_mask) {
1001 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
1002 TCGv_i32 tmp = tcg_temp_new_i32();
1003 tcg_gen_movi_i32(tmp, val);
1004 store_cpu_field(tmp, condexec_bits);
1005 }
1006}
1007
1008static void gen_exception_internal_insn(DisasContext *s, int offset, int excp)
1009{
1010 gen_set_condexec(s);
1011 gen_set_pc_im(s, s->pc - offset);
1012 gen_exception_internal(excp);
1013 s->is_jmp = DISAS_JUMP;
1014}
1015
1016static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
1017{
1018 gen_set_condexec(s);
1019 gen_set_pc_im(s, s->pc - offset);
1020 gen_exception(excp, syn);
1021 s->is_jmp = DISAS_JUMP;
1022}
1023
b5ff1b31
FB
1024/* Force a TB lookup after an instruction that changes the CPU state. */
1025static inline void gen_lookup_tb(DisasContext *s)
1026{
a6445c52 1027 tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
b5ff1b31
FB
1028 s->is_jmp = DISAS_UPDATE;
1029}
1030
b0109805 1031static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
39d5492a 1032 TCGv_i32 var)
2c0262af 1033{
1e8d4eec 1034 int val, rm, shift, shiftop;
39d5492a 1035 TCGv_i32 offset;
2c0262af
FB
1036
1037 if (!(insn & (1 << 25))) {
1038 /* immediate */
1039 val = insn & 0xfff;
1040 if (!(insn & (1 << 23)))
1041 val = -val;
537730b9 1042 if (val != 0)
b0109805 1043 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1044 } else {
1045 /* shift/register */
1046 rm = (insn) & 0xf;
1047 shift = (insn >> 7) & 0x1f;
1e8d4eec 1048 shiftop = (insn >> 5) & 3;
b26eefb6 1049 offset = load_reg(s, rm);
9a119ff6 1050 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 1051 if (!(insn & (1 << 23)))
b0109805 1052 tcg_gen_sub_i32(var, var, offset);
2c0262af 1053 else
b0109805 1054 tcg_gen_add_i32(var, var, offset);
7d1b0095 1055 tcg_temp_free_i32(offset);
2c0262af
FB
1056 }
1057}
1058
191f9a93 1059static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
39d5492a 1060 int extra, TCGv_i32 var)
2c0262af
FB
1061{
1062 int val, rm;
39d5492a 1063 TCGv_i32 offset;
3b46e624 1064
2c0262af
FB
1065 if (insn & (1 << 22)) {
1066 /* immediate */
1067 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
1068 if (!(insn & (1 << 23)))
1069 val = -val;
18acad92 1070 val += extra;
537730b9 1071 if (val != 0)
b0109805 1072 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
1073 } else {
1074 /* register */
191f9a93 1075 if (extra)
b0109805 1076 tcg_gen_addi_i32(var, var, extra);
2c0262af 1077 rm = (insn) & 0xf;
b26eefb6 1078 offset = load_reg(s, rm);
2c0262af 1079 if (!(insn & (1 << 23)))
b0109805 1080 tcg_gen_sub_i32(var, var, offset);
2c0262af 1081 else
b0109805 1082 tcg_gen_add_i32(var, var, offset);
7d1b0095 1083 tcg_temp_free_i32(offset);
2c0262af
FB
1084 }
1085}
1086
5aaebd13
PM
1087static TCGv_ptr get_fpstatus_ptr(int neon)
1088{
1089 TCGv_ptr statusptr = tcg_temp_new_ptr();
1090 int offset;
1091 if (neon) {
0ecb72a5 1092 offset = offsetof(CPUARMState, vfp.standard_fp_status);
5aaebd13 1093 } else {
0ecb72a5 1094 offset = offsetof(CPUARMState, vfp.fp_status);
5aaebd13
PM
1095 }
1096 tcg_gen_addi_ptr(statusptr, cpu_env, offset);
1097 return statusptr;
1098}
1099
4373f3ce
PB
1100#define VFP_OP2(name) \
1101static inline void gen_vfp_##name(int dp) \
1102{ \
ae1857ec
PM
1103 TCGv_ptr fpst = get_fpstatus_ptr(0); \
1104 if (dp) { \
1105 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
1106 } else { \
1107 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
1108 } \
1109 tcg_temp_free_ptr(fpst); \
b7bcbe95
FB
1110}
1111
4373f3ce
PB
1112VFP_OP2(add)
1113VFP_OP2(sub)
1114VFP_OP2(mul)
1115VFP_OP2(div)
1116
1117#undef VFP_OP2
1118
605a6aed
PM
1119static inline void gen_vfp_F1_mul(int dp)
1120{
1121 /* Like gen_vfp_mul() but put result in F1 */
ae1857ec 1122 TCGv_ptr fpst = get_fpstatus_ptr(0);
605a6aed 1123 if (dp) {
ae1857ec 1124 gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
605a6aed 1125 } else {
ae1857ec 1126 gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
605a6aed 1127 }
ae1857ec 1128 tcg_temp_free_ptr(fpst);
605a6aed
PM
1129}
1130
1131static inline void gen_vfp_F1_neg(int dp)
1132{
1133 /* Like gen_vfp_neg() but put result in F1 */
1134 if (dp) {
1135 gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
1136 } else {
1137 gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
1138 }
1139}
1140
4373f3ce
PB
1141static inline void gen_vfp_abs(int dp)
1142{
1143 if (dp)
1144 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1145 else
1146 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1147}
1148
1149static inline void gen_vfp_neg(int dp)
1150{
1151 if (dp)
1152 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1153 else
1154 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1155}
1156
1157static inline void gen_vfp_sqrt(int dp)
1158{
1159 if (dp)
1160 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1161 else
1162 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1163}
1164
1165static inline void gen_vfp_cmp(int dp)
1166{
1167 if (dp)
1168 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1169 else
1170 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1171}
1172
1173static inline void gen_vfp_cmpe(int dp)
1174{
1175 if (dp)
1176 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1177 else
1178 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1179}
1180
1181static inline void gen_vfp_F1_ld0(int dp)
1182{
1183 if (dp)
5b340b51 1184 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1185 else
5b340b51 1186 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1187}
1188
5500b06c
PM
1189#define VFP_GEN_ITOF(name) \
1190static inline void gen_vfp_##name(int dp, int neon) \
1191{ \
5aaebd13 1192 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1193 if (dp) { \
1194 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
1195 } else { \
1196 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1197 } \
b7fa9214 1198 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1199}
1200
5500b06c
PM
1201VFP_GEN_ITOF(uito)
1202VFP_GEN_ITOF(sito)
1203#undef VFP_GEN_ITOF
4373f3ce 1204
5500b06c
PM
1205#define VFP_GEN_FTOI(name) \
1206static inline void gen_vfp_##name(int dp, int neon) \
1207{ \
5aaebd13 1208 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c
PM
1209 if (dp) { \
1210 gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
1211 } else { \
1212 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
1213 } \
b7fa9214 1214 tcg_temp_free_ptr(statusptr); \
4373f3ce
PB
1215}
1216
5500b06c
PM
1217VFP_GEN_FTOI(toui)
1218VFP_GEN_FTOI(touiz)
1219VFP_GEN_FTOI(tosi)
1220VFP_GEN_FTOI(tosiz)
1221#undef VFP_GEN_FTOI
4373f3ce 1222
16d5b3ca 1223#define VFP_GEN_FIX(name, round) \
5500b06c 1224static inline void gen_vfp_##name(int dp, int shift, int neon) \
4373f3ce 1225{ \
39d5492a 1226 TCGv_i32 tmp_shift = tcg_const_i32(shift); \
5aaebd13 1227 TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
5500b06c 1228 if (dp) { \
16d5b3ca
WN
1229 gen_helper_vfp_##name##d##round(cpu_F0d, cpu_F0d, tmp_shift, \
1230 statusptr); \
5500b06c 1231 } else { \
16d5b3ca
WN
1232 gen_helper_vfp_##name##s##round(cpu_F0s, cpu_F0s, tmp_shift, \
1233 statusptr); \
5500b06c 1234 } \
b75263d6 1235 tcg_temp_free_i32(tmp_shift); \
b7fa9214 1236 tcg_temp_free_ptr(statusptr); \
9ee6e8bb 1237}
16d5b3ca
WN
1238VFP_GEN_FIX(tosh, _round_to_zero)
1239VFP_GEN_FIX(tosl, _round_to_zero)
1240VFP_GEN_FIX(touh, _round_to_zero)
1241VFP_GEN_FIX(toul, _round_to_zero)
1242VFP_GEN_FIX(shto, )
1243VFP_GEN_FIX(slto, )
1244VFP_GEN_FIX(uhto, )
1245VFP_GEN_FIX(ulto, )
4373f3ce 1246#undef VFP_GEN_FIX
9ee6e8bb 1247
39d5492a 1248static inline void gen_vfp_ld(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1249{
08307563 1250 if (dp) {
6ce2faf4 1251 gen_aa32_ld64(cpu_F0d, addr, get_mem_index(s));
08307563 1252 } else {
6ce2faf4 1253 gen_aa32_ld32u(cpu_F0s, addr, get_mem_index(s));
08307563 1254 }
b5ff1b31
FB
1255}
1256
39d5492a 1257static inline void gen_vfp_st(DisasContext *s, int dp, TCGv_i32 addr)
b5ff1b31 1258{
08307563 1259 if (dp) {
6ce2faf4 1260 gen_aa32_st64(cpu_F0d, addr, get_mem_index(s));
08307563 1261 } else {
6ce2faf4 1262 gen_aa32_st32(cpu_F0s, addr, get_mem_index(s));
08307563 1263 }
b5ff1b31
FB
1264}
1265
8e96005d
FB
1266static inline long
1267vfp_reg_offset (int dp, int reg)
1268{
1269 if (dp)
1270 return offsetof(CPUARMState, vfp.regs[reg]);
1271 else if (reg & 1) {
1272 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1273 + offsetof(CPU_DoubleU, l.upper);
1274 } else {
1275 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1276 + offsetof(CPU_DoubleU, l.lower);
1277 }
1278}
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1289
39d5492a 1290static TCGv_i32 neon_load_reg(int reg, int pass)
8f8e3aa4 1291{
39d5492a 1292 TCGv_i32 tmp = tcg_temp_new_i32();
8f8e3aa4
PB
1293 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1294 return tmp;
1295}
1296
39d5492a 1297static void neon_store_reg(int reg, int pass, TCGv_i32 var)
8f8e3aa4
PB
1298{
1299 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
7d1b0095 1300 tcg_temp_free_i32(var);
8f8e3aa4
PB
1301}
1302
a7812ae4 1303static inline void neon_load_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1304{
1305 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1306}
1307
a7812ae4 1308static inline void neon_store_reg64(TCGv_i64 var, int reg)
ad69471c
PB
1309{
1310 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1311}
1312
4373f3ce
PB
1313#define tcg_gen_ld_f32 tcg_gen_ld_i32
1314#define tcg_gen_ld_f64 tcg_gen_ld_i64
1315#define tcg_gen_st_f32 tcg_gen_st_i32
1316#define tcg_gen_st_f64 tcg_gen_st_i64
1317
b7bcbe95
FB
1318static inline void gen_mov_F0_vreg(int dp, int reg)
1319{
1320 if (dp)
4373f3ce 1321 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1322 else
4373f3ce 1323 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1324}
1325
1326static inline void gen_mov_F1_vreg(int dp, int reg)
1327{
1328 if (dp)
4373f3ce 1329 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1330 else
4373f3ce 1331 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1332}
1333
1334static inline void gen_mov_vreg_F0(int dp, int reg)
1335{
1336 if (dp)
4373f3ce 1337 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1338 else
4373f3ce 1339 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1340}
1341
18c9b560
AZ
1342#define ARM_CP_RW_BIT (1 << 20)
1343
a7812ae4 1344static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
e677137d 1345{
0ecb72a5 1346 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1347}
1348
a7812ae4 1349static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
e677137d 1350{
0ecb72a5 1351 tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
e677137d
PB
1352}
1353
39d5492a 1354static inline TCGv_i32 iwmmxt_load_creg(int reg)
e677137d 1355{
39d5492a 1356 TCGv_i32 var = tcg_temp_new_i32();
0ecb72a5 1357 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
da6b5335 1358 return var;
e677137d
PB
1359}
1360
39d5492a 1361static inline void iwmmxt_store_creg(int reg, TCGv_i32 var)
e677137d 1362{
0ecb72a5 1363 tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
7d1b0095 1364 tcg_temp_free_i32(var);
e677137d
PB
1365}
1366
1367static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1368{
1369 iwmmxt_store_reg(cpu_M0, rn);
1370}
1371
1372static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1373{
1374 iwmmxt_load_reg(cpu_M0, rn);
1375}
1376
1377static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1378{
1379 iwmmxt_load_reg(cpu_V1, rn);
1380 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1381}
1382
1383static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1384{
1385 iwmmxt_load_reg(cpu_V1, rn);
1386 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1387}
1388
1389static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1390{
1391 iwmmxt_load_reg(cpu_V1, rn);
1392 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1393}
1394
/* Binary iwMMXt op: M0 = helper(M0, wRn). */
#define IWMMXT_OP(name)                                         \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)        \
{                                                               \
    iwmmxt_load_reg(cpu_V1, rn);                                \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1);           \
}

/* As IWMMXT_OP, but the helper also takes cpu_env. */
#define IWMMXT_OP_ENV(name)                                     \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn)        \
{                                                               \
    iwmmxt_load_reg(cpu_V1, rn);                                \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1);  \
}

/* Instantiate byte/word/long variants of an env-taking op. */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env-taking op: M0 = helper(env, M0). */
#define IWMMXT_OP_ENV1(name)                                    \
static inline void gen_op_iwmmxt_##name##_M0(void)              \
{                                                               \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0);          \
}
1419
1420IWMMXT_OP(maddsq)
1421IWMMXT_OP(madduq)
1422IWMMXT_OP(sadb)
1423IWMMXT_OP(sadw)
1424IWMMXT_OP(mulslw)
1425IWMMXT_OP(mulshw)
1426IWMMXT_OP(mululw)
1427IWMMXT_OP(muluhw)
1428IWMMXT_OP(macsw)
1429IWMMXT_OP(macuw)
1430
477955bd
PM
1431IWMMXT_OP_ENV_SIZE(unpackl)
1432IWMMXT_OP_ENV_SIZE(unpackh)
1433
1434IWMMXT_OP_ENV1(unpacklub)
1435IWMMXT_OP_ENV1(unpackluw)
1436IWMMXT_OP_ENV1(unpacklul)
1437IWMMXT_OP_ENV1(unpackhub)
1438IWMMXT_OP_ENV1(unpackhuw)
1439IWMMXT_OP_ENV1(unpackhul)
1440IWMMXT_OP_ENV1(unpacklsb)
1441IWMMXT_OP_ENV1(unpacklsw)
1442IWMMXT_OP_ENV1(unpacklsl)
1443IWMMXT_OP_ENV1(unpackhsb)
1444IWMMXT_OP_ENV1(unpackhsw)
1445IWMMXT_OP_ENV1(unpackhsl)
1446
1447IWMMXT_OP_ENV_SIZE(cmpeq)
1448IWMMXT_OP_ENV_SIZE(cmpgtu)
1449IWMMXT_OP_ENV_SIZE(cmpgts)
1450
1451IWMMXT_OP_ENV_SIZE(mins)
1452IWMMXT_OP_ENV_SIZE(minu)
1453IWMMXT_OP_ENV_SIZE(maxs)
1454IWMMXT_OP_ENV_SIZE(maxu)
1455
1456IWMMXT_OP_ENV_SIZE(subn)
1457IWMMXT_OP_ENV_SIZE(addn)
1458IWMMXT_OP_ENV_SIZE(subu)
1459IWMMXT_OP_ENV_SIZE(addu)
1460IWMMXT_OP_ENV_SIZE(subs)
1461IWMMXT_OP_ENV_SIZE(adds)
1462
1463IWMMXT_OP_ENV(avgb0)
1464IWMMXT_OP_ENV(avgb1)
1465IWMMXT_OP_ENV(avgw0)
1466IWMMXT_OP_ENV(avgw1)
e677137d 1467
477955bd
PM
1468IWMMXT_OP_ENV(packuw)
1469IWMMXT_OP_ENV(packul)
1470IWMMXT_OP_ENV(packuq)
1471IWMMXT_OP_ENV(packsw)
1472IWMMXT_OP_ENV(packsl)
1473IWMMXT_OP_ENV(packsq)
e677137d 1474
e677137d
PB
1475static void gen_op_iwmmxt_set_mup(void)
1476{
39d5492a 1477 TCGv_i32 tmp;
e677137d
PB
1478 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1479 tcg_gen_ori_i32(tmp, tmp, 2);
1480 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1481}
1482
1483static void gen_op_iwmmxt_set_cup(void)
1484{
39d5492a 1485 TCGv_i32 tmp;
e677137d
PB
1486 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1487 tcg_gen_ori_i32(tmp, tmp, 1);
1488 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1489}
1490
1491static void gen_op_iwmmxt_setpsr_nz(void)
1492{
39d5492a 1493 TCGv_i32 tmp = tcg_temp_new_i32();
e677137d
PB
1494 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1495 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1496}
1497
1498static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1499{
1500 iwmmxt_load_reg(cpu_V1, rn);
86831435 1501 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1502 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1503}
1504
39d5492a
PM
1505static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn,
1506 TCGv_i32 dest)
18c9b560
AZ
1507{
1508 int rd;
1509 uint32_t offset;
39d5492a 1510 TCGv_i32 tmp;
18c9b560
AZ
1511
1512 rd = (insn >> 16) & 0xf;
da6b5335 1513 tmp = load_reg(s, rd);
18c9b560
AZ
1514
1515 offset = (insn & 0xff) << ((insn >> 7) & 2);
1516 if (insn & (1 << 24)) {
1517 /* Pre indexed */
1518 if (insn & (1 << 23))
da6b5335 1519 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1520 else
da6b5335
FN
1521 tcg_gen_addi_i32(tmp, tmp, -offset);
1522 tcg_gen_mov_i32(dest, tmp);
18c9b560 1523 if (insn & (1 << 21))
da6b5335
FN
1524 store_reg(s, rd, tmp);
1525 else
7d1b0095 1526 tcg_temp_free_i32(tmp);
18c9b560
AZ
1527 } else if (insn & (1 << 21)) {
1528 /* Post indexed */
da6b5335 1529 tcg_gen_mov_i32(dest, tmp);
18c9b560 1530 if (insn & (1 << 23))
da6b5335 1531 tcg_gen_addi_i32(tmp, tmp, offset);
18c9b560 1532 else
da6b5335
FN
1533 tcg_gen_addi_i32(tmp, tmp, -offset);
1534 store_reg(s, rd, tmp);
18c9b560
AZ
1535 } else if (!(insn & (1 << 23)))
1536 return 1;
1537 return 0;
1538}
1539
39d5492a 1540static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask, TCGv_i32 dest)
18c9b560
AZ
1541{
1542 int rd = (insn >> 0) & 0xf;
39d5492a 1543 TCGv_i32 tmp;
18c9b560 1544
da6b5335
FN
1545 if (insn & (1 << 8)) {
1546 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3) {
18c9b560 1547 return 1;
da6b5335
FN
1548 } else {
1549 tmp = iwmmxt_load_creg(rd);
1550 }
1551 } else {
7d1b0095 1552 tmp = tcg_temp_new_i32();
da6b5335
FN
1553 iwmmxt_load_reg(cpu_V0, rd);
1554 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
1555 }
1556 tcg_gen_andi_i32(tmp, tmp, mask);
1557 tcg_gen_mov_i32(dest, tmp);
7d1b0095 1558 tcg_temp_free_i32(tmp);
18c9b560
AZ
1559 return 0;
1560}
1561
a1c7273b 1562/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
18c9b560 1563 (ie. an undefined instruction). */
7dcc1f89 1564static int disas_iwmmxt_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
1565{
1566 int rd, wrd;
1567 int rdhi, rdlo, rd0, rd1, i;
39d5492a
PM
1568 TCGv_i32 addr;
1569 TCGv_i32 tmp, tmp2, tmp3;
18c9b560
AZ
1570
1571 if ((insn & 0x0e000e00) == 0x0c000000) {
1572 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1573 wrd = insn & 0xf;
1574 rdlo = (insn >> 12) & 0xf;
1575 rdhi = (insn >> 16) & 0xf;
1576 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
da6b5335
FN
1577 iwmmxt_load_reg(cpu_V0, wrd);
1578 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
1579 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1580 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
18c9b560 1581 } else { /* TMCRR */
da6b5335
FN
1582 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
1583 iwmmxt_store_reg(cpu_V0, wrd);
18c9b560
AZ
1584 gen_op_iwmmxt_set_mup();
1585 }
1586 return 0;
1587 }
1588
1589 wrd = (insn >> 12) & 0xf;
7d1b0095 1590 addr = tcg_temp_new_i32();
da6b5335 1591 if (gen_iwmmxt_address(s, insn, addr)) {
7d1b0095 1592 tcg_temp_free_i32(addr);
18c9b560 1593 return 1;
da6b5335 1594 }
18c9b560
AZ
1595 if (insn & ARM_CP_RW_BIT) {
1596 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
7d1b0095 1597 tmp = tcg_temp_new_i32();
6ce2faf4 1598 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
da6b5335 1599 iwmmxt_store_creg(wrd, tmp);
18c9b560 1600 } else {
e677137d
PB
1601 i = 1;
1602 if (insn & (1 << 8)) {
1603 if (insn & (1 << 22)) { /* WLDRD */
6ce2faf4 1604 gen_aa32_ld64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1605 i = 0;
1606 } else { /* WLDRW wRd */
29531141 1607 tmp = tcg_temp_new_i32();
6ce2faf4 1608 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
e677137d
PB
1609 }
1610 } else {
29531141 1611 tmp = tcg_temp_new_i32();
e677137d 1612 if (insn & (1 << 22)) { /* WLDRH */
6ce2faf4 1613 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
e677137d 1614 } else { /* WLDRB */
6ce2faf4 1615 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
e677137d
PB
1616 }
1617 }
1618 if (i) {
1619 tcg_gen_extu_i32_i64(cpu_M0, tmp);
7d1b0095 1620 tcg_temp_free_i32(tmp);
e677137d 1621 }
18c9b560
AZ
1622 gen_op_iwmmxt_movq_wRn_M0(wrd);
1623 }
1624 } else {
1625 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
da6b5335 1626 tmp = iwmmxt_load_creg(wrd);
6ce2faf4 1627 gen_aa32_st32(tmp, addr, get_mem_index(s));
18c9b560
AZ
1628 } else {
1629 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1630 tmp = tcg_temp_new_i32();
e677137d
PB
1631 if (insn & (1 << 8)) {
1632 if (insn & (1 << 22)) { /* WSTRD */
6ce2faf4 1633 gen_aa32_st64(cpu_M0, addr, get_mem_index(s));
e677137d
PB
1634 } else { /* WSTRW wRd */
1635 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1636 gen_aa32_st32(tmp, addr, get_mem_index(s));
e677137d
PB
1637 }
1638 } else {
1639 if (insn & (1 << 22)) { /* WSTRH */
1640 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1641 gen_aa32_st16(tmp, addr, get_mem_index(s));
e677137d
PB
1642 } else { /* WSTRB */
1643 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
6ce2faf4 1644 gen_aa32_st8(tmp, addr, get_mem_index(s));
e677137d
PB
1645 }
1646 }
18c9b560 1647 }
29531141 1648 tcg_temp_free_i32(tmp);
18c9b560 1649 }
7d1b0095 1650 tcg_temp_free_i32(addr);
18c9b560
AZ
1651 return 0;
1652 }
1653
1654 if ((insn & 0x0f000000) != 0x0e000000)
1655 return 1;
1656
1657 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1658 case 0x000: /* WOR */
1659 wrd = (insn >> 12) & 0xf;
1660 rd0 = (insn >> 0) & 0xf;
1661 rd1 = (insn >> 16) & 0xf;
1662 gen_op_iwmmxt_movq_M0_wRn(rd0);
1663 gen_op_iwmmxt_orq_M0_wRn(rd1);
1664 gen_op_iwmmxt_setpsr_nz();
1665 gen_op_iwmmxt_movq_wRn_M0(wrd);
1666 gen_op_iwmmxt_set_mup();
1667 gen_op_iwmmxt_set_cup();
1668 break;
1669 case 0x011: /* TMCR */
1670 if (insn & 0xf)
1671 return 1;
1672 rd = (insn >> 12) & 0xf;
1673 wrd = (insn >> 16) & 0xf;
1674 switch (wrd) {
1675 case ARM_IWMMXT_wCID:
1676 case ARM_IWMMXT_wCASF:
1677 break;
1678 case ARM_IWMMXT_wCon:
1679 gen_op_iwmmxt_set_cup();
1680 /* Fall through. */
1681 case ARM_IWMMXT_wCSSF:
da6b5335
FN
1682 tmp = iwmmxt_load_creg(wrd);
1683 tmp2 = load_reg(s, rd);
f669df27 1684 tcg_gen_andc_i32(tmp, tmp, tmp2);
7d1b0095 1685 tcg_temp_free_i32(tmp2);
da6b5335 1686 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1687 break;
1688 case ARM_IWMMXT_wCGR0:
1689 case ARM_IWMMXT_wCGR1:
1690 case ARM_IWMMXT_wCGR2:
1691 case ARM_IWMMXT_wCGR3:
1692 gen_op_iwmmxt_set_cup();
da6b5335
FN
1693 tmp = load_reg(s, rd);
1694 iwmmxt_store_creg(wrd, tmp);
18c9b560
AZ
1695 break;
1696 default:
1697 return 1;
1698 }
1699 break;
1700 case 0x100: /* WXOR */
1701 wrd = (insn >> 12) & 0xf;
1702 rd0 = (insn >> 0) & 0xf;
1703 rd1 = (insn >> 16) & 0xf;
1704 gen_op_iwmmxt_movq_M0_wRn(rd0);
1705 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1706 gen_op_iwmmxt_setpsr_nz();
1707 gen_op_iwmmxt_movq_wRn_M0(wrd);
1708 gen_op_iwmmxt_set_mup();
1709 gen_op_iwmmxt_set_cup();
1710 break;
1711 case 0x111: /* TMRC */
1712 if (insn & 0xf)
1713 return 1;
1714 rd = (insn >> 12) & 0xf;
1715 wrd = (insn >> 16) & 0xf;
da6b5335
FN
1716 tmp = iwmmxt_load_creg(wrd);
1717 store_reg(s, rd, tmp);
18c9b560
AZ
1718 break;
1719 case 0x300: /* WANDN */
1720 wrd = (insn >> 12) & 0xf;
1721 rd0 = (insn >> 0) & 0xf;
1722 rd1 = (insn >> 16) & 0xf;
1723 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1724 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1725 gen_op_iwmmxt_andq_M0_wRn(rd1);
1726 gen_op_iwmmxt_setpsr_nz();
1727 gen_op_iwmmxt_movq_wRn_M0(wrd);
1728 gen_op_iwmmxt_set_mup();
1729 gen_op_iwmmxt_set_cup();
1730 break;
1731 case 0x200: /* WAND */
1732 wrd = (insn >> 12) & 0xf;
1733 rd0 = (insn >> 0) & 0xf;
1734 rd1 = (insn >> 16) & 0xf;
1735 gen_op_iwmmxt_movq_M0_wRn(rd0);
1736 gen_op_iwmmxt_andq_M0_wRn(rd1);
1737 gen_op_iwmmxt_setpsr_nz();
1738 gen_op_iwmmxt_movq_wRn_M0(wrd);
1739 gen_op_iwmmxt_set_mup();
1740 gen_op_iwmmxt_set_cup();
1741 break;
1742 case 0x810: case 0xa10: /* WMADD */
1743 wrd = (insn >> 12) & 0xf;
1744 rd0 = (insn >> 0) & 0xf;
1745 rd1 = (insn >> 16) & 0xf;
1746 gen_op_iwmmxt_movq_M0_wRn(rd0);
1747 if (insn & (1 << 21))
1748 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1749 else
1750 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1751 gen_op_iwmmxt_movq_wRn_M0(wrd);
1752 gen_op_iwmmxt_set_mup();
1753 break;
1754 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1755 wrd = (insn >> 12) & 0xf;
1756 rd0 = (insn >> 16) & 0xf;
1757 rd1 = (insn >> 0) & 0xf;
1758 gen_op_iwmmxt_movq_M0_wRn(rd0);
1759 switch ((insn >> 22) & 3) {
1760 case 0:
1761 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1762 break;
1763 case 1:
1764 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1765 break;
1766 case 2:
1767 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1768 break;
1769 case 3:
1770 return 1;
1771 }
1772 gen_op_iwmmxt_movq_wRn_M0(wrd);
1773 gen_op_iwmmxt_set_mup();
1774 gen_op_iwmmxt_set_cup();
1775 break;
1776 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1777 wrd = (insn >> 12) & 0xf;
1778 rd0 = (insn >> 16) & 0xf;
1779 rd1 = (insn >> 0) & 0xf;
1780 gen_op_iwmmxt_movq_M0_wRn(rd0);
1781 switch ((insn >> 22) & 3) {
1782 case 0:
1783 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1784 break;
1785 case 1:
1786 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1787 break;
1788 case 2:
1789 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1790 break;
1791 case 3:
1792 return 1;
1793 }
1794 gen_op_iwmmxt_movq_wRn_M0(wrd);
1795 gen_op_iwmmxt_set_mup();
1796 gen_op_iwmmxt_set_cup();
1797 break;
1798 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1799 wrd = (insn >> 12) & 0xf;
1800 rd0 = (insn >> 16) & 0xf;
1801 rd1 = (insn >> 0) & 0xf;
1802 gen_op_iwmmxt_movq_M0_wRn(rd0);
1803 if (insn & (1 << 22))
1804 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1805 else
1806 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1807 if (!(insn & (1 << 20)))
1808 gen_op_iwmmxt_addl_M0_wRn(wrd);
1809 gen_op_iwmmxt_movq_wRn_M0(wrd);
1810 gen_op_iwmmxt_set_mup();
1811 break;
1812 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1813 wrd = (insn >> 12) & 0xf;
1814 rd0 = (insn >> 16) & 0xf;
1815 rd1 = (insn >> 0) & 0xf;
1816 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1817 if (insn & (1 << 21)) {
1818 if (insn & (1 << 20))
1819 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1820 else
1821 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1822 } else {
1823 if (insn & (1 << 20))
1824 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1825 else
1826 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1827 }
18c9b560
AZ
1828 gen_op_iwmmxt_movq_wRn_M0(wrd);
1829 gen_op_iwmmxt_set_mup();
1830 break;
1831 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1832 wrd = (insn >> 12) & 0xf;
1833 rd0 = (insn >> 16) & 0xf;
1834 rd1 = (insn >> 0) & 0xf;
1835 gen_op_iwmmxt_movq_M0_wRn(rd0);
1836 if (insn & (1 << 21))
1837 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1838 else
1839 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1840 if (!(insn & (1 << 20))) {
e677137d
PB
1841 iwmmxt_load_reg(cpu_V1, wrd);
1842 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1843 }
1844 gen_op_iwmmxt_movq_wRn_M0(wrd);
1845 gen_op_iwmmxt_set_mup();
1846 break;
1847 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1848 wrd = (insn >> 12) & 0xf;
1849 rd0 = (insn >> 16) & 0xf;
1850 rd1 = (insn >> 0) & 0xf;
1851 gen_op_iwmmxt_movq_M0_wRn(rd0);
1852 switch ((insn >> 22) & 3) {
1853 case 0:
1854 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1855 break;
1856 case 1:
1857 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1858 break;
1859 case 2:
1860 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1861 break;
1862 case 3:
1863 return 1;
1864 }
1865 gen_op_iwmmxt_movq_wRn_M0(wrd);
1866 gen_op_iwmmxt_set_mup();
1867 gen_op_iwmmxt_set_cup();
1868 break;
1869 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1870 wrd = (insn >> 12) & 0xf;
1871 rd0 = (insn >> 16) & 0xf;
1872 rd1 = (insn >> 0) & 0xf;
1873 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1874 if (insn & (1 << 22)) {
1875 if (insn & (1 << 20))
1876 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1877 else
1878 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1879 } else {
1880 if (insn & (1 << 20))
1881 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1882 else
1883 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1884 }
18c9b560
AZ
1885 gen_op_iwmmxt_movq_wRn_M0(wrd);
1886 gen_op_iwmmxt_set_mup();
1887 gen_op_iwmmxt_set_cup();
1888 break;
1889 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1890 wrd = (insn >> 12) & 0xf;
1891 rd0 = (insn >> 16) & 0xf;
1892 rd1 = (insn >> 0) & 0xf;
1893 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
1894 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1895 tcg_gen_andi_i32(tmp, tmp, 7);
1896 iwmmxt_load_reg(cpu_V1, rd1);
1897 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
7d1b0095 1898 tcg_temp_free_i32(tmp);
18c9b560
AZ
1899 gen_op_iwmmxt_movq_wRn_M0(wrd);
1900 gen_op_iwmmxt_set_mup();
1901 break;
1902 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
da6b5335
FN
1903 if (((insn >> 6) & 3) == 3)
1904 return 1;
18c9b560
AZ
1905 rd = (insn >> 12) & 0xf;
1906 wrd = (insn >> 16) & 0xf;
da6b5335 1907 tmp = load_reg(s, rd);
18c9b560
AZ
1908 gen_op_iwmmxt_movq_M0_wRn(wrd);
1909 switch ((insn >> 6) & 3) {
1910 case 0:
da6b5335
FN
1911 tmp2 = tcg_const_i32(0xff);
1912 tmp3 = tcg_const_i32((insn & 7) << 3);
18c9b560
AZ
1913 break;
1914 case 1:
da6b5335
FN
1915 tmp2 = tcg_const_i32(0xffff);
1916 tmp3 = tcg_const_i32((insn & 3) << 4);
18c9b560
AZ
1917 break;
1918 case 2:
da6b5335
FN
1919 tmp2 = tcg_const_i32(0xffffffff);
1920 tmp3 = tcg_const_i32((insn & 1) << 5);
18c9b560 1921 break;
da6b5335 1922 default:
39d5492a
PM
1923 TCGV_UNUSED_I32(tmp2);
1924 TCGV_UNUSED_I32(tmp3);
18c9b560 1925 }
da6b5335 1926 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, tmp, tmp2, tmp3);
39d5492a
PM
1927 tcg_temp_free_i32(tmp3);
1928 tcg_temp_free_i32(tmp2);
7d1b0095 1929 tcg_temp_free_i32(tmp);
18c9b560
AZ
1930 gen_op_iwmmxt_movq_wRn_M0(wrd);
1931 gen_op_iwmmxt_set_mup();
1932 break;
1933 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1934 rd = (insn >> 12) & 0xf;
1935 wrd = (insn >> 16) & 0xf;
da6b5335 1936 if (rd == 15 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
1937 return 1;
1938 gen_op_iwmmxt_movq_M0_wRn(wrd);
7d1b0095 1939 tmp = tcg_temp_new_i32();
18c9b560
AZ
1940 switch ((insn >> 22) & 3) {
1941 case 0:
da6b5335
FN
1942 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 7) << 3);
1943 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1944 if (insn & 8) {
1945 tcg_gen_ext8s_i32(tmp, tmp);
1946 } else {
1947 tcg_gen_andi_i32(tmp, tmp, 0xff);
18c9b560
AZ
1948 }
1949 break;
1950 case 1:
da6b5335
FN
1951 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 3) << 4);
1952 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1953 if (insn & 8) {
1954 tcg_gen_ext16s_i32(tmp, tmp);
1955 } else {
1956 tcg_gen_andi_i32(tmp, tmp, 0xffff);
18c9b560
AZ
1957 }
1958 break;
1959 case 2:
da6b5335
FN
1960 tcg_gen_shri_i64(cpu_M0, cpu_M0, (insn & 1) << 5);
1961 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
18c9b560 1962 break;
18c9b560 1963 }
da6b5335 1964 store_reg(s, rd, tmp);
18c9b560
AZ
1965 break;
1966 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
da6b5335 1967 if ((insn & 0x000ff008) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 1968 return 1;
da6b5335 1969 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
18c9b560
AZ
1970 switch ((insn >> 22) & 3) {
1971 case 0:
da6b5335 1972 tcg_gen_shri_i32(tmp, tmp, ((insn & 7) << 2) + 0);
18c9b560
AZ
1973 break;
1974 case 1:
da6b5335 1975 tcg_gen_shri_i32(tmp, tmp, ((insn & 3) << 3) + 4);
18c9b560
AZ
1976 break;
1977 case 2:
da6b5335 1978 tcg_gen_shri_i32(tmp, tmp, ((insn & 1) << 4) + 12);
18c9b560 1979 break;
18c9b560 1980 }
da6b5335
FN
1981 tcg_gen_shli_i32(tmp, tmp, 28);
1982 gen_set_nzcv(tmp);
7d1b0095 1983 tcg_temp_free_i32(tmp);
18c9b560
AZ
1984 break;
1985 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
da6b5335
FN
1986 if (((insn >> 6) & 3) == 3)
1987 return 1;
18c9b560
AZ
1988 rd = (insn >> 12) & 0xf;
1989 wrd = (insn >> 16) & 0xf;
da6b5335 1990 tmp = load_reg(s, rd);
18c9b560
AZ
1991 switch ((insn >> 6) & 3) {
1992 case 0:
da6b5335 1993 gen_helper_iwmmxt_bcstb(cpu_M0, tmp);
18c9b560
AZ
1994 break;
1995 case 1:
da6b5335 1996 gen_helper_iwmmxt_bcstw(cpu_M0, tmp);
18c9b560
AZ
1997 break;
1998 case 2:
da6b5335 1999 gen_helper_iwmmxt_bcstl(cpu_M0, tmp);
18c9b560 2000 break;
18c9b560 2001 }
7d1b0095 2002 tcg_temp_free_i32(tmp);
18c9b560
AZ
2003 gen_op_iwmmxt_movq_wRn_M0(wrd);
2004 gen_op_iwmmxt_set_mup();
2005 break;
2006 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
da6b5335 2007 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2008 return 1;
da6b5335 2009 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2010 tmp2 = tcg_temp_new_i32();
da6b5335 2011 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2012 switch ((insn >> 22) & 3) {
2013 case 0:
2014 for (i = 0; i < 7; i ++) {
da6b5335
FN
2015 tcg_gen_shli_i32(tmp2, tmp2, 4);
2016 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2017 }
2018 break;
2019 case 1:
2020 for (i = 0; i < 3; i ++) {
da6b5335
FN
2021 tcg_gen_shli_i32(tmp2, tmp2, 8);
2022 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560
AZ
2023 }
2024 break;
2025 case 2:
da6b5335
FN
2026 tcg_gen_shli_i32(tmp2, tmp2, 16);
2027 tcg_gen_and_i32(tmp, tmp, tmp2);
18c9b560 2028 break;
18c9b560 2029 }
da6b5335 2030 gen_set_nzcv(tmp);
7d1b0095
PM
2031 tcg_temp_free_i32(tmp2);
2032 tcg_temp_free_i32(tmp);
18c9b560
AZ
2033 break;
2034 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
2035 wrd = (insn >> 12) & 0xf;
2036 rd0 = (insn >> 16) & 0xf;
2037 gen_op_iwmmxt_movq_M0_wRn(rd0);
2038 switch ((insn >> 22) & 3) {
2039 case 0:
e677137d 2040 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
2041 break;
2042 case 1:
e677137d 2043 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
2044 break;
2045 case 2:
e677137d 2046 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
2047 break;
2048 case 3:
2049 return 1;
2050 }
2051 gen_op_iwmmxt_movq_wRn_M0(wrd);
2052 gen_op_iwmmxt_set_mup();
2053 break;
2054 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
da6b5335 2055 if ((insn & 0x000ff00f) != 0x0003f000 || ((insn >> 22) & 3) == 3)
18c9b560 2056 return 1;
da6b5335 2057 tmp = iwmmxt_load_creg(ARM_IWMMXT_wCASF);
7d1b0095 2058 tmp2 = tcg_temp_new_i32();
da6b5335 2059 tcg_gen_mov_i32(tmp2, tmp);
18c9b560
AZ
2060 switch ((insn >> 22) & 3) {
2061 case 0:
2062 for (i = 0; i < 7; i ++) {
da6b5335
FN
2063 tcg_gen_shli_i32(tmp2, tmp2, 4);
2064 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2065 }
2066 break;
2067 case 1:
2068 for (i = 0; i < 3; i ++) {
da6b5335
FN
2069 tcg_gen_shli_i32(tmp2, tmp2, 8);
2070 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560
AZ
2071 }
2072 break;
2073 case 2:
da6b5335
FN
2074 tcg_gen_shli_i32(tmp2, tmp2, 16);
2075 tcg_gen_or_i32(tmp, tmp, tmp2);
18c9b560 2076 break;
18c9b560 2077 }
da6b5335 2078 gen_set_nzcv(tmp);
7d1b0095
PM
2079 tcg_temp_free_i32(tmp2);
2080 tcg_temp_free_i32(tmp);
18c9b560
AZ
2081 break;
2082 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2083 rd = (insn >> 12) & 0xf;
2084 rd0 = (insn >> 16) & 0xf;
da6b5335 2085 if ((insn & 0xf) != 0 || ((insn >> 22) & 3) == 3)
18c9b560
AZ
2086 return 1;
2087 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2088 tmp = tcg_temp_new_i32();
18c9b560
AZ
2089 switch ((insn >> 22) & 3) {
2090 case 0:
da6b5335 2091 gen_helper_iwmmxt_msbb(tmp, cpu_M0);
18c9b560
AZ
2092 break;
2093 case 1:
da6b5335 2094 gen_helper_iwmmxt_msbw(tmp, cpu_M0);
18c9b560
AZ
2095 break;
2096 case 2:
da6b5335 2097 gen_helper_iwmmxt_msbl(tmp, cpu_M0);
18c9b560 2098 break;
18c9b560 2099 }
da6b5335 2100 store_reg(s, rd, tmp);
18c9b560
AZ
2101 break;
2102 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2103 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2104 wrd = (insn >> 12) & 0xf;
2105 rd0 = (insn >> 16) & 0xf;
2106 rd1 = (insn >> 0) & 0xf;
2107 gen_op_iwmmxt_movq_M0_wRn(rd0);
2108 switch ((insn >> 22) & 3) {
2109 case 0:
2110 if (insn & (1 << 21))
2111 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2112 else
2113 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2114 break;
2115 case 1:
2116 if (insn & (1 << 21))
2117 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2118 else
2119 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2120 break;
2121 case 2:
2122 if (insn & (1 << 21))
2123 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2124 else
2125 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2126 break;
2127 case 3:
2128 return 1;
2129 }
2130 gen_op_iwmmxt_movq_wRn_M0(wrd);
2131 gen_op_iwmmxt_set_mup();
2132 gen_op_iwmmxt_set_cup();
2133 break;
2134 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2135 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2136 wrd = (insn >> 12) & 0xf;
2137 rd0 = (insn >> 16) & 0xf;
2138 gen_op_iwmmxt_movq_M0_wRn(rd0);
2139 switch ((insn >> 22) & 3) {
2140 case 0:
2141 if (insn & (1 << 21))
2142 gen_op_iwmmxt_unpacklsb_M0();
2143 else
2144 gen_op_iwmmxt_unpacklub_M0();
2145 break;
2146 case 1:
2147 if (insn & (1 << 21))
2148 gen_op_iwmmxt_unpacklsw_M0();
2149 else
2150 gen_op_iwmmxt_unpackluw_M0();
2151 break;
2152 case 2:
2153 if (insn & (1 << 21))
2154 gen_op_iwmmxt_unpacklsl_M0();
2155 else
2156 gen_op_iwmmxt_unpacklul_M0();
2157 break;
2158 case 3:
2159 return 1;
2160 }
2161 gen_op_iwmmxt_movq_wRn_M0(wrd);
2162 gen_op_iwmmxt_set_mup();
2163 gen_op_iwmmxt_set_cup();
2164 break;
2165 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2166 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2167 wrd = (insn >> 12) & 0xf;
2168 rd0 = (insn >> 16) & 0xf;
2169 gen_op_iwmmxt_movq_M0_wRn(rd0);
2170 switch ((insn >> 22) & 3) {
2171 case 0:
2172 if (insn & (1 << 21))
2173 gen_op_iwmmxt_unpackhsb_M0();
2174 else
2175 gen_op_iwmmxt_unpackhub_M0();
2176 break;
2177 case 1:
2178 if (insn & (1 << 21))
2179 gen_op_iwmmxt_unpackhsw_M0();
2180 else
2181 gen_op_iwmmxt_unpackhuw_M0();
2182 break;
2183 case 2:
2184 if (insn & (1 << 21))
2185 gen_op_iwmmxt_unpackhsl_M0();
2186 else
2187 gen_op_iwmmxt_unpackhul_M0();
2188 break;
2189 case 3:
2190 return 1;
2191 }
2192 gen_op_iwmmxt_movq_wRn_M0(wrd);
2193 gen_op_iwmmxt_set_mup();
2194 gen_op_iwmmxt_set_cup();
2195 break;
2196 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2197 case 0x214: case 0x614: case 0xa14: case 0xe14:
da6b5335
FN
2198 if (((insn >> 22) & 3) == 0)
2199 return 1;
18c9b560
AZ
2200 wrd = (insn >> 12) & 0xf;
2201 rd0 = (insn >> 16) & 0xf;
2202 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2203 tmp = tcg_temp_new_i32();
da6b5335 2204 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2205 tcg_temp_free_i32(tmp);
18c9b560 2206 return 1;
da6b5335 2207 }
18c9b560 2208 switch ((insn >> 22) & 3) {
18c9b560 2209 case 1:
477955bd 2210 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2211 break;
2212 case 2:
477955bd 2213 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2214 break;
2215 case 3:
477955bd 2216 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2217 break;
2218 }
7d1b0095 2219 tcg_temp_free_i32(tmp);
18c9b560
AZ
2220 gen_op_iwmmxt_movq_wRn_M0(wrd);
2221 gen_op_iwmmxt_set_mup();
2222 gen_op_iwmmxt_set_cup();
2223 break;
2224 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2225 case 0x014: case 0x414: case 0x814: case 0xc14:
da6b5335
FN
2226 if (((insn >> 22) & 3) == 0)
2227 return 1;
18c9b560
AZ
2228 wrd = (insn >> 12) & 0xf;
2229 rd0 = (insn >> 16) & 0xf;
2230 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2231 tmp = tcg_temp_new_i32();
da6b5335 2232 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2233 tcg_temp_free_i32(tmp);
18c9b560 2234 return 1;
da6b5335 2235 }
18c9b560 2236 switch ((insn >> 22) & 3) {
18c9b560 2237 case 1:
477955bd 2238 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2239 break;
2240 case 2:
477955bd 2241 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2242 break;
2243 case 3:
477955bd 2244 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2245 break;
2246 }
7d1b0095 2247 tcg_temp_free_i32(tmp);
18c9b560
AZ
2248 gen_op_iwmmxt_movq_wRn_M0(wrd);
2249 gen_op_iwmmxt_set_mup();
2250 gen_op_iwmmxt_set_cup();
2251 break;
2252 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2253 case 0x114: case 0x514: case 0x914: case 0xd14:
da6b5335
FN
2254 if (((insn >> 22) & 3) == 0)
2255 return 1;
18c9b560
AZ
2256 wrd = (insn >> 12) & 0xf;
2257 rd0 = (insn >> 16) & 0xf;
2258 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2259 tmp = tcg_temp_new_i32();
da6b5335 2260 if (gen_iwmmxt_shift(insn, 0xff, tmp)) {
7d1b0095 2261 tcg_temp_free_i32(tmp);
18c9b560 2262 return 1;
da6b5335 2263 }
18c9b560 2264 switch ((insn >> 22) & 3) {
18c9b560 2265 case 1:
477955bd 2266 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2267 break;
2268 case 2:
477955bd 2269 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2270 break;
2271 case 3:
477955bd 2272 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2273 break;
2274 }
7d1b0095 2275 tcg_temp_free_i32(tmp);
18c9b560
AZ
2276 gen_op_iwmmxt_movq_wRn_M0(wrd);
2277 gen_op_iwmmxt_set_mup();
2278 gen_op_iwmmxt_set_cup();
2279 break;
2280 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2281 case 0x314: case 0x714: case 0xb14: case 0xf14:
da6b5335
FN
2282 if (((insn >> 22) & 3) == 0)
2283 return 1;
18c9b560
AZ
2284 wrd = (insn >> 12) & 0xf;
2285 rd0 = (insn >> 16) & 0xf;
2286 gen_op_iwmmxt_movq_M0_wRn(rd0);
7d1b0095 2287 tmp = tcg_temp_new_i32();
18c9b560 2288 switch ((insn >> 22) & 3) {
18c9b560 2289 case 1:
da6b5335 2290 if (gen_iwmmxt_shift(insn, 0xf, tmp)) {
7d1b0095 2291 tcg_temp_free_i32(tmp);
18c9b560 2292 return 1;
da6b5335 2293 }
477955bd 2294 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2295 break;
2296 case 2:
da6b5335 2297 if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
7d1b0095 2298 tcg_temp_free_i32(tmp);
18c9b560 2299 return 1;
da6b5335 2300 }
477955bd 2301 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2302 break;
2303 case 3:
da6b5335 2304 if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
7d1b0095 2305 tcg_temp_free_i32(tmp);
18c9b560 2306 return 1;
da6b5335 2307 }
477955bd 2308 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
18c9b560
AZ
2309 break;
2310 }
7d1b0095 2311 tcg_temp_free_i32(tmp);
18c9b560
AZ
2312 gen_op_iwmmxt_movq_wRn_M0(wrd);
2313 gen_op_iwmmxt_set_mup();
2314 gen_op_iwmmxt_set_cup();
2315 break;
2316 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2317 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2318 wrd = (insn >> 12) & 0xf;
2319 rd0 = (insn >> 16) & 0xf;
2320 rd1 = (insn >> 0) & 0xf;
2321 gen_op_iwmmxt_movq_M0_wRn(rd0);
2322 switch ((insn >> 22) & 3) {
2323 case 0:
2324 if (insn & (1 << 21))
2325 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2326 else
2327 gen_op_iwmmxt_minub_M0_wRn(rd1);
2328 break;
2329 case 1:
2330 if (insn & (1 << 21))
2331 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2332 else
2333 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2334 break;
2335 case 2:
2336 if (insn & (1 << 21))
2337 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2338 else
2339 gen_op_iwmmxt_minul_M0_wRn(rd1);
2340 break;
2341 case 3:
2342 return 1;
2343 }
2344 gen_op_iwmmxt_movq_wRn_M0(wrd);
2345 gen_op_iwmmxt_set_mup();
2346 break;
2347 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2348 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2349 wrd = (insn >> 12) & 0xf;
2350 rd0 = (insn >> 16) & 0xf;
2351 rd1 = (insn >> 0) & 0xf;
2352 gen_op_iwmmxt_movq_M0_wRn(rd0);
2353 switch ((insn >> 22) & 3) {
2354 case 0:
2355 if (insn & (1 << 21))
2356 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2357 else
2358 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2359 break;
2360 case 1:
2361 if (insn & (1 << 21))
2362 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2363 else
2364 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2365 break;
2366 case 2:
2367 if (insn & (1 << 21))
2368 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2369 else
2370 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2371 break;
2372 case 3:
2373 return 1;
2374 }
2375 gen_op_iwmmxt_movq_wRn_M0(wrd);
2376 gen_op_iwmmxt_set_mup();
2377 break;
2378 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2379 case 0x402: case 0x502: case 0x602: case 0x702:
2380 wrd = (insn >> 12) & 0xf;
2381 rd0 = (insn >> 16) & 0xf;
2382 rd1 = (insn >> 0) & 0xf;
2383 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335
FN
2384 tmp = tcg_const_i32((insn >> 20) & 3);
2385 iwmmxt_load_reg(cpu_V1, rd1);
2386 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, tmp);
39d5492a 2387 tcg_temp_free_i32(tmp);
18c9b560
AZ
2388 gen_op_iwmmxt_movq_wRn_M0(wrd);
2389 gen_op_iwmmxt_set_mup();
2390 break;
2391 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2392 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2393 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2394 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2395 wrd = (insn >> 12) & 0xf;
2396 rd0 = (insn >> 16) & 0xf;
2397 rd1 = (insn >> 0) & 0xf;
2398 gen_op_iwmmxt_movq_M0_wRn(rd0);
2399 switch ((insn >> 20) & 0xf) {
2400 case 0x0:
2401 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2402 break;
2403 case 0x1:
2404 gen_op_iwmmxt_subub_M0_wRn(rd1);
2405 break;
2406 case 0x3:
2407 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2408 break;
2409 case 0x4:
2410 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2411 break;
2412 case 0x5:
2413 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2414 break;
2415 case 0x7:
2416 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2417 break;
2418 case 0x8:
2419 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2420 break;
2421 case 0x9:
2422 gen_op_iwmmxt_subul_M0_wRn(rd1);
2423 break;
2424 case 0xb:
2425 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2426 break;
2427 default:
2428 return 1;
2429 }
2430 gen_op_iwmmxt_movq_wRn_M0(wrd);
2431 gen_op_iwmmxt_set_mup();
2432 gen_op_iwmmxt_set_cup();
2433 break;
2434 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2435 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2436 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2437 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2438 wrd = (insn >> 12) & 0xf;
2439 rd0 = (insn >> 16) & 0xf;
2440 gen_op_iwmmxt_movq_M0_wRn(rd0);
da6b5335 2441 tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
477955bd 2442 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
39d5492a 2443 tcg_temp_free_i32(tmp);
18c9b560
AZ
2444 gen_op_iwmmxt_movq_wRn_M0(wrd);
2445 gen_op_iwmmxt_set_mup();
2446 gen_op_iwmmxt_set_cup();
2447 break;
2448 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2449 case 0x418: case 0x518: case 0x618: case 0x718:
2450 case 0x818: case 0x918: case 0xa18: case 0xb18:
2451 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2452 wrd = (insn >> 12) & 0xf;
2453 rd0 = (insn >> 16) & 0xf;
2454 rd1 = (insn >> 0) & 0xf;
2455 gen_op_iwmmxt_movq_M0_wRn(rd0);
2456 switch ((insn >> 20) & 0xf) {
2457 case 0x0:
2458 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2459 break;
2460 case 0x1:
2461 gen_op_iwmmxt_addub_M0_wRn(rd1);
2462 break;
2463 case 0x3:
2464 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2465 break;
2466 case 0x4:
2467 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2468 break;
2469 case 0x5:
2470 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2471 break;
2472 case 0x7:
2473 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2474 break;
2475 case 0x8:
2476 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2477 break;
2478 case 0x9:
2479 gen_op_iwmmxt_addul_M0_wRn(rd1);
2480 break;
2481 case 0xb:
2482 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2483 break;
2484 default:
2485 return 1;
2486 }
2487 gen_op_iwmmxt_movq_wRn_M0(wrd);
2488 gen_op_iwmmxt_set_mup();
2489 gen_op_iwmmxt_set_cup();
2490 break;
2491 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2492 case 0x408: case 0x508: case 0x608: case 0x708:
2493 case 0x808: case 0x908: case 0xa08: case 0xb08:
2494 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
da6b5335
FN
2495 if (!(insn & (1 << 20)) || ((insn >> 22) & 3) == 0)
2496 return 1;
18c9b560
AZ
2497 wrd = (insn >> 12) & 0xf;
2498 rd0 = (insn >> 16) & 0xf;
2499 rd1 = (insn >> 0) & 0xf;
2500 gen_op_iwmmxt_movq_M0_wRn(rd0);
18c9b560 2501 switch ((insn >> 22) & 3) {
18c9b560
AZ
2502 case 1:
2503 if (insn & (1 << 21))
2504 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2505 else
2506 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2507 break;
2508 case 2:
2509 if (insn & (1 << 21))
2510 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2511 else
2512 gen_op_iwmmxt_packul_M0_wRn(rd1);
2513 break;
2514 case 3:
2515 if (insn & (1 << 21))
2516 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2517 else
2518 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2519 break;
2520 }
2521 gen_op_iwmmxt_movq_wRn_M0(wrd);
2522 gen_op_iwmmxt_set_mup();
2523 gen_op_iwmmxt_set_cup();
2524 break;
2525 case 0x201: case 0x203: case 0x205: case 0x207:
2526 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2527 case 0x211: case 0x213: case 0x215: case 0x217:
2528 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2529 wrd = (insn >> 5) & 0xf;
2530 rd0 = (insn >> 12) & 0xf;
2531 rd1 = (insn >> 0) & 0xf;
2532 if (rd0 == 0xf || rd1 == 0xf)
2533 return 1;
2534 gen_op_iwmmxt_movq_M0_wRn(wrd);
da6b5335
FN
2535 tmp = load_reg(s, rd0);
2536 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2537 switch ((insn >> 16) & 0xf) {
2538 case 0x0: /* TMIA */
da6b5335 2539 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2540 break;
2541 case 0x8: /* TMIAPH */
da6b5335 2542 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2543 break;
2544 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
18c9b560 2545 if (insn & (1 << 16))
da6b5335 2546 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2547 if (insn & (1 << 17))
da6b5335
FN
2548 tcg_gen_shri_i32(tmp2, tmp2, 16);
2549 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2550 break;
2551 default:
7d1b0095
PM
2552 tcg_temp_free_i32(tmp2);
2553 tcg_temp_free_i32(tmp);
18c9b560
AZ
2554 return 1;
2555 }
7d1b0095
PM
2556 tcg_temp_free_i32(tmp2);
2557 tcg_temp_free_i32(tmp);
18c9b560
AZ
2558 gen_op_iwmmxt_movq_wRn_M0(wrd);
2559 gen_op_iwmmxt_set_mup();
2560 break;
2561 default:
2562 return 1;
2563 }
2564
2565 return 0;
2566}
2567
a1c7273b 2568/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
18c9b560 2569 (ie. an undefined instruction). */
7dcc1f89 2570static int disas_dsp_insn(DisasContext *s, uint32_t insn)
18c9b560
AZ
2571{
2572 int acc, rd0, rd1, rdhi, rdlo;
39d5492a 2573 TCGv_i32 tmp, tmp2;
18c9b560
AZ
2574
2575 if ((insn & 0x0ff00f10) == 0x0e200010) {
2576 /* Multiply with Internal Accumulate Format */
2577 rd0 = (insn >> 12) & 0xf;
2578 rd1 = insn & 0xf;
2579 acc = (insn >> 5) & 7;
2580
2581 if (acc != 0)
2582 return 1;
2583
3a554c0f
FN
2584 tmp = load_reg(s, rd0);
2585 tmp2 = load_reg(s, rd1);
18c9b560
AZ
2586 switch ((insn >> 16) & 0xf) {
2587 case 0x0: /* MIA */
3a554c0f 2588 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2589 break;
2590 case 0x8: /* MIAPH */
3a554c0f 2591 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2592 break;
2593 case 0xc: /* MIABB */
2594 case 0xd: /* MIABT */
2595 case 0xe: /* MIATB */
2596 case 0xf: /* MIATT */
18c9b560 2597 if (insn & (1 << 16))
3a554c0f 2598 tcg_gen_shri_i32(tmp, tmp, 16);
18c9b560 2599 if (insn & (1 << 17))
3a554c0f
FN
2600 tcg_gen_shri_i32(tmp2, tmp2, 16);
2601 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, tmp, tmp2);
18c9b560
AZ
2602 break;
2603 default:
2604 return 1;
2605 }
7d1b0095
PM
2606 tcg_temp_free_i32(tmp2);
2607 tcg_temp_free_i32(tmp);
18c9b560
AZ
2608
2609 gen_op_iwmmxt_movq_wRn_M0(acc);
2610 return 0;
2611 }
2612
2613 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2614 /* Internal Accumulator Access Format */
2615 rdhi = (insn >> 16) & 0xf;
2616 rdlo = (insn >> 12) & 0xf;
2617 acc = insn & 7;
2618
2619 if (acc != 0)
2620 return 1;
2621
2622 if (insn & ARM_CP_RW_BIT) { /* MRA */
3a554c0f
FN
2623 iwmmxt_load_reg(cpu_V0, acc);
2624 tcg_gen_trunc_i64_i32(cpu_R[rdlo], cpu_V0);
2625 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
2626 tcg_gen_trunc_i64_i32(cpu_R[rdhi], cpu_V0);
2627 tcg_gen_andi_i32(cpu_R[rdhi], cpu_R[rdhi], (1 << (40 - 32)) - 1);
18c9b560 2628 } else { /* MAR */
3a554c0f
FN
2629 tcg_gen_concat_i32_i64(cpu_V0, cpu_R[rdlo], cpu_R[rdhi]);
2630 iwmmxt_store_reg(cpu_V0, acc);
18c9b560
AZ
2631 }
2632 return 0;
2633 }
2634
2635 return 1;
2636}
2637
/* Extract a single-precision VFP register number from an instruction:
 * four bits at 'bigbit' form the high part and one bit at 'smallbit'
 * the low bit (Sn registers are numbered with the extra bit at the
 * bottom).  VFP_REG_SHR handles a possibly-negative shift amount.
 */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision VFP register number.  With VFP3 (32 D
 * registers) the extra bit at 'smallbit' is the high bit of the register
 * number; pre-VFP3 (16 D registers) that bit must be zero or the insn
 * is UNDEF, hence the 'return 1' — so this macro may only be used inside
 * a disas_* function that returns nonzero for undefined instructions.
 */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_dc_feature(s, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
          | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers for the standard D (dest), N and M operand fields. */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2657
4373f3ce 2658/* Move between integer and VFP cores. */
39d5492a 2659static TCGv_i32 gen_vfp_mrs(void)
4373f3ce 2660{
39d5492a 2661 TCGv_i32 tmp = tcg_temp_new_i32();
4373f3ce
PB
2662 tcg_gen_mov_i32(tmp, cpu_F0s);
2663 return tmp;
2664}
2665
39d5492a 2666static void gen_vfp_msr(TCGv_i32 tmp)
4373f3ce
PB
2667{
2668 tcg_gen_mov_i32(cpu_F0s, tmp);
7d1b0095 2669 tcg_temp_free_i32(tmp);
4373f3ce
PB
2670}
2671
39d5492a 2672static void gen_neon_dup_u8(TCGv_i32 var, int shift)
ad69471c 2673{
39d5492a 2674 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2675 if (shift)
2676 tcg_gen_shri_i32(var, var, shift);
86831435 2677 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2678 tcg_gen_shli_i32(tmp, var, 8);
2679 tcg_gen_or_i32(var, var, tmp);
2680 tcg_gen_shli_i32(tmp, var, 16);
2681 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2682 tcg_temp_free_i32(tmp);
ad69471c
PB
2683}
2684
39d5492a 2685static void gen_neon_dup_low16(TCGv_i32 var)
ad69471c 2686{
39d5492a 2687 TCGv_i32 tmp = tcg_temp_new_i32();
86831435 2688 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2689 tcg_gen_shli_i32(tmp, var, 16);
2690 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2691 tcg_temp_free_i32(tmp);
ad69471c
PB
2692}
2693
39d5492a 2694static void gen_neon_dup_high16(TCGv_i32 var)
ad69471c 2695{
39d5492a 2696 TCGv_i32 tmp = tcg_temp_new_i32();
ad69471c
PB
2697 tcg_gen_andi_i32(var, var, 0xffff0000);
2698 tcg_gen_shri_i32(tmp, var, 16);
2699 tcg_gen_or_i32(var, var, tmp);
7d1b0095 2700 tcg_temp_free_i32(tmp);
ad69471c
PB
2701}
2702
39d5492a 2703static TCGv_i32 gen_load_and_replicate(DisasContext *s, TCGv_i32 addr, int size)
8e18cde3
PM
2704{
2705 /* Load a single Neon element and replicate into a 32 bit TCG reg */
58ab8e96 2706 TCGv_i32 tmp = tcg_temp_new_i32();
8e18cde3
PM
2707 switch (size) {
2708 case 0:
6ce2faf4 2709 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2710 gen_neon_dup_u8(tmp, 0);
2711 break;
2712 case 1:
6ce2faf4 2713 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2714 gen_neon_dup_low16(tmp);
2715 break;
2716 case 2:
6ce2faf4 2717 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
8e18cde3
PM
2718 break;
2719 default: /* Avoid compiler warnings. */
2720 abort();
2721 }
2722 return tmp;
2723}
2724
/* Handle the v8 VSEL instruction: conditionally select between the Vn
 * and Vm register values based on the condition encoded in insn[21:20],
 * writing the result to Vd.  'dp' selects double vs single precision.
 * Returns 0 (these encodings never UNDEF here).
 */
static int handle_vsel(uint32_t insn, uint32_t rd, uint32_t rn, uint32_t rm,
                       uint32_t dp)
{
    /* Only eq/vs/ge/gt condition encodings exist for VSEL. */
    uint32_t cc = extract32(insn, 20, 2);

    if (dp) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        /* Widen the flags to 64 bits so movcond_i64 can compare them:
         * ZF is zero-extended; NF and VF are sign-extended so the
         * LT/GE comparisons against zero still see the sign bit.
         */
        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            /* Two movconds: first select on Z, then overwrite with frm
             * again if N != V — the combination implements gt.
             */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        /* Single precision: the 32-bit flag registers can be compared
         * directly, so no widening temporaries are needed.
         */
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
        tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
        switch (cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return 0;
}
2833
40cfacdd
WN
2834static int handle_vminmaxnm(uint32_t insn, uint32_t rd, uint32_t rn,
2835 uint32_t rm, uint32_t dp)
2836{
2837 uint32_t vmin = extract32(insn, 6, 1);
2838 TCGv_ptr fpst = get_fpstatus_ptr(0);
2839
2840 if (dp) {
2841 TCGv_i64 frn, frm, dest;
2842
2843 frn = tcg_temp_new_i64();
2844 frm = tcg_temp_new_i64();
2845 dest = tcg_temp_new_i64();
2846
2847 tcg_gen_ld_f64(frn, cpu_env, vfp_reg_offset(dp, rn));
2848 tcg_gen_ld_f64(frm, cpu_env, vfp_reg_offset(dp, rm));
2849 if (vmin) {
f71a2ae5 2850 gen_helper_vfp_minnumd(dest, frn, frm, fpst);
40cfacdd 2851 } else {
f71a2ae5 2852 gen_helper_vfp_maxnumd(dest, frn, frm, fpst);
40cfacdd
WN
2853 }
2854 tcg_gen_st_f64(dest, cpu_env, vfp_reg_offset(dp, rd));
2855 tcg_temp_free_i64(frn);
2856 tcg_temp_free_i64(frm);
2857 tcg_temp_free_i64(dest);
2858 } else {
2859 TCGv_i32 frn, frm, dest;
2860
2861 frn = tcg_temp_new_i32();
2862 frm = tcg_temp_new_i32();
2863 dest = tcg_temp_new_i32();
2864
2865 tcg_gen_ld_f32(frn, cpu_env, vfp_reg_offset(dp, rn));
2866 tcg_gen_ld_f32(frm, cpu_env, vfp_reg_offset(dp, rm));
2867 if (vmin) {
f71a2ae5 2868 gen_helper_vfp_minnums(dest, frn, frm, fpst);
40cfacdd 2869 } else {
f71a2ae5 2870 gen_helper_vfp_maxnums(dest, frn, frm, fpst);
40cfacdd
WN
2871 }
2872 tcg_gen_st_f32(dest, cpu_env, vfp_reg_offset(dp, rd));
2873 tcg_temp_free_i32(frn);
2874 tcg_temp_free_i32(frm);
2875 tcg_temp_free_i32(dest);
2876 }
2877
2878 tcg_temp_free_ptr(fpst);
2879 return 0;
2880}
2881
7655f39b
WN
2882static int handle_vrint(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
2883 int rounding)
2884{
2885 TCGv_ptr fpst = get_fpstatus_ptr(0);
2886 TCGv_i32 tcg_rmode;
2887
2888 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
2889 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2890
2891 if (dp) {
2892 TCGv_i64 tcg_op;
2893 TCGv_i64 tcg_res;
2894 tcg_op = tcg_temp_new_i64();
2895 tcg_res = tcg_temp_new_i64();
2896 tcg_gen_ld_f64(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2897 gen_helper_rintd(tcg_res, tcg_op, fpst);
2898 tcg_gen_st_f64(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2899 tcg_temp_free_i64(tcg_op);
2900 tcg_temp_free_i64(tcg_res);
2901 } else {
2902 TCGv_i32 tcg_op;
2903 TCGv_i32 tcg_res;
2904 tcg_op = tcg_temp_new_i32();
2905 tcg_res = tcg_temp_new_i32();
2906 tcg_gen_ld_f32(tcg_op, cpu_env, vfp_reg_offset(dp, rm));
2907 gen_helper_rints(tcg_res, tcg_op, fpst);
2908 tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(dp, rd));
2909 tcg_temp_free_i32(tcg_op);
2910 tcg_temp_free_i32(tcg_res);
2911 }
2912
2913 gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
2914 tcg_temp_free_i32(tcg_rmode);
2915
2916 tcg_temp_free_ptr(fpst);
2917 return 0;
2918}
2919
/* Handle the v8 VCVTA/VCVTN/VCVTP/VCVTM instructions: convert the
 * float in Vm to a 32-bit integer in Sd using the explicitly encoded
 * rounding mode (not the FPSCR one).  insn bit 7 selects signed vs
 * unsigned conversion.  Returns 0.
 */
static int handle_vcvt(uint32_t insn, uint32_t rd, uint32_t rm, uint32_t dp,
                       int rounding)
{
    bool is_signed = extract32(insn, 7, 1);
    TCGv_ptr fpst = get_fpstatus_ptr(0);
    TCGv_i32 tcg_rmode, tcg_shift;

    /* No fixed-point shift for these encodings. */
    tcg_shift = tcg_const_i32(0);

    /* Swap in the requested rounding mode; the helper writes the old
     * mode back into tcg_rmode so it can be restored afterwards.
     */
    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);

    if (dp) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        /* Rd is encoded as a single precision register even when the source
         * is double precision.
         */
        rd = ((rd << 1) & 0x1e) | ((rd >> 4) & 0x1);
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        tcg_gen_ld_f64(tcg_double, cpu_env, vfp_reg_offset(1, rm));
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        /* The 64-bit helper result holds a 32-bit integer; narrow it. */
        tcg_gen_trunc_i64_i32(tcg_tmp, tcg_res);
        tcg_gen_st_f32(tcg_tmp, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        tcg_gen_ld_f32(tcg_single, cpu_env, vfp_reg_offset(0, rm));
        if (is_signed) {
            gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
        } else {
            gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
        }
        tcg_gen_st_f32(tcg_res, cpu_env, vfp_reg_offset(0, rd));
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    /* Restore the original rounding mode. */
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return 0;
}
7655f39b
WN
2977
/* Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 * Index is the 2-bit RM field from insn[17:16].
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,  /* RM == 0b00 */
    FPROUNDING_TIEEVEN,  /* RM == 0b01 */
    FPROUNDING_POSINF,   /* RM == 0b10 */
    FPROUNDING_NEGINF,   /* RM == 0b11 */
};
2988
7dcc1f89 2989static int disas_vfp_v8_insn(DisasContext *s, uint32_t insn)
04731fb5
WN
2990{
2991 uint32_t rd, rn, rm, dp = extract32(insn, 8, 1);
2992
d614a513 2993 if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
04731fb5
WN
2994 return 1;
2995 }
2996
2997 if (dp) {
2998 VFP_DREG_D(rd, insn);
2999 VFP_DREG_N(rn, insn);
3000 VFP_DREG_M(rm, insn);
3001 } else {
3002 rd = VFP_SREG_D(insn);
3003 rn = VFP_SREG_N(insn);
3004 rm = VFP_SREG_M(insn);
3005 }
3006
3007 if ((insn & 0x0f800e50) == 0x0e000a00) {
3008 return handle_vsel(insn, rd, rn, rm, dp);
40cfacdd
WN
3009 } else if ((insn & 0x0fb00e10) == 0x0e800a00) {
3010 return handle_vminmaxnm(insn, rd, rn, rm, dp);
7655f39b
WN
3011 } else if ((insn & 0x0fbc0ed0) == 0x0eb80a40) {
3012 /* VRINTA, VRINTN, VRINTP, VRINTM */
3013 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3014 return handle_vrint(insn, rd, rm, dp, rounding);
c9975a83
WN
3015 } else if ((insn & 0x0fbc0e50) == 0x0ebc0a40) {
3016 /* VCVTA, VCVTN, VCVTP, VCVTM */
3017 int rounding = fp_decode_rm[extract32(insn, 16, 2)];
3018 return handle_vcvt(insn, rd, rm, dp, rounding);
04731fb5
WN
3019 }
3020 return 1;
3021}
3022
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).
   Covers the three groups of the coprocessor 10/11 encoding space seen
   in the decode switch below:
    - single register transfers between ARM core and VFP registers,
      including the VFP system registers (FPSID, FPSCR, FPEXC, ...),
    - VFP data-processing instructions, including short-vector iteration
      driven by FPSCR LEN/STRIDE (veclen / delta_d / delta_m),
    - two-register transfers and load/store single/multiple.  */
static int disas_vfp_insn(DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
    }

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
            return 1;
        }
    }

    if (extract32(insn, 28, 4) == 0xf) {
        /* Encodings with T=1 (Thumb) or unconditional (ARM):
         * only used in v8 and above.
         */
        return disas_vfp_v8_insn(s, insn);
    }

    /* Coprocessor 11 encodings are the double-precision forms.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_dc_feature(s, ARM_FEATURE_NEON)) {
                    return 1;
                }

                /* Decode element size (0/1/2 = 8/16/32 bit) and the bit
                 * offset of the element within the 32-bit register half.
                 */
                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        for (n = 0; n <= pass * 2; n++) {
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_mov_i32(tmp2, tmp);
                            neon_store_reg(rn, n, tmp2);
                        }
                        neon_store_reg(rn, n, tmp);
                    } else {
                        /* VMOV */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 8);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            tcg_gen_deposit_i32(tmp, tmp2, tmp, offset, 16);
                            tcg_temp_free_i32(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* rd == 15: read just the NZCV flags (FMSTAT). */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || ((rn & 0x1c) == 0x18) ||
                                 ((rn & 0x1e) == 0x6))) {
                    /* Integer or single/half precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }
                if (op == 15 &&
                    (((rn & 0x1c) == 0x10) || ((rn & 0x14) == 0x14) ||
                     ((rn & 0x1e) == 0x4))) {
                    /* VCVT from int or half precision is always from S reg
                     * regardless of dp bit. VCVT with immediate frac_bits
                     * has same format as SREG_M.
                     */
                    rm = VFP_SREG_M(insn);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                /* NB that we implicitly rely on the encoding for the frac_bits
                 * in VCVT of fixed to float being the same as that of an SREG_M
                 */
                rm = VFP_SREG_M(insn);
            }

            veclen = s->vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (s->vec_stride >> 1) + 1;
                    else
                        delta_d = s->vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                case 28:
                case 29:
                case 30:
                case 31:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                case 4:
                case 5:
                case 6:
                case 7:
                    /* VCVTB, VCVTT: only present with the halfprec extension
                     * UNPREDICTABLE if bit 8 is set prior to ARMv8
                     * (we choose to UNDEF)
                     */
                    if ((dp && !arm_dc_feature(s, ARM_FEATURE_V8)) ||
                        !arm_dc_feature(s, ARM_FEATURE_VFP_FP16)) {
                        return 1;
                    }
                    if (!extract32(rn, 1, 1)) {
                        /* Half precision source.  */
                        gen_mov_F0_vreg(0, rm);
                        break;
                    }
                    /* Otherwise fall through */
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop over short-vector elements; a scalar op makes exactly
             * one pass (veclen == 0 breaks out after the writeback).
             */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* VMLA: fd + (fn * fm) */
                    /* Note that order of inputs to the add matters for NaNs */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* VMLS: fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* VNMLS: -fd + (fn * fm) */
                    /* Note that it isn't valid to replace (-A + B) with (B - A)
                     * or similar plausible looking simplifications
                     * because this will give wrong results for NaNs.
                     */
                    gen_vfp_F1_mul(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 3: /* VNMLA: -fd + -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_F1_neg(dp);
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_neg(dp);
                    gen_vfp_add(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 10: /* VFNMA : fd = muladd(-fd,  fn, fm) */
                case 11: /* VFNMS : fd = muladd(-fd, -fn, fm) */
                case 12: /* VFMA  : fd = muladd( fd,  fn, fm) */
                case 13: /* VFMS  : fd = muladd( fd, -fn, fm) */
                    /* These are fused multiply-add, and must be done as one
                     * floating point operation with no rounding between the
                     * multiplication and addition steps.
                     * NB that doing the negations here as separate steps is
                     * correct : an input NaN should come out with its sign bit
                     * flipped if it is a negated-input.
                     */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP4)) {
                        return 1;
                    }
                    if (dp) {
                        TCGv_ptr fpst;
                        TCGv_i64 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
                        }
                        frd = tcg_temp_new_i64();
                        tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            /* VFNMA, VFNMS */
                            gen_helper_vfp_negd(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
                                               cpu_F1d, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i64(frd);
                    } else {
                        TCGv_ptr fpst;
                        TCGv_i32 frd;
                        if (op & 1) {
                            /* VFNMS, VFMS */
                            gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
                        }
                        frd = tcg_temp_new_i32();
                        tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
                        if (op & 2) {
                            gen_helper_vfp_negs(frd, frd);
                        }
                        fpst = get_fpstatus_ptr(0);
                        gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
                                               cpu_F1s, frd, fpst);
                        tcg_temp_free_ptr(fpst);
                        tcg_temp_free_i32(frd);
                    }
                    break;
                case 14: /* fconst */
                    if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                        return 1;
                    }

                    /* n holds the sign bit (insn bit 19 shifted to bit 31);
                     * i is the 8-bit immediate before exponent expansion.
                     */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 4: /* vcvtb.f32.f16, vcvtb.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp, tmp);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 5: /* vcvtt.f32.f16, vcvtt.f64.f16 */
                        tmp = gen_vfp_mrs();
                        tcg_gen_shri_i32(tmp, tmp, 16);
                        if (dp) {
                            gen_helper_vfp_fcvt_f16_to_f64(cpu_F0d, tmp,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp,
                                                           cpu_env);
                        }
                        tcg_temp_free_i32(tmp);
                        break;
                    case 6: /* vcvtb.f16.f32, vcvtb.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge result into the low half of the dest S reg. */
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 7: /* vcvtt.f16.f32, vcvtt.f16.f64 */
                        tmp = tcg_temp_new_i32();
                        if (dp) {
                            gen_helper_vfp_fcvt_f64_to_f16(tmp, cpu_F0d,
                                                           cpu_env);
                        } else {
                            gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s,
                                                           cpu_env);
                        }
                        /* Merge result into the high half of the dest S reg. */
                        tcg_gen_shli_i32(tmp, tmp, 16);
                        gen_mov_F0_vreg(0, rd);
                        tmp2 = gen_vfp_mrs();
                        tcg_gen_ext16u_i32(tmp2, tmp2);
                        tcg_gen_or_i32(tmp, tmp, tmp2);
                        tcg_temp_free_i32(tmp2);
                        gen_vfp_msr(tmp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 12: /* vrintr */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 13: /* vrintz */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        TCGv_i32 tcg_rmode;
                        /* Temporarily force round-to-zero, then restore. */
                        tcg_rmode = tcg_const_i32(float_round_to_zero);
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        if (dp) {
                            gen_helper_rintd(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints(cpu_F0s, cpu_F0s, fpst);
                        }
                        gen_helper_set_rmode(tcg_rmode, tcg_rmode, cpu_env);
                        tcg_temp_free_i32(tcg_rmode);
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 14: /* vrintx */
                    {
                        TCGv_ptr fpst = get_fpstatus_ptr(0);
                        if (dp) {
                            gen_helper_rintd_exact(cpu_F0d, cpu_F0d, fpst);
                        } else {
                            gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpst);
                        }
                        tcg_temp_free_ptr(fpst);
                        break;
                    }
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp, 0);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp, 0);
                        break;
                    case 20: /* fshto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_shto(dp, 16 - rm, 0);
                        break;
                    case 21: /* fslto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_slto(dp, 32 - rm, 0);
                        break;
                    case 22: /* fuhto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_uhto(dp, 16 - rm, 0);
                        break;
                    case 23: /* fulto */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_ulto(dp, 32 - rm, 0);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp, 0);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp, 0);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp, 0);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp, 0);
                        break;
                    case 28: /* ftosh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosh(dp, 16 - rm, 0);
                        break;
                    case 29: /* ftosl */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_tosl(dp, 32 - rm, 0);
                        break;
                    case 30: /* ftouh */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_touh(dp, 16 - rm, 0);
                        break;
                    case 31: /* ftoul */
                        if (!arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                            return 1;
                        }
                        gen_vfp_toul(dp, 32 - rm, 0);
                        break;
                    default: /* undefined */
                        return 1;
                    }
                    break;
                default: /* undefined */
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11)) {
                    /* Comparison, do nothing.  */
                } else if (op == 15 && dp && ((rn & 0x1c) == 0x18 ||
                                              (rn & 0x1e) == 0x6)) {
                    /* VCVT double to int: always integer result.
                     * VCVT double to half precision is always a single
                     * precision result.
                     */
                    gen_mov_vreg_F0(0, rd);
                } else if (op == 15 && rn == 15) {
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                } else {
                    gen_mov_vreg_F0(dp, rd);
                }

                /* break out of the loop if we have finished */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if ((insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                tcg_gen_addi_i32(addr, addr, offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp, addr);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp, addr);
                }
                tcg_temp_free_i32(addr);
            } else {
                /* load/store multiple */
                int w = insn & (1 << 21);
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
                    /* P == U , W == 1  => UNDEF */
                    return 1;
                }
                if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
                    /* UNPREDICTABLE cases for bad immediates: we choose to
                     * UNDEF to avoid generating huge numbers of TCG ops
                     */
                    return 1;
                }
                if (rn == 15 && w) {
                    /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
                    return 1;
                }

                if (s->thumb && rn == 15) {
                    /* This is actually UNPREDICTABLE */
                    addr = tcg_temp_new_i32();
                    tcg_gen_movi_i32(addr, s->pc & ~2);
                } else {
                    addr = load_reg(s, rn);
                }
                if (insn & (1 << 24)) /* pre-decrement */
                    tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp, addr);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp, addr);
                    }
                    tcg_gen_addi_i32(addr, addr, offset);
                }
                if (w) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        tcg_gen_addi_i32(addr, addr, offset);
                    store_reg(s, rn, addr);
                } else {
                    tcg_temp_free_i32(addr);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
3942
0a2461fa 3943static inline void gen_goto_tb(DisasContext *s, int n, target_ulong dest)
c53be334 3944{
6e256c93
FB
3945 TranslationBlock *tb;
3946
3947 tb = s->tb;
3948 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3949 tcg_gen_goto_tb(n);
eaed129d 3950 gen_set_pc_im(s, dest);
8cfd0495 3951 tcg_gen_exit_tb((uintptr_t)tb + n);
6e256c93 3952 } else {
eaed129d 3953 gen_set_pc_im(s, dest);
57fec1fe 3954 tcg_gen_exit_tb(0);
6e256c93 3955 }
c53be334
FB
3956}
3957
8aaca4c0
FB
3958static inline void gen_jmp (DisasContext *s, uint32_t dest)
3959{
50225ad0 3960 if (unlikely(s->singlestep_enabled || s->ss_active)) {
8aaca4c0 3961 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3962 if (s->thumb)
d9ba4830
PB
3963 dest |= 1;
3964 gen_bx_im(s, dest);
8aaca4c0 3965 } else {
6e256c93 3966 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3967 s->is_jmp = DISAS_TB_JUMP;
3968 }
3969}
3970
39d5492a 3971static inline void gen_mulxy(TCGv_i32 t0, TCGv_i32 t1, int x, int y)
b5ff1b31 3972{
ee097184 3973 if (x)
d9ba4830 3974 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3975 else
d9ba4830 3976 gen_sxth(t0);
ee097184 3977 if (y)
d9ba4830 3978 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3979 else
d9ba4830
PB
3980 gen_sxth(t1);
3981 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3982}
3983
3984/* Return the mask of PSR bits set by a MSR instruction. */
7dcc1f89
PM
3985static uint32_t msr_mask(DisasContext *s, int flags, int spsr)
3986{
b5ff1b31
FB
3987 uint32_t mask;
3988
3989 mask = 0;
3990 if (flags & (1 << 0))
3991 mask |= 0xff;
3992 if (flags & (1 << 1))
3993 mask |= 0xff00;
3994 if (flags & (1 << 2))
3995 mask |= 0xff0000;
3996 if (flags & (1 << 3))
3997 mask |= 0xff000000;
9ee6e8bb 3998
2ae23e75 3999 /* Mask out undefined bits. */
9ee6e8bb 4000 mask &= ~CPSR_RESERVED;
d614a513 4001 if (!arm_dc_feature(s, ARM_FEATURE_V4T)) {
be5e7a76 4002 mask &= ~CPSR_T;
d614a513
PM
4003 }
4004 if (!arm_dc_feature(s, ARM_FEATURE_V5)) {
be5e7a76 4005 mask &= ~CPSR_Q; /* V5TE in reality*/
d614a513
PM
4006 }
4007 if (!arm_dc_feature(s, ARM_FEATURE_V6)) {
e160c51c 4008 mask &= ~(CPSR_E | CPSR_GE);
d614a513
PM
4009 }
4010 if (!arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
e160c51c 4011 mask &= ~CPSR_IT;
d614a513 4012 }
4051e12c
PM
4013 /* Mask out execution state and reserved bits. */
4014 if (!spsr) {
4015 mask &= ~(CPSR_EXEC | CPSR_RESERVED);
4016 }
b5ff1b31
FB
4017 /* Mask out privileged bits. */
4018 if (IS_USER(s))
9ee6e8bb 4019 mask &= CPSR_USER;
b5ff1b31
FB
4020 return mask;
4021}
4022
2fbac54b 4023/* Returns nonzero if access to the PSR is not permitted. Marks t0 as dead. */
39d5492a 4024static int gen_set_psr(DisasContext *s, uint32_t mask, int spsr, TCGv_i32 t0)
b5ff1b31 4025{
39d5492a 4026 TCGv_i32 tmp;
b5ff1b31
FB
4027 if (spsr) {
4028 /* ??? This is also undefined in system mode. */
4029 if (IS_USER(s))
4030 return 1;
d9ba4830
PB
4031
4032 tmp = load_cpu_field(spsr);
4033 tcg_gen_andi_i32(tmp, tmp, ~mask);
2fbac54b
FN
4034 tcg_gen_andi_i32(t0, t0, mask);
4035 tcg_gen_or_i32(tmp, tmp, t0);
d9ba4830 4036 store_cpu_field(tmp, spsr);
b5ff1b31 4037 } else {
2fbac54b 4038 gen_set_cpsr(t0, mask);
b5ff1b31 4039 }
7d1b0095 4040 tcg_temp_free_i32(t0);
b5ff1b31
FB
4041 gen_lookup_tb(s);
4042 return 0;
4043}
4044
2fbac54b
FN
4045/* Returns nonzero if access to the PSR is not permitted. */
4046static int gen_set_psr_im(DisasContext *s, uint32_t mask, int spsr, uint32_t val)
4047{
39d5492a 4048 TCGv_i32 tmp;
7d1b0095 4049 tmp = tcg_temp_new_i32();
2fbac54b
FN
4050 tcg_gen_movi_i32(tmp, val);
4051 return gen_set_psr(s, mask, spsr, tmp);
4052}
4053
e9bb4aa9 4054/* Generate an old-style exception return. Marks pc as dead. */
39d5492a 4055static void gen_exception_return(DisasContext *s, TCGv_i32 pc)
b5ff1b31 4056{
39d5492a 4057 TCGv_i32 tmp;
e9bb4aa9 4058 store_reg(s, 15, pc);
d9ba4830 4059 tmp = load_cpu_field(spsr);
4051e12c 4060 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 4061 tcg_temp_free_i32(tmp);
b5ff1b31
FB
4062 s->is_jmp = DISAS_UPDATE;
4063}
4064
/* Generate a v6 exception return.  Marks both values as dead.
 *
 * Writes the new CPSR from the cpsr temporary (restricted to the
 * exception-return mask) and then stores the new program counter to R15;
 * finally flags the translation loop that CPU state must be reloaded.
 */
static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
{
    gen_set_cpsr(cpsr, CPSR_ERET_MASK);
    tcg_temp_free_i32(cpsr);
    store_reg(s, 15, pc);
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 4073
9ee6e8bb
PB
4074static void gen_nop_hint(DisasContext *s, int val)
4075{
4076 switch (val) {
4077 case 3: /* wfi */
eaed129d 4078 gen_set_pc_im(s, s->pc);
9ee6e8bb
PB
4079 s->is_jmp = DISAS_WFI;
4080 break;
4081 case 2: /* wfe */
72c1d3af
PM
4082 gen_set_pc_im(s, s->pc);
4083 s->is_jmp = DISAS_WFE;
4084 break;
9ee6e8bb 4085 case 4: /* sev */
12b10571
MR
4086 case 5: /* sevl */
4087 /* TODO: Implement SEV, SEVL and WFE. May help SMP performance. */
9ee6e8bb
PB
4088 default: /* nop */
4089 break;
4090 }
4091}
99c475ab 4092
/* Common Neon operand triple: destination cpu_V0, sources cpu_V0 and cpu_V1. */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb 4094
39d5492a 4095static inline void gen_neon_add(int size, TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
4096{
4097 switch (size) {
dd8fbd78
FN
4098 case 0: gen_helper_neon_add_u8(t0, t0, t1); break;
4099 case 1: gen_helper_neon_add_u16(t0, t0, t1); break;
4100 case 2: tcg_gen_add_i32(t0, t0, t1); break;
62698be3 4101 default: abort();
9ee6e8bb 4102 }
9ee6e8bb
PB
4103}
4104
39d5492a 4105static inline void gen_neon_rsb(int size, TCGv_i32 t0, TCGv_i32 t1)
ad69471c
PB
4106{
4107 switch (size) {
dd8fbd78
FN
4108 case 0: gen_helper_neon_sub_u8(t0, t1, t0); break;
4109 case 1: gen_helper_neon_sub_u16(t0, t1, t0); break;
4110 case 2: tcg_gen_sub_i32(t0, t1, t0); break;
ad69471c
PB
4111 default: return;
4112 }
4113}
4114
/* 32-bit pairwise ops end up the same as the elementwise versions,
 * so alias the pairwise helper names onto the elementwise helpers.
 */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32
4120
ad69471c
PB
/* Call the signed/unsigned 8/16/32-bit variant of a helper that also
 * needs cpu_env, selected by the 'size' and 'u' variables in scope:
 * tmp = neon_<name>_<type>(env, tmp, tmp2).  A (size, u) combination
 * with no variant makes the enclosing function return 1 (UNDEF).
 */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, cpu_env, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, cpu_env, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
4143
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env:
 * tmp = neon_<name>_<type>(tmp, tmp2), variant chosen by (size, u).
 * Unhandled combinations make the enclosing function return 1 (UNDEF).
 */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(tmp, tmp, tmp2); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(tmp, tmp, tmp2); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(tmp, tmp, tmp2); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(tmp, tmp, tmp2); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(tmp, tmp, tmp2); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(tmp, tmp, tmp2); \
        break; \
    default: return 1; \
    }} while (0)
4166
/* Load 32 bits from Neon scratch slot 'scratch' in CPUARMState into a
 * fresh temp.  The caller is expected to free the returned temp.
 */
static TCGv_i32 neon_load_scratch(int scratch)
{
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_ld_i32(tmp, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    return tmp;
}
4173
/* Store 'var' into Neon scratch slot 'scratch' in CPUARMState.
 * Consumes (frees) var.
 */
static void neon_store_scratch(int scratch, TCGv_i32 var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, vfp.scratch[scratch]));
    tcg_temp_free_i32(var);
}
4179
/* Fetch a scalar operand for a Neon "by scalar" operation into a 32-bit
 * temp.  'reg' packs the D register number and the element index:
 * reg >> 4 picks which 32-bit half of the D register to load.  For
 * 16-bit scalars (size == 1), bit 3 of reg selects the halfword, which
 * is then duplicated across both halves of the result.  Callers are
 * expected to free the returned temp.
 */
static inline TCGv_i32 neon_get_scalar(int size, int reg)
{
    TCGv_i32 tmp;
    if (size == 1) {
        tmp = neon_load_reg(reg & 7, reg >> 4);
        if (reg & 8) {
            gen_neon_dup_high16(tmp);
        } else {
            gen_neon_dup_low16(tmp);
        }
    } else {
        tmp = neon_load_reg(reg & 15, reg >> 4);
    }
    return tmp;
}
4195
/* Emit a VUZP (de-interleave) as a helper call that rewrites the
 * register file in place.  Returns nonzero (caller UNDEFs) for the
 * non-quad 32-bit case, which is not handled here.
 */
static int gen_neon_unzip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take the D register numbers as TCG constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4234
/* Emit a VZIP (interleave) as a helper call that rewrites the register
 * file in place.  Mirrors gen_neon_unzip(); returns nonzero (caller
 * UNDEFs) for the non-quad 32-bit case.
 */
static int gen_neon_zip(int rd, int rm, int size, int q)
{
    TCGv_i32 tmp, tmp2;
    if (!q && size == 2) {
        return 1;
    }
    /* The helpers take the D register numbers as TCG constants. */
    tmp = tcg_const_i32(rd);
    tmp2 = tcg_const_i32(rm);
    if (q) {
        switch (size) {
        case 0:
            gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
            break;
        case 2:
            gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    } else {
        switch (size) {
        case 0:
            gen_helper_neon_zip8(cpu_env, tmp, tmp2);
            break;
        case 1:
            gen_helper_neon_zip16(cpu_env, tmp, tmp2);
            break;
        default:
            abort();
        }
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(tmp2);
    return 0;
}
4273
/* Byte-granularity transpose of a 32-bit lane pair (VTRN.8 step):
 * after this, t0 holds the even-indexed bytes of both inputs and t1
 * holds the odd-indexed bytes.  Both t0 and t1 are updated in place.
 */
static void gen_neon_trn_u8(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = (even bytes of t0 shifted up) | (even bytes of t1) */
    tcg_gen_shli_i32(rd, t0, 8);
    tcg_gen_andi_i32(rd, rd, 0xff00ff00);
    tcg_gen_andi_i32(tmp, t1, 0x00ff00ff);
    tcg_gen_or_i32(rd, rd, tmp);

    /* t1 = (odd bytes of t1 shifted down) | (odd bytes of t0);
     * t0 must still hold its original value here, so rd is only
     * copied into t0 afterwards.
     */
    tcg_gen_shri_i32(t1, t1, 8);
    tcg_gen_andi_i32(t1, t1, 0x00ff00ff);
    tcg_gen_andi_i32(tmp, t0, 0xff00ff00);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4295
/* Halfword-granularity transpose of a 32-bit lane pair (VTRN.16 step):
 * after this, t0 holds the low halfwords of both inputs and t1 holds
 * the high halfwords.  Both t0 and t1 are updated in place.
 */
static void gen_neon_trn_u16(TCGv_i32 t0, TCGv_i32 t1)
{
    TCGv_i32 rd, tmp;

    rd = tcg_temp_new_i32();
    tmp = tcg_temp_new_i32();

    /* rd = t0.low << 16 | t1.low */
    tcg_gen_shli_i32(rd, t0, 16);
    tcg_gen_andi_i32(tmp, t1, 0xffff);
    tcg_gen_or_i32(rd, rd, tmp);
    /* t1 = t0.high | t1.high >> 16 (t0 still original here) */
    tcg_gen_shri_i32(t1, t1, 16);
    tcg_gen_andi_i32(tmp, t0, 0xffff0000);
    tcg_gen_or_i32(t1, t1, tmp);
    tcg_gen_mov_i32(t0, rd);

    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(rd);
}
4314
4315
9ee6e8bb
PB
/* Properties of the NEON "load/store multiple structures" forms, indexed
 * by the op field (insn bits [11:8]): number of D registers transferred,
 * memory interleave factor, and D-register spacing.  The table is never
 * written, so declare it const.
 */
static const struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},  /* op 0 */
    {4, 4, 2},  /* op 1 */
    {4, 1, 1},  /* op 2 */
    {4, 2, 1},  /* op 3 */
    {3, 3, 1},  /* op 4 */
    {3, 3, 2},  /* op 5 */
    {3, 1, 1},  /* op 6 */
    {1, 1, 1},  /* op 7 */
    {2, 2, 1},  /* op 8 */
    {2, 2, 2},  /* op 9 */
    {2, 1, 1}   /* op 10 */
};
4333
/* Translate a NEON load/store element instruction.  Return nonzero if the
 * instruction is invalid.  Covers all three encoding groups: multiple
 * structures, single element to all lanes, and single element to one lane.
 */
static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int spacing;
    int stride;     /* also reused at the end as the writeback amount */
    int size;
    int reg;
    int pass;
    int load;       /* nonzero for load, zero for store */
    int shift;
    int n;
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    TCGv_i64 tmp64;

    /* FIXME: this access check should not take precedence over UNDEF
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if (!s->cpacr_fpen) {
        gen_exception_insn(s, 4, EXCP_UDEF,
                           syn_fp_access_trap(1, 0xe, s->thumb));
        return 0;
    }

    if (!s->vfp_enabled)
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;    /* base address register */
    rm = insn & 0xf;            /* writeback mode / index register */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements ("multiple structures" form).  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10)
            return 1;
        /* Catch UNDEF cases for bad values of align field */
        switch (op & 0xc) {
        case 4:
            if (((insn >> 5) & 1) == 1) {
                return 1;
            }
            break;
        case 8:
            if (((insn >> 4) & 3) == 3) {
                return 1;
            }
            break;
        default:
            break;
        }
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        spacing = neon_ls_element_type[op].spacing;
        if (size == 3 && (interleave | spacing) != 1)
            return 1;
        addr = tcg_temp_new_i32();
        load_reg_var(s, addr, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved accesses each register restarts from the
             * base address at a register-dependent offset.
             */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, (1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                load_reg_var(s, addr, rn);
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            if (size == 3) {
                /* 64-bit elements: one access per D register. */
                tmp64 = tcg_temp_new_i64();
                if (load) {
                    gen_aa32_ld64(tmp64, addr, get_mem_index(s));
                    neon_store_reg64(tmp64, rd);
                } else {
                    neon_load_reg64(tmp64, rd);
                    gen_aa32_st64(tmp64, addr, get_mem_index(s));
                }
                tcg_temp_free_i64(tmp64);
                tcg_gen_addi_i32(addr, addr, stride);
            } else {
                /* Smaller elements: two 32-bit passes per D register. */
                for (pass = 0; pass < 2; pass++) {
                    if (size == 2) {
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            gen_aa32_st32(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                        }
                        tcg_gen_addi_i32(addr, addr, stride);
                    } else if (size == 1) {
                        /* Two 16-bit accesses combined into one 32 bits. */
                        if (load) {
                            tmp = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tmp2 = tcg_temp_new_i32();
                            gen_aa32_ld16u(tmp2, addr, get_mem_index(s));
                            tcg_gen_addi_i32(addr, addr, stride);
                            tcg_gen_shli_i32(tmp2, tmp2, 16);
                            tcg_gen_or_i32(tmp, tmp, tmp2);
                            tcg_temp_free_i32(tmp2);
                            neon_store_reg(rd, pass, tmp);
                        } else {
                            tmp = neon_load_reg(rd, pass);
                            tmp2 = tcg_temp_new_i32();
                            tcg_gen_shri_i32(tmp2, tmp, 16);
                            gen_aa32_st16(tmp, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp);
                            tcg_gen_addi_i32(addr, addr, stride);
                            gen_aa32_st16(tmp2, addr, get_mem_index(s));
                            tcg_temp_free_i32(tmp2);
                            tcg_gen_addi_i32(addr, addr, stride);
                        }
                    } else /* size == 0 */ {
                        /* Four byte accesses assembled into / split out of
                         * one 32-bit register half.
                         */
                        if (load) {
                            TCGV_UNUSED_I32(tmp2);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                                tcg_gen_addi_i32(addr, addr, stride);
                                if (n == 0) {
                                    tmp2 = tmp;
                                } else {
                                    tcg_gen_shli_i32(tmp, tmp, n * 8);
                                    tcg_gen_or_i32(tmp2, tmp2, tmp);
                                    tcg_temp_free_i32(tmp);
                                }
                            }
                            neon_store_reg(rd, pass, tmp2);
                        } else {
                            tmp2 = neon_load_reg(rd, pass);
                            for (n = 0; n < 4; n++) {
                                tmp = tcg_temp_new_i32();
                                if (n == 0) {
                                    tcg_gen_mov_i32(tmp, tmp2);
                                } else {
                                    tcg_gen_shri_i32(tmp, tmp2, n * 8);
                                }
                                gen_aa32_st8(tmp, addr, get_mem_index(s));
                                tcg_temp_free_i32(tmp);
                                tcg_gen_addi_i32(addr, addr, stride);
                            }
                            tcg_temp_free_i32(tmp2);
                        }
                    }
                }
            }
            rd += spacing;
        }
        tcg_temp_free_i32(addr);
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            int a = (insn >> 4) & 1;
            if (!load) {
                return 1;
            }
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;

            if (size == 3) {
                if (nregs != 4 || a == 0) {
                    return 1;
                }
                /* For VLD4 size==3 a == 1 means 32 bits at 16 byte alignment */
                size = 2;
            }
            if (nregs == 1 && a == 1 && size == 0) {
                return 1;
            }
            if (nregs == 3 && a == 1) {
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            if (nregs == 1) {
                /* VLD1 to all lanes: bit 5 indicates how many Dregs to write */
                tmp = gen_load_and_replicate(s, addr, size);
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                if (insn & (1 << 5)) {
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd + 1, 1));
                }
                tcg_temp_free_i32(tmp);
            } else {
                /* VLD2/3/4 to all lanes: bit 5 indicates register stride */
                stride = (insn & (1 << 5)) ? 2 : 1;
                for (reg = 0; reg < nregs; reg++) {
                    tmp = gen_load_and_replicate(s, addr, size);
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 0));
                    tcg_gen_st_i32(tmp, cpu_env, neon_reg_offset(rd, 1));
                    tcg_temp_free_i32(tmp);
                    tcg_gen_addi_i32(addr, addr, 1 << size);
                    rd += stride;
                }
            }
            tcg_temp_free_i32(addr);
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            int idx = (insn >> 4) & 0xf;
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            /* Catch the UNDEF cases. This is unavoidably a bit messy. */
            switch (nregs) {
            case 1:
                if (((idx & (1 << size)) != 0) ||
                    (size == 2 && ((idx & 3) == 1 || (idx & 3) == 2))) {
                    return 1;
                }
                break;
            case 3:
                if ((idx & 1) != 0) {
                    return 1;
                }
                /* fall through */
            case 2:
                if (size == 2 && (idx & 2) != 0) {
                    return 1;
                }
                break;
            case 4:
                if ((size == 2) && ((idx & 3) == 3)) {
                    return 1;
                }
                break;
            default:
                abort();
            }
            if ((rd + stride * (nregs - 1)) > 31) {
                /* Attempts to write off the end of the register file
                 * are UNPREDICTABLE; we choose to UNDEF because otherwise
                 * the neon_load_reg() would write off the end of the array.
                 */
                return 1;
            }
            addr = tcg_temp_new_i32();
            load_reg_var(s, addr, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    tmp = tcg_temp_new_i32();
                    switch (size) {
                    case 0:
                        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
                        break;
                    default: /* Avoid compiler warnings.  */
                        abort();
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the existing
                         * register contents at the right lane position.
                         */
                        tmp2 = neon_load_reg(rd, pass);
                        tcg_gen_deposit_i32(tmp, tmp2, tmp,
                                            shift, size ? 16 : 8);
                        tcg_temp_free_i32(tmp2);
                    }
                    neon_store_reg(rd, pass, tmp);
                } else { /* Store */
                    tmp = neon_load_reg(rd, pass);
                    if (shift)
                        tcg_gen_shri_i32(tmp, tmp, shift);
                    switch (size) {
                    case 0:
                        gen_aa32_st8(tmp, addr, get_mem_index(s));
                        break;
                    case 1:
                        gen_aa32_st16(tmp, addr, get_mem_index(s));
                        break;
                    case 2:
                        gen_aa32_st32(tmp, addr, get_mem_index(s));
                        break;
                    }
                    tcg_temp_free_i32(tmp);
                }
                rd += stride;
                tcg_gen_addi_i32(addr, addr, 1 << size);
            }
            tcg_temp_free_i32(addr);
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 means none, rm == 13 means
     * post-increment by the transfer size, otherwise add register rm.
     */
    if (rm != 15) {
        TCGv_i32 base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv_i32 index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            tcg_temp_free_i32(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 4662
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv_i32 dest, TCGv_i32 t, TCGv_i32 f, TCGv_i32 c)
{
    /* Per-bit mux: bits of t where c is 1, bits of f where c is 0. */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_andc_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
4670
/* Plain (truncating) narrow of a 64-bit vector half to 32 bits;
 * 'size' is the log2 byte size of the destination elements.
 */
static inline void gen_neon_narrow(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_u8(dest, src); break;
    case 1: gen_helper_neon_narrow_u16(dest, src); break;
    case 2: tcg_gen_trunc_i64_i32(dest, src); break;
    default: abort();
    }
}
4680
/* Signed saturating narrow (signed in, signed out); helpers take
 * cpu_env so saturation can set QC.
 */
static inline void gen_neon_narrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
    default: abort();
    }
}
4690
/* Unsigned saturating narrow (unsigned in, unsigned out); helpers take
 * cpu_env so saturation can set QC.
 */
static inline void gen_neon_narrow_satu(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
    default: abort();
    }
}
4700
/* Saturating narrow with signed input and unsigned output
 * ("unarrow", VQMOVUN-style helpers); helpers take cpu_env.
 */
static inline void gen_neon_unarrow_sats(int size, TCGv_i32 dest, TCGv_i64 src)
{
    switch (size) {
    case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
    case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
    case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
    default: abort();
    }
}
4710
/* Variable shift of 16- or 32-bit elements as part of a narrowing shift:
 * 'q' selects the rounding helper variants (neon_rshl_*) over plain
 * shifts, 'u' selects unsigned vs signed.  Only sizes 1 and 2 are valid.
 */
static inline void gen_neon_shift_narrow(int size, TCGv_i32 var, TCGv_i32 shift,
                                         int q, int u)
{
    if (q) {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
            case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
            default: abort();
            }
        }
    } else {
        if (u) {
            switch (size) {
            case 1: gen_helper_neon_shl_u16(var, var, shift); break;
            case 2: gen_helper_neon_shl_u32(var, var, shift); break;
            default: abort();
            }
        } else {
            switch (size) {
            case 1: gen_helper_neon_shl_s16(var, var, shift); break;
            case 2: gen_helper_neon_shl_s32(var, var, shift); break;
            default: abort();
            }
        }
    }
}
4744
/* Widen a 32-bit vector half to 64 bits with sign ('u' == 0) or zero
 * ('u' != 0) extension of each element.  Consumes (frees) src.
 */
static inline void gen_neon_widen(TCGv_i64 dest, TCGv_i32 src, int size, int u)
{
    if (u) {
        switch (size) {
        case 0: gen_helper_neon_widen_u8(dest, src); break;
        case 1: gen_helper_neon_widen_u16(dest, src); break;
        case 2: tcg_gen_extu_i32_i64(dest, src); break;
        default: abort();
        }
    } else {
        switch (size) {
        case 0: gen_helper_neon_widen_s8(dest, src); break;
        case 1: gen_helper_neon_widen_s16(dest, src); break;
        case 2: tcg_gen_ext_i32_i64(dest, src); break;
        default: abort();
        }
    }
    tcg_temp_free_i32(src);
}
4764
/* Long add on the cpu_V0/cpu_V1 globals: cpu_V0 += cpu_V1, with 'size'
 * giving the source element size (so the result elements are doubled).
 */
static inline void gen_neon_addl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_addl_u16(CPU_V001); break;
    case 1: gen_helper_neon_addl_u32(CPU_V001); break;
    case 2: tcg_gen_add_i64(CPU_V001); break;
    default: abort();
    }
}
4774
/* Long subtract on the cpu_V0/cpu_V1 globals: cpu_V0 -= cpu_V1, with
 * 'size' giving the source element size.
 */
static inline void gen_neon_subl(int size)
{
    switch (size) {
    case 0: gen_helper_neon_subl_u16(CPU_V001); break;
    case 1: gen_helper_neon_subl_u32(CPU_V001); break;
    case 2: tcg_gen_sub_i64(CPU_V001); break;
    default: abort();
    }
}
4784
/* Negate the widened elements of a 64-bit value in place; 'size' is the
 * pre-widening element size (size 2 means a single 64-bit negate).
 */
static inline void gen_neon_negl(TCGv_i64 var, int size)
{
    switch (size) {
    case 0: gen_helper_neon_negl_u16(var, var); break;
    case 1: gen_helper_neon_negl_u32(var, var); break;
    case 2:
        tcg_gen_neg_i64(var, var);
        break;
    default: abort();
    }
}
4796
/* Signed saturating long add: op0 = sat(op0 + op1); helpers take
 * cpu_env so saturation can set QC.  'size' is the pre-widening
 * element size; size 0 has no helper and aborts.
 */
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
    switch (size) {
    case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
    case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
    default: abort();
    }
}
4805
/* Long multiply: dest(64) = a(32) * b(32) element-wise, widening each
 * element, with (size, u) selecting element width and signedness.
 * Consumes a and b for all sizes (helpers for size < 2 do not free
 * their parameters, so that is done explicitly below).
 */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv_i32 a, TCGv_i32 b,
                                 int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64: use the scalar widening multiply, which frees its
         * 32-bit inputs itself.
         */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    case 5:
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        tcg_temp_free_i64(tmp);
        break;
    default: abort();
    }

    /* gen_helper_neon_mull_[su]{8|16} do not free their parameters.
       Don't forget to clean them now.  */
    if (size < 2) {
        tcg_temp_free_i32(a);
        tcg_temp_free_i32(b);
    }
}
4836
39d5492a
PM
4837static void gen_neon_narrow_op(int op, int u, int size,
4838 TCGv_i32 dest, TCGv_i64 src)
c33171c7
PM
4839{
4840 if (op) {
4841 if (u) {
4842 gen_neon_unarrow_sats(size, dest, src);
4843 } else {
4844 gen_neon_narrow(size, dest, src);
4845 }
4846 } else {
4847 if (u) {
4848 gen_neon_narrow_satu(size, dest, src);
4849 } else {
4850 gen_neon_narrow_sats(size, dest, src);
4851 }
4852 }
4853}
4854
/* Symbolic constants for op fields for Neon 3-register same-length.
 * The values correspond to bits [11:8,4]; see the ARM ARM DDI0406B
 * table A7-9.
 */
#define NEON_3R_VHADD 0
#define NEON_3R_VQADD 1
#define NEON_3R_VRHADD 2
#define NEON_3R_LOGIC 3 /* VAND,VBIC,VORR,VMOV,VORN,VEOR,VBIF,VBIT,VBSL */
#define NEON_3R_VHSUB 4
#define NEON_3R_VQSUB 5
#define NEON_3R_VCGT 6
#define NEON_3R_VCGE 7
#define NEON_3R_VSHL 8
#define NEON_3R_VQSHL 9
#define NEON_3R_VRSHL 10
#define NEON_3R_VQRSHL 11
#define NEON_3R_VMAX 12
#define NEON_3R_VMIN 13
#define NEON_3R_VABD 14
#define NEON_3R_VABA 15
#define NEON_3R_VADD_VSUB 16
#define NEON_3R_VTST_VCEQ 17
#define NEON_3R_VML 18 /* VMLA, VMLAL, VMLS, VMLSL */
#define NEON_3R_VMUL 19
#define NEON_3R_VPMAX 20
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
#define NEON_3R_SHA 24 /* SHA1C,SHA1P,SHA1M,SHA1SU0,SHA256H{2},SHA256SU1 */
#define NEON_3R_VFM 25 /* VFMA, VFMS : float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
#define NEON_3R_FLOAT_ACMP 29 /* float VACGE, VACGT, VACLE, VACLT */
#define NEON_3R_FLOAT_MINMAX 30 /* float VMIN, VMAX */
#define NEON_3R_FLOAT_MISC 31 /* float VRECPS, VRSQRTS, VMAXNM/MINNM */

/* Bit n of each entry is set if the insn allows size value n
 * (otherwise that size UNDEFs); 0xf additionally permits size 3.
 */
static const uint8_t neon_3r_sizes[] = {
    [NEON_3R_VHADD] = 0x7,
    [NEON_3R_VQADD] = 0xf,
    [NEON_3R_VRHADD] = 0x7,
    [NEON_3R_LOGIC] = 0xf, /* size field encodes op type */
    [NEON_3R_VHSUB] = 0x7,
    [NEON_3R_VQSUB] = 0xf,
    [NEON_3R_VCGT] = 0x7,
    [NEON_3R_VCGE] = 0x7,
    [NEON_3R_VSHL] = 0xf,
    [NEON_3R_VQSHL] = 0xf,
    [NEON_3R_VRSHL] = 0xf,
    [NEON_3R_VQRSHL] = 0xf,
    [NEON_3R_VMAX] = 0x7,
    [NEON_3R_VMIN] = 0x7,
    [NEON_3R_VABD] = 0x7,
    [NEON_3R_VABA] = 0x7,
    [NEON_3R_VADD_VSUB] = 0xf,
    [NEON_3R_VTST_VCEQ] = 0x7,
    [NEON_3R_VML] = 0x7,
    [NEON_3R_VMUL] = 0x7,
    [NEON_3R_VPMAX] = 0x7,
    [NEON_3R_VPMIN] = 0x7,
    [NEON_3R_VQDMULH_VQRDMULH] = 0x6,
    [NEON_3R_VPADD] = 0x7,
    [NEON_3R_SHA] = 0xf, /* size field encodes op type */
    [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_ACMP] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MINMAX] = 0x5, /* size bit 1 encodes op */
    [NEON_3R_FLOAT_MISC] = 0x5, /* size bit 1 encodes op */
};
4926
/* Symbolic constants for op fields for Neon 2-register miscellaneous.
 * The values correspond to bits [17:16,10:7]; see the ARM ARM DDI0406B
 * table A7-13.
 */
#define NEON_2RM_VREV64 0
#define NEON_2RM_VREV32 1
#define NEON_2RM_VREV16 2
#define NEON_2RM_VPADDL 4
#define NEON_2RM_VPADDL_U 5
#define NEON_2RM_AESE 6 /* Includes AESD */
#define NEON_2RM_AESMC 7 /* Includes AESIMC */
#define NEON_2RM_VCLS 8
#define NEON_2RM_VCLZ 9
#define NEON_2RM_VCNT 10
#define NEON_2RM_VMVN 11
#define NEON_2RM_VPADAL 12
#define NEON_2RM_VPADAL_U 13
#define NEON_2RM_VQABS 14
#define NEON_2RM_VQNEG 15
#define NEON_2RM_VCGT0 16
#define NEON_2RM_VCGE0 17
#define NEON_2RM_VCEQ0 18
#define NEON_2RM_VCLE0 19
#define NEON_2RM_VCLT0 20
#define NEON_2RM_SHA1H 21
#define NEON_2RM_VABS 22
#define NEON_2RM_VNEG 23
#define NEON_2RM_VCGT0_F 24
#define NEON_2RM_VCGE0_F 25
#define NEON_2RM_VCEQ0_F 26
#define NEON_2RM_VCLE0_F 27
#define NEON_2RM_VCLT0_F 28
#define NEON_2RM_VABS_F 30
#define NEON_2RM_VNEG_F 31
#define NEON_2RM_VSWP 32
#define NEON_2RM_VTRN 33
#define NEON_2RM_VUZP 34
#define NEON_2RM_VZIP 35
#define NEON_2RM_VMOVN 36 /* Includes VQMOVN, VQMOVUN */
#define NEON_2RM_VQMOVN 37 /* Includes VQMOVUN */
#define NEON_2RM_VSHLL 38
#define NEON_2RM_SHA1SU1 39 /* Includes SHA256SU0 */
#define NEON_2RM_VRINTN 40
#define NEON_2RM_VRINTX 41
#define NEON_2RM_VRINTA 42
#define NEON_2RM_VRINTZ 43
#define NEON_2RM_VCVT_F16_F32 44
#define NEON_2RM_VRINTM 45
#define NEON_2RM_VCVT_F32_F16 46
#define NEON_2RM_VRINTP 47
#define NEON_2RM_VCVTAU 48
#define NEON_2RM_VCVTAS 49
#define NEON_2RM_VCVTNU 50
#define NEON_2RM_VCVTNS 51
#define NEON_2RM_VCVTPU 52
#define NEON_2RM_VCVTPS 53
#define NEON_2RM_VCVTMU 54
#define NEON_2RM_VCVTMS 55
#define NEON_2RM_VRECPE 56
#define NEON_2RM_VRSQRTE 57
#define NEON_2RM_VRECPE_F 58
#define NEON_2RM_VRSQRTE_F 59
#define NEON_2RM_VCVT_FS 60
#define NEON_2RM_VCVT_FU 61
#define NEON_2RM_VCVT_SF 62
#define NEON_2RM_VCVT_UF 63

static int neon_2rm_is_float_op(int op)
{
    /* Return true if this neon 2reg-misc op is float-to-float.
     * The qualifying ops are VABS.F/VNEG.F, the VRINT* group (which is
     * not contiguous in the encoding), and everything from VRINTP up to
     * VCVTMS plus everything from VRECPE.F onwards.
     */
    if (op == NEON_2RM_VABS_F || op == NEON_2RM_VNEG_F) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTN && op <= NEON_2RM_VRINTZ) {
        return 1;
    }
    if (op == NEON_2RM_VRINTM) {
        return 1;
    }
    if (op >= NEON_2RM_VRINTP && op <= NEON_2RM_VCVTMS) {
        return 1;
    }
    return op >= NEON_2RM_VRECPE_F;
}
5003
/* Each entry in this array has bit n set if the insn allows
 * size value n (otherwise it will UNDEF). Since unallocated
 * op values will have no bits set they always UNDEF.
 */
static const uint8_t neon_2rm_sizes[] = {
    [NEON_2RM_VREV64] = 0x7,
    [NEON_2RM_VREV32] = 0x3,
    [NEON_2RM_VREV16] = 0x1,
    [NEON_2RM_VPADDL] = 0x7,
    [NEON_2RM_VPADDL_U] = 0x7,
    [NEON_2RM_AESE] = 0x1,
    [NEON_2RM_AESMC] = 0x1,
    [NEON_2RM_VCLS] = 0x7,
    [NEON_2RM_VCLZ] = 0x7,
    [NEON_2RM_VCNT] = 0x1,
    [NEON_2RM_VMVN] = 0x1,
    [NEON_2RM_VPADAL] = 0x7,
    [NEON_2RM_VPADAL_U] = 0x7,
    [NEON_2RM_VQABS] = 0x7,
    [NEON_2RM_VQNEG] = 0x7,
    [NEON_2RM_VCGT0] = 0x7,
    [NEON_2RM_VCGE0] = 0x7,
    [NEON_2RM_VCEQ0] = 0x7,
    [NEON_2RM_VCLE0] = 0x7,
    [NEON_2RM_VCLT0] = 0x7,
    [NEON_2RM_SHA1H] = 0x4,
    [NEON_2RM_VABS] = 0x7,
    [NEON_2RM_VNEG] = 0x7,
    [NEON_2RM_VCGT0_F] = 0x4,
    [NEON_2RM_VCGE0_F] = 0x4,
    [NEON_2RM_VCEQ0_F] = 0x4,
    [NEON_2RM_VCLE0_F] = 0x4,
    [NEON_2RM_VCLT0_F] = 0x4,
    [NEON_2RM_VABS_F] = 0x4,
    [NEON_2RM_VNEG_F] = 0x4,
    [NEON_2RM_VSWP] = 0x1,
    [NEON_2RM_VTRN] = 0x7,
    [NEON_2RM_VUZP] = 0x7,
    [NEON_2RM_VZIP] = 0x7,
    [NEON_2RM_VMOVN] = 0x7,
    [NEON_2RM_VQMOVN] = 0x7,
    [NEON_2RM_VSHLL] = 0x7,
    [NEON_2RM_SHA1SU1] = 0x4,
    [NEON_2RM_VRINTN] = 0x4,
    [NEON_2RM_VRINTX] = 0x4,
    [NEON_2RM_VRINTA] = 0x4,
    [NEON_2RM_VRINTZ] = 0x4,
    [NEON_2RM_VCVT_F16_F32] = 0x2,
    [NEON_2RM_VRINTM] = 0x4,
    [NEON_2RM_VCVT_F32_F16] = 0x2,
    [NEON_2RM_VRINTP] = 0x4,
    [NEON_2RM_VCVTAU] = 0x4,
    [NEON_2RM_VCVTAS] = 0x4,
    [NEON_2RM_VCVTNU] = 0x4,
    [NEON_2RM_VCVTNS] = 0x4,
    [NEON_2RM_VCVTPU] = 0x4,
    [NEON_2RM_VCVTPS] = 0x4,
    [NEON_2RM_VCVTMU] = 0x4,
    [NEON_2RM_VCVTMS] = 0x4,
    [NEON_2RM_VRECPE] = 0x4,
    [NEON_2RM_VRSQRTE] = 0x4,
    [NEON_2RM_VRECPE_F] = 0x4,
    [NEON_2RM_VRSQRTE_F] = 0x4,
    [NEON_2RM_VCVT_FS] = 0x4,
    [NEON_2RM_VCVT_FU] = 0x4,
    [NEON_2RM_VCVT_SF] = 0x4,
    [NEON_2RM_VCVT_UF] = 0x4,
};
5072
9ee6e8bb
PB
5073/* Translate a NEON data processing instruction. Return nonzero if the
5074 instruction is invalid.
ad69471c
PB
5075 We process data in a mixture of 32-bit and 64-bit chunks.
5076 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 5077
7dcc1f89 5078static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
9ee6e8bb
PB
5079{
5080 int op;
5081 int q;
5082 int rd, rn, rm;
5083 int size;
5084 int shift;
5085 int pass;
5086 int count;
5087 int pairwise;
5088 int u;
ca9a32e4 5089 uint32_t imm, mask;
39d5492a 5090 TCGv_i32 tmp, tmp2, tmp3, tmp4, tmp5;
a7812ae4 5091 TCGv_i64 tmp64;
9ee6e8bb 5092
2c7ffc41
PM
5093 /* FIXME: this access check should not take precedence over UNDEF
5094 * for invalid encodings; we will generate incorrect syndrome information
5095 * for attempts to execute invalid vfp/neon encodings with FP disabled.
5096 */
5097 if (!s->cpacr_fpen) {
5098 gen_exception_insn(s, 4, EXCP_UDEF,
5099 syn_fp_access_trap(1, 0xe, s->thumb));
5100 return 0;
5101 }
5102
5df8bac1 5103 if (!s->vfp_enabled)
9ee6e8bb
PB
5104 return 1;
5105 q = (insn & (1 << 6)) != 0;
5106 u = (insn >> 24) & 1;
5107 VFP_DREG_D(rd, insn);
5108 VFP_DREG_N(rn, insn);
5109 VFP_DREG_M(rm, insn);
5110 size = (insn >> 20) & 3;
5111 if ((insn & (1 << 23)) == 0) {
5112 /* Three register same length. */
5113 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
62698be3
PM
5114 /* Catch invalid op and bad size combinations: UNDEF */
5115 if ((neon_3r_sizes[op] & (1 << size)) == 0) {
5116 return 1;
5117 }
25f84f79
PM
5118 /* All insns of this form UNDEF for either this condition or the
5119 * superset of cases "Q==1"; we catch the latter later.
5120 */
5121 if (q && ((rd | rn | rm) & 1)) {
5122 return 1;
5123 }
f1ecb913
AB
5124 /*
5125 * The SHA-1/SHA-256 3-register instructions require special treatment
5126 * here, as their size field is overloaded as an op type selector, and
5127 * they all consume their input in a single pass.
5128 */
5129 if (op == NEON_3R_SHA) {
5130 if (!q) {
5131 return 1;
5132 }
5133 if (!u) { /* SHA-1 */
d614a513 5134 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
5135 return 1;
5136 }
5137 tmp = tcg_const_i32(rd);
5138 tmp2 = tcg_const_i32(rn);
5139 tmp3 = tcg_const_i32(rm);
5140 tmp4 = tcg_const_i32(size);
5141 gen_helper_crypto_sha1_3reg(cpu_env, tmp, tmp2, tmp3, tmp4);
5142 tcg_temp_free_i32(tmp4);
5143 } else { /* SHA-256 */
d614a513 5144 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256) || size == 3) {
f1ecb913
AB
5145 return 1;
5146 }
5147 tmp = tcg_const_i32(rd);
5148 tmp2 = tcg_const_i32(rn);
5149 tmp3 = tcg_const_i32(rm);
5150 switch (size) {
5151 case 0:
5152 gen_helper_crypto_sha256h(cpu_env, tmp, tmp2, tmp3);
5153 break;
5154 case 1:
5155 gen_helper_crypto_sha256h2(cpu_env, tmp, tmp2, tmp3);
5156 break;
5157 case 2:
5158 gen_helper_crypto_sha256su1(cpu_env, tmp, tmp2, tmp3);
5159 break;
5160 }
5161 }
5162 tcg_temp_free_i32(tmp);
5163 tcg_temp_free_i32(tmp2);
5164 tcg_temp_free_i32(tmp3);
5165 return 0;
5166 }
62698be3
PM
5167 if (size == 3 && op != NEON_3R_LOGIC) {
5168 /* 64-bit element instructions. */
9ee6e8bb 5169 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
5170 neon_load_reg64(cpu_V0, rn + pass);
5171 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 5172 switch (op) {
62698be3 5173 case NEON_3R_VQADD:
9ee6e8bb 5174 if (u) {
02da0b2d
PM
5175 gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
5176 cpu_V0, cpu_V1);
2c0262af 5177 } else {
02da0b2d
PM
5178 gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
5179 cpu_V0, cpu_V1);
2c0262af 5180 }
9ee6e8bb 5181 break;
62698be3 5182 case NEON_3R_VQSUB:
9ee6e8bb 5183 if (u) {
02da0b2d
PM
5184 gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
5185 cpu_V0, cpu_V1);
ad69471c 5186 } else {
02da0b2d
PM
5187 gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
5188 cpu_V0, cpu_V1);
ad69471c
PB
5189 }
5190 break;
62698be3 5191 case NEON_3R_VSHL:
ad69471c
PB
5192 if (u) {
5193 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
5194 } else {
5195 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
5196 }
5197 break;
62698be3 5198 case NEON_3R_VQSHL:
ad69471c 5199 if (u) {
02da0b2d
PM
5200 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
5201 cpu_V1, cpu_V0);
ad69471c 5202 } else {
02da0b2d
PM
5203 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
5204 cpu_V1, cpu_V0);
ad69471c
PB
5205 }
5206 break;
62698be3 5207 case NEON_3R_VRSHL:
ad69471c
PB
5208 if (u) {
5209 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 5210 } else {
ad69471c
PB
5211 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
5212 }
5213 break;
62698be3 5214 case NEON_3R_VQRSHL:
ad69471c 5215 if (u) {
02da0b2d
PM
5216 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
5217 cpu_V1, cpu_V0);
ad69471c 5218 } else {
02da0b2d
PM
5219 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
5220 cpu_V1, cpu_V0);
1e8d4eec 5221 }
9ee6e8bb 5222 break;
62698be3 5223 case NEON_3R_VADD_VSUB:
9ee6e8bb 5224 if (u) {
ad69471c 5225 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 5226 } else {
ad69471c 5227 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
5228 }
5229 break;
5230 default:
5231 abort();
2c0262af 5232 }
ad69471c 5233 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 5234 }
9ee6e8bb 5235 return 0;
2c0262af 5236 }
25f84f79 5237 pairwise = 0;
9ee6e8bb 5238 switch (op) {
62698be3
PM
5239 case NEON_3R_VSHL:
5240 case NEON_3R_VQSHL:
5241 case NEON_3R_VRSHL:
5242 case NEON_3R_VQRSHL:
9ee6e8bb 5243 {
ad69471c
PB
5244 int rtmp;
5245 /* Shift instruction operands are reversed. */
5246 rtmp = rn;
9ee6e8bb 5247 rn = rm;
ad69471c 5248 rm = rtmp;
9ee6e8bb 5249 }
2c0262af 5250 break;
25f84f79
PM
5251 case NEON_3R_VPADD:
5252 if (u) {
5253 return 1;
5254 }
5255 /* Fall through */
62698be3
PM
5256 case NEON_3R_VPMAX:
5257 case NEON_3R_VPMIN:
9ee6e8bb 5258 pairwise = 1;
2c0262af 5259 break;
25f84f79
PM
5260 case NEON_3R_FLOAT_ARITH:
5261 pairwise = (u && size < 2); /* if VPADD (float) */
5262 break;
5263 case NEON_3R_FLOAT_MINMAX:
5264 pairwise = u; /* if VPMIN/VPMAX (float) */
5265 break;
5266 case NEON_3R_FLOAT_CMP:
5267 if (!u && size) {
5268 /* no encoding for U=0 C=1x */
5269 return 1;
5270 }
5271 break;
5272 case NEON_3R_FLOAT_ACMP:
5273 if (!u) {
5274 return 1;
5275 }
5276 break;
505935fc
WN
5277 case NEON_3R_FLOAT_MISC:
5278 /* VMAXNM/VMINNM in ARMv8 */
d614a513 5279 if (u && !arm_dc_feature(s, ARM_FEATURE_V8)) {
25f84f79
PM
5280 return 1;
5281 }
2c0262af 5282 break;
25f84f79
PM
5283 case NEON_3R_VMUL:
5284 if (u && (size != 0)) {
5285 /* UNDEF on invalid size for polynomial subcase */
5286 return 1;
5287 }
2c0262af 5288 break;
da97f52c 5289 case NEON_3R_VFM:
d614a513 5290 if (!arm_dc_feature(s, ARM_FEATURE_VFP4) || u) {
da97f52c
PM
5291 return 1;
5292 }
5293 break;
9ee6e8bb 5294 default:
2c0262af 5295 break;
9ee6e8bb 5296 }
dd8fbd78 5297
25f84f79
PM
5298 if (pairwise && q) {
5299 /* All the pairwise insns UNDEF if Q is set */
5300 return 1;
5301 }
5302
9ee6e8bb
PB
5303 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5304
5305 if (pairwise) {
5306 /* Pairwise. */
a5a14945
JR
5307 if (pass < 1) {
5308 tmp = neon_load_reg(rn, 0);
5309 tmp2 = neon_load_reg(rn, 1);
9ee6e8bb 5310 } else {
a5a14945
JR
5311 tmp = neon_load_reg(rm, 0);
5312 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb
PB
5313 }
5314 } else {
5315 /* Elementwise. */
dd8fbd78
FN
5316 tmp = neon_load_reg(rn, pass);
5317 tmp2 = neon_load_reg(rm, pass);
9ee6e8bb
PB
5318 }
5319 switch (op) {
62698be3 5320 case NEON_3R_VHADD:
9ee6e8bb
PB
5321 GEN_NEON_INTEGER_OP(hadd);
5322 break;
62698be3 5323 case NEON_3R_VQADD:
02da0b2d 5324 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 5325 break;
62698be3 5326 case NEON_3R_VRHADD:
9ee6e8bb 5327 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 5328 break;
62698be3 5329 case NEON_3R_LOGIC: /* Logic ops. */
9ee6e8bb
PB
5330 switch ((u << 2) | size) {
5331 case 0: /* VAND */
dd8fbd78 5332 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5333 break;
5334 case 1: /* BIC */
f669df27 5335 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5336 break;
5337 case 2: /* VORR */
dd8fbd78 5338 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5339 break;
5340 case 3: /* VORN */
f669df27 5341 tcg_gen_orc_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5342 break;
5343 case 4: /* VEOR */
dd8fbd78 5344 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb
PB
5345 break;
5346 case 5: /* VBSL */
dd8fbd78
FN
5347 tmp3 = neon_load_reg(rd, pass);
5348 gen_neon_bsl(tmp, tmp, tmp2, tmp3);
7d1b0095 5349 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5350 break;
5351 case 6: /* VBIT */
dd8fbd78
FN
5352 tmp3 = neon_load_reg(rd, pass);
5353 gen_neon_bsl(tmp, tmp, tmp3, tmp2);
7d1b0095 5354 tcg_temp_free_i32(tmp3);
9ee6e8bb
PB
5355 break;
5356 case 7: /* VBIF */
dd8fbd78
FN
5357 tmp3 = neon_load_reg(rd, pass);
5358 gen_neon_bsl(tmp, tmp3, tmp, tmp2);
7d1b0095 5359 tcg_temp_free_i32(tmp3);
9ee6e8bb 5360 break;
2c0262af
FB
5361 }
5362 break;
62698be3 5363 case NEON_3R_VHSUB:
9ee6e8bb
PB
5364 GEN_NEON_INTEGER_OP(hsub);
5365 break;
62698be3 5366 case NEON_3R_VQSUB:
02da0b2d 5367 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 5368 break;
62698be3 5369 case NEON_3R_VCGT:
9ee6e8bb
PB
5370 GEN_NEON_INTEGER_OP(cgt);
5371 break;
62698be3 5372 case NEON_3R_VCGE:
9ee6e8bb
PB
5373 GEN_NEON_INTEGER_OP(cge);
5374 break;
62698be3 5375 case NEON_3R_VSHL:
ad69471c 5376 GEN_NEON_INTEGER_OP(shl);
2c0262af 5377 break;
62698be3 5378 case NEON_3R_VQSHL:
02da0b2d 5379 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 5380 break;
62698be3 5381 case NEON_3R_VRSHL:
ad69471c 5382 GEN_NEON_INTEGER_OP(rshl);
2c0262af 5383 break;
62698be3 5384 case NEON_3R_VQRSHL:
02da0b2d 5385 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb 5386 break;
62698be3 5387 case NEON_3R_VMAX:
9ee6e8bb
PB
5388 GEN_NEON_INTEGER_OP(max);
5389 break;
62698be3 5390 case NEON_3R_VMIN:
9ee6e8bb
PB
5391 GEN_NEON_INTEGER_OP(min);
5392 break;
62698be3 5393 case NEON_3R_VABD:
9ee6e8bb
PB
5394 GEN_NEON_INTEGER_OP(abd);
5395 break;
62698be3 5396 case NEON_3R_VABA:
9ee6e8bb 5397 GEN_NEON_INTEGER_OP(abd);
7d1b0095 5398 tcg_temp_free_i32(tmp2);
dd8fbd78
FN
5399 tmp2 = neon_load_reg(rd, pass);
5400 gen_neon_add(size, tmp, tmp2);
9ee6e8bb 5401 break;
62698be3 5402 case NEON_3R_VADD_VSUB:
9ee6e8bb 5403 if (!u) { /* VADD */
62698be3 5404 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5405 } else { /* VSUB */
5406 switch (size) {
dd8fbd78
FN
5407 case 0: gen_helper_neon_sub_u8(tmp, tmp, tmp2); break;
5408 case 1: gen_helper_neon_sub_u16(tmp, tmp, tmp2); break;
5409 case 2: tcg_gen_sub_i32(tmp, tmp, tmp2); break;
62698be3 5410 default: abort();
9ee6e8bb
PB
5411 }
5412 }
5413 break;
62698be3 5414 case NEON_3R_VTST_VCEQ:
9ee6e8bb
PB
5415 if (!u) { /* VTST */
5416 switch (size) {
dd8fbd78
FN
5417 case 0: gen_helper_neon_tst_u8(tmp, tmp, tmp2); break;
5418 case 1: gen_helper_neon_tst_u16(tmp, tmp, tmp2); break;
5419 case 2: gen_helper_neon_tst_u32(tmp, tmp, tmp2); break;
62698be3 5420 default: abort();
9ee6e8bb
PB
5421 }
5422 } else { /* VCEQ */
5423 switch (size) {
dd8fbd78
FN
5424 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
5425 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
5426 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
62698be3 5427 default: abort();
9ee6e8bb
PB
5428 }
5429 }
5430 break;
62698be3 5431 case NEON_3R_VML: /* VMLA, VMLAL, VMLS,VMLSL */
9ee6e8bb 5432 switch (size) {
dd8fbd78
FN
5433 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5434 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5435 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5436 default: abort();
9ee6e8bb 5437 }
7d1b0095 5438 tcg_temp_free_i32(tmp2);
dd8fbd78 5439 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5440 if (u) { /* VMLS */
dd8fbd78 5441 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb 5442 } else { /* VMLA */
dd8fbd78 5443 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
5444 }
5445 break;
62698be3 5446 case NEON_3R_VMUL:
9ee6e8bb 5447 if (u) { /* polynomial */
dd8fbd78 5448 gen_helper_neon_mul_p8(tmp, tmp, tmp2);
9ee6e8bb
PB
5449 } else { /* Integer */
5450 switch (size) {
dd8fbd78
FN
5451 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
5452 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
5453 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
62698be3 5454 default: abort();
9ee6e8bb
PB
5455 }
5456 }
5457 break;
62698be3 5458 case NEON_3R_VPMAX:
9ee6e8bb
PB
5459 GEN_NEON_INTEGER_OP(pmax);
5460 break;
62698be3 5461 case NEON_3R_VPMIN:
9ee6e8bb
PB
5462 GEN_NEON_INTEGER_OP(pmin);
5463 break;
62698be3 5464 case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
9ee6e8bb
PB
5465 if (!u) { /* VQDMULH */
5466 switch (size) {
02da0b2d
PM
5467 case 1:
5468 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
5469 break;
5470 case 2:
5471 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
5472 break;
62698be3 5473 default: abort();
9ee6e8bb 5474 }
62698be3 5475 } else { /* VQRDMULH */
9ee6e8bb 5476 switch (size) {
02da0b2d
PM
5477 case 1:
5478 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
5479 break;
5480 case 2:
5481 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
5482 break;
62698be3 5483 default: abort();
9ee6e8bb
PB
5484 }
5485 }
5486 break;
62698be3 5487 case NEON_3R_VPADD:
9ee6e8bb 5488 switch (size) {
dd8fbd78
FN
5489 case 0: gen_helper_neon_padd_u8(tmp, tmp, tmp2); break;
5490 case 1: gen_helper_neon_padd_u16(tmp, tmp, tmp2); break;
5491 case 2: tcg_gen_add_i32(tmp, tmp, tmp2); break;
62698be3 5492 default: abort();
9ee6e8bb
PB
5493 }
5494 break;
62698be3 5495 case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
aa47cfdd
PM
5496 {
5497 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb
PB
5498 switch ((u << 2) | size) {
5499 case 0: /* VADD */
aa47cfdd
PM
5500 case 4: /* VPADD */
5501 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5502 break;
5503 case 2: /* VSUB */
aa47cfdd 5504 gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5505 break;
5506 case 6: /* VABD */
aa47cfdd 5507 gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
9ee6e8bb
PB
5508 break;
5509 default:
62698be3 5510 abort();
9ee6e8bb 5511 }
aa47cfdd 5512 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5513 break;
aa47cfdd 5514 }
62698be3 5515 case NEON_3R_FLOAT_MULTIPLY:
aa47cfdd
PM
5516 {
5517 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5518 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5519 if (!u) {
7d1b0095 5520 tcg_temp_free_i32(tmp2);
dd8fbd78 5521 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 5522 if (size == 0) {
aa47cfdd 5523 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
9ee6e8bb 5524 } else {
aa47cfdd 5525 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
9ee6e8bb
PB
5526 }
5527 }
aa47cfdd 5528 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5529 break;
aa47cfdd 5530 }
62698be3 5531 case NEON_3R_FLOAT_CMP:
aa47cfdd
PM
5532 {
5533 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
9ee6e8bb 5534 if (!u) {
aa47cfdd 5535 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
b5ff1b31 5536 } else {
aa47cfdd
PM
5537 if (size == 0) {
5538 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
5539 } else {
5540 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
5541 }
b5ff1b31 5542 }
aa47cfdd 5543 tcg_temp_free_ptr(fpstatus);
2c0262af 5544 break;
aa47cfdd 5545 }
62698be3 5546 case NEON_3R_FLOAT_ACMP:
aa47cfdd
PM
5547 {
5548 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5549 if (size == 0) {
5550 gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
5551 } else {
5552 gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
5553 }
5554 tcg_temp_free_ptr(fpstatus);
2c0262af 5555 break;
aa47cfdd 5556 }
62698be3 5557 case NEON_3R_FLOAT_MINMAX:
aa47cfdd
PM
5558 {
5559 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5560 if (size == 0) {
f71a2ae5 5561 gen_helper_vfp_maxs(tmp, tmp, tmp2, fpstatus);
aa47cfdd 5562 } else {
f71a2ae5 5563 gen_helper_vfp_mins(tmp, tmp, tmp2, fpstatus);
aa47cfdd
PM
5564 }
5565 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 5566 break;
aa47cfdd 5567 }
505935fc
WN
5568 case NEON_3R_FLOAT_MISC:
5569 if (u) {
5570 /* VMAXNM/VMINNM */
5571 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5572 if (size == 0) {
f71a2ae5 5573 gen_helper_vfp_maxnums(tmp, tmp, tmp2, fpstatus);
505935fc 5574 } else {
f71a2ae5 5575 gen_helper_vfp_minnums(tmp, tmp, tmp2, fpstatus);
505935fc
WN
5576 }
5577 tcg_temp_free_ptr(fpstatus);
5578 } else {
5579 if (size == 0) {
5580 gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
5581 } else {
5582 gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
5583 }
5584 }
2c0262af 5585 break;
da97f52c
PM
5586 case NEON_3R_VFM:
5587 {
5588 /* VFMA, VFMS: fused multiply-add */
5589 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
5590 TCGv_i32 tmp3 = neon_load_reg(rd, pass);
5591 if (size) {
5592 /* VFMS */
5593 gen_helper_vfp_negs(tmp, tmp);
5594 }
5595 gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
5596 tcg_temp_free_i32(tmp3);
5597 tcg_temp_free_ptr(fpstatus);
5598 break;
5599 }
9ee6e8bb
PB
5600 default:
5601 abort();
2c0262af 5602 }
7d1b0095 5603 tcg_temp_free_i32(tmp2);
dd8fbd78 5604
9ee6e8bb
PB
5605 /* Save the result. For elementwise operations we can put it
5606 straight into the destination register. For pairwise operations
5607 we have to be careful to avoid clobbering the source operands. */
5608 if (pairwise && rd == rm) {
dd8fbd78 5609 neon_store_scratch(pass, tmp);
9ee6e8bb 5610 } else {
dd8fbd78 5611 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5612 }
5613
5614 } /* for pass */
5615 if (pairwise && rd == rm) {
5616 for (pass = 0; pass < (q ? 4 : 2); pass++) {
dd8fbd78
FN
5617 tmp = neon_load_scratch(pass);
5618 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5619 }
5620 }
ad69471c 5621 /* End of 3 register same size operations. */
9ee6e8bb
PB
5622 } else if (insn & (1 << 4)) {
5623 if ((insn & 0x00380080) != 0) {
5624 /* Two registers and shift. */
5625 op = (insn >> 8) & 0xf;
5626 if (insn & (1 << 7)) {
cc13115b
PM
5627 /* 64-bit shift. */
5628 if (op > 7) {
5629 return 1;
5630 }
9ee6e8bb
PB
5631 size = 3;
5632 } else {
5633 size = 2;
5634 while ((insn & (1 << (size + 19))) == 0)
5635 size--;
5636 }
5637 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
b90372ad 5638 /* To avoid excessive duplication of ops we implement shift
9ee6e8bb
PB
5639 by immediate using the variable shift operations. */
5640 if (op < 8) {
5641 /* Shift by immediate:
5642 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
cc13115b
PM
5643 if (q && ((rd | rm) & 1)) {
5644 return 1;
5645 }
5646 if (!u && (op == 4 || op == 6)) {
5647 return 1;
5648 }
9ee6e8bb
PB
5649 /* Right shifts are encoded as N - shift, where N is the
5650 element size in bits. */
5651 if (op <= 4)
5652 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
5653 if (size == 3) {
5654 count = q + 1;
5655 } else {
5656 count = q ? 4: 2;
5657 }
5658 switch (size) {
5659 case 0:
5660 imm = (uint8_t) shift;
5661 imm |= imm << 8;
5662 imm |= imm << 16;
5663 break;
5664 case 1:
5665 imm = (uint16_t) shift;
5666 imm |= imm << 16;
5667 break;
5668 case 2:
5669 case 3:
5670 imm = shift;
5671 break;
5672 default:
5673 abort();
5674 }
5675
5676 for (pass = 0; pass < count; pass++) {
ad69471c
PB
5677 if (size == 3) {
5678 neon_load_reg64(cpu_V0, rm + pass);
5679 tcg_gen_movi_i64(cpu_V1, imm);
5680 switch (op) {
5681 case 0: /* VSHR */
5682 case 1: /* VSRA */
5683 if (u)
5684 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5685 else
ad69471c 5686 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5687 break;
ad69471c
PB
5688 case 2: /* VRSHR */
5689 case 3: /* VRSRA */
5690 if (u)
5691 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5692 else
ad69471c 5693 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 5694 break;
ad69471c 5695 case 4: /* VSRI */
ad69471c
PB
5696 case 5: /* VSHL, VSLI */
5697 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
5698 break;
0322b26e 5699 case 6: /* VQSHLU */
02da0b2d
PM
5700 gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
5701 cpu_V0, cpu_V1);
ad69471c 5702 break;
0322b26e
PM
5703 case 7: /* VQSHL */
5704 if (u) {
02da0b2d 5705 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
0322b26e
PM
5706 cpu_V0, cpu_V1);
5707 } else {
02da0b2d 5708 gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
0322b26e
PM
5709 cpu_V0, cpu_V1);
5710 }
9ee6e8bb 5711 break;
9ee6e8bb 5712 }
ad69471c
PB
5713 if (op == 1 || op == 3) {
5714 /* Accumulate. */
5371cb81 5715 neon_load_reg64(cpu_V1, rd + pass);
ad69471c
PB
5716 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
5717 } else if (op == 4 || (op == 5 && u)) {
5718 /* Insert */
923e6509
CL
5719 neon_load_reg64(cpu_V1, rd + pass);
5720 uint64_t mask;
5721 if (shift < -63 || shift > 63) {
5722 mask = 0;
5723 } else {
5724 if (op == 4) {
5725 mask = 0xffffffffffffffffull >> -shift;
5726 } else {
5727 mask = 0xffffffffffffffffull << shift;
5728 }
5729 }
5730 tcg_gen_andi_i64(cpu_V1, cpu_V1, ~mask);
5731 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
ad69471c
PB
5732 }
5733 neon_store_reg64(cpu_V0, rd + pass);
5734 } else { /* size < 3 */
5735 /* Operands in T0 and T1. */
dd8fbd78 5736 tmp = neon_load_reg(rm, pass);
7d1b0095 5737 tmp2 = tcg_temp_new_i32();
dd8fbd78 5738 tcg_gen_movi_i32(tmp2, imm);
ad69471c
PB
5739 switch (op) {
5740 case 0: /* VSHR */
5741 case 1: /* VSRA */
5742 GEN_NEON_INTEGER_OP(shl);
5743 break;
5744 case 2: /* VRSHR */
5745 case 3: /* VRSRA */
5746 GEN_NEON_INTEGER_OP(rshl);
5747 break;
5748 case 4: /* VSRI */
ad69471c
PB
5749 case 5: /* VSHL, VSLI */
5750 switch (size) {
dd8fbd78
FN
5751 case 0: gen_helper_neon_shl_u8(tmp, tmp, tmp2); break;
5752 case 1: gen_helper_neon_shl_u16(tmp, tmp, tmp2); break;
5753 case 2: gen_helper_neon_shl_u32(tmp, tmp, tmp2); break;
cc13115b 5754 default: abort();
ad69471c
PB
5755 }
5756 break;
0322b26e 5757 case 6: /* VQSHLU */
ad69471c 5758 switch (size) {
0322b26e 5759 case 0:
02da0b2d
PM
5760 gen_helper_neon_qshlu_s8(tmp, cpu_env,
5761 tmp, tmp2);
0322b26e
PM
5762 break;
5763 case 1:
02da0b2d
PM
5764 gen_helper_neon_qshlu_s16(tmp, cpu_env,
5765 tmp, tmp2);
0322b26e
PM
5766 break;
5767 case 2:
02da0b2d
PM
5768 gen_helper_neon_qshlu_s32(tmp, cpu_env,
5769 tmp, tmp2);
0322b26e
PM
5770 break;
5771 default:
cc13115b 5772 abort();
ad69471c
PB
5773 }
5774 break;
0322b26e 5775 case 7: /* VQSHL */
02da0b2d 5776 GEN_NEON_INTEGER_OP_ENV(qshl);
0322b26e 5777 break;
ad69471c 5778 }
7d1b0095 5779 tcg_temp_free_i32(tmp2);
ad69471c
PB
5780
5781 if (op == 1 || op == 3) {
5782 /* Accumulate. */
dd8fbd78 5783 tmp2 = neon_load_reg(rd, pass);
5371cb81 5784 gen_neon_add(size, tmp, tmp2);
7d1b0095 5785 tcg_temp_free_i32(tmp2);
ad69471c
PB
5786 } else if (op == 4 || (op == 5 && u)) {
5787 /* Insert */
5788 switch (size) {
5789 case 0:
5790 if (op == 4)
ca9a32e4 5791 mask = 0xff >> -shift;
ad69471c 5792 else
ca9a32e4
JR
5793 mask = (uint8_t)(0xff << shift);
5794 mask |= mask << 8;
5795 mask |= mask << 16;
ad69471c
PB
5796 break;
5797 case 1:
5798 if (op == 4)
ca9a32e4 5799 mask = 0xffff >> -shift;
ad69471c 5800 else
ca9a32e4
JR
5801 mask = (uint16_t)(0xffff << shift);
5802 mask |= mask << 16;
ad69471c
PB
5803 break;
5804 case 2:
ca9a32e4
JR
5805 if (shift < -31 || shift > 31) {
5806 mask = 0;
5807 } else {
5808 if (op == 4)
5809 mask = 0xffffffffu >> -shift;
5810 else
5811 mask = 0xffffffffu << shift;
5812 }
ad69471c
PB
5813 break;
5814 default:
5815 abort();
5816 }
dd8fbd78 5817 tmp2 = neon_load_reg(rd, pass);
ca9a32e4
JR
5818 tcg_gen_andi_i32(tmp, tmp, mask);
5819 tcg_gen_andi_i32(tmp2, tmp2, ~mask);
dd8fbd78 5820 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 5821 tcg_temp_free_i32(tmp2);
ad69471c 5822 }
dd8fbd78 5823 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
5824 }
5825 } /* for pass */
5826 } else if (op < 10) {
ad69471c 5827 /* Shift by immediate and narrow:
9ee6e8bb 5828 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
0b36f4cd 5829 int input_unsigned = (op == 8) ? !u : u;
cc13115b
PM
5830 if (rm & 1) {
5831 return 1;
5832 }
9ee6e8bb
PB
5833 shift = shift - (1 << (size + 3));
5834 size++;
92cdfaeb 5835 if (size == 3) {
a7812ae4 5836 tmp64 = tcg_const_i64(shift);
92cdfaeb
PM
5837 neon_load_reg64(cpu_V0, rm);
5838 neon_load_reg64(cpu_V1, rm + 1);
5839 for (pass = 0; pass < 2; pass++) {
5840 TCGv_i64 in;
5841 if (pass == 0) {
5842 in = cpu_V0;
5843 } else {
5844 in = cpu_V1;
5845 }
ad69471c 5846 if (q) {
0b36f4cd 5847 if (input_unsigned) {
92cdfaeb 5848 gen_helper_neon_rshl_u64(cpu_V0, in, tmp64);
0b36f4cd 5849 } else {
92cdfaeb 5850 gen_helper_neon_rshl_s64(cpu_V0, in, tmp64);
0b36f4cd 5851 }
ad69471c 5852 } else {
0b36f4cd 5853 if (input_unsigned) {
92cdfaeb 5854 gen_helper_neon_shl_u64(cpu_V0, in, tmp64);
0b36f4cd 5855 } else {
92cdfaeb 5856 gen_helper_neon_shl_s64(cpu_V0, in, tmp64);
0b36f4cd 5857 }
ad69471c 5858 }
7d1b0095 5859 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5860 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5861 neon_store_reg(rd, pass, tmp);
5862 } /* for pass */
5863 tcg_temp_free_i64(tmp64);
5864 } else {
5865 if (size == 1) {
5866 imm = (uint16_t)shift;
5867 imm |= imm << 16;
2c0262af 5868 } else {
92cdfaeb
PM
5869 /* size == 2 */
5870 imm = (uint32_t)shift;
5871 }
5872 tmp2 = tcg_const_i32(imm);
5873 tmp4 = neon_load_reg(rm + 1, 0);
5874 tmp5 = neon_load_reg(rm + 1, 1);
5875 for (pass = 0; pass < 2; pass++) {
5876 if (pass == 0) {
5877 tmp = neon_load_reg(rm, 0);
5878 } else {
5879 tmp = tmp4;
5880 }
0b36f4cd
CL
5881 gen_neon_shift_narrow(size, tmp, tmp2, q,
5882 input_unsigned);
92cdfaeb
PM
5883 if (pass == 0) {
5884 tmp3 = neon_load_reg(rm, 1);
5885 } else {
5886 tmp3 = tmp5;
5887 }
0b36f4cd
CL
5888 gen_neon_shift_narrow(size, tmp3, tmp2, q,
5889 input_unsigned);
36aa55dc 5890 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
7d1b0095
PM
5891 tcg_temp_free_i32(tmp);
5892 tcg_temp_free_i32(tmp3);
5893 tmp = tcg_temp_new_i32();
92cdfaeb
PM
5894 gen_neon_narrow_op(op == 8, u, size - 1, tmp, cpu_V0);
5895 neon_store_reg(rd, pass, tmp);
5896 } /* for pass */
c6067f04 5897 tcg_temp_free_i32(tmp2);
b75263d6 5898 }
9ee6e8bb 5899 } else if (op == 10) {
cc13115b
PM
5900 /* VSHLL, VMOVL */
5901 if (q || (rd & 1)) {
9ee6e8bb 5902 return 1;
cc13115b 5903 }
ad69471c
PB
5904 tmp = neon_load_reg(rm, 0);
5905 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5906 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5907 if (pass == 1)
5908 tmp = tmp2;
5909
5910 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 5911
9ee6e8bb
PB
5912 if (shift != 0) {
5913 /* The shift is less than the width of the source
ad69471c
PB
5914 type, so we can just shift the whole register. */
5915 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
acdf01ef
CL
5916 /* Widen the result of shift: we need to clear
5917 * the potential overflow bits resulting from
5918 * left bits of the narrow input appearing as
5919 * right bits of left the neighbour narrow
5920 * input. */
ad69471c
PB
5921 if (size < 2 || !u) {
5922 uint64_t imm64;
5923 if (size == 0) {
5924 imm = (0xffu >> (8 - shift));
5925 imm |= imm << 16;
acdf01ef 5926 } else if (size == 1) {
ad69471c 5927 imm = 0xffff >> (16 - shift);
acdf01ef
CL
5928 } else {
5929 /* size == 2 */
5930 imm = 0xffffffff >> (32 - shift);
5931 }
5932 if (size < 2) {
5933 imm64 = imm | (((uint64_t)imm) << 32);
5934 } else {
5935 imm64 = imm;
9ee6e8bb 5936 }
acdf01ef 5937 tcg_gen_andi_i64(cpu_V0, cpu_V0, ~imm64);
9ee6e8bb
PB
5938 }
5939 }
ad69471c 5940 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 5941 }
f73534a5 5942 } else if (op >= 14) {
9ee6e8bb 5943 /* VCVT fixed-point. */
cc13115b
PM
5944 if (!(insn & (1 << 21)) || (q && ((rd | rm) & 1))) {
5945 return 1;
5946 }
f73534a5
PM
5947 /* We have already masked out the must-be-1 top bit of imm6,
5948 * hence this 32-shift where the ARM ARM has 64-imm6.
5949 */
5950 shift = 32 - shift;
9ee6e8bb 5951 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 5952 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
f73534a5 5953 if (!(op & 1)) {
9ee6e8bb 5954 if (u)
5500b06c 5955 gen_vfp_ulto(0, shift, 1);
9ee6e8bb 5956 else
5500b06c 5957 gen_vfp_slto(0, shift, 1);
9ee6e8bb
PB
5958 } else {
5959 if (u)
5500b06c 5960 gen_vfp_toul(0, shift, 1);
9ee6e8bb 5961 else
5500b06c 5962 gen_vfp_tosl(0, shift, 1);
2c0262af 5963 }
4373f3ce 5964 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
5965 }
5966 } else {
9ee6e8bb
PB
5967 return 1;
5968 }
5969 } else { /* (insn & 0x00380080) == 0 */
5970 int invert;
7d80fee5
PM
5971 if (q && (rd & 1)) {
5972 return 1;
5973 }
9ee6e8bb
PB
5974
5975 op = (insn >> 8) & 0xf;
5976 /* One register and immediate. */
5977 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
5978 invert = (insn & (1 << 5)) != 0;
7d80fee5
PM
5979 /* Note that op = 2,3,4,5,6,7,10,11,12,13 imm=0 is UNPREDICTABLE.
5980 * We choose to not special-case this and will behave as if a
5981 * valid constant encoding of 0 had been given.
5982 */
9ee6e8bb
PB
5983 switch (op) {
5984 case 0: case 1:
5985 /* no-op */
5986 break;
5987 case 2: case 3:
5988 imm <<= 8;
5989 break;
5990 case 4: case 5:
5991 imm <<= 16;
5992 break;
5993 case 6: case 7:
5994 imm <<= 24;
5995 break;
5996 case 8: case 9:
5997 imm |= imm << 16;
5998 break;
5999 case 10: case 11:
6000 imm = (imm << 8) | (imm << 24);
6001 break;
6002 case 12:
8e31209e 6003 imm = (imm << 8) | 0xff;
9ee6e8bb
PB
6004 break;
6005 case 13:
6006 imm = (imm << 16) | 0xffff;
6007 break;
6008 case 14:
6009 imm |= (imm << 8) | (imm << 16) | (imm << 24);
6010 if (invert)
6011 imm = ~imm;
6012 break;
6013 case 15:
7d80fee5
PM
6014 if (invert) {
6015 return 1;
6016 }
9ee6e8bb
PB
6017 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
6018 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
6019 break;
6020 }
6021 if (invert)
6022 imm = ~imm;
6023
9ee6e8bb
PB
6024 for (pass = 0; pass < (q ? 4 : 2); pass++) {
6025 if (op & 1 && op < 12) {
ad69471c 6026 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
6027 if (invert) {
6028 /* The immediate value has already been inverted, so
6029 BIC becomes AND. */
ad69471c 6030 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 6031 } else {
ad69471c 6032 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 6033 }
9ee6e8bb 6034 } else {
ad69471c 6035 /* VMOV, VMVN. */
7d1b0095 6036 tmp = tcg_temp_new_i32();
9ee6e8bb 6037 if (op == 14 && invert) {
a5a14945 6038 int n;
ad69471c
PB
6039 uint32_t val;
6040 val = 0;
9ee6e8bb
PB
6041 for (n = 0; n < 4; n++) {
6042 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 6043 val |= 0xff << (n * 8);
9ee6e8bb 6044 }
ad69471c
PB
6045 tcg_gen_movi_i32(tmp, val);
6046 } else {
6047 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 6048 }
9ee6e8bb 6049 }
ad69471c 6050 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6051 }
6052 }
e4b3861d 6053 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
6054 if (size != 3) {
6055 op = (insn >> 8) & 0xf;
6056 if ((insn & (1 << 6)) == 0) {
6057 /* Three registers of different lengths. */
6058 int src1_wide;
6059 int src2_wide;
6060 int prewiden;
526d0096
PM
6061 /* undefreq: bit 0 : UNDEF if size == 0
6062 * bit 1 : UNDEF if size == 1
6063 * bit 2 : UNDEF if size == 2
6064 * bit 3 : UNDEF if U == 1
6065 * Note that [2:0] set implies 'always UNDEF'
695272dc
PM
6066 */
6067 int undefreq;
6068 /* prewiden, src1_wide, src2_wide, undefreq */
6069 static const int neon_3reg_wide[16][4] = {
6070 {1, 0, 0, 0}, /* VADDL */
6071 {1, 1, 0, 0}, /* VADDW */
6072 {1, 0, 0, 0}, /* VSUBL */
6073 {1, 1, 0, 0}, /* VSUBW */
6074 {0, 1, 1, 0}, /* VADDHN */
6075 {0, 0, 0, 0}, /* VABAL */
6076 {0, 1, 1, 0}, /* VSUBHN */
6077 {0, 0, 0, 0}, /* VABDL */
6078 {0, 0, 0, 0}, /* VMLAL */
526d0096 6079 {0, 0, 0, 9}, /* VQDMLAL */
695272dc 6080 {0, 0, 0, 0}, /* VMLSL */
526d0096 6081 {0, 0, 0, 9}, /* VQDMLSL */
695272dc 6082 {0, 0, 0, 0}, /* Integer VMULL */
526d0096 6083 {0, 0, 0, 1}, /* VQDMULL */
4e624eda 6084 {0, 0, 0, 0xa}, /* Polynomial VMULL */
526d0096 6085 {0, 0, 0, 7}, /* Reserved: always UNDEF */
9ee6e8bb
PB
6086 };
6087
6088 prewiden = neon_3reg_wide[op][0];
6089 src1_wide = neon_3reg_wide[op][1];
6090 src2_wide = neon_3reg_wide[op][2];
695272dc 6091 undefreq = neon_3reg_wide[op][3];
9ee6e8bb 6092
526d0096
PM
6093 if ((undefreq & (1 << size)) ||
6094 ((undefreq & 8) && u)) {
695272dc
PM
6095 return 1;
6096 }
6097 if ((src1_wide && (rn & 1)) ||
6098 (src2_wide && (rm & 1)) ||
6099 (!src2_wide && (rd & 1))) {
ad69471c 6100 return 1;
695272dc 6101 }
ad69471c 6102
4e624eda
PM
6103 /* Handle VMULL.P64 (Polynomial 64x64 to 128 bit multiply)
6104 * outside the loop below as it only performs a single pass.
6105 */
6106 if (op == 14 && size == 2) {
6107 TCGv_i64 tcg_rn, tcg_rm, tcg_rd;
6108
d614a513 6109 if (!arm_dc_feature(s, ARM_FEATURE_V8_PMULL)) {
4e624eda
PM
6110 return 1;
6111 }
6112 tcg_rn = tcg_temp_new_i64();
6113 tcg_rm = tcg_temp_new_i64();
6114 tcg_rd = tcg_temp_new_i64();
6115 neon_load_reg64(tcg_rn, rn);
6116 neon_load_reg64(tcg_rm, rm);
6117 gen_helper_neon_pmull_64_lo(tcg_rd, tcg_rn, tcg_rm);
6118 neon_store_reg64(tcg_rd, rd);
6119 gen_helper_neon_pmull_64_hi(tcg_rd, tcg_rn, tcg_rm);
6120 neon_store_reg64(tcg_rd, rd + 1);
6121 tcg_temp_free_i64(tcg_rn);
6122 tcg_temp_free_i64(tcg_rm);
6123 tcg_temp_free_i64(tcg_rd);
6124 return 0;
6125 }
6126
9ee6e8bb
PB
6127 /* Avoid overlapping operands. Wide source operands are
6128 always aligned so will never overlap with wide
6129 destinations in problematic ways. */
8f8e3aa4 6130 if (rd == rm && !src2_wide) {
dd8fbd78
FN
6131 tmp = neon_load_reg(rm, 1);
6132 neon_store_scratch(2, tmp);
8f8e3aa4 6133 } else if (rd == rn && !src1_wide) {
dd8fbd78
FN
6134 tmp = neon_load_reg(rn, 1);
6135 neon_store_scratch(2, tmp);
9ee6e8bb 6136 }
39d5492a 6137 TCGV_UNUSED_I32(tmp3);
9ee6e8bb 6138 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6139 if (src1_wide) {
6140 neon_load_reg64(cpu_V0, rn + pass);
39d5492a 6141 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6142 } else {
ad69471c 6143 if (pass == 1 && rd == rn) {
dd8fbd78 6144 tmp = neon_load_scratch(2);
9ee6e8bb 6145 } else {
ad69471c
PB
6146 tmp = neon_load_reg(rn, pass);
6147 }
6148 if (prewiden) {
6149 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
6150 }
6151 }
ad69471c
PB
6152 if (src2_wide) {
6153 neon_load_reg64(cpu_V1, rm + pass);
39d5492a 6154 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6155 } else {
ad69471c 6156 if (pass == 1 && rd == rm) {
dd8fbd78 6157 tmp2 = neon_load_scratch(2);
9ee6e8bb 6158 } else {
ad69471c
PB
6159 tmp2 = neon_load_reg(rm, pass);
6160 }
6161 if (prewiden) {
6162 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 6163 }
9ee6e8bb
PB
6164 }
6165 switch (op) {
6166 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 6167 gen_neon_addl(size);
9ee6e8bb 6168 break;
79b0e534 6169 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 6170 gen_neon_subl(size);
9ee6e8bb
PB
6171 break;
6172 case 5: case 7: /* VABAL, VABDL */
6173 switch ((size << 1) | u) {
ad69471c
PB
6174 case 0:
6175 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
6176 break;
6177 case 1:
6178 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
6179 break;
6180 case 2:
6181 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
6182 break;
6183 case 3:
6184 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
6185 break;
6186 case 4:
6187 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
6188 break;
6189 case 5:
6190 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
6191 break;
9ee6e8bb
PB
6192 default: abort();
6193 }
7d1b0095
PM
6194 tcg_temp_free_i32(tmp2);
6195 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
6196 break;
6197 case 8: case 9: case 10: case 11: case 12: case 13:
6198 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 6199 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
6200 break;
6201 case 14: /* Polynomial VMULL */
e5ca24cb 6202 gen_helper_neon_mull_p8(cpu_V0, tmp, tmp2);
7d1b0095
PM
6203 tcg_temp_free_i32(tmp2);
6204 tcg_temp_free_i32(tmp);
e5ca24cb 6205 break;
695272dc
PM
6206 default: /* 15 is RESERVED: caught earlier */
6207 abort();
9ee6e8bb 6208 }
ebcd88ce
PM
6209 if (op == 13) {
6210 /* VQDMULL */
6211 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
6212 neon_store_reg64(cpu_V0, rd + pass);
6213 } else if (op == 5 || (op >= 8 && op <= 11)) {
9ee6e8bb 6214 /* Accumulate. */
ebcd88ce 6215 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6216 switch (op) {
4dc064e6
PM
6217 case 10: /* VMLSL */
6218 gen_neon_negl(cpu_V0, size);
6219 /* Fall through */
6220 case 5: case 8: /* VABAL, VMLAL */
ad69471c 6221 gen_neon_addl(size);
9ee6e8bb
PB
6222 break;
6223 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c 6224 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6225 if (op == 11) {
6226 gen_neon_negl(cpu_V0, size);
6227 }
ad69471c
PB
6228 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
6229 break;
9ee6e8bb
PB
6230 default:
6231 abort();
6232 }
ad69471c 6233 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6234 } else if (op == 4 || op == 6) {
6235 /* Narrowing operation. */
7d1b0095 6236 tmp = tcg_temp_new_i32();
79b0e534 6237 if (!u) {
9ee6e8bb 6238 switch (size) {
ad69471c
PB
6239 case 0:
6240 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
6241 break;
6242 case 1:
6243 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
6244 break;
6245 case 2:
6246 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6247 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6248 break;
9ee6e8bb
PB
6249 default: abort();
6250 }
6251 } else {
6252 switch (size) {
ad69471c
PB
6253 case 0:
6254 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
6255 break;
6256 case 1:
6257 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
6258 break;
6259 case 2:
6260 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
6261 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
6262 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
6263 break;
9ee6e8bb
PB
6264 default: abort();
6265 }
6266 }
ad69471c
PB
6267 if (pass == 0) {
6268 tmp3 = tmp;
6269 } else {
6270 neon_store_reg(rd, 0, tmp3);
6271 neon_store_reg(rd, 1, tmp);
6272 }
9ee6e8bb
PB
6273 } else {
6274 /* Write back the result. */
ad69471c 6275 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6276 }
6277 }
6278 } else {
3e3326df
PM
6279 /* Two registers and a scalar. NB that for ops of this form
6280 * the ARM ARM labels bit 24 as Q, but it is in our variable
6281 * 'u', not 'q'.
6282 */
6283 if (size == 0) {
6284 return 1;
6285 }
9ee6e8bb 6286 switch (op) {
9ee6e8bb 6287 case 1: /* Float VMLA scalar */
9ee6e8bb 6288 case 5: /* Floating point VMLS scalar */
9ee6e8bb 6289 case 9: /* Floating point VMUL scalar */
3e3326df
PM
6290 if (size == 1) {
6291 return 1;
6292 }
6293 /* fall through */
6294 case 0: /* Integer VMLA scalar */
6295 case 4: /* Integer VMLS scalar */
6296 case 8: /* Integer VMUL scalar */
9ee6e8bb
PB
6297 case 12: /* VQDMULH scalar */
6298 case 13: /* VQRDMULH scalar */
3e3326df
PM
6299 if (u && ((rd | rn) & 1)) {
6300 return 1;
6301 }
dd8fbd78
FN
6302 tmp = neon_get_scalar(size, rm);
6303 neon_store_scratch(0, tmp);
9ee6e8bb 6304 for (pass = 0; pass < (u ? 4 : 2); pass++) {
dd8fbd78
FN
6305 tmp = neon_load_scratch(0);
6306 tmp2 = neon_load_reg(rn, pass);
9ee6e8bb
PB
6307 if (op == 12) {
6308 if (size == 1) {
02da0b2d 6309 gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6310 } else {
02da0b2d 6311 gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6312 }
6313 } else if (op == 13) {
6314 if (size == 1) {
02da0b2d 6315 gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 6316 } else {
02da0b2d 6317 gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
9ee6e8bb
PB
6318 }
6319 } else if (op & 1) {
aa47cfdd
PM
6320 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6321 gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
6322 tcg_temp_free_ptr(fpstatus);
9ee6e8bb
PB
6323 } else {
6324 switch (size) {
dd8fbd78
FN
6325 case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
6326 case 1: gen_helper_neon_mul_u16(tmp, tmp, tmp2); break;
6327 case 2: tcg_gen_mul_i32(tmp, tmp, tmp2); break;
3e3326df 6328 default: abort();
9ee6e8bb
PB
6329 }
6330 }
7d1b0095 6331 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
6332 if (op < 8) {
6333 /* Accumulate. */
dd8fbd78 6334 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb
PB
6335 switch (op) {
6336 case 0:
dd8fbd78 6337 gen_neon_add(size, tmp, tmp2);
9ee6e8bb
PB
6338 break;
6339 case 1:
aa47cfdd
PM
6340 {
6341 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6342 gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
6343 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6344 break;
aa47cfdd 6345 }
9ee6e8bb 6346 case 4:
dd8fbd78 6347 gen_neon_rsb(size, tmp, tmp2);
9ee6e8bb
PB
6348 break;
6349 case 5:
aa47cfdd
PM
6350 {
6351 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6352 gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
6353 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6354 break;
aa47cfdd 6355 }
9ee6e8bb
PB
6356 default:
6357 abort();
6358 }
7d1b0095 6359 tcg_temp_free_i32(tmp2);
9ee6e8bb 6360 }
dd8fbd78 6361 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
6362 }
6363 break;
9ee6e8bb 6364 case 3: /* VQDMLAL scalar */
9ee6e8bb 6365 case 7: /* VQDMLSL scalar */
9ee6e8bb 6366 case 11: /* VQDMULL scalar */
3e3326df 6367 if (u == 1) {
ad69471c 6368 return 1;
3e3326df
PM
6369 }
6370 /* fall through */
6371 case 2: /* VMLAL sclar */
6372 case 6: /* VMLSL scalar */
6373 case 10: /* VMULL scalar */
6374 if (rd & 1) {
6375 return 1;
6376 }
dd8fbd78 6377 tmp2 = neon_get_scalar(size, rm);
c6067f04
CL
6378 /* We need a copy of tmp2 because gen_neon_mull
6379 * deletes it during pass 0. */
7d1b0095 6380 tmp4 = tcg_temp_new_i32();
c6067f04 6381 tcg_gen_mov_i32(tmp4, tmp2);
dd8fbd78 6382 tmp3 = neon_load_reg(rn, 1);
ad69471c 6383
9ee6e8bb 6384 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6385 if (pass == 0) {
6386 tmp = neon_load_reg(rn, 0);
9ee6e8bb 6387 } else {
dd8fbd78 6388 tmp = tmp3;
c6067f04 6389 tmp2 = tmp4;
9ee6e8bb 6390 }
ad69471c 6391 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
ad69471c
PB
6392 if (op != 11) {
6393 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 6394 }
9ee6e8bb 6395 switch (op) {
4dc064e6
PM
6396 case 6:
6397 gen_neon_negl(cpu_V0, size);
6398 /* Fall through */
6399 case 2:
ad69471c 6400 gen_neon_addl(size);
9ee6e8bb
PB
6401 break;
6402 case 3: case 7:
ad69471c 6403 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4dc064e6
PM
6404 if (op == 7) {
6405 gen_neon_negl(cpu_V0, size);
6406 }
ad69471c 6407 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
6408 break;
6409 case 10:
6410 /* no-op */
6411 break;
6412 case 11:
ad69471c 6413 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
6414 break;
6415 default:
6416 abort();
6417 }
ad69471c 6418 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb 6419 }
dd8fbd78 6420
dd8fbd78 6421
9ee6e8bb
PB
6422 break;
6423 default: /* 14 and 15 are RESERVED */
6424 return 1;
6425 }
6426 }
6427 } else { /* size == 3 */
6428 if (!u) {
6429 /* Extract. */
9ee6e8bb 6430 imm = (insn >> 8) & 0xf;
ad69471c
PB
6431
6432 if (imm > 7 && !q)
6433 return 1;
6434
52579ea1
PM
6435 if (q && ((rd | rn | rm) & 1)) {
6436 return 1;
6437 }
6438
ad69471c
PB
6439 if (imm == 0) {
6440 neon_load_reg64(cpu_V0, rn);
6441 if (q) {
6442 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 6443 }
ad69471c
PB
6444 } else if (imm == 8) {
6445 neon_load_reg64(cpu_V0, rn + 1);
6446 if (q) {
6447 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6448 }
ad69471c 6449 } else if (q) {
a7812ae4 6450 tmp64 = tcg_temp_new_i64();
ad69471c
PB
6451 if (imm < 8) {
6452 neon_load_reg64(cpu_V0, rn);
a7812ae4 6453 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
6454 } else {
6455 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 6456 neon_load_reg64(tmp64, rm);
ad69471c
PB
6457 }
6458 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 6459 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
6460 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6461 if (imm < 8) {
6462 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 6463 } else {
ad69471c
PB
6464 neon_load_reg64(cpu_V1, rm + 1);
6465 imm -= 8;
9ee6e8bb 6466 }
ad69471c 6467 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
6468 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
6469 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
b75263d6 6470 tcg_temp_free_i64(tmp64);
ad69471c 6471 } else {
a7812ae4 6472 /* BUGFIX */
ad69471c 6473 neon_load_reg64(cpu_V0, rn);
a7812ae4 6474 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 6475 neon_load_reg64(cpu_V1, rm);
a7812ae4 6476 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
6477 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
6478 }
6479 neon_store_reg64(cpu_V0, rd);
6480 if (q) {
6481 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
6482 }
6483 } else if ((insn & (1 << 11)) == 0) {
6484 /* Two register misc. */
6485 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
6486 size = (insn >> 18) & 3;
600b828c
PM
6487 /* UNDEF for unknown op values and bad op-size combinations */
6488 if ((neon_2rm_sizes[op] & (1 << size)) == 0) {
6489 return 1;
6490 }
fc2a9b37
PM
6491 if ((op != NEON_2RM_VMOVN && op != NEON_2RM_VQMOVN) &&
6492 q && ((rm | rd) & 1)) {
6493 return 1;
6494 }
9ee6e8bb 6495 switch (op) {
600b828c 6496 case NEON_2RM_VREV64:
9ee6e8bb 6497 for (pass = 0; pass < (q ? 2 : 1); pass++) {
dd8fbd78
FN
6498 tmp = neon_load_reg(rm, pass * 2);
6499 tmp2 = neon_load_reg(rm, pass * 2 + 1);
9ee6e8bb 6500 switch (size) {
dd8fbd78
FN
6501 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6502 case 1: gen_swap_half(tmp); break;
9ee6e8bb
PB
6503 case 2: /* no-op */ break;
6504 default: abort();
6505 }
dd8fbd78 6506 neon_store_reg(rd, pass * 2 + 1, tmp);
9ee6e8bb 6507 if (size == 2) {
dd8fbd78 6508 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb 6509 } else {
9ee6e8bb 6510 switch (size) {
dd8fbd78
FN
6511 case 0: tcg_gen_bswap32_i32(tmp2, tmp2); break;
6512 case 1: gen_swap_half(tmp2); break;
9ee6e8bb
PB
6513 default: abort();
6514 }
dd8fbd78 6515 neon_store_reg(rd, pass * 2, tmp2);
9ee6e8bb
PB
6516 }
6517 }
6518 break;
600b828c
PM
6519 case NEON_2RM_VPADDL: case NEON_2RM_VPADDL_U:
6520 case NEON_2RM_VPADAL: case NEON_2RM_VPADAL_U:
ad69471c
PB
6521 for (pass = 0; pass < q + 1; pass++) {
6522 tmp = neon_load_reg(rm, pass * 2);
6523 gen_neon_widen(cpu_V0, tmp, size, op & 1);
6524 tmp = neon_load_reg(rm, pass * 2 + 1);
6525 gen_neon_widen(cpu_V1, tmp, size, op & 1);
6526 switch (size) {
6527 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
6528 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
6529 case 2: tcg_gen_add_i64(CPU_V001); break;
6530 default: abort();
6531 }
600b828c 6532 if (op >= NEON_2RM_VPADAL) {
9ee6e8bb 6533 /* Accumulate. */
ad69471c
PB
6534 neon_load_reg64(cpu_V1, rd + pass);
6535 gen_neon_addl(size);
9ee6e8bb 6536 }
ad69471c 6537 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6538 }
6539 break;
600b828c 6540 case NEON_2RM_VTRN:
9ee6e8bb 6541 if (size == 2) {
a5a14945 6542 int n;
9ee6e8bb 6543 for (n = 0; n < (q ? 4 : 2); n += 2) {
dd8fbd78
FN
6544 tmp = neon_load_reg(rm, n);
6545 tmp2 = neon_load_reg(rd, n + 1);
6546 neon_store_reg(rm, n, tmp2);
6547 neon_store_reg(rd, n + 1, tmp);
9ee6e8bb
PB
6548 }
6549 } else {
6550 goto elementwise;
6551 }
6552 break;
600b828c 6553 case NEON_2RM_VUZP:
02acedf9 6554 if (gen_neon_unzip(rd, rm, size, q)) {
9ee6e8bb 6555 return 1;
9ee6e8bb
PB
6556 }
6557 break;
600b828c 6558 case NEON_2RM_VZIP:
d68a6f3a 6559 if (gen_neon_zip(rd, rm, size, q)) {
9ee6e8bb 6560 return 1;
9ee6e8bb
PB
6561 }
6562 break;
600b828c
PM
6563 case NEON_2RM_VMOVN: case NEON_2RM_VQMOVN:
6564 /* also VQMOVUN; op field and mnemonics don't line up */
fc2a9b37
PM
6565 if (rm & 1) {
6566 return 1;
6567 }
39d5492a 6568 TCGV_UNUSED_I32(tmp2);
9ee6e8bb 6569 for (pass = 0; pass < 2; pass++) {
ad69471c 6570 neon_load_reg64(cpu_V0, rm + pass);
7d1b0095 6571 tmp = tcg_temp_new_i32();
600b828c
PM
6572 gen_neon_narrow_op(op == NEON_2RM_VMOVN, q, size,
6573 tmp, cpu_V0);
ad69471c
PB
6574 if (pass == 0) {
6575 tmp2 = tmp;
6576 } else {
6577 neon_store_reg(rd, 0, tmp2);
6578 neon_store_reg(rd, 1, tmp);
9ee6e8bb 6579 }
9ee6e8bb
PB
6580 }
6581 break;
600b828c 6582 case NEON_2RM_VSHLL:
fc2a9b37 6583 if (q || (rd & 1)) {
9ee6e8bb 6584 return 1;
600b828c 6585 }
ad69471c
PB
6586 tmp = neon_load_reg(rm, 0);
6587 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 6588 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
6589 if (pass == 1)
6590 tmp = tmp2;
6591 gen_neon_widen(cpu_V0, tmp, size, 1);
30d11a2a 6592 tcg_gen_shli_i64(cpu_V0, cpu_V0, 8 << size);
ad69471c 6593 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
6594 }
6595 break;
600b828c 6596 case NEON_2RM_VCVT_F16_F32:
d614a513 6597 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6598 q || (rm & 1)) {
6599 return 1;
6600 }
7d1b0095
PM
6601 tmp = tcg_temp_new_i32();
6602 tmp2 = tcg_temp_new_i32();
60011498 6603 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 0));
2d981da7 6604 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498 6605 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 1));
2d981da7 6606 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6607 tcg_gen_shli_i32(tmp2, tmp2, 16);
6608 tcg_gen_or_i32(tmp2, tmp2, tmp);
6609 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 2));
2d981da7 6610 gen_helper_neon_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
60011498
PB
6611 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, 3));
6612 neon_store_reg(rd, 0, tmp2);
7d1b0095 6613 tmp2 = tcg_temp_new_i32();
2d981da7 6614 gen_helper_neon_fcvt_f32_to_f16(tmp2, cpu_F0s, cpu_env);
60011498
PB
6615 tcg_gen_shli_i32(tmp2, tmp2, 16);
6616 tcg_gen_or_i32(tmp2, tmp2, tmp);
6617 neon_store_reg(rd, 1, tmp2);
7d1b0095 6618 tcg_temp_free_i32(tmp);
60011498 6619 break;
600b828c 6620 case NEON_2RM_VCVT_F32_F16:
d614a513 6621 if (!arm_dc_feature(s, ARM_FEATURE_VFP_FP16) ||
fc2a9b37
PM
6622 q || (rd & 1)) {
6623 return 1;
6624 }
7d1b0095 6625 tmp3 = tcg_temp_new_i32();
60011498
PB
6626 tmp = neon_load_reg(rm, 0);
6627 tmp2 = neon_load_reg(rm, 1);
6628 tcg_gen_ext16u_i32(tmp3, tmp);
2d981da7 6629 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6630 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 0));
6631 tcg_gen_shri_i32(tmp3, tmp, 16);
2d981da7 6632 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6633 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 1));
7d1b0095 6634 tcg_temp_free_i32(tmp);
60011498 6635 tcg_gen_ext16u_i32(tmp3, tmp2);
2d981da7 6636 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498
PB
6637 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 2));
6638 tcg_gen_shri_i32(tmp3, tmp2, 16);
2d981da7 6639 gen_helper_neon_fcvt_f16_to_f32(cpu_F0s, tmp3, cpu_env);
60011498 6640 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, 3));
7d1b0095
PM
6641 tcg_temp_free_i32(tmp2);
6642 tcg_temp_free_i32(tmp3);
60011498 6643 break;
9d935509 6644 case NEON_2RM_AESE: case NEON_2RM_AESMC:
d614a513 6645 if (!arm_dc_feature(s, ARM_FEATURE_V8_AES)
9d935509
AB
6646 || ((rm | rd) & 1)) {
6647 return 1;
6648 }
6649 tmp = tcg_const_i32(rd);
6650 tmp2 = tcg_const_i32(rm);
6651
6652 /* Bit 6 is the lowest opcode bit; it distinguishes between
6653 * encryption (AESE/AESMC) and decryption (AESD/AESIMC)
6654 */
6655 tmp3 = tcg_const_i32(extract32(insn, 6, 1));
6656
6657 if (op == NEON_2RM_AESE) {
6658 gen_helper_crypto_aese(cpu_env, tmp, tmp2, tmp3);
6659 } else {
6660 gen_helper_crypto_aesmc(cpu_env, tmp, tmp2, tmp3);
6661 }
6662 tcg_temp_free_i32(tmp);
6663 tcg_temp_free_i32(tmp2);
6664 tcg_temp_free_i32(tmp3);
6665 break;
f1ecb913 6666 case NEON_2RM_SHA1H:
d614a513 6667 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)
f1ecb913
AB
6668 || ((rm | rd) & 1)) {
6669 return 1;
6670 }
6671 tmp = tcg_const_i32(rd);
6672 tmp2 = tcg_const_i32(rm);
6673
6674 gen_helper_crypto_sha1h(cpu_env, tmp, tmp2);
6675
6676 tcg_temp_free_i32(tmp);
6677 tcg_temp_free_i32(tmp2);
6678 break;
6679 case NEON_2RM_SHA1SU1:
6680 if ((rm | rd) & 1) {
6681 return 1;
6682 }
6683 /* bit 6 (q): set -> SHA256SU0, cleared -> SHA1SU1 */
6684 if (q) {
d614a513 6685 if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA256)) {
f1ecb913
AB
6686 return 1;
6687 }
d614a513 6688 } else if (!arm_dc_feature(s, ARM_FEATURE_V8_SHA1)) {
f1ecb913
AB
6689 return 1;
6690 }
6691 tmp = tcg_const_i32(rd);
6692 tmp2 = tcg_const_i32(rm);
6693 if (q) {
6694 gen_helper_crypto_sha256su0(cpu_env, tmp, tmp2);
6695 } else {
6696 gen_helper_crypto_sha1su1(cpu_env, tmp, tmp2);
6697 }
6698 tcg_temp_free_i32(tmp);
6699 tcg_temp_free_i32(tmp2);
6700 break;
9ee6e8bb
PB
6701 default:
6702 elementwise:
6703 for (pass = 0; pass < (q ? 4 : 2); pass++) {
600b828c 6704 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6705 tcg_gen_ld_f32(cpu_F0s, cpu_env,
6706 neon_reg_offset(rm, pass));
39d5492a 6707 TCGV_UNUSED_I32(tmp);
9ee6e8bb 6708 } else {
dd8fbd78 6709 tmp = neon_load_reg(rm, pass);
9ee6e8bb
PB
6710 }
6711 switch (op) {
600b828c 6712 case NEON_2RM_VREV32:
9ee6e8bb 6713 switch (size) {
dd8fbd78
FN
6714 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
6715 case 1: gen_swap_half(tmp); break;
600b828c 6716 default: abort();
9ee6e8bb
PB
6717 }
6718 break;
600b828c 6719 case NEON_2RM_VREV16:
dd8fbd78 6720 gen_rev16(tmp);
9ee6e8bb 6721 break;
600b828c 6722 case NEON_2RM_VCLS:
9ee6e8bb 6723 switch (size) {
dd8fbd78
FN
6724 case 0: gen_helper_neon_cls_s8(tmp, tmp); break;
6725 case 1: gen_helper_neon_cls_s16(tmp, tmp); break;
6726 case 2: gen_helper_neon_cls_s32(tmp, tmp); break;
600b828c 6727 default: abort();
9ee6e8bb
PB
6728 }
6729 break;
600b828c 6730 case NEON_2RM_VCLZ:
9ee6e8bb 6731 switch (size) {
dd8fbd78
FN
6732 case 0: gen_helper_neon_clz_u8(tmp, tmp); break;
6733 case 1: gen_helper_neon_clz_u16(tmp, tmp); break;
6734 case 2: gen_helper_clz(tmp, tmp); break;
600b828c 6735 default: abort();
9ee6e8bb
PB
6736 }
6737 break;
600b828c 6738 case NEON_2RM_VCNT:
dd8fbd78 6739 gen_helper_neon_cnt_u8(tmp, tmp);
9ee6e8bb 6740 break;
600b828c 6741 case NEON_2RM_VMVN:
dd8fbd78 6742 tcg_gen_not_i32(tmp, tmp);
9ee6e8bb 6743 break;
600b828c 6744 case NEON_2RM_VQABS:
9ee6e8bb 6745 switch (size) {
02da0b2d
PM
6746 case 0:
6747 gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
6748 break;
6749 case 1:
6750 gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
6751 break;
6752 case 2:
6753 gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
6754 break;
600b828c 6755 default: abort();
9ee6e8bb
PB
6756 }
6757 break;
600b828c 6758 case NEON_2RM_VQNEG:
9ee6e8bb 6759 switch (size) {
02da0b2d
PM
6760 case 0:
6761 gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
6762 break;
6763 case 1:
6764 gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
6765 break;
6766 case 2:
6767 gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
6768 break;
600b828c 6769 default: abort();
9ee6e8bb
PB
6770 }
6771 break;
600b828c 6772 case NEON_2RM_VCGT0: case NEON_2RM_VCLE0:
dd8fbd78 6773 tmp2 = tcg_const_i32(0);
9ee6e8bb 6774 switch(size) {
dd8fbd78
FN
6775 case 0: gen_helper_neon_cgt_s8(tmp, tmp, tmp2); break;
6776 case 1: gen_helper_neon_cgt_s16(tmp, tmp, tmp2); break;
6777 case 2: gen_helper_neon_cgt_s32(tmp, tmp, tmp2); break;
600b828c 6778 default: abort();
9ee6e8bb 6779 }
39d5492a 6780 tcg_temp_free_i32(tmp2);
600b828c 6781 if (op == NEON_2RM_VCLE0) {
dd8fbd78 6782 tcg_gen_not_i32(tmp, tmp);
600b828c 6783 }
9ee6e8bb 6784 break;
600b828c 6785 case NEON_2RM_VCGE0: case NEON_2RM_VCLT0:
dd8fbd78 6786 tmp2 = tcg_const_i32(0);
9ee6e8bb 6787 switch(size) {
dd8fbd78
FN
6788 case 0: gen_helper_neon_cge_s8(tmp, tmp, tmp2); break;
6789 case 1: gen_helper_neon_cge_s16(tmp, tmp, tmp2); break;
6790 case 2: gen_helper_neon_cge_s32(tmp, tmp, tmp2); break;
600b828c 6791 default: abort();
9ee6e8bb 6792 }
39d5492a 6793 tcg_temp_free_i32(tmp2);
600b828c 6794 if (op == NEON_2RM_VCLT0) {
dd8fbd78 6795 tcg_gen_not_i32(tmp, tmp);
600b828c 6796 }
9ee6e8bb 6797 break;
600b828c 6798 case NEON_2RM_VCEQ0:
dd8fbd78 6799 tmp2 = tcg_const_i32(0);
9ee6e8bb 6800 switch(size) {
dd8fbd78
FN
6801 case 0: gen_helper_neon_ceq_u8(tmp, tmp, tmp2); break;
6802 case 1: gen_helper_neon_ceq_u16(tmp, tmp, tmp2); break;
6803 case 2: gen_helper_neon_ceq_u32(tmp, tmp, tmp2); break;
600b828c 6804 default: abort();
9ee6e8bb 6805 }
39d5492a 6806 tcg_temp_free_i32(tmp2);
9ee6e8bb 6807 break;
600b828c 6808 case NEON_2RM_VABS:
9ee6e8bb 6809 switch(size) {
dd8fbd78
FN
6810 case 0: gen_helper_neon_abs_s8(tmp, tmp); break;
6811 case 1: gen_helper_neon_abs_s16(tmp, tmp); break;
6812 case 2: tcg_gen_abs_i32(tmp, tmp); break;
600b828c 6813 default: abort();
9ee6e8bb
PB
6814 }
6815 break;
600b828c 6816 case NEON_2RM_VNEG:
dd8fbd78
FN
6817 tmp2 = tcg_const_i32(0);
6818 gen_neon_rsb(size, tmp, tmp2);
39d5492a 6819 tcg_temp_free_i32(tmp2);
9ee6e8bb 6820 break;
600b828c 6821 case NEON_2RM_VCGT0_F:
aa47cfdd
PM
6822 {
6823 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6824 tmp2 = tcg_const_i32(0);
aa47cfdd 6825 gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6826 tcg_temp_free_i32(tmp2);
aa47cfdd 6827 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6828 break;
aa47cfdd 6829 }
600b828c 6830 case NEON_2RM_VCGE0_F:
aa47cfdd
PM
6831 {
6832 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6833 tmp2 = tcg_const_i32(0);
aa47cfdd 6834 gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6835 tcg_temp_free_i32(tmp2);
aa47cfdd 6836 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6837 break;
aa47cfdd 6838 }
600b828c 6839 case NEON_2RM_VCEQ0_F:
aa47cfdd
PM
6840 {
6841 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
dd8fbd78 6842 tmp2 = tcg_const_i32(0);
aa47cfdd 6843 gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
39d5492a 6844 tcg_temp_free_i32(tmp2);
aa47cfdd 6845 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6846 break;
aa47cfdd 6847 }
600b828c 6848 case NEON_2RM_VCLE0_F:
aa47cfdd
PM
6849 {
6850 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6851 tmp2 = tcg_const_i32(0);
aa47cfdd 6852 gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6853 tcg_temp_free_i32(tmp2);
aa47cfdd 6854 tcg_temp_free_ptr(fpstatus);
0e326109 6855 break;
aa47cfdd 6856 }
600b828c 6857 case NEON_2RM_VCLT0_F:
aa47cfdd
PM
6858 {
6859 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
0e326109 6860 tmp2 = tcg_const_i32(0);
aa47cfdd 6861 gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
39d5492a 6862 tcg_temp_free_i32(tmp2);
aa47cfdd 6863 tcg_temp_free_ptr(fpstatus);
0e326109 6864 break;
aa47cfdd 6865 }
600b828c 6866 case NEON_2RM_VABS_F:
4373f3ce 6867 gen_vfp_abs(0);
9ee6e8bb 6868 break;
600b828c 6869 case NEON_2RM_VNEG_F:
4373f3ce 6870 gen_vfp_neg(0);
9ee6e8bb 6871 break;
600b828c 6872 case NEON_2RM_VSWP:
dd8fbd78
FN
6873 tmp2 = neon_load_reg(rd, pass);
6874 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6875 break;
600b828c 6876 case NEON_2RM_VTRN:
dd8fbd78 6877 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 6878 switch (size) {
dd8fbd78
FN
6879 case 0: gen_neon_trn_u8(tmp, tmp2); break;
6880 case 1: gen_neon_trn_u16(tmp, tmp2); break;
600b828c 6881 default: abort();
9ee6e8bb 6882 }
dd8fbd78 6883 neon_store_reg(rm, pass, tmp2);
9ee6e8bb 6884 break;
34f7b0a2
WN
6885 case NEON_2RM_VRINTN:
6886 case NEON_2RM_VRINTA:
6887 case NEON_2RM_VRINTM:
6888 case NEON_2RM_VRINTP:
6889 case NEON_2RM_VRINTZ:
6890 {
6891 TCGv_i32 tcg_rmode;
6892 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6893 int rmode;
6894
6895 if (op == NEON_2RM_VRINTZ) {
6896 rmode = FPROUNDING_ZERO;
6897 } else {
6898 rmode = fp_decode_rm[((op & 0x6) >> 1) ^ 1];
6899 }
6900
6901 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6902 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6903 cpu_env);
6904 gen_helper_rints(cpu_F0s, cpu_F0s, fpstatus);
6905 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6906 cpu_env);
6907 tcg_temp_free_ptr(fpstatus);
6908 tcg_temp_free_i32(tcg_rmode);
6909 break;
6910 }
2ce70625
WN
6911 case NEON_2RM_VRINTX:
6912 {
6913 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6914 gen_helper_rints_exact(cpu_F0s, cpu_F0s, fpstatus);
6915 tcg_temp_free_ptr(fpstatus);
6916 break;
6917 }
901ad525
WN
6918 case NEON_2RM_VCVTAU:
6919 case NEON_2RM_VCVTAS:
6920 case NEON_2RM_VCVTNU:
6921 case NEON_2RM_VCVTNS:
6922 case NEON_2RM_VCVTPU:
6923 case NEON_2RM_VCVTPS:
6924 case NEON_2RM_VCVTMU:
6925 case NEON_2RM_VCVTMS:
6926 {
6927 bool is_signed = !extract32(insn, 7, 1);
6928 TCGv_ptr fpst = get_fpstatus_ptr(1);
6929 TCGv_i32 tcg_rmode, tcg_shift;
6930 int rmode = fp_decode_rm[extract32(insn, 8, 2)];
6931
6932 tcg_shift = tcg_const_i32(0);
6933 tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rmode));
6934 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6935 cpu_env);
6936
6937 if (is_signed) {
6938 gen_helper_vfp_tosls(cpu_F0s, cpu_F0s,
6939 tcg_shift, fpst);
6940 } else {
6941 gen_helper_vfp_touls(cpu_F0s, cpu_F0s,
6942 tcg_shift, fpst);
6943 }
6944
6945 gen_helper_set_neon_rmode(tcg_rmode, tcg_rmode,
6946 cpu_env);
6947 tcg_temp_free_i32(tcg_rmode);
6948 tcg_temp_free_i32(tcg_shift);
6949 tcg_temp_free_ptr(fpst);
6950 break;
6951 }
600b828c 6952 case NEON_2RM_VRECPE:
b6d4443a
AB
6953 {
6954 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6955 gen_helper_recpe_u32(tmp, tmp, fpstatus);
6956 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6957 break;
b6d4443a 6958 }
600b828c 6959 case NEON_2RM_VRSQRTE:
c2fb418e
AB
6960 {
6961 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6962 gen_helper_rsqrte_u32(tmp, tmp, fpstatus);
6963 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6964 break;
c2fb418e 6965 }
600b828c 6966 case NEON_2RM_VRECPE_F:
b6d4443a
AB
6967 {
6968 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6969 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, fpstatus);
6970 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6971 break;
b6d4443a 6972 }
600b828c 6973 case NEON_2RM_VRSQRTE_F:
c2fb418e
AB
6974 {
6975 TCGv_ptr fpstatus = get_fpstatus_ptr(1);
6976 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, fpstatus);
6977 tcg_temp_free_ptr(fpstatus);
9ee6e8bb 6978 break;
c2fb418e 6979 }
600b828c 6980 case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
5500b06c 6981 gen_vfp_sito(0, 1);
9ee6e8bb 6982 break;
600b828c 6983 case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
5500b06c 6984 gen_vfp_uito(0, 1);
9ee6e8bb 6985 break;
600b828c 6986 case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
5500b06c 6987 gen_vfp_tosiz(0, 1);
9ee6e8bb 6988 break;
600b828c 6989 case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
5500b06c 6990 gen_vfp_touiz(0, 1);
9ee6e8bb
PB
6991 break;
6992 default:
600b828c
PM
6993 /* Reserved op values were caught by the
6994 * neon_2rm_sizes[] check earlier.
6995 */
6996 abort();
9ee6e8bb 6997 }
600b828c 6998 if (neon_2rm_is_float_op(op)) {
4373f3ce
PB
6999 tcg_gen_st_f32(cpu_F0s, cpu_env,
7000 neon_reg_offset(rd, pass));
9ee6e8bb 7001 } else {
dd8fbd78 7002 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
7003 }
7004 }
7005 break;
7006 }
7007 } else if ((insn & (1 << 10)) == 0) {
7008 /* VTBL, VTBX. */
56907d77
PM
7009 int n = ((insn >> 8) & 3) + 1;
7010 if ((rn + n) > 32) {
7011 /* This is UNPREDICTABLE; we choose to UNDEF to avoid the
7012 * helper function running off the end of the register file.
7013 */
7014 return 1;
7015 }
7016 n <<= 3;
9ee6e8bb 7017 if (insn & (1 << 6)) {
8f8e3aa4 7018 tmp = neon_load_reg(rd, 0);
9ee6e8bb 7019 } else {
7d1b0095 7020 tmp = tcg_temp_new_i32();
8f8e3aa4 7021 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7022 }
8f8e3aa4 7023 tmp2 = neon_load_reg(rm, 0);
b75263d6
JR
7024 tmp4 = tcg_const_i32(rn);
7025 tmp5 = tcg_const_i32(n);
9ef39277 7026 gen_helper_neon_tbl(tmp2, cpu_env, tmp2, tmp, tmp4, tmp5);
7d1b0095 7027 tcg_temp_free_i32(tmp);
9ee6e8bb 7028 if (insn & (1 << 6)) {
8f8e3aa4 7029 tmp = neon_load_reg(rd, 1);
9ee6e8bb 7030 } else {
7d1b0095 7031 tmp = tcg_temp_new_i32();
8f8e3aa4 7032 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 7033 }
8f8e3aa4 7034 tmp3 = neon_load_reg(rm, 1);
9ef39277 7035 gen_helper_neon_tbl(tmp3, cpu_env, tmp3, tmp, tmp4, tmp5);
25aeb69b
JR
7036 tcg_temp_free_i32(tmp5);
7037 tcg_temp_free_i32(tmp4);
8f8e3aa4 7038 neon_store_reg(rd, 0, tmp2);
3018f259 7039 neon_store_reg(rd, 1, tmp3);
7d1b0095 7040 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7041 } else if ((insn & 0x380) == 0) {
7042 /* VDUP */
133da6aa
JR
7043 if ((insn & (7 << 16)) == 0 || (q && (rd & 1))) {
7044 return 1;
7045 }
9ee6e8bb 7046 if (insn & (1 << 19)) {
dd8fbd78 7047 tmp = neon_load_reg(rm, 1);
9ee6e8bb 7048 } else {
dd8fbd78 7049 tmp = neon_load_reg(rm, 0);
9ee6e8bb
PB
7050 }
7051 if (insn & (1 << 16)) {
dd8fbd78 7052 gen_neon_dup_u8(tmp, ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
7053 } else if (insn & (1 << 17)) {
7054 if ((insn >> 18) & 1)
dd8fbd78 7055 gen_neon_dup_high16(tmp);
9ee6e8bb 7056 else
dd8fbd78 7057 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
7058 }
7059 for (pass = 0; pass < (q ? 4 : 2); pass++) {
7d1b0095 7060 tmp2 = tcg_temp_new_i32();
dd8fbd78
FN
7061 tcg_gen_mov_i32(tmp2, tmp);
7062 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 7063 }
7d1b0095 7064 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
7065 } else {
7066 return 1;
7067 }
7068 }
7069 }
7070 return 0;
7071}
7072
/* Decode and emit code for a coprocessor instruction: either the
 * XScale/iwMMXt coprocessor space or a generic system register access
 * (MRC/MCR/MRRC/MCRR).  Returns 0 if the access was handled, nonzero
 * if the caller should treat the instruction as UNDEFINED.
 */
static int disas_coproc_insn(DisasContext *s, uint32_t insn)
{
    int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
    const ARMCPRegInfo *ri;

    cpnum = (insn >> 8) & 0xf;

    /* First check for coprocessor space used for XScale/iwMMXt insns */
    if (arm_dc_feature(s, ARM_FEATURE_XSCALE) && (cpnum < 2)) {
        /* CPAR gates access to cp0/cp1 on XScale-family cores. */
        if (extract32(s->c15_cpar, cpnum, 1) == 0) {
            return 1;
        }
        if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
            return disas_iwmmxt_insn(s, insn);
        } else if (arm_dc_feature(s, ARM_FEATURE_XSCALE)) {
            return disas_dsp_insn(s, insn);
        }
        return 1;
    }

    /* Otherwise treat as a generic register access */
    is64 = (insn & (1 << 25)) == 0;
    if (!is64 && ((insn & (1 << 4)) == 0)) {
        /* cdp */
        return 1;
    }

    /* Decode the encoding fields.  64-bit accesses (MRRC/MCRR) have no
     * crn/opc2 and carry the second transfer register in bits [19:16].
     */
    crm = insn & 0xf;
    if (is64) {
        crn = 0;
        opc1 = (insn >> 4) & 0xf;
        opc2 = 0;
        rt2 = (insn >> 16) & 0xf;
    } else {
        crn = (insn >> 16) & 0xf;
        opc1 = (insn >> 21) & 7;
        opc2 = (insn >> 5) & 7;
        rt2 = 0;
    }
    isread = (insn >> 20) & 1;
    rt = (insn >> 12) & 0xf;

    /* Look the register up in this CPU's hash of coprocessor registers. */
    ri = get_arm_cp_reginfo(s->cp_regs,
            ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
    if (ri) {
        /* Check access permissions */
        if (!cp_access_ok(s->current_el, ri, isread)) {
            return 1;
        }

        if (ri->accessfn ||
            (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
            /* Emit code to perform further access permissions checks at
             * runtime; this may result in an exception.
             * Note that on XScale all cp0..c13 registers do an access check
             * call in order to handle c15_cpar.
             */
            TCGv_ptr tmpptr;
            TCGv_i32 tcg_syn;
            uint32_t syndrome;

            /* Note that since we are an implementation which takes an
             * exception on a trapped conditional instruction only if the
             * instruction passes its condition code check, we can take
             * advantage of the clause in the ARM ARM that allows us to set
             * the COND field in the instruction to 0xE in all cases.
             * We could fish the actual condition out of the insn (ARM)
             * or the condexec bits (Thumb) but it isn't necessary.
             */
            switch (cpnum) {
            case 14:
                if (is64) {
                    syndrome = syn_cp14_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp14_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            case 15:
                if (is64) {
                    syndrome = syn_cp15_rrt_trap(1, 0xe, opc1, crm, rt, rt2,
                                                 isread, s->thumb);
                } else {
                    syndrome = syn_cp15_rt_trap(1, 0xe, opc1, opc2, crn, crm,
                                                rt, isread, s->thumb);
                }
                break;
            default:
                /* ARMv8 defines that only coprocessors 14 and 15 exist,
                 * so this can only happen if this is an ARMv7 or earlier CPU,
                 * in which case the syndrome information won't actually be
                 * guest visible.
                 */
                assert(!arm_dc_feature(s, ARM_FEATURE_V8));
                syndrome = syn_uncategorized();
                break;
            }

            /* The helper may raise an exception, so the PC must be valid. */
            gen_set_pc_im(s, s->pc);
            tmpptr = tcg_const_ptr(ri);
            tcg_syn = tcg_const_i32(syndrome);
            gen_helper_access_check_cp_reg(cpu_env, tmpptr, tcg_syn);
            tcg_temp_free_ptr(tmpptr);
            tcg_temp_free_i32(tcg_syn);
        }

        /* Handle special cases first */
        switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
        case ARM_CP_NOP:
            return 0;
        case ARM_CP_WFI:
            if (isread) {
                return 1;
            }
            gen_set_pc_im(s, s->pc);
            s->is_jmp = DISAS_WFI;
            return 0;
        default:
            break;
        }

        /* I/O registers under icount must be bracketed by io_start/io_end. */
        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            gen_io_start();
        }

        if (isread) {
            /* Read */
            if (is64) {
                TCGv_i64 tmp64;
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp64 = tcg_const_i64(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp64 = tcg_temp_new_i64();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp64 = tcg_temp_new_i64();
                    tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                /* Split the 64-bit value into rt (low) and rt2 (high).
                 * store_reg consumes its temp, so a fresh one is needed
                 * for the high word.
                 */
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                store_reg(s, rt, tmp);
                tcg_gen_shri_i64(tmp64, tmp64, 32);
                tmp = tcg_temp_new_i32();
                tcg_gen_trunc_i64_i32(tmp, tmp64);
                tcg_temp_free_i64(tmp64);
                store_reg(s, rt2, tmp);
            } else {
                TCGv_i32 tmp;
                if (ri->type & ARM_CP_CONST) {
                    tmp = tcg_const_i32(ri->resetvalue);
                } else if (ri->readfn) {
                    TCGv_ptr tmpptr;
                    tmp = tcg_temp_new_i32();
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tmp = load_cpu_offset(ri->fieldoffset);
                }
                if (rt == 15) {
                    /* Destination register of r15 for 32 bit loads sets
                     * the condition codes from the high 4 bits of the value
                     */
                    gen_set_nzcv(tmp);
                    tcg_temp_free_i32(tmp);
                } else {
                    store_reg(s, rt, tmp);
                }
            }
        } else {
            /* Write */
            if (ri->type & ARM_CP_CONST) {
                /* If not forbidden by access permissions, treat as WI */
                return 0;
            }

            if (is64) {
                TCGv_i32 tmplo, tmphi;
                TCGv_i64 tmp64 = tcg_temp_new_i64();
                tmplo = load_reg(s, rt);
                tmphi = load_reg(s, rt2);
                tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
                tcg_temp_free_i32(tmplo);
                tcg_temp_free_i32(tmphi);
                if (ri->writefn) {
                    TCGv_ptr tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
                    tcg_temp_free_ptr(tmpptr);
                } else {
                    tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
                }
                tcg_temp_free_i64(tmp64);
            } else {
                if (ri->writefn) {
                    TCGv_i32 tmp;
                    TCGv_ptr tmpptr;
                    tmp = load_reg(s, rt);
                    tmpptr = tcg_const_ptr(ri);
                    gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
                    tcg_temp_free_ptr(tmpptr);
                    tcg_temp_free_i32(tmp);
                } else {
                    TCGv_i32 tmp = load_reg(s, rt);
                    store_cpu_offset(tmp, ri->fieldoffset);
                }
            }
        }

        if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
            /* I/O operations must end the TB here (whether read or write) */
            gen_io_end();
            gen_lookup_tb(s);
        } else if (!isread && !(ri->type & ARM_CP_SUPPRESS_TB_END)) {
            /* We default to ending the TB on a coprocessor register write,
             * but allow this to be suppressed by the register definition
             * (usually only necessary to work around guest bugs).
             */
            gen_lookup_tb(s);
        }

        return 0;
    }

    /* Unknown register; this might be a guest error or a QEMU
     * unimplemented feature.
     */
    if (is64) {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                                 "64 bit system register cp:%d opc1: %d crm:%d "
                                 "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crm,
                      s->ns ? "non-secure" : "secure");
    } else {
        qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
                                 "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
                                 "(%s)\n",
                      isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
                      s->ns ? "non-secure" : "secure");
    }

    return 1;
}
7320
5e3f878a
PB
7321
7322/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 7323static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a 7324{
39d5492a 7325 TCGv_i32 tmp;
7d1b0095 7326 tmp = tcg_temp_new_i32();
5e3f878a
PB
7327 tcg_gen_trunc_i64_i32(tmp, val);
7328 store_reg(s, rlow, tmp);
7d1b0095 7329 tmp = tcg_temp_new_i32();
5e3f878a
PB
7330 tcg_gen_shri_i64(val, val, 32);
7331 tcg_gen_trunc_i64_i32(tmp, val);
7332 store_reg(s, rhigh, tmp);
7333}
7334
7335/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 7336static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 7337{
a7812ae4 7338 TCGv_i64 tmp;
39d5492a 7339 TCGv_i32 tmp2;
5e3f878a 7340
36aa55dc 7341 /* Load value and extend to 64 bits. */
a7812ae4 7342 tmp = tcg_temp_new_i64();
5e3f878a
PB
7343 tmp2 = load_reg(s, rlow);
7344 tcg_gen_extu_i32_i64(tmp, tmp2);
7d1b0095 7345 tcg_temp_free_i32(tmp2);
5e3f878a 7346 tcg_gen_add_i64(val, val, tmp);
b75263d6 7347 tcg_temp_free_i64(tmp);
5e3f878a
PB
7348}
7349
7350/* load and add a 64-bit value from a register pair. */
a7812ae4 7351static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 7352{
a7812ae4 7353 TCGv_i64 tmp;
39d5492a
PM
7354 TCGv_i32 tmpl;
7355 TCGv_i32 tmph;
5e3f878a
PB
7356
7357 /* Load 64-bit value rd:rn. */
36aa55dc
PB
7358 tmpl = load_reg(s, rlow);
7359 tmph = load_reg(s, rhigh);
a7812ae4 7360 tmp = tcg_temp_new_i64();
36aa55dc 7361 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
7d1b0095
PM
7362 tcg_temp_free_i32(tmpl);
7363 tcg_temp_free_i32(tmph);
5e3f878a 7364 tcg_gen_add_i64(val, val, tmp);
b75263d6 7365 tcg_temp_free_i64(tmp);
5e3f878a
PB
7366}
7367
/* Set N and Z flags from hi|lo.
 * N is taken from the top word (bit 31 of hi == bit 63 of the 64-bit
 * result); Z is represented by cpu_ZF == 0, so OR-ing both halves into
 * cpu_ZF makes it zero exactly when the whole 64-bit result is zero.
 */
static void gen_logicq_cc(TCGv_i32 lo, TCGv_i32 hi)
{
    tcg_gen_mov_i32(cpu_NF, hi);
    tcg_gen_or_i32(cpu_ZF, lo, hi);
}
7374
426f5abc
PB
/* Load/Store exclusive instructions are implemented by remembering
   the value/address loaded, and seeing if these are the same
   when the store is performed. This should be sufficient to implement
   the architecturally mandated semantics, and avoids having to monitor
   regular stores.

   In system emulation mode only one CPU will be running at once, so
   this sequence is effectively atomic. In user emulation mode we
   throw an exception and handle the atomic operation elsewhere. */
static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
                               TCGv_i32 addr, int size)
{
    TCGv_i32 tmp = tcg_temp_new_i32();

    /* Flag that this TB contains a load-exclusive. */
    s->is_ldex = true;

    /* size is log2(bytes): 0 = byte, 1 = halfword, 2 = word,
     * 3 = doubleword (performed as two 32-bit loads).
     */
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();

        /* Load the second word from addr + 4 and record the combined
         * 64-bit value for the later store-exclusive comparison.
         */
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
        store_reg(s, rt2, tmp3);
    } else {
        tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
    }

    store_reg(s, rt, tmp);
    /* Arm the exclusive monitor for this address. */
    tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
7422
/* Clear the exclusive monitor (CLREX): -1 is the "no exclusive access
 * outstanding" marker checked by gen_store_exclusive.
 */
static void gen_clrex(DisasContext *s)
{
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7427
7428#ifdef CONFIG_USER_ONLY
/* User-mode store-exclusive: record the address and the insn parameters
 * (packed into cpu_exclusive_info) and raise EXCP_STREX so the operation
 * can be completed atomically outside generated code.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
    /* Pack size and the three register numbers for the exception handler. */
    tcg_gen_movi_i32(cpu_exclusive_info,
                     size | (rd << 4) | (rt << 8) | (rt2 << 12));
    gen_exception_internal_insn(s, 4, EXCP_STREX);
}
7437#else
/* System-mode store-exclusive: succeeds (writes memory, Rd = 0) only if
 * both the monitored address and the remembered value still match;
 * otherwise Rd = 1 and memory is untouched.  Either way the monitor is
 * cleared at the end.
 */
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
                                TCGv_i32 addr, int size)
{
    TCGv_i32 tmp;
    TCGv_i64 val64, extaddr;
    TCGLabel *done_label;
    TCGLabel *fail_label;

    /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
         [addr] = {Rt};
         {Rd} = 0;
       } else {
         {Rd} = 1;
       } */
    fail_label = gen_new_label();
    done_label = gen_new_label();
    /* First check: does the address match the armed monitor?  */
    extaddr = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(extaddr, addr);
    tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
    tcg_temp_free_i64(extaddr);

    /* Second check: re-load the memory and compare with the value seen
     * by the matching load-exclusive.
     */
    tmp = tcg_temp_new_i32();
    switch (size) {
    case 0:
        gen_aa32_ld8u(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_ld16u(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_ld32u(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }

    val64 = tcg_temp_new_i64();
    if (size == 3) {
        TCGv_i32 tmp2 = tcg_temp_new_i32();
        TCGv_i32 tmp3 = tcg_temp_new_i32();
        tcg_gen_addi_i32(tmp2, addr, 4);
        gen_aa32_ld32u(tmp3, tmp2, get_mem_index(s));
        tcg_temp_free_i32(tmp2);
        tcg_gen_concat_i32_i64(val64, tmp, tmp3);
        tcg_temp_free_i32(tmp3);
    } else {
        tcg_gen_extu_i32_i64(val64, tmp);
    }
    tcg_temp_free_i32(tmp);

    tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
    tcg_temp_free_i64(val64);

    /* Both checks passed: perform the actual store(s). */
    tmp = load_reg(s, rt);
    switch (size) {
    case 0:
        gen_aa32_st8(tmp, addr, get_mem_index(s));
        break;
    case 1:
        gen_aa32_st16(tmp, addr, get_mem_index(s));
        break;
    case 2:
    case 3:
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        break;
    default:
        abort();
    }
    tcg_temp_free_i32(tmp);
    if (size == 3) {
        /* Doubleword: second register goes to addr + 4. */
        tcg_gen_addi_i32(addr, addr, 4);
        tmp = load_reg(s, rt2);
        gen_aa32_st32(tmp, addr, get_mem_index(s));
        tcg_temp_free_i32(tmp);
    }
    tcg_gen_movi_i32(cpu_R[rd], 0);
    tcg_gen_br(done_label);
    gen_set_label(fail_label);
    tcg_gen_movi_i32(cpu_R[rd], 1);
    gen_set_label(done_label);
    /* Success or failure, the exclusive monitor is now disarmed. */
    tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
7521#endif
7522
81465888
PM
7523/* gen_srs:
7524 * @env: CPUARMState
7525 * @s: DisasContext
7526 * @mode: mode field from insn (which stack to store to)
7527 * @amode: addressing mode (DA/IA/DB/IB), encoded as per P,U bits in ARM insn
7528 * @writeback: true if writeback bit set
7529 *
7530 * Generate code for the SRS (Store Return State) insn.
7531 */
7532static void gen_srs(DisasContext *s,
7533 uint32_t mode, uint32_t amode, bool writeback)
7534{
7535 int32_t offset;
7536 TCGv_i32 addr = tcg_temp_new_i32();
7537 TCGv_i32 tmp = tcg_const_i32(mode);
7538 gen_helper_get_r13_banked(addr, cpu_env, tmp);
7539 tcg_temp_free_i32(tmp);
7540 switch (amode) {
7541 case 0: /* DA */
7542 offset = -4;
7543 break;
7544 case 1: /* IA */
7545 offset = 0;
7546 break;
7547 case 2: /* DB */
7548 offset = -8;
7549 break;
7550 case 3: /* IB */
7551 offset = 4;
7552 break;
7553 default:
7554 abort();
7555 }
7556 tcg_gen_addi_i32(addr, addr, offset);
7557 tmp = load_reg(s, 14);
c1197795 7558 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7559 tcg_temp_free_i32(tmp);
81465888
PM
7560 tmp = load_cpu_field(spsr);
7561 tcg_gen_addi_i32(addr, addr, 4);
c1197795 7562 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 7563 tcg_temp_free_i32(tmp);
81465888
PM
7564 if (writeback) {
7565 switch (amode) {
7566 case 0:
7567 offset = -8;
7568 break;
7569 case 1:
7570 offset = 4;
7571 break;
7572 case 2:
7573 offset = -4;
7574 break;
7575 case 3:
7576 offset = 0;
7577 break;
7578 default:
7579 abort();
7580 }
7581 tcg_gen_addi_i32(addr, addr, offset);
7582 tmp = tcg_const_i32(mode);
7583 gen_helper_set_r13_banked(cpu_env, tmp, addr);
7584 tcg_temp_free_i32(tmp);
7585 }
7586 tcg_temp_free_i32(addr);
7587}
7588
f4df2210 7589static void disas_arm_insn(DisasContext *s, unsigned int insn)
9ee6e8bb 7590{
f4df2210 7591 unsigned int cond, val, op1, i, shift, rm, rs, rn, rd, sh;
39d5492a
PM
7592 TCGv_i32 tmp;
7593 TCGv_i32 tmp2;
7594 TCGv_i32 tmp3;
7595 TCGv_i32 addr;
a7812ae4 7596 TCGv_i64 tmp64;
9ee6e8bb 7597
9ee6e8bb 7598 /* M variants do not implement ARM mode. */
b53d8923 7599 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 7600 goto illegal_op;
b53d8923 7601 }
9ee6e8bb
PB
7602 cond = insn >> 28;
7603 if (cond == 0xf){
be5e7a76
DES
7604 /* In ARMv3 and v4 the NV condition is UNPREDICTABLE; we
7605 * choose to UNDEF. In ARMv5 and above the space is used
7606 * for miscellaneous unconditional instructions.
7607 */
7608 ARCH(5);
7609
9ee6e8bb
PB
7610 /* Unconditional instructions. */
7611 if (((insn >> 25) & 7) == 1) {
7612 /* NEON Data processing. */
d614a513 7613 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7614 goto illegal_op;
d614a513 7615 }
9ee6e8bb 7616
7dcc1f89 7617 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 7618 goto illegal_op;
7dcc1f89 7619 }
9ee6e8bb
PB
7620 return;
7621 }
7622 if ((insn & 0x0f100000) == 0x04000000) {
7623 /* NEON load/store. */
d614a513 7624 if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
9ee6e8bb 7625 goto illegal_op;
d614a513 7626 }
9ee6e8bb 7627
7dcc1f89 7628 if (disas_neon_ls_insn(s, insn)) {
9ee6e8bb 7629 goto illegal_op;
7dcc1f89 7630 }
9ee6e8bb
PB
7631 return;
7632 }
6a57f3eb
WN
7633 if ((insn & 0x0f000e10) == 0x0e000a00) {
7634 /* VFP. */
7dcc1f89 7635 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
7636 goto illegal_op;
7637 }
7638 return;
7639 }
3d185e5d
PM
7640 if (((insn & 0x0f30f000) == 0x0510f000) ||
7641 ((insn & 0x0f30f010) == 0x0710f000)) {
7642 if ((insn & (1 << 22)) == 0) {
7643 /* PLDW; v7MP */
d614a513 7644 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7645 goto illegal_op;
7646 }
7647 }
7648 /* Otherwise PLD; v5TE+ */
be5e7a76 7649 ARCH(5TE);
3d185e5d
PM
7650 return;
7651 }
7652 if (((insn & 0x0f70f000) == 0x0450f000) ||
7653 ((insn & 0x0f70f010) == 0x0650f000)) {
7654 ARCH(7);
7655 return; /* PLI; V7 */
7656 }
7657 if (((insn & 0x0f700000) == 0x04100000) ||
7658 ((insn & 0x0f700010) == 0x06100000)) {
d614a513 7659 if (!arm_dc_feature(s, ARM_FEATURE_V7MP)) {
3d185e5d
PM
7660 goto illegal_op;
7661 }
7662 return; /* v7MP: Unallocated memory hint: must NOP */
7663 }
7664
7665 if ((insn & 0x0ffffdff) == 0x01010000) {
9ee6e8bb
PB
7666 ARCH(6);
7667 /* setend */
10962fd5
PM
7668 if (((insn >> 9) & 1) != s->bswap_code) {
7669 /* Dynamic endianness switching not implemented. */
e0c270d9 7670 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
9ee6e8bb
PB
7671 goto illegal_op;
7672 }
7673 return;
7674 } else if ((insn & 0x0fffff00) == 0x057ff000) {
7675 switch ((insn >> 4) & 0xf) {
7676 case 1: /* clrex */
7677 ARCH(6K);
426f5abc 7678 gen_clrex(s);
9ee6e8bb
PB
7679 return;
7680 case 4: /* dsb */
7681 case 5: /* dmb */
7682 case 6: /* isb */
7683 ARCH(7);
7684 /* We don't emulate caches so these are a no-op. */
7685 return;
7686 default:
7687 goto illegal_op;
7688 }
7689 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
7690 /* srs */
81465888 7691 if (IS_USER(s)) {
9ee6e8bb 7692 goto illegal_op;
9ee6e8bb 7693 }
81465888
PM
7694 ARCH(6);
7695 gen_srs(s, (insn & 0x1f), (insn >> 23) & 3, insn & (1 << 21));
3b328448 7696 return;
ea825eee 7697 } else if ((insn & 0x0e50ffe0) == 0x08100a00) {
9ee6e8bb 7698 /* rfe */
c67b6b71 7699 int32_t offset;
9ee6e8bb
PB
7700 if (IS_USER(s))
7701 goto illegal_op;
7702 ARCH(6);
7703 rn = (insn >> 16) & 0xf;
b0109805 7704 addr = load_reg(s, rn);
9ee6e8bb
PB
7705 i = (insn >> 23) & 3;
7706 switch (i) {
b0109805 7707 case 0: offset = -4; break; /* DA */
c67b6b71
FN
7708 case 1: offset = 0; break; /* IA */
7709 case 2: offset = -8; break; /* DB */
b0109805 7710 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
7711 default: abort();
7712 }
7713 if (offset)
b0109805
PB
7714 tcg_gen_addi_i32(addr, addr, offset);
7715 /* Load PC into tmp and CPSR into tmp2. */
5a839c0d 7716 tmp = tcg_temp_new_i32();
6ce2faf4 7717 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 7718 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 7719 tmp2 = tcg_temp_new_i32();
6ce2faf4 7720 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
7721 if (insn & (1 << 21)) {
7722 /* Base writeback. */
7723 switch (i) {
b0109805 7724 case 0: offset = -8; break;
c67b6b71
FN
7725 case 1: offset = 4; break;
7726 case 2: offset = -4; break;
b0109805 7727 case 3: offset = 0; break;
9ee6e8bb
PB
7728 default: abort();
7729 }
7730 if (offset)
b0109805
PB
7731 tcg_gen_addi_i32(addr, addr, offset);
7732 store_reg(s, rn, addr);
7733 } else {
7d1b0095 7734 tcg_temp_free_i32(addr);
9ee6e8bb 7735 }
b0109805 7736 gen_rfe(s, tmp, tmp2);
c67b6b71 7737 return;
9ee6e8bb
PB
7738 } else if ((insn & 0x0e000000) == 0x0a000000) {
7739 /* branch link and change to thumb (blx <offset>) */
7740 int32_t offset;
7741
7742 val = (uint32_t)s->pc;
7d1b0095 7743 tmp = tcg_temp_new_i32();
d9ba4830
PB
7744 tcg_gen_movi_i32(tmp, val);
7745 store_reg(s, 14, tmp);
9ee6e8bb
PB
7746 /* Sign-extend the 24-bit offset */
7747 offset = (((int32_t)insn) << 8) >> 8;
7748 /* offset * 4 + bit24 * 2 + (thumb bit) */
7749 val += (offset << 2) | ((insn >> 23) & 2) | 1;
7750 /* pipeline offset */
7751 val += 4;
be5e7a76 7752 /* protected by ARCH(5); above, near the start of uncond block */
d9ba4830 7753 gen_bx_im(s, val);
9ee6e8bb
PB
7754 return;
7755 } else if ((insn & 0x0e000f00) == 0x0c000100) {
d614a513 7756 if (arm_dc_feature(s, ARM_FEATURE_IWMMXT)) {
9ee6e8bb 7757 /* iWMMXt register transfer. */
c0f4af17 7758 if (extract32(s->c15_cpar, 1, 1)) {
7dcc1f89 7759 if (!disas_iwmmxt_insn(s, insn)) {
9ee6e8bb 7760 return;
c0f4af17
PM
7761 }
7762 }
9ee6e8bb
PB
7763 }
7764 } else if ((insn & 0x0fe00000) == 0x0c400000) {
7765 /* Coprocessor double register transfer. */
be5e7a76 7766 ARCH(5TE);
9ee6e8bb
PB
7767 } else if ((insn & 0x0f000010) == 0x0e000010) {
7768 /* Additional coprocessor register transfer. */
7997d92f 7769 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
7770 uint32_t mask;
7771 uint32_t val;
7772 /* cps (privileged) */
7773 if (IS_USER(s))
7774 return;
7775 mask = val = 0;
7776 if (insn & (1 << 19)) {
7777 if (insn & (1 << 8))
7778 mask |= CPSR_A;
7779 if (insn & (1 << 7))
7780 mask |= CPSR_I;
7781 if (insn & (1 << 6))
7782 mask |= CPSR_F;
7783 if (insn & (1 << 18))
7784 val |= mask;
7785 }
7997d92f 7786 if (insn & (1 << 17)) {
9ee6e8bb
PB
7787 mask |= CPSR_M;
7788 val |= (insn & 0x1f);
7789 }
7790 if (mask) {
2fbac54b 7791 gen_set_psr_im(s, mask, 0, val);
9ee6e8bb
PB
7792 }
7793 return;
7794 }
7795 goto illegal_op;
7796 }
7797 if (cond != 0xe) {
7798 /* if not always execute, we generate a conditional jump to
7799 next instruction */
7800 s->condlabel = gen_new_label();
39fb730a 7801 arm_gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7802 s->condjmp = 1;
7803 }
7804 if ((insn & 0x0f900000) == 0x03000000) {
7805 if ((insn & (1 << 21)) == 0) {
7806 ARCH(6T2);
7807 rd = (insn >> 12) & 0xf;
7808 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
7809 if ((insn & (1 << 22)) == 0) {
7810 /* MOVW */
7d1b0095 7811 tmp = tcg_temp_new_i32();
5e3f878a 7812 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
7813 } else {
7814 /* MOVT */
5e3f878a 7815 tmp = load_reg(s, rd);
86831435 7816 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7817 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 7818 }
5e3f878a 7819 store_reg(s, rd, tmp);
9ee6e8bb
PB
7820 } else {
7821 if (((insn >> 12) & 0xf) != 0xf)
7822 goto illegal_op;
7823 if (((insn >> 16) & 0xf) == 0) {
7824 gen_nop_hint(s, insn & 0xff);
7825 } else {
7826 /* CPSR = immediate */
7827 val = insn & 0xff;
7828 shift = ((insn >> 8) & 0xf) * 2;
7829 if (shift)
7830 val = (val >> shift) | (val << (32 - shift));
9ee6e8bb 7831 i = ((insn & (1 << 22)) != 0);
7dcc1f89
PM
7832 if (gen_set_psr_im(s, msr_mask(s, (insn >> 16) & 0xf, i),
7833 i, val)) {
9ee6e8bb 7834 goto illegal_op;
7dcc1f89 7835 }
9ee6e8bb
PB
7836 }
7837 }
7838 } else if ((insn & 0x0f900000) == 0x01000000
7839 && (insn & 0x00000090) != 0x00000090) {
7840 /* miscellaneous instructions */
7841 op1 = (insn >> 21) & 3;
7842 sh = (insn >> 4) & 0xf;
7843 rm = insn & 0xf;
7844 switch (sh) {
7845 case 0x0: /* move program status register */
7846 if (op1 & 1) {
7847 /* PSR = reg */
2fbac54b 7848 tmp = load_reg(s, rm);
9ee6e8bb 7849 i = ((op1 & 2) != 0);
7dcc1f89 7850 if (gen_set_psr(s, msr_mask(s, (insn >> 16) & 0xf, i), i, tmp))
9ee6e8bb
PB
7851 goto illegal_op;
7852 } else {
7853 /* reg = PSR */
7854 rd = (insn >> 12) & 0xf;
7855 if (op1 & 2) {
7856 if (IS_USER(s))
7857 goto illegal_op;
d9ba4830 7858 tmp = load_cpu_field(spsr);
9ee6e8bb 7859 } else {
7d1b0095 7860 tmp = tcg_temp_new_i32();
9ef39277 7861 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 7862 }
d9ba4830 7863 store_reg(s, rd, tmp);
9ee6e8bb
PB
7864 }
7865 break;
7866 case 0x1:
7867 if (op1 == 1) {
7868 /* branch/exchange thumb (bx). */
be5e7a76 7869 ARCH(4T);
d9ba4830
PB
7870 tmp = load_reg(s, rm);
7871 gen_bx(s, tmp);
9ee6e8bb
PB
7872 } else if (op1 == 3) {
7873 /* clz */
be5e7a76 7874 ARCH(5);
9ee6e8bb 7875 rd = (insn >> 12) & 0xf;
1497c961
PB
7876 tmp = load_reg(s, rm);
7877 gen_helper_clz(tmp, tmp);
7878 store_reg(s, rd, tmp);
9ee6e8bb
PB
7879 } else {
7880 goto illegal_op;
7881 }
7882 break;
7883 case 0x2:
7884 if (op1 == 1) {
7885 ARCH(5J); /* bxj */
7886 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7887 tmp = load_reg(s, rm);
7888 gen_bx(s, tmp);
9ee6e8bb
PB
7889 } else {
7890 goto illegal_op;
7891 }
7892 break;
7893 case 0x3:
7894 if (op1 != 1)
7895 goto illegal_op;
7896
be5e7a76 7897 ARCH(5);
9ee6e8bb 7898 /* branch link/exchange thumb (blx) */
d9ba4830 7899 tmp = load_reg(s, rm);
7d1b0095 7900 tmp2 = tcg_temp_new_i32();
d9ba4830
PB
7901 tcg_gen_movi_i32(tmp2, s->pc);
7902 store_reg(s, 14, tmp2);
7903 gen_bx(s, tmp);
9ee6e8bb 7904 break;
eb0ecd5a
WN
7905 case 0x4:
7906 {
7907 /* crc32/crc32c */
7908 uint32_t c = extract32(insn, 8, 4);
7909
7910 /* Check this CPU supports ARMv8 CRC instructions.
7911 * op1 == 3 is UNPREDICTABLE but handle as UNDEFINED.
7912 * Bits 8, 10 and 11 should be zero.
7913 */
d614a513 7914 if (!arm_dc_feature(s, ARM_FEATURE_CRC) || op1 == 0x3 ||
eb0ecd5a
WN
7915 (c & 0xd) != 0) {
7916 goto illegal_op;
7917 }
7918
7919 rn = extract32(insn, 16, 4);
7920 rd = extract32(insn, 12, 4);
7921
7922 tmp = load_reg(s, rn);
7923 tmp2 = load_reg(s, rm);
aa633469
PM
7924 if (op1 == 0) {
7925 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
7926 } else if (op1 == 1) {
7927 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
7928 }
eb0ecd5a
WN
7929 tmp3 = tcg_const_i32(1 << op1);
7930 if (c & 0x2) {
7931 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
7932 } else {
7933 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
7934 }
7935 tcg_temp_free_i32(tmp2);
7936 tcg_temp_free_i32(tmp3);
7937 store_reg(s, rd, tmp);
7938 break;
7939 }
9ee6e8bb 7940 case 0x5: /* saturating add/subtract */
be5e7a76 7941 ARCH(5TE);
9ee6e8bb
PB
7942 rd = (insn >> 12) & 0xf;
7943 rn = (insn >> 16) & 0xf;
b40d0353 7944 tmp = load_reg(s, rm);
5e3f878a 7945 tmp2 = load_reg(s, rn);
9ee6e8bb 7946 if (op1 & 2)
9ef39277 7947 gen_helper_double_saturate(tmp2, cpu_env, tmp2);
9ee6e8bb 7948 if (op1 & 1)
9ef39277 7949 gen_helper_sub_saturate(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 7950 else
9ef39277 7951 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 7952 tcg_temp_free_i32(tmp2);
5e3f878a 7953 store_reg(s, rd, tmp);
9ee6e8bb 7954 break;
49e14940 7955 case 7:
d4a2dc67
PM
7956 {
7957 int imm16 = extract32(insn, 0, 4) | (extract32(insn, 8, 12) << 4);
37e6456e
PM
7958 switch (op1) {
7959 case 1:
7960 /* bkpt */
7961 ARCH(5);
7962 gen_exception_insn(s, 4, EXCP_BKPT,
7963 syn_aa32_bkpt(imm16, false));
7964 break;
7965 case 2:
7966 /* Hypervisor call (v7) */
7967 ARCH(7);
7968 if (IS_USER(s)) {
7969 goto illegal_op;
7970 }
7971 gen_hvc(s, imm16);
7972 break;
7973 case 3:
7974 /* Secure monitor call (v6+) */
7975 ARCH(6K);
7976 if (IS_USER(s)) {
7977 goto illegal_op;
7978 }
7979 gen_smc(s);
7980 break;
7981 default:
49e14940
AL
7982 goto illegal_op;
7983 }
9ee6e8bb 7984 break;
d4a2dc67 7985 }
9ee6e8bb
PB
7986 case 0x8: /* signed multiply */
7987 case 0xa:
7988 case 0xc:
7989 case 0xe:
be5e7a76 7990 ARCH(5TE);
9ee6e8bb
PB
7991 rs = (insn >> 8) & 0xf;
7992 rn = (insn >> 12) & 0xf;
7993 rd = (insn >> 16) & 0xf;
7994 if (op1 == 1) {
7995 /* (32 * 16) >> 16 */
5e3f878a
PB
7996 tmp = load_reg(s, rm);
7997 tmp2 = load_reg(s, rs);
9ee6e8bb 7998 if (sh & 4)
5e3f878a 7999 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 8000 else
5e3f878a 8001 gen_sxth(tmp2);
a7812ae4
PB
8002 tmp64 = gen_muls_i64_i32(tmp, tmp2);
8003 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 8004 tmp = tcg_temp_new_i32();
a7812ae4 8005 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 8006 tcg_temp_free_i64(tmp64);
9ee6e8bb 8007 if ((sh & 2) == 0) {
5e3f878a 8008 tmp2 = load_reg(s, rn);
9ef39277 8009 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8010 tcg_temp_free_i32(tmp2);
9ee6e8bb 8011 }
5e3f878a 8012 store_reg(s, rd, tmp);
9ee6e8bb
PB
8013 } else {
8014 /* 16 * 16 */
5e3f878a
PB
8015 tmp = load_reg(s, rm);
8016 tmp2 = load_reg(s, rs);
8017 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
7d1b0095 8018 tcg_temp_free_i32(tmp2);
9ee6e8bb 8019 if (op1 == 2) {
a7812ae4
PB
8020 tmp64 = tcg_temp_new_i64();
8021 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 8022 tcg_temp_free_i32(tmp);
a7812ae4
PB
8023 gen_addq(s, tmp64, rn, rd);
8024 gen_storeq_reg(s, rn, rd, tmp64);
b75263d6 8025 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
8026 } else {
8027 if (op1 == 0) {
5e3f878a 8028 tmp2 = load_reg(s, rn);
9ef39277 8029 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8030 tcg_temp_free_i32(tmp2);
9ee6e8bb 8031 }
5e3f878a 8032 store_reg(s, rd, tmp);
9ee6e8bb
PB
8033 }
8034 }
8035 break;
8036 default:
8037 goto illegal_op;
8038 }
8039 } else if (((insn & 0x0e000000) == 0 &&
8040 (insn & 0x00000090) != 0x90) ||
8041 ((insn & 0x0e000000) == (1 << 25))) {
8042 int set_cc, logic_cc, shiftop;
8043
8044 op1 = (insn >> 21) & 0xf;
8045 set_cc = (insn >> 20) & 1;
8046 logic_cc = table_logic_cc[op1] & set_cc;
8047
8048 /* data processing instruction */
8049 if (insn & (1 << 25)) {
8050 /* immediate operand */
8051 val = insn & 0xff;
8052 shift = ((insn >> 8) & 0xf) * 2;
e9bb4aa9 8053 if (shift) {
9ee6e8bb 8054 val = (val >> shift) | (val << (32 - shift));
e9bb4aa9 8055 }
7d1b0095 8056 tmp2 = tcg_temp_new_i32();
e9bb4aa9
JR
8057 tcg_gen_movi_i32(tmp2, val);
8058 if (logic_cc && shift) {
8059 gen_set_CF_bit31(tmp2);
8060 }
9ee6e8bb
PB
8061 } else {
8062 /* register */
8063 rm = (insn) & 0xf;
e9bb4aa9 8064 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8065 shiftop = (insn >> 5) & 3;
8066 if (!(insn & (1 << 4))) {
8067 shift = (insn >> 7) & 0x1f;
e9bb4aa9 8068 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9ee6e8bb
PB
8069 } else {
8070 rs = (insn >> 8) & 0xf;
8984bd2e 8071 tmp = load_reg(s, rs);
e9bb4aa9 8072 gen_arm_shift_reg(tmp2, shiftop, tmp, logic_cc);
9ee6e8bb
PB
8073 }
8074 }
8075 if (op1 != 0x0f && op1 != 0x0d) {
8076 rn = (insn >> 16) & 0xf;
e9bb4aa9
JR
8077 tmp = load_reg(s, rn);
8078 } else {
39d5492a 8079 TCGV_UNUSED_I32(tmp);
9ee6e8bb
PB
8080 }
8081 rd = (insn >> 12) & 0xf;
8082 switch(op1) {
8083 case 0x00:
e9bb4aa9
JR
8084 tcg_gen_and_i32(tmp, tmp, tmp2);
8085 if (logic_cc) {
8086 gen_logic_CC(tmp);
8087 }
7dcc1f89 8088 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8089 break;
8090 case 0x01:
e9bb4aa9
JR
8091 tcg_gen_xor_i32(tmp, tmp, tmp2);
8092 if (logic_cc) {
8093 gen_logic_CC(tmp);
8094 }
7dcc1f89 8095 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8096 break;
8097 case 0x02:
8098 if (set_cc && rd == 15) {
8099 /* SUBS r15, ... is used for exception return. */
e9bb4aa9 8100 if (IS_USER(s)) {
9ee6e8bb 8101 goto illegal_op;
e9bb4aa9 8102 }
72485ec4 8103 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9 8104 gen_exception_return(s, tmp);
9ee6e8bb 8105 } else {
e9bb4aa9 8106 if (set_cc) {
72485ec4 8107 gen_sub_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8108 } else {
8109 tcg_gen_sub_i32(tmp, tmp, tmp2);
8110 }
7dcc1f89 8111 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8112 }
8113 break;
8114 case 0x03:
e9bb4aa9 8115 if (set_cc) {
72485ec4 8116 gen_sub_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8117 } else {
8118 tcg_gen_sub_i32(tmp, tmp2, tmp);
8119 }
7dcc1f89 8120 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8121 break;
8122 case 0x04:
e9bb4aa9 8123 if (set_cc) {
72485ec4 8124 gen_add_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8125 } else {
8126 tcg_gen_add_i32(tmp, tmp, tmp2);
8127 }
7dcc1f89 8128 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8129 break;
8130 case 0x05:
e9bb4aa9 8131 if (set_cc) {
49b4c31e 8132 gen_adc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8133 } else {
8134 gen_add_carry(tmp, tmp, tmp2);
8135 }
7dcc1f89 8136 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8137 break;
8138 case 0x06:
e9bb4aa9 8139 if (set_cc) {
2de68a49 8140 gen_sbc_CC(tmp, tmp, tmp2);
e9bb4aa9
JR
8141 } else {
8142 gen_sub_carry(tmp, tmp, tmp2);
8143 }
7dcc1f89 8144 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8145 break;
8146 case 0x07:
e9bb4aa9 8147 if (set_cc) {
2de68a49 8148 gen_sbc_CC(tmp, tmp2, tmp);
e9bb4aa9
JR
8149 } else {
8150 gen_sub_carry(tmp, tmp2, tmp);
8151 }
7dcc1f89 8152 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8153 break;
8154 case 0x08:
8155 if (set_cc) {
e9bb4aa9
JR
8156 tcg_gen_and_i32(tmp, tmp, tmp2);
8157 gen_logic_CC(tmp);
9ee6e8bb 8158 }
7d1b0095 8159 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8160 break;
8161 case 0x09:
8162 if (set_cc) {
e9bb4aa9
JR
8163 tcg_gen_xor_i32(tmp, tmp, tmp2);
8164 gen_logic_CC(tmp);
9ee6e8bb 8165 }
7d1b0095 8166 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8167 break;
8168 case 0x0a:
8169 if (set_cc) {
72485ec4 8170 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb 8171 }
7d1b0095 8172 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8173 break;
8174 case 0x0b:
8175 if (set_cc) {
72485ec4 8176 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 8177 }
7d1b0095 8178 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8179 break;
8180 case 0x0c:
e9bb4aa9
JR
8181 tcg_gen_or_i32(tmp, tmp, tmp2);
8182 if (logic_cc) {
8183 gen_logic_CC(tmp);
8184 }
7dcc1f89 8185 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8186 break;
8187 case 0x0d:
8188 if (logic_cc && rd == 15) {
8189 /* MOVS r15, ... is used for exception return. */
e9bb4aa9 8190 if (IS_USER(s)) {
9ee6e8bb 8191 goto illegal_op;
e9bb4aa9
JR
8192 }
8193 gen_exception_return(s, tmp2);
9ee6e8bb 8194 } else {
e9bb4aa9
JR
8195 if (logic_cc) {
8196 gen_logic_CC(tmp2);
8197 }
7dcc1f89 8198 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8199 }
8200 break;
8201 case 0x0e:
f669df27 8202 tcg_gen_andc_i32(tmp, tmp, tmp2);
e9bb4aa9
JR
8203 if (logic_cc) {
8204 gen_logic_CC(tmp);
8205 }
7dcc1f89 8206 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
8207 break;
8208 default:
8209 case 0x0f:
e9bb4aa9
JR
8210 tcg_gen_not_i32(tmp2, tmp2);
8211 if (logic_cc) {
8212 gen_logic_CC(tmp2);
8213 }
7dcc1f89 8214 store_reg_bx(s, rd, tmp2);
9ee6e8bb
PB
8215 break;
8216 }
e9bb4aa9 8217 if (op1 != 0x0f && op1 != 0x0d) {
7d1b0095 8218 tcg_temp_free_i32(tmp2);
e9bb4aa9 8219 }
9ee6e8bb
PB
8220 } else {
8221 /* other instructions */
8222 op1 = (insn >> 24) & 0xf;
8223 switch(op1) {
8224 case 0x0:
8225 case 0x1:
8226 /* multiplies, extra load/stores */
8227 sh = (insn >> 5) & 3;
8228 if (sh == 0) {
8229 if (op1 == 0x0) {
8230 rd = (insn >> 16) & 0xf;
8231 rn = (insn >> 12) & 0xf;
8232 rs = (insn >> 8) & 0xf;
8233 rm = (insn) & 0xf;
8234 op1 = (insn >> 20) & 0xf;
8235 switch (op1) {
8236 case 0: case 1: case 2: case 3: case 6:
8237 /* 32 bit mul */
5e3f878a
PB
8238 tmp = load_reg(s, rs);
8239 tmp2 = load_reg(s, rm);
8240 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 8241 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8242 if (insn & (1 << 22)) {
8243 /* Subtract (mls) */
8244 ARCH(6T2);
5e3f878a
PB
8245 tmp2 = load_reg(s, rn);
8246 tcg_gen_sub_i32(tmp, tmp2, tmp);
7d1b0095 8247 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8248 } else if (insn & (1 << 21)) {
8249 /* Add */
5e3f878a
PB
8250 tmp2 = load_reg(s, rn);
8251 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8252 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8253 }
8254 if (insn & (1 << 20))
5e3f878a
PB
8255 gen_logic_CC(tmp);
8256 store_reg(s, rd, tmp);
9ee6e8bb 8257 break;
8aac08b1
AJ
8258 case 4:
8259 /* 64 bit mul double accumulate (UMAAL) */
8260 ARCH(6);
8261 tmp = load_reg(s, rs);
8262 tmp2 = load_reg(s, rm);
8263 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
8264 gen_addq_lo(s, tmp64, rn);
8265 gen_addq_lo(s, tmp64, rd);
8266 gen_storeq_reg(s, rn, rd, tmp64);
8267 tcg_temp_free_i64(tmp64);
8268 break;
8269 case 8: case 9: case 10: case 11:
8270 case 12: case 13: case 14: case 15:
8271 /* 64 bit mul: UMULL, UMLAL, SMULL, SMLAL. */
5e3f878a
PB
8272 tmp = load_reg(s, rs);
8273 tmp2 = load_reg(s, rm);
8aac08b1 8274 if (insn & (1 << 22)) {
c9f10124 8275 tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1 8276 } else {
c9f10124 8277 tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
8aac08b1
AJ
8278 }
8279 if (insn & (1 << 21)) { /* mult accumulate */
39d5492a
PM
8280 TCGv_i32 al = load_reg(s, rn);
8281 TCGv_i32 ah = load_reg(s, rd);
c9f10124 8282 tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, al, ah);
39d5492a
PM
8283 tcg_temp_free_i32(al);
8284 tcg_temp_free_i32(ah);
9ee6e8bb 8285 }
8aac08b1 8286 if (insn & (1 << 20)) {
c9f10124 8287 gen_logicq_cc(tmp, tmp2);
8aac08b1 8288 }
c9f10124
RH
8289 store_reg(s, rn, tmp);
8290 store_reg(s, rd, tmp2);
9ee6e8bb 8291 break;
8aac08b1
AJ
8292 default:
8293 goto illegal_op;
9ee6e8bb
PB
8294 }
8295 } else {
8296 rn = (insn >> 16) & 0xf;
8297 rd = (insn >> 12) & 0xf;
8298 if (insn & (1 << 23)) {
8299 /* load/store exclusive */
2359bf80 8300 int op2 = (insn >> 8) & 3;
86753403 8301 op1 = (insn >> 21) & 0x3;
2359bf80
MR
8302
8303 switch (op2) {
8304 case 0: /* lda/stl */
8305 if (op1 == 1) {
8306 goto illegal_op;
8307 }
8308 ARCH(8);
8309 break;
8310 case 1: /* reserved */
8311 goto illegal_op;
8312 case 2: /* ldaex/stlex */
8313 ARCH(8);
8314 break;
8315 case 3: /* ldrex/strex */
8316 if (op1) {
8317 ARCH(6K);
8318 } else {
8319 ARCH(6);
8320 }
8321 break;
8322 }
8323
3174f8e9 8324 addr = tcg_temp_local_new_i32();
98a46317 8325 load_reg_var(s, addr, rn);
2359bf80
MR
8326
8327 /* Since the emulation does not have barriers,
8328 the acquire/release semantics need no special
8329 handling */
8330 if (op2 == 0) {
8331 if (insn & (1 << 20)) {
8332 tmp = tcg_temp_new_i32();
8333 switch (op1) {
8334 case 0: /* lda */
6ce2faf4 8335 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
8336 break;
8337 case 2: /* ldab */
6ce2faf4 8338 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
8339 break;
8340 case 3: /* ldah */
6ce2faf4 8341 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
8342 break;
8343 default:
8344 abort();
8345 }
8346 store_reg(s, rd, tmp);
8347 } else {
8348 rm = insn & 0xf;
8349 tmp = load_reg(s, rm);
8350 switch (op1) {
8351 case 0: /* stl */
6ce2faf4 8352 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
8353 break;
8354 case 2: /* stlb */
6ce2faf4 8355 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
8356 break;
8357 case 3: /* stlh */
6ce2faf4 8358 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
8359 break;
8360 default:
8361 abort();
8362 }
8363 tcg_temp_free_i32(tmp);
8364 }
8365 } else if (insn & (1 << 20)) {
86753403
PB
8366 switch (op1) {
8367 case 0: /* ldrex */
426f5abc 8368 gen_load_exclusive(s, rd, 15, addr, 2);
86753403
PB
8369 break;
8370 case 1: /* ldrexd */
426f5abc 8371 gen_load_exclusive(s, rd, rd + 1, addr, 3);
86753403
PB
8372 break;
8373 case 2: /* ldrexb */
426f5abc 8374 gen_load_exclusive(s, rd, 15, addr, 0);
86753403
PB
8375 break;
8376 case 3: /* ldrexh */
426f5abc 8377 gen_load_exclusive(s, rd, 15, addr, 1);
86753403
PB
8378 break;
8379 default:
8380 abort();
8381 }
9ee6e8bb
PB
8382 } else {
8383 rm = insn & 0xf;
86753403
PB
8384 switch (op1) {
8385 case 0: /* strex */
426f5abc 8386 gen_store_exclusive(s, rd, rm, 15, addr, 2);
86753403
PB
8387 break;
8388 case 1: /* strexd */
502e64fe 8389 gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
86753403
PB
8390 break;
8391 case 2: /* strexb */
426f5abc 8392 gen_store_exclusive(s, rd, rm, 15, addr, 0);
86753403
PB
8393 break;
8394 case 3: /* strexh */
426f5abc 8395 gen_store_exclusive(s, rd, rm, 15, addr, 1);
86753403
PB
8396 break;
8397 default:
8398 abort();
8399 }
9ee6e8bb 8400 }
39d5492a 8401 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8402 } else {
8403 /* SWP instruction */
8404 rm = (insn) & 0xf;
8405
8984bd2e
PB
8406 /* ??? This is not really atomic. However we know
8407 we never have multiple CPUs running in parallel,
8408 so it is good enough. */
8409 addr = load_reg(s, rn);
8410 tmp = load_reg(s, rm);
5a839c0d 8411 tmp2 = tcg_temp_new_i32();
9ee6e8bb 8412 if (insn & (1 << 22)) {
6ce2faf4
EI
8413 gen_aa32_ld8u(tmp2, addr, get_mem_index(s));
8414 gen_aa32_st8(tmp, addr, get_mem_index(s));
9ee6e8bb 8415 } else {
6ce2faf4
EI
8416 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
8417 gen_aa32_st32(tmp, addr, get_mem_index(s));
9ee6e8bb 8418 }
5a839c0d 8419 tcg_temp_free_i32(tmp);
7d1b0095 8420 tcg_temp_free_i32(addr);
8984bd2e 8421 store_reg(s, rd, tmp2);
9ee6e8bb
PB
8422 }
8423 }
8424 } else {
8425 int address_offset;
8426 int load;
8427 /* Misc load/store */
8428 rn = (insn >> 16) & 0xf;
8429 rd = (insn >> 12) & 0xf;
b0109805 8430 addr = load_reg(s, rn);
9ee6e8bb 8431 if (insn & (1 << 24))
b0109805 8432 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
8433 address_offset = 0;
8434 if (insn & (1 << 20)) {
8435 /* load */
5a839c0d 8436 tmp = tcg_temp_new_i32();
9ee6e8bb
PB
8437 switch(sh) {
8438 case 1:
6ce2faf4 8439 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8440 break;
8441 case 2:
6ce2faf4 8442 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8443 break;
8444 default:
8445 case 3:
6ce2faf4 8446 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8447 break;
8448 }
8449 load = 1;
8450 } else if (sh & 2) {
be5e7a76 8451 ARCH(5TE);
9ee6e8bb
PB
8452 /* doubleword */
8453 if (sh & 1) {
8454 /* store */
b0109805 8455 tmp = load_reg(s, rd);
6ce2faf4 8456 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8457 tcg_temp_free_i32(tmp);
b0109805
PB
8458 tcg_gen_addi_i32(addr, addr, 4);
8459 tmp = load_reg(s, rd + 1);
6ce2faf4 8460 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8461 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8462 load = 0;
8463 } else {
8464 /* load */
5a839c0d 8465 tmp = tcg_temp_new_i32();
6ce2faf4 8466 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
8467 store_reg(s, rd, tmp);
8468 tcg_gen_addi_i32(addr, addr, 4);
5a839c0d 8469 tmp = tcg_temp_new_i32();
6ce2faf4 8470 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb
PB
8471 rd++;
8472 load = 1;
8473 }
8474 address_offset = -4;
8475 } else {
8476 /* store */
b0109805 8477 tmp = load_reg(s, rd);
6ce2faf4 8478 gen_aa32_st16(tmp, addr, get_mem_index(s));
5a839c0d 8479 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8480 load = 0;
8481 }
8482 /* Perform base writeback before the loaded value to
8483 ensure correct behavior with overlapping index registers.
8484 ldrd with base writeback is is undefined if the
8485 destination and index registers overlap. */
8486 if (!(insn & (1 << 24))) {
b0109805
PB
8487 gen_add_datah_offset(s, insn, address_offset, addr);
8488 store_reg(s, rn, addr);
9ee6e8bb
PB
8489 } else if (insn & (1 << 21)) {
8490 if (address_offset)
b0109805
PB
8491 tcg_gen_addi_i32(addr, addr, address_offset);
8492 store_reg(s, rn, addr);
8493 } else {
7d1b0095 8494 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8495 }
8496 if (load) {
8497 /* Complete the load. */
b0109805 8498 store_reg(s, rd, tmp);
9ee6e8bb
PB
8499 }
8500 }
8501 break;
8502 case 0x4:
8503 case 0x5:
8504 goto do_ldst;
8505 case 0x6:
8506 case 0x7:
8507 if (insn & (1 << 4)) {
8508 ARCH(6);
8509 /* Armv6 Media instructions. */
8510 rm = insn & 0xf;
8511 rn = (insn >> 16) & 0xf;
2c0262af 8512 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
8513 rs = (insn >> 8) & 0xf;
8514 switch ((insn >> 23) & 3) {
8515 case 0: /* Parallel add/subtract. */
8516 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
8517 tmp = load_reg(s, rn);
8518 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
8519 sh = (insn >> 5) & 7;
8520 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
8521 goto illegal_op;
6ddbc6e4 8522 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
7d1b0095 8523 tcg_temp_free_i32(tmp2);
6ddbc6e4 8524 store_reg(s, rd, tmp);
9ee6e8bb
PB
8525 break;
8526 case 1:
8527 if ((insn & 0x00700020) == 0) {
6c95676b 8528 /* Halfword pack. */
3670669c
PB
8529 tmp = load_reg(s, rn);
8530 tmp2 = load_reg(s, rm);
9ee6e8bb 8531 shift = (insn >> 7) & 0x1f;
3670669c
PB
8532 if (insn & (1 << 6)) {
8533 /* pkhtb */
22478e79
AZ
8534 if (shift == 0)
8535 shift = 31;
8536 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 8537 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 8538 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
8539 } else {
8540 /* pkhbt */
22478e79
AZ
8541 if (shift)
8542 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 8543 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
8544 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
8545 }
8546 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 8547 tcg_temp_free_i32(tmp2);
3670669c 8548 store_reg(s, rd, tmp);
9ee6e8bb
PB
8549 } else if ((insn & 0x00200020) == 0x00200000) {
8550 /* [us]sat */
6ddbc6e4 8551 tmp = load_reg(s, rm);
9ee6e8bb
PB
8552 shift = (insn >> 7) & 0x1f;
8553 if (insn & (1 << 6)) {
8554 if (shift == 0)
8555 shift = 31;
6ddbc6e4 8556 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 8557 } else {
6ddbc6e4 8558 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
8559 }
8560 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8561 tmp2 = tcg_const_i32(sh);
8562 if (insn & (1 << 22))
9ef39277 8563 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
40d3c433 8564 else
9ef39277 8565 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
40d3c433 8566 tcg_temp_free_i32(tmp2);
6ddbc6e4 8567 store_reg(s, rd, tmp);
9ee6e8bb
PB
8568 } else if ((insn & 0x00300fe0) == 0x00200f20) {
8569 /* [us]sat16 */
6ddbc6e4 8570 tmp = load_reg(s, rm);
9ee6e8bb 8571 sh = (insn >> 16) & 0x1f;
40d3c433
CL
8572 tmp2 = tcg_const_i32(sh);
8573 if (insn & (1 << 22))
9ef39277 8574 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8575 else
9ef39277 8576 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
40d3c433 8577 tcg_temp_free_i32(tmp2);
6ddbc6e4 8578 store_reg(s, rd, tmp);
9ee6e8bb
PB
8579 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
8580 /* Select bytes. */
6ddbc6e4
PB
8581 tmp = load_reg(s, rn);
8582 tmp2 = load_reg(s, rm);
7d1b0095 8583 tmp3 = tcg_temp_new_i32();
0ecb72a5 8584 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
6ddbc6e4 8585 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
8586 tcg_temp_free_i32(tmp3);
8587 tcg_temp_free_i32(tmp2);
6ddbc6e4 8588 store_reg(s, rd, tmp);
9ee6e8bb 8589 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 8590 tmp = load_reg(s, rm);
9ee6e8bb 8591 shift = (insn >> 10) & 3;
1301f322 8592 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
8593 rotate, a shift is sufficient. */
8594 if (shift != 0)
f669df27 8595 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
8596 op1 = (insn >> 20) & 7;
8597 switch (op1) {
5e3f878a
PB
8598 case 0: gen_sxtb16(tmp); break;
8599 case 2: gen_sxtb(tmp); break;
8600 case 3: gen_sxth(tmp); break;
8601 case 4: gen_uxtb16(tmp); break;
8602 case 6: gen_uxtb(tmp); break;
8603 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
8604 default: goto illegal_op;
8605 }
8606 if (rn != 15) {
5e3f878a 8607 tmp2 = load_reg(s, rn);
9ee6e8bb 8608 if ((op1 & 3) == 0) {
5e3f878a 8609 gen_add16(tmp, tmp2);
9ee6e8bb 8610 } else {
5e3f878a 8611 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8612 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8613 }
8614 }
6c95676b 8615 store_reg(s, rd, tmp);
9ee6e8bb
PB
8616 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
8617 /* rev */
b0109805 8618 tmp = load_reg(s, rm);
9ee6e8bb
PB
8619 if (insn & (1 << 22)) {
8620 if (insn & (1 << 7)) {
b0109805 8621 gen_revsh(tmp);
9ee6e8bb
PB
8622 } else {
8623 ARCH(6T2);
b0109805 8624 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
8625 }
8626 } else {
8627 if (insn & (1 << 7))
b0109805 8628 gen_rev16(tmp);
9ee6e8bb 8629 else
66896cb8 8630 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb 8631 }
b0109805 8632 store_reg(s, rd, tmp);
9ee6e8bb
PB
8633 } else {
8634 goto illegal_op;
8635 }
8636 break;
8637 case 2: /* Multiplies (Type 3). */
41e9564d
PM
8638 switch ((insn >> 20) & 0x7) {
8639 case 5:
8640 if (((insn >> 6) ^ (insn >> 7)) & 1) {
8641 /* op2 not 00x or 11x : UNDEF */
8642 goto illegal_op;
8643 }
838fa72d
AJ
8644 /* Signed multiply most significant [accumulate].
8645 (SMMUL, SMMLA, SMMLS) */
41e9564d
PM
8646 tmp = load_reg(s, rm);
8647 tmp2 = load_reg(s, rs);
a7812ae4 8648 tmp64 = gen_muls_i64_i32(tmp, tmp2);
838fa72d 8649
955a7dd5 8650 if (rd != 15) {
838fa72d 8651 tmp = load_reg(s, rd);
9ee6e8bb 8652 if (insn & (1 << 6)) {
838fa72d 8653 tmp64 = gen_subq_msw(tmp64, tmp);
9ee6e8bb 8654 } else {
838fa72d 8655 tmp64 = gen_addq_msw(tmp64, tmp);
9ee6e8bb
PB
8656 }
8657 }
838fa72d
AJ
8658 if (insn & (1 << 5)) {
8659 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
8660 }
8661 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 8662 tmp = tcg_temp_new_i32();
838fa72d
AJ
8663 tcg_gen_trunc_i64_i32(tmp, tmp64);
8664 tcg_temp_free_i64(tmp64);
955a7dd5 8665 store_reg(s, rn, tmp);
41e9564d
PM
8666 break;
8667 case 0:
8668 case 4:
8669 /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
8670 if (insn & (1 << 7)) {
8671 goto illegal_op;
8672 }
8673 tmp = load_reg(s, rm);
8674 tmp2 = load_reg(s, rs);
9ee6e8bb 8675 if (insn & (1 << 5))
5e3f878a
PB
8676 gen_swap_half(tmp2);
8677 gen_smul_dual(tmp, tmp2);
9ee6e8bb 8678 if (insn & (1 << 22)) {
5e3f878a 8679 /* smlald, smlsld */
33bbd75a
PC
8680 TCGv_i64 tmp64_2;
8681
a7812ae4 8682 tmp64 = tcg_temp_new_i64();
33bbd75a 8683 tmp64_2 = tcg_temp_new_i64();
a7812ae4 8684 tcg_gen_ext_i32_i64(tmp64, tmp);
33bbd75a 8685 tcg_gen_ext_i32_i64(tmp64_2, tmp2);
7d1b0095 8686 tcg_temp_free_i32(tmp);
33bbd75a
PC
8687 tcg_temp_free_i32(tmp2);
8688 if (insn & (1 << 6)) {
8689 tcg_gen_sub_i64(tmp64, tmp64, tmp64_2);
8690 } else {
8691 tcg_gen_add_i64(tmp64, tmp64, tmp64_2);
8692 }
8693 tcg_temp_free_i64(tmp64_2);
a7812ae4
PB
8694 gen_addq(s, tmp64, rd, rn);
8695 gen_storeq_reg(s, rd, rn, tmp64);
b75263d6 8696 tcg_temp_free_i64(tmp64);
9ee6e8bb 8697 } else {
5e3f878a 8698 /* smuad, smusd, smlad, smlsd */
33bbd75a
PC
8699 if (insn & (1 << 6)) {
8700 /* This subtraction cannot overflow. */
8701 tcg_gen_sub_i32(tmp, tmp, tmp2);
8702 } else {
8703 /* This addition cannot overflow 32 bits;
8704 * however it may overflow considered as a
8705 * signed operation, in which case we must set
8706 * the Q flag.
8707 */
8708 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
8709 }
8710 tcg_temp_free_i32(tmp2);
22478e79 8711 if (rd != 15)
9ee6e8bb 8712 {
22478e79 8713 tmp2 = load_reg(s, rd);
9ef39277 8714 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 8715 tcg_temp_free_i32(tmp2);
9ee6e8bb 8716 }
22478e79 8717 store_reg(s, rn, tmp);
9ee6e8bb 8718 }
41e9564d 8719 break;
b8b8ea05
PM
8720 case 1:
8721 case 3:
8722 /* SDIV, UDIV */
d614a513 8723 if (!arm_dc_feature(s, ARM_FEATURE_ARM_DIV)) {
b8b8ea05
PM
8724 goto illegal_op;
8725 }
8726 if (((insn >> 5) & 7) || (rd != 15)) {
8727 goto illegal_op;
8728 }
8729 tmp = load_reg(s, rm);
8730 tmp2 = load_reg(s, rs);
8731 if (insn & (1 << 21)) {
8732 gen_helper_udiv(tmp, tmp, tmp2);
8733 } else {
8734 gen_helper_sdiv(tmp, tmp, tmp2);
8735 }
8736 tcg_temp_free_i32(tmp2);
8737 store_reg(s, rn, tmp);
8738 break;
41e9564d
PM
8739 default:
8740 goto illegal_op;
9ee6e8bb
PB
8741 }
8742 break;
8743 case 3:
8744 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
8745 switch (op1) {
8746 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
8747 ARCH(6);
8748 tmp = load_reg(s, rm);
8749 tmp2 = load_reg(s, rs);
8750 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 8751 tcg_temp_free_i32(tmp2);
ded9d295
AZ
8752 if (rd != 15) {
8753 tmp2 = load_reg(s, rd);
6ddbc6e4 8754 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 8755 tcg_temp_free_i32(tmp2);
9ee6e8bb 8756 }
ded9d295 8757 store_reg(s, rn, tmp);
9ee6e8bb
PB
8758 break;
8759 case 0x20: case 0x24: case 0x28: case 0x2c:
8760 /* Bitfield insert/clear. */
8761 ARCH(6T2);
8762 shift = (insn >> 7) & 0x1f;
8763 i = (insn >> 16) & 0x1f;
45140a57
KB
8764 if (i < shift) {
8765 /* UNPREDICTABLE; we choose to UNDEF */
8766 goto illegal_op;
8767 }
9ee6e8bb
PB
8768 i = i + 1 - shift;
8769 if (rm == 15) {
7d1b0095 8770 tmp = tcg_temp_new_i32();
5e3f878a 8771 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 8772 } else {
5e3f878a 8773 tmp = load_reg(s, rm);
9ee6e8bb
PB
8774 }
8775 if (i != 32) {
5e3f878a 8776 tmp2 = load_reg(s, rd);
d593c48e 8777 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, i);
7d1b0095 8778 tcg_temp_free_i32(tmp2);
9ee6e8bb 8779 }
5e3f878a 8780 store_reg(s, rd, tmp);
9ee6e8bb
PB
8781 break;
8782 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
8783 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
4cc633c3 8784 ARCH(6T2);
5e3f878a 8785 tmp = load_reg(s, rm);
9ee6e8bb
PB
8786 shift = (insn >> 7) & 0x1f;
8787 i = ((insn >> 16) & 0x1f) + 1;
8788 if (shift + i > 32)
8789 goto illegal_op;
8790 if (i < 32) {
8791 if (op1 & 0x20) {
5e3f878a 8792 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 8793 } else {
5e3f878a 8794 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
8795 }
8796 }
5e3f878a 8797 store_reg(s, rd, tmp);
9ee6e8bb
PB
8798 break;
8799 default:
8800 goto illegal_op;
8801 }
8802 break;
8803 }
8804 break;
8805 }
8806 do_ldst:
8807 /* Check for undefined extension instructions
8808 * per the ARM Bible IE:
8809 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
8810 */
8811 sh = (0xf << 20) | (0xf << 4);
8812 if (op1 == 0x7 && ((insn & sh) == sh))
8813 {
8814 goto illegal_op;
8815 }
8816 /* load/store byte/word */
8817 rn = (insn >> 16) & 0xf;
8818 rd = (insn >> 12) & 0xf;
b0109805 8819 tmp2 = load_reg(s, rn);
a99caa48
PM
8820 if ((insn & 0x01200000) == 0x00200000) {
8821 /* ldrt/strt */
579d21cc 8822 i = get_a32_user_mem_index(s);
a99caa48
PM
8823 } else {
8824 i = get_mem_index(s);
8825 }
9ee6e8bb 8826 if (insn & (1 << 24))
b0109805 8827 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
8828 if (insn & (1 << 20)) {
8829 /* load */
5a839c0d 8830 tmp = tcg_temp_new_i32();
9ee6e8bb 8831 if (insn & (1 << 22)) {
08307563 8832 gen_aa32_ld8u(tmp, tmp2, i);
9ee6e8bb 8833 } else {
08307563 8834 gen_aa32_ld32u(tmp, tmp2, i);
9ee6e8bb 8835 }
9ee6e8bb
PB
8836 } else {
8837 /* store */
b0109805 8838 tmp = load_reg(s, rd);
5a839c0d 8839 if (insn & (1 << 22)) {
08307563 8840 gen_aa32_st8(tmp, tmp2, i);
5a839c0d 8841 } else {
08307563 8842 gen_aa32_st32(tmp, tmp2, i);
5a839c0d
PM
8843 }
8844 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8845 }
8846 if (!(insn & (1 << 24))) {
b0109805
PB
8847 gen_add_data_offset(s, insn, tmp2);
8848 store_reg(s, rn, tmp2);
8849 } else if (insn & (1 << 21)) {
8850 store_reg(s, rn, tmp2);
8851 } else {
7d1b0095 8852 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
8853 }
8854 if (insn & (1 << 20)) {
8855 /* Complete the load. */
7dcc1f89 8856 store_reg_from_load(s, rd, tmp);
9ee6e8bb
PB
8857 }
8858 break;
8859 case 0x08:
8860 case 0x09:
8861 {
da3e53dd
PM
8862 int j, n, loaded_base;
8863 bool exc_return = false;
8864 bool is_load = extract32(insn, 20, 1);
8865 bool user = false;
39d5492a 8866 TCGv_i32 loaded_var;
9ee6e8bb
PB
8867 /* load/store multiple words */
8868 /* XXX: store correct base if write back */
9ee6e8bb 8869 if (insn & (1 << 22)) {
da3e53dd 8870 /* LDM (user), LDM (exception return) and STM (user) */
9ee6e8bb
PB
8871 if (IS_USER(s))
8872 goto illegal_op; /* only usable in supervisor mode */
8873
da3e53dd
PM
8874 if (is_load && extract32(insn, 15, 1)) {
8875 exc_return = true;
8876 } else {
8877 user = true;
8878 }
9ee6e8bb
PB
8879 }
8880 rn = (insn >> 16) & 0xf;
b0109805 8881 addr = load_reg(s, rn);
9ee6e8bb
PB
8882
8883 /* compute total size */
8884 loaded_base = 0;
39d5492a 8885 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
8886 n = 0;
8887 for(i=0;i<16;i++) {
8888 if (insn & (1 << i))
8889 n++;
8890 }
8891 /* XXX: test invalid n == 0 case ? */
8892 if (insn & (1 << 23)) {
8893 if (insn & (1 << 24)) {
8894 /* pre increment */
b0109805 8895 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8896 } else {
8897 /* post increment */
8898 }
8899 } else {
8900 if (insn & (1 << 24)) {
8901 /* pre decrement */
b0109805 8902 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8903 } else {
8904 /* post decrement */
8905 if (n != 1)
b0109805 8906 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8907 }
8908 }
8909 j = 0;
8910 for(i=0;i<16;i++) {
8911 if (insn & (1 << i)) {
da3e53dd 8912 if (is_load) {
9ee6e8bb 8913 /* load */
5a839c0d 8914 tmp = tcg_temp_new_i32();
6ce2faf4 8915 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
be5e7a76 8916 if (user) {
b75263d6 8917 tmp2 = tcg_const_i32(i);
1ce94f81 8918 gen_helper_set_user_reg(cpu_env, tmp2, tmp);
b75263d6 8919 tcg_temp_free_i32(tmp2);
7d1b0095 8920 tcg_temp_free_i32(tmp);
9ee6e8bb 8921 } else if (i == rn) {
b0109805 8922 loaded_var = tmp;
9ee6e8bb
PB
8923 loaded_base = 1;
8924 } else {
7dcc1f89 8925 store_reg_from_load(s, i, tmp);
9ee6e8bb
PB
8926 }
8927 } else {
8928 /* store */
8929 if (i == 15) {
8930 /* special case: r15 = PC + 8 */
8931 val = (long)s->pc + 4;
7d1b0095 8932 tmp = tcg_temp_new_i32();
b0109805 8933 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 8934 } else if (user) {
7d1b0095 8935 tmp = tcg_temp_new_i32();
b75263d6 8936 tmp2 = tcg_const_i32(i);
9ef39277 8937 gen_helper_get_user_reg(tmp, cpu_env, tmp2);
b75263d6 8938 tcg_temp_free_i32(tmp2);
9ee6e8bb 8939 } else {
b0109805 8940 tmp = load_reg(s, i);
9ee6e8bb 8941 }
6ce2faf4 8942 gen_aa32_st32(tmp, addr, get_mem_index(s));
5a839c0d 8943 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8944 }
8945 j++;
8946 /* no need to add after the last transfer */
8947 if (j != n)
b0109805 8948 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8949 }
8950 }
8951 if (insn & (1 << 21)) {
8952 /* write back */
8953 if (insn & (1 << 23)) {
8954 if (insn & (1 << 24)) {
8955 /* pre increment */
8956 } else {
8957 /* post increment */
b0109805 8958 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
8959 }
8960 } else {
8961 if (insn & (1 << 24)) {
8962 /* pre decrement */
8963 if (n != 1)
b0109805 8964 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
8965 } else {
8966 /* post decrement */
b0109805 8967 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
8968 }
8969 }
b0109805
PB
8970 store_reg(s, rn, addr);
8971 } else {
7d1b0095 8972 tcg_temp_free_i32(addr);
9ee6e8bb
PB
8973 }
8974 if (loaded_base) {
b0109805 8975 store_reg(s, rn, loaded_var);
9ee6e8bb 8976 }
da3e53dd 8977 if (exc_return) {
9ee6e8bb 8978 /* Restore CPSR from SPSR. */
d9ba4830 8979 tmp = load_cpu_field(spsr);
4051e12c 8980 gen_set_cpsr(tmp, CPSR_ERET_MASK);
7d1b0095 8981 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
8982 s->is_jmp = DISAS_UPDATE;
8983 }
8984 }
8985 break;
8986 case 0xa:
8987 case 0xb:
8988 {
8989 int32_t offset;
8990
8991 /* branch (and link) */
8992 val = (int32_t)s->pc;
8993 if (insn & (1 << 24)) {
7d1b0095 8994 tmp = tcg_temp_new_i32();
5e3f878a
PB
8995 tcg_gen_movi_i32(tmp, val);
8996 store_reg(s, 14, tmp);
9ee6e8bb 8997 }
534df156
PM
8998 offset = sextract32(insn << 2, 0, 26);
8999 val += offset + 4;
9ee6e8bb
PB
9000 gen_jmp(s, val);
9001 }
9002 break;
9003 case 0xc:
9004 case 0xd:
9005 case 0xe:
6a57f3eb
WN
9006 if (((insn >> 8) & 0xe) == 10) {
9007 /* VFP. */
7dcc1f89 9008 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9009 goto illegal_op;
9010 }
7dcc1f89 9011 } else if (disas_coproc_insn(s, insn)) {
6a57f3eb 9012 /* Coprocessor. */
9ee6e8bb 9013 goto illegal_op;
6a57f3eb 9014 }
9ee6e8bb
PB
9015 break;
9016 case 0xf:
9017 /* swi */
eaed129d 9018 gen_set_pc_im(s, s->pc);
d4a2dc67 9019 s->svc_imm = extract32(insn, 0, 24);
9ee6e8bb
PB
9020 s->is_jmp = DISAS_SWI;
9021 break;
9022 default:
9023 illegal_op:
d4a2dc67 9024 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
9025 break;
9026 }
9027 }
9028}
9029
/* Predicate: Thumb-2 data-processing opcodes 0..7 are the logical
 * operations (AND/BIC/ORR/ORN/EOR/...), which update the carry flag
 * from the shifter rather than from an arithmetic result.
 * Returns 1 for a logical op, 0 otherwise.
 */
static int
thumb2_logic_op(int op)
{
    return op < 8 ? 1 : 0;
}
9036
9037/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
9038 then set condition code flags based on the result of the operation.
9039 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
9040 to the high bit of T1.
9041 Returns zero if the opcode is valid. */
9042
9043static int
39d5492a
PM
9044gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
9045 TCGv_i32 t0, TCGv_i32 t1)
9ee6e8bb
PB
9046{
9047 int logic_cc;
9048
9049 logic_cc = 0;
9050 switch (op) {
9051 case 0: /* and */
396e467c 9052 tcg_gen_and_i32(t0, t0, t1);
9ee6e8bb
PB
9053 logic_cc = conds;
9054 break;
9055 case 1: /* bic */
f669df27 9056 tcg_gen_andc_i32(t0, t0, t1);
9ee6e8bb
PB
9057 logic_cc = conds;
9058 break;
9059 case 2: /* orr */
396e467c 9060 tcg_gen_or_i32(t0, t0, t1);
9ee6e8bb
PB
9061 logic_cc = conds;
9062 break;
9063 case 3: /* orn */
29501f1b 9064 tcg_gen_orc_i32(t0, t0, t1);
9ee6e8bb
PB
9065 logic_cc = conds;
9066 break;
9067 case 4: /* eor */
396e467c 9068 tcg_gen_xor_i32(t0, t0, t1);
9ee6e8bb
PB
9069 logic_cc = conds;
9070 break;
9071 case 8: /* add */
9072 if (conds)
72485ec4 9073 gen_add_CC(t0, t0, t1);
9ee6e8bb 9074 else
396e467c 9075 tcg_gen_add_i32(t0, t0, t1);
9ee6e8bb
PB
9076 break;
9077 case 10: /* adc */
9078 if (conds)
49b4c31e 9079 gen_adc_CC(t0, t0, t1);
9ee6e8bb 9080 else
396e467c 9081 gen_adc(t0, t1);
9ee6e8bb
PB
9082 break;
9083 case 11: /* sbc */
2de68a49
RH
9084 if (conds) {
9085 gen_sbc_CC(t0, t0, t1);
9086 } else {
396e467c 9087 gen_sub_carry(t0, t0, t1);
2de68a49 9088 }
9ee6e8bb
PB
9089 break;
9090 case 13: /* sub */
9091 if (conds)
72485ec4 9092 gen_sub_CC(t0, t0, t1);
9ee6e8bb 9093 else
396e467c 9094 tcg_gen_sub_i32(t0, t0, t1);
9ee6e8bb
PB
9095 break;
9096 case 14: /* rsb */
9097 if (conds)
72485ec4 9098 gen_sub_CC(t0, t1, t0);
9ee6e8bb 9099 else
396e467c 9100 tcg_gen_sub_i32(t0, t1, t0);
9ee6e8bb
PB
9101 break;
9102 default: /* 5, 6, 7, 9, 12, 15. */
9103 return 1;
9104 }
9105 if (logic_cc) {
396e467c 9106 gen_logic_CC(t0);
9ee6e8bb 9107 if (shifter_out)
396e467c 9108 gen_set_CF_bit31(t1);
9ee6e8bb
PB
9109 }
9110 return 0;
9111}
9112
9113/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
9114 is not legal. */
0ecb72a5 9115static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
9ee6e8bb 9116{
b0109805 9117 uint32_t insn, imm, shift, offset;
9ee6e8bb 9118 uint32_t rd, rn, rm, rs;
39d5492a
PM
9119 TCGv_i32 tmp;
9120 TCGv_i32 tmp2;
9121 TCGv_i32 tmp3;
9122 TCGv_i32 addr;
a7812ae4 9123 TCGv_i64 tmp64;
9ee6e8bb
PB
9124 int op;
9125 int shiftop;
9126 int conds;
9127 int logic_cc;
9128
d614a513
PM
9129 if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
9130 || arm_dc_feature(s, ARM_FEATURE_M))) {
601d70b9 9131 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
9132 16-bit instructions to get correct prefetch abort behavior. */
9133 insn = insn_hw1;
9134 if ((insn & (1 << 12)) == 0) {
be5e7a76 9135 ARCH(5);
9ee6e8bb
PB
9136 /* Second half of blx. */
9137 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
9138 tmp = load_reg(s, 14);
9139 tcg_gen_addi_i32(tmp, tmp, offset);
9140 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 9141
7d1b0095 9142 tmp2 = tcg_temp_new_i32();
b0109805 9143 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9144 store_reg(s, 14, tmp2);
9145 gen_bx(s, tmp);
9ee6e8bb
PB
9146 return 0;
9147 }
9148 if (insn & (1 << 11)) {
9149 /* Second half of bl. */
9150 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 9151 tmp = load_reg(s, 14);
6a0d8a1d 9152 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 9153
7d1b0095 9154 tmp2 = tcg_temp_new_i32();
b0109805 9155 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
9156 store_reg(s, 14, tmp2);
9157 gen_bx(s, tmp);
9ee6e8bb
PB
9158 return 0;
9159 }
9160 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
9161 /* Instruction spans a page boundary. Implement it as two
9162 16-bit instructions in case the second half causes an
9163 prefetch abort. */
9164 offset = ((int32_t)insn << 21) >> 9;
396e467c 9165 tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
9ee6e8bb
PB
9166 return 0;
9167 }
9168 /* Fall through to 32-bit decode. */
9169 }
9170
d31dd73e 9171 insn = arm_lduw_code(env, s->pc, s->bswap_code);
9ee6e8bb
PB
9172 s->pc += 2;
9173 insn |= (uint32_t)insn_hw1 << 16;
9174
9175 if ((insn & 0xf800e800) != 0xf000e800) {
9176 ARCH(6T2);
9177 }
9178
9179 rn = (insn >> 16) & 0xf;
9180 rs = (insn >> 12) & 0xf;
9181 rd = (insn >> 8) & 0xf;
9182 rm = insn & 0xf;
9183 switch ((insn >> 25) & 0xf) {
9184 case 0: case 1: case 2: case 3:
9185 /* 16-bit instructions. Should never happen. */
9186 abort();
9187 case 4:
9188 if (insn & (1 << 22)) {
9189 /* Other load/store, table branch. */
9190 if (insn & 0x01200000) {
9191 /* Load/store doubleword. */
9192 if (rn == 15) {
7d1b0095 9193 addr = tcg_temp_new_i32();
b0109805 9194 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 9195 } else {
b0109805 9196 addr = load_reg(s, rn);
9ee6e8bb
PB
9197 }
9198 offset = (insn & 0xff) * 4;
9199 if ((insn & (1 << 23)) == 0)
9200 offset = -offset;
9201 if (insn & (1 << 24)) {
b0109805 9202 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
9203 offset = 0;
9204 }
9205 if (insn & (1 << 20)) {
9206 /* ldrd */
e2592fad 9207 tmp = tcg_temp_new_i32();
6ce2faf4 9208 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805
PB
9209 store_reg(s, rs, tmp);
9210 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9211 tmp = tcg_temp_new_i32();
6ce2faf4 9212 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9213 store_reg(s, rd, tmp);
9ee6e8bb
PB
9214 } else {
9215 /* strd */
b0109805 9216 tmp = load_reg(s, rs);
6ce2faf4 9217 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9218 tcg_temp_free_i32(tmp);
b0109805
PB
9219 tcg_gen_addi_i32(addr, addr, 4);
9220 tmp = load_reg(s, rd);
6ce2faf4 9221 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9222 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9223 }
9224 if (insn & (1 << 21)) {
9225 /* Base writeback. */
9226 if (rn == 15)
9227 goto illegal_op;
b0109805
PB
9228 tcg_gen_addi_i32(addr, addr, offset - 4);
9229 store_reg(s, rn, addr);
9230 } else {
7d1b0095 9231 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9232 }
9233 } else if ((insn & (1 << 23)) == 0) {
9234 /* Load/store exclusive word. */
39d5492a 9235 addr = tcg_temp_local_new_i32();
98a46317 9236 load_reg_var(s, addr, rn);
426f5abc 9237 tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
2c0262af 9238 if (insn & (1 << 20)) {
426f5abc 9239 gen_load_exclusive(s, rs, 15, addr, 2);
9ee6e8bb 9240 } else {
426f5abc 9241 gen_store_exclusive(s, rd, rs, 15, addr, 2);
9ee6e8bb 9242 }
39d5492a 9243 tcg_temp_free_i32(addr);
2359bf80 9244 } else if ((insn & (7 << 5)) == 0) {
9ee6e8bb
PB
9245 /* Table Branch. */
9246 if (rn == 15) {
7d1b0095 9247 addr = tcg_temp_new_i32();
b0109805 9248 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 9249 } else {
b0109805 9250 addr = load_reg(s, rn);
9ee6e8bb 9251 }
b26eefb6 9252 tmp = load_reg(s, rm);
b0109805 9253 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
9254 if (insn & (1 << 4)) {
9255 /* tbh */
b0109805 9256 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 9257 tcg_temp_free_i32(tmp);
e2592fad 9258 tmp = tcg_temp_new_i32();
6ce2faf4 9259 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
9ee6e8bb 9260 } else { /* tbb */
7d1b0095 9261 tcg_temp_free_i32(tmp);
e2592fad 9262 tmp = tcg_temp_new_i32();
6ce2faf4 9263 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
9ee6e8bb 9264 }
7d1b0095 9265 tcg_temp_free_i32(addr);
b0109805
PB
9266 tcg_gen_shli_i32(tmp, tmp, 1);
9267 tcg_gen_addi_i32(tmp, tmp, s->pc);
9268 store_reg(s, 15, tmp);
9ee6e8bb 9269 } else {
2359bf80 9270 int op2 = (insn >> 6) & 0x3;
9ee6e8bb 9271 op = (insn >> 4) & 0x3;
2359bf80
MR
9272 switch (op2) {
9273 case 0:
426f5abc 9274 goto illegal_op;
2359bf80
MR
9275 case 1:
9276 /* Load/store exclusive byte/halfword/doubleword */
9277 if (op == 2) {
9278 goto illegal_op;
9279 }
9280 ARCH(7);
9281 break;
9282 case 2:
9283 /* Load-acquire/store-release */
9284 if (op == 3) {
9285 goto illegal_op;
9286 }
9287 /* Fall through */
9288 case 3:
9289 /* Load-acquire/store-release exclusive */
9290 ARCH(8);
9291 break;
426f5abc 9292 }
39d5492a 9293 addr = tcg_temp_local_new_i32();
98a46317 9294 load_reg_var(s, addr, rn);
2359bf80
MR
9295 if (!(op2 & 1)) {
9296 if (insn & (1 << 20)) {
9297 tmp = tcg_temp_new_i32();
9298 switch (op) {
9299 case 0: /* ldab */
6ce2faf4 9300 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
2359bf80
MR
9301 break;
9302 case 1: /* ldah */
6ce2faf4 9303 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
2359bf80
MR
9304 break;
9305 case 2: /* lda */
6ce2faf4 9306 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
2359bf80
MR
9307 break;
9308 default:
9309 abort();
9310 }
9311 store_reg(s, rs, tmp);
9312 } else {
9313 tmp = load_reg(s, rs);
9314 switch (op) {
9315 case 0: /* stlb */
6ce2faf4 9316 gen_aa32_st8(tmp, addr, get_mem_index(s));
2359bf80
MR
9317 break;
9318 case 1: /* stlh */
6ce2faf4 9319 gen_aa32_st16(tmp, addr, get_mem_index(s));
2359bf80
MR
9320 break;
9321 case 2: /* stl */
6ce2faf4 9322 gen_aa32_st32(tmp, addr, get_mem_index(s));
2359bf80
MR
9323 break;
9324 default:
9325 abort();
9326 }
9327 tcg_temp_free_i32(tmp);
9328 }
9329 } else if (insn & (1 << 20)) {
426f5abc 9330 gen_load_exclusive(s, rs, rd, addr, op);
9ee6e8bb 9331 } else {
426f5abc 9332 gen_store_exclusive(s, rm, rs, rd, addr, op);
9ee6e8bb 9333 }
39d5492a 9334 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9335 }
9336 } else {
9337 /* Load/store multiple, RFE, SRS. */
9338 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
00115976 9339 /* RFE, SRS: not available in user mode or on M profile */
b53d8923 9340 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9341 goto illegal_op;
00115976 9342 }
9ee6e8bb
PB
9343 if (insn & (1 << 20)) {
9344 /* rfe */
b0109805
PB
9345 addr = load_reg(s, rn);
9346 if ((insn & (1 << 24)) == 0)
9347 tcg_gen_addi_i32(addr, addr, -8);
9348 /* Load PC into tmp and CPSR into tmp2. */
e2592fad 9349 tmp = tcg_temp_new_i32();
6ce2faf4 9350 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 9351 tcg_gen_addi_i32(addr, addr, 4);
e2592fad 9352 tmp2 = tcg_temp_new_i32();
6ce2faf4 9353 gen_aa32_ld32u(tmp2, addr, get_mem_index(s));
9ee6e8bb
PB
9354 if (insn & (1 << 21)) {
9355 /* Base writeback. */
b0109805
PB
9356 if (insn & (1 << 24)) {
9357 tcg_gen_addi_i32(addr, addr, 4);
9358 } else {
9359 tcg_gen_addi_i32(addr, addr, -4);
9360 }
9361 store_reg(s, rn, addr);
9362 } else {
7d1b0095 9363 tcg_temp_free_i32(addr);
9ee6e8bb 9364 }
b0109805 9365 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
9366 } else {
9367 /* srs */
81465888
PM
9368 gen_srs(s, (insn & 0x1f), (insn & (1 << 24)) ? 1 : 2,
9369 insn & (1 << 21));
9ee6e8bb
PB
9370 }
9371 } else {
5856d44e 9372 int i, loaded_base = 0;
39d5492a 9373 TCGv_i32 loaded_var;
9ee6e8bb 9374 /* Load/store multiple. */
b0109805 9375 addr = load_reg(s, rn);
9ee6e8bb
PB
9376 offset = 0;
9377 for (i = 0; i < 16; i++) {
9378 if (insn & (1 << i))
9379 offset += 4;
9380 }
9381 if (insn & (1 << 24)) {
b0109805 9382 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9383 }
9384
39d5492a 9385 TCGV_UNUSED_I32(loaded_var);
9ee6e8bb
PB
9386 for (i = 0; i < 16; i++) {
9387 if ((insn & (1 << i)) == 0)
9388 continue;
9389 if (insn & (1 << 20)) {
9390 /* Load. */
e2592fad 9391 tmp = tcg_temp_new_i32();
6ce2faf4 9392 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
9ee6e8bb 9393 if (i == 15) {
b0109805 9394 gen_bx(s, tmp);
5856d44e
YO
9395 } else if (i == rn) {
9396 loaded_var = tmp;
9397 loaded_base = 1;
9ee6e8bb 9398 } else {
b0109805 9399 store_reg(s, i, tmp);
9ee6e8bb
PB
9400 }
9401 } else {
9402 /* Store. */
b0109805 9403 tmp = load_reg(s, i);
6ce2faf4 9404 gen_aa32_st32(tmp, addr, get_mem_index(s));
e2592fad 9405 tcg_temp_free_i32(tmp);
9ee6e8bb 9406 }
b0109805 9407 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb 9408 }
5856d44e
YO
9409 if (loaded_base) {
9410 store_reg(s, rn, loaded_var);
9411 }
9ee6e8bb
PB
9412 if (insn & (1 << 21)) {
9413 /* Base register writeback. */
9414 if (insn & (1 << 24)) {
b0109805 9415 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
9416 }
9417 /* Fault if writeback register is in register list. */
9418 if (insn & (1 << rn))
9419 goto illegal_op;
b0109805
PB
9420 store_reg(s, rn, addr);
9421 } else {
7d1b0095 9422 tcg_temp_free_i32(addr);
9ee6e8bb
PB
9423 }
9424 }
9425 }
9426 break;
2af9ab77
JB
9427 case 5:
9428
9ee6e8bb 9429 op = (insn >> 21) & 0xf;
2af9ab77
JB
9430 if (op == 6) {
9431 /* Halfword pack. */
9432 tmp = load_reg(s, rn);
9433 tmp2 = load_reg(s, rm);
9434 shift = ((insn >> 10) & 0x1c) | ((insn >> 6) & 0x3);
9435 if (insn & (1 << 5)) {
9436 /* pkhtb */
9437 if (shift == 0)
9438 shift = 31;
9439 tcg_gen_sari_i32(tmp2, tmp2, shift);
9440 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
9441 tcg_gen_ext16u_i32(tmp2, tmp2);
9442 } else {
9443 /* pkhbt */
9444 if (shift)
9445 tcg_gen_shli_i32(tmp2, tmp2, shift);
9446 tcg_gen_ext16u_i32(tmp, tmp);
9447 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
9448 }
9449 tcg_gen_or_i32(tmp, tmp, tmp2);
7d1b0095 9450 tcg_temp_free_i32(tmp2);
3174f8e9
FN
9451 store_reg(s, rd, tmp);
9452 } else {
2af9ab77
JB
9453 /* Data processing register constant shift. */
9454 if (rn == 15) {
7d1b0095 9455 tmp = tcg_temp_new_i32();
2af9ab77
JB
9456 tcg_gen_movi_i32(tmp, 0);
9457 } else {
9458 tmp = load_reg(s, rn);
9459 }
9460 tmp2 = load_reg(s, rm);
9461
9462 shiftop = (insn >> 4) & 3;
9463 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
9464 conds = (insn & (1 << 20)) != 0;
9465 logic_cc = (conds && thumb2_logic_op(op));
9466 gen_arm_shift_im(tmp2, shiftop, shift, logic_cc);
9467 if (gen_thumb2_data_op(s, op, conds, 0, tmp, tmp2))
9468 goto illegal_op;
7d1b0095 9469 tcg_temp_free_i32(tmp2);
2af9ab77
JB
9470 if (rd != 15) {
9471 store_reg(s, rd, tmp);
9472 } else {
7d1b0095 9473 tcg_temp_free_i32(tmp);
2af9ab77 9474 }
3174f8e9 9475 }
9ee6e8bb
PB
9476 break;
9477 case 13: /* Misc data processing. */
9478 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
9479 if (op < 4 && (insn & 0xf000) != 0xf000)
9480 goto illegal_op;
9481 switch (op) {
9482 case 0: /* Register controlled shift. */
8984bd2e
PB
9483 tmp = load_reg(s, rn);
9484 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9485 if ((insn & 0x70) != 0)
9486 goto illegal_op;
9487 op = (insn >> 21) & 3;
8984bd2e
PB
9488 logic_cc = (insn & (1 << 20)) != 0;
9489 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
9490 if (logic_cc)
9491 gen_logic_CC(tmp);
7dcc1f89 9492 store_reg_bx(s, rd, tmp);
9ee6e8bb
PB
9493 break;
9494 case 1: /* Sign/zero extend. */
5e3f878a 9495 tmp = load_reg(s, rm);
9ee6e8bb 9496 shift = (insn >> 4) & 3;
1301f322 9497 /* ??? In many cases it's not necessary to do a
9ee6e8bb
PB
9498 rotate, a shift is sufficient. */
9499 if (shift != 0)
f669df27 9500 tcg_gen_rotri_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
9501 op = (insn >> 20) & 7;
9502 switch (op) {
5e3f878a
PB
9503 case 0: gen_sxth(tmp); break;
9504 case 1: gen_uxth(tmp); break;
9505 case 2: gen_sxtb16(tmp); break;
9506 case 3: gen_uxtb16(tmp); break;
9507 case 4: gen_sxtb(tmp); break;
9508 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
9509 default: goto illegal_op;
9510 }
9511 if (rn != 15) {
5e3f878a 9512 tmp2 = load_reg(s, rn);
9ee6e8bb 9513 if ((op >> 1) == 1) {
5e3f878a 9514 gen_add16(tmp, tmp2);
9ee6e8bb 9515 } else {
5e3f878a 9516 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9517 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9518 }
9519 }
5e3f878a 9520 store_reg(s, rd, tmp);
9ee6e8bb
PB
9521 break;
9522 case 2: /* SIMD add/subtract. */
9523 op = (insn >> 20) & 7;
9524 shift = (insn >> 4) & 7;
9525 if ((op & 3) == 3 || (shift & 3) == 3)
9526 goto illegal_op;
6ddbc6e4
PB
9527 tmp = load_reg(s, rn);
9528 tmp2 = load_reg(s, rm);
9529 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7d1b0095 9530 tcg_temp_free_i32(tmp2);
6ddbc6e4 9531 store_reg(s, rd, tmp);
9ee6e8bb
PB
9532 break;
9533 case 3: /* Other data processing. */
9534 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
9535 if (op < 4) {
9536 /* Saturating add/subtract. */
d9ba4830
PB
9537 tmp = load_reg(s, rn);
9538 tmp2 = load_reg(s, rm);
9ee6e8bb 9539 if (op & 1)
9ef39277 9540 gen_helper_double_saturate(tmp, cpu_env, tmp);
4809c612 9541 if (op & 2)
9ef39277 9542 gen_helper_sub_saturate(tmp, cpu_env, tmp2, tmp);
9ee6e8bb 9543 else
9ef39277 9544 gen_helper_add_saturate(tmp, cpu_env, tmp, tmp2);
7d1b0095 9545 tcg_temp_free_i32(tmp2);
9ee6e8bb 9546 } else {
d9ba4830 9547 tmp = load_reg(s, rn);
9ee6e8bb
PB
9548 switch (op) {
9549 case 0x0a: /* rbit */
d9ba4830 9550 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
9551 break;
9552 case 0x08: /* rev */
66896cb8 9553 tcg_gen_bswap32_i32(tmp, tmp);
9ee6e8bb
PB
9554 break;
9555 case 0x09: /* rev16 */
d9ba4830 9556 gen_rev16(tmp);
9ee6e8bb
PB
9557 break;
9558 case 0x0b: /* revsh */
d9ba4830 9559 gen_revsh(tmp);
9ee6e8bb
PB
9560 break;
9561 case 0x10: /* sel */
d9ba4830 9562 tmp2 = load_reg(s, rm);
7d1b0095 9563 tmp3 = tcg_temp_new_i32();
0ecb72a5 9564 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
d9ba4830 9565 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
7d1b0095
PM
9566 tcg_temp_free_i32(tmp3);
9567 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9568 break;
9569 case 0x18: /* clz */
d9ba4830 9570 gen_helper_clz(tmp, tmp);
9ee6e8bb 9571 break;
eb0ecd5a
WN
9572 case 0x20:
9573 case 0x21:
9574 case 0x22:
9575 case 0x28:
9576 case 0x29:
9577 case 0x2a:
9578 {
9579 /* crc32/crc32c */
9580 uint32_t sz = op & 0x3;
9581 uint32_t c = op & 0x8;
9582
d614a513 9583 if (!arm_dc_feature(s, ARM_FEATURE_CRC)) {
eb0ecd5a
WN
9584 goto illegal_op;
9585 }
9586
9587 tmp2 = load_reg(s, rm);
aa633469
PM
9588 if (sz == 0) {
9589 tcg_gen_andi_i32(tmp2, tmp2, 0xff);
9590 } else if (sz == 1) {
9591 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
9592 }
eb0ecd5a
WN
9593 tmp3 = tcg_const_i32(1 << sz);
9594 if (c) {
9595 gen_helper_crc32c(tmp, tmp, tmp2, tmp3);
9596 } else {
9597 gen_helper_crc32(tmp, tmp, tmp2, tmp3);
9598 }
9599 tcg_temp_free_i32(tmp2);
9600 tcg_temp_free_i32(tmp3);
9601 break;
9602 }
9ee6e8bb
PB
9603 default:
9604 goto illegal_op;
9605 }
9606 }
d9ba4830 9607 store_reg(s, rd, tmp);
9ee6e8bb
PB
9608 break;
9609 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
9610 op = (insn >> 4) & 0xf;
d9ba4830
PB
9611 tmp = load_reg(s, rn);
9612 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9613 switch ((insn >> 20) & 7) {
9614 case 0: /* 32 x 32 -> 32 */
d9ba4830 9615 tcg_gen_mul_i32(tmp, tmp, tmp2);
7d1b0095 9616 tcg_temp_free_i32(tmp2);
9ee6e8bb 9617 if (rs != 15) {
d9ba4830 9618 tmp2 = load_reg(s, rs);
9ee6e8bb 9619 if (op)
d9ba4830 9620 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 9621 else
d9ba4830 9622 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9623 tcg_temp_free_i32(tmp2);
9ee6e8bb 9624 }
9ee6e8bb
PB
9625 break;
9626 case 1: /* 16 x 16 -> 32 */
d9ba4830 9627 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9628 tcg_temp_free_i32(tmp2);
9ee6e8bb 9629 if (rs != 15) {
d9ba4830 9630 tmp2 = load_reg(s, rs);
9ef39277 9631 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9632 tcg_temp_free_i32(tmp2);
9ee6e8bb 9633 }
9ee6e8bb
PB
9634 break;
9635 case 2: /* Dual multiply add. */
9636 case 4: /* Dual multiply subtract. */
9637 if (op)
d9ba4830
PB
9638 gen_swap_half(tmp2);
9639 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9640 if (insn & (1 << 22)) {
e1d177b9 9641 /* This subtraction cannot overflow. */
d9ba4830 9642 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 9643 } else {
e1d177b9
PM
9644 /* This addition cannot overflow 32 bits;
9645 * however it may overflow considered as a signed
9646 * operation, in which case we must set the Q flag.
9647 */
9ef39277 9648 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 9649 }
7d1b0095 9650 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
9651 if (rs != 15)
9652 {
d9ba4830 9653 tmp2 = load_reg(s, rs);
9ef39277 9654 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9655 tcg_temp_free_i32(tmp2);
9ee6e8bb 9656 }
9ee6e8bb
PB
9657 break;
9658 case 3: /* 32 * 16 -> 32msb */
9659 if (op)
d9ba4830 9660 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 9661 else
d9ba4830 9662 gen_sxth(tmp2);
a7812ae4
PB
9663 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9664 tcg_gen_shri_i64(tmp64, tmp64, 16);
7d1b0095 9665 tmp = tcg_temp_new_i32();
a7812ae4 9666 tcg_gen_trunc_i64_i32(tmp, tmp64);
b75263d6 9667 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9668 if (rs != 15)
9669 {
d9ba4830 9670 tmp2 = load_reg(s, rs);
9ef39277 9671 gen_helper_add_setq(tmp, cpu_env, tmp, tmp2);
7d1b0095 9672 tcg_temp_free_i32(tmp2);
9ee6e8bb 9673 }
9ee6e8bb 9674 break;
838fa72d
AJ
9675 case 5: case 6: /* 32 * 32 -> 32msb (SMMUL, SMMLA, SMMLS) */
9676 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9677 if (rs != 15) {
838fa72d
AJ
9678 tmp = load_reg(s, rs);
9679 if (insn & (1 << 20)) {
9680 tmp64 = gen_addq_msw(tmp64, tmp);
99c475ab 9681 } else {
838fa72d 9682 tmp64 = gen_subq_msw(tmp64, tmp);
99c475ab 9683 }
2c0262af 9684 }
838fa72d
AJ
9685 if (insn & (1 << 4)) {
9686 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
9687 }
9688 tcg_gen_shri_i64(tmp64, tmp64, 32);
7d1b0095 9689 tmp = tcg_temp_new_i32();
838fa72d
AJ
9690 tcg_gen_trunc_i64_i32(tmp, tmp64);
9691 tcg_temp_free_i64(tmp64);
9ee6e8bb
PB
9692 break;
9693 case 7: /* Unsigned sum of absolute differences. */
d9ba4830 9694 gen_helper_usad8(tmp, tmp, tmp2);
7d1b0095 9695 tcg_temp_free_i32(tmp2);
9ee6e8bb 9696 if (rs != 15) {
d9ba4830
PB
9697 tmp2 = load_reg(s, rs);
9698 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 9699 tcg_temp_free_i32(tmp2);
5fd46862 9700 }
9ee6e8bb 9701 break;
2c0262af 9702 }
d9ba4830 9703 store_reg(s, rd, tmp);
2c0262af 9704 break;
9ee6e8bb
PB
9705 case 6: case 7: /* 64-bit multiply, Divide. */
9706 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
9707 tmp = load_reg(s, rn);
9708 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
9709 if ((op & 0x50) == 0x10) {
9710 /* sdiv, udiv */
d614a513 9711 if (!arm_dc_feature(s, ARM_FEATURE_THUMB_DIV)) {
9ee6e8bb 9712 goto illegal_op;
47789990 9713 }
9ee6e8bb 9714 if (op & 0x20)
5e3f878a 9715 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 9716 else
5e3f878a 9717 gen_helper_sdiv(tmp, tmp, tmp2);
7d1b0095 9718 tcg_temp_free_i32(tmp2);
5e3f878a 9719 store_reg(s, rd, tmp);
9ee6e8bb
PB
9720 } else if ((op & 0xe) == 0xc) {
9721 /* Dual multiply accumulate long. */
9722 if (op & 1)
5e3f878a
PB
9723 gen_swap_half(tmp2);
9724 gen_smul_dual(tmp, tmp2);
9ee6e8bb 9725 if (op & 0x10) {
5e3f878a 9726 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 9727 } else {
5e3f878a 9728 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 9729 }
7d1b0095 9730 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9731 /* BUGFIX */
9732 tmp64 = tcg_temp_new_i64();
9733 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9734 tcg_temp_free_i32(tmp);
a7812ae4
PB
9735 gen_addq(s, tmp64, rs, rd);
9736 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9737 tcg_temp_free_i64(tmp64);
2c0262af 9738 } else {
9ee6e8bb
PB
9739 if (op & 0x20) {
9740 /* Unsigned 64-bit multiply */
a7812ae4 9741 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 9742 } else {
9ee6e8bb
PB
9743 if (op & 8) {
9744 /* smlalxy */
5e3f878a 9745 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7d1b0095 9746 tcg_temp_free_i32(tmp2);
a7812ae4
PB
9747 tmp64 = tcg_temp_new_i64();
9748 tcg_gen_ext_i32_i64(tmp64, tmp);
7d1b0095 9749 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9750 } else {
9751 /* Signed 64-bit multiply */
a7812ae4 9752 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 9753 }
b5ff1b31 9754 }
9ee6e8bb
PB
9755 if (op & 4) {
9756 /* umaal */
a7812ae4
PB
9757 gen_addq_lo(s, tmp64, rs);
9758 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
9759 } else if (op & 0x40) {
9760 /* 64-bit accumulate. */
a7812ae4 9761 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 9762 }
a7812ae4 9763 gen_storeq_reg(s, rs, rd, tmp64);
b75263d6 9764 tcg_temp_free_i64(tmp64);
5fd46862 9765 }
2c0262af 9766 break;
9ee6e8bb
PB
9767 }
9768 break;
9769 case 6: case 7: case 14: case 15:
9770 /* Coprocessor. */
9771 if (((insn >> 24) & 3) == 3) {
9772 /* Translate into the equivalent ARM encoding. */
f06053e3 9773 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4) | (1 << 28);
7dcc1f89 9774 if (disas_neon_data_insn(s, insn)) {
9ee6e8bb 9775 goto illegal_op;
7dcc1f89 9776 }
6a57f3eb 9777 } else if (((insn >> 8) & 0xe) == 10) {
7dcc1f89 9778 if (disas_vfp_insn(s, insn)) {
6a57f3eb
WN
9779 goto illegal_op;
9780 }
9ee6e8bb
PB
9781 } else {
9782 if (insn & (1 << 28))
9783 goto illegal_op;
7dcc1f89 9784 if (disas_coproc_insn(s, insn)) {
9ee6e8bb 9785 goto illegal_op;
7dcc1f89 9786 }
9ee6e8bb
PB
9787 }
9788 break;
9789 case 8: case 9: case 10: case 11:
9790 if (insn & (1 << 15)) {
9791 /* Branches, misc control. */
9792 if (insn & 0x5000) {
9793 /* Unconditional branch. */
9794 /* signextend(hw1[10:0]) -> offset[:12]. */
9795 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
9796 /* hw1[10:0] -> offset[11:1]. */
9797 offset |= (insn & 0x7ff) << 1;
9798 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
9799 offset[24:22] already have the same value because of the
9800 sign extension above. */
9801 offset ^= ((~insn) & (1 << 13)) << 10;
9802 offset ^= ((~insn) & (1 << 11)) << 11;
9803
9ee6e8bb
PB
9804 if (insn & (1 << 14)) {
9805 /* Branch and link. */
3174f8e9 9806 tcg_gen_movi_i32(cpu_R[14], s->pc | 1);
b5ff1b31 9807 }
3b46e624 9808
b0109805 9809 offset += s->pc;
9ee6e8bb
PB
9810 if (insn & (1 << 12)) {
9811 /* b/bl */
b0109805 9812 gen_jmp(s, offset);
9ee6e8bb
PB
9813 } else {
9814 /* blx */
b0109805 9815 offset &= ~(uint32_t)2;
be5e7a76 9816 /* thumb2 bx, no need to check */
b0109805 9817 gen_bx_im(s, offset);
2c0262af 9818 }
9ee6e8bb
PB
9819 } else if (((insn >> 23) & 7) == 7) {
9820 /* Misc control */
9821 if (insn & (1 << 13))
9822 goto illegal_op;
9823
9824 if (insn & (1 << 26)) {
37e6456e
PM
9825 if (!(insn & (1 << 20))) {
9826 /* Hypervisor call (v7) */
9827 int imm16 = extract32(insn, 16, 4) << 12
9828 | extract32(insn, 0, 12);
9829 ARCH(7);
9830 if (IS_USER(s)) {
9831 goto illegal_op;
9832 }
9833 gen_hvc(s, imm16);
9834 } else {
9835 /* Secure monitor call (v6+) */
9836 ARCH(6K);
9837 if (IS_USER(s)) {
9838 goto illegal_op;
9839 }
9840 gen_smc(s);
9841 }
2c0262af 9842 } else {
9ee6e8bb
PB
9843 op = (insn >> 20) & 7;
9844 switch (op) {
9845 case 0: /* msr cpsr. */
b53d8923 9846 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
9847 tmp = load_reg(s, rn);
9848 addr = tcg_const_i32(insn & 0xff);
9849 gen_helper_v7m_msr(cpu_env, addr, tmp);
b75263d6 9850 tcg_temp_free_i32(addr);
7d1b0095 9851 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
9852 gen_lookup_tb(s);
9853 break;
9854 }
9855 /* fall through */
9856 case 1: /* msr spsr. */
b53d8923 9857 if (arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9858 goto illegal_op;
b53d8923 9859 }
2fbac54b
FN
9860 tmp = load_reg(s, rn);
9861 if (gen_set_psr(s,
7dcc1f89 9862 msr_mask(s, (insn >> 8) & 0xf, op == 1),
2fbac54b 9863 op == 1, tmp))
9ee6e8bb
PB
9864 goto illegal_op;
9865 break;
9866 case 2: /* cps, nop-hint. */
9867 if (((insn >> 8) & 7) == 0) {
9868 gen_nop_hint(s, insn & 0xff);
9869 }
9870 /* Implemented as NOP in user mode. */
9871 if (IS_USER(s))
9872 break;
9873 offset = 0;
9874 imm = 0;
9875 if (insn & (1 << 10)) {
9876 if (insn & (1 << 7))
9877 offset |= CPSR_A;
9878 if (insn & (1 << 6))
9879 offset |= CPSR_I;
9880 if (insn & (1 << 5))
9881 offset |= CPSR_F;
9882 if (insn & (1 << 9))
9883 imm = CPSR_A | CPSR_I | CPSR_F;
9884 }
9885 if (insn & (1 << 8)) {
9886 offset |= 0x1f;
9887 imm |= (insn & 0x1f);
9888 }
9889 if (offset) {
2fbac54b 9890 gen_set_psr_im(s, offset, 0, imm);
9ee6e8bb
PB
9891 }
9892 break;
9893 case 3: /* Special control operations. */
426f5abc 9894 ARCH(7);
9ee6e8bb
PB
9895 op = (insn >> 4) & 0xf;
9896 switch (op) {
9897 case 2: /* clrex */
426f5abc 9898 gen_clrex(s);
9ee6e8bb
PB
9899 break;
9900 case 4: /* dsb */
9901 case 5: /* dmb */
9902 case 6: /* isb */
9903 /* These execute as NOPs. */
9ee6e8bb
PB
9904 break;
9905 default:
9906 goto illegal_op;
9907 }
9908 break;
9909 case 4: /* bxj */
9910 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
9911 tmp = load_reg(s, rn);
9912 gen_bx(s, tmp);
9ee6e8bb
PB
9913 break;
9914 case 5: /* Exception return. */
b8b45b68
RV
9915 if (IS_USER(s)) {
9916 goto illegal_op;
9917 }
9918 if (rn != 14 || rd != 15) {
9919 goto illegal_op;
9920 }
9921 tmp = load_reg(s, rn);
9922 tcg_gen_subi_i32(tmp, tmp, insn & 0xff);
9923 gen_exception_return(s, tmp);
9924 break;
9ee6e8bb 9925 case 6: /* mrs cpsr. */
7d1b0095 9926 tmp = tcg_temp_new_i32();
b53d8923 9927 if (arm_dc_feature(s, ARM_FEATURE_M)) {
8984bd2e
PB
9928 addr = tcg_const_i32(insn & 0xff);
9929 gen_helper_v7m_mrs(tmp, cpu_env, addr);
b75263d6 9930 tcg_temp_free_i32(addr);
9ee6e8bb 9931 } else {
9ef39277 9932 gen_helper_cpsr_read(tmp, cpu_env);
9ee6e8bb 9933 }
8984bd2e 9934 store_reg(s, rd, tmp);
9ee6e8bb
PB
9935 break;
9936 case 7: /* mrs spsr. */
9937 /* Not accessible in user mode. */
b53d8923 9938 if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_M)) {
9ee6e8bb 9939 goto illegal_op;
b53d8923 9940 }
d9ba4830
PB
9941 tmp = load_cpu_field(spsr);
9942 store_reg(s, rd, tmp);
9ee6e8bb 9943 break;
2c0262af
FB
9944 }
9945 }
9ee6e8bb
PB
9946 } else {
9947 /* Conditional branch. */
9948 op = (insn >> 22) & 0xf;
9949 /* Generate a conditional jump to next instruction. */
9950 s->condlabel = gen_new_label();
39fb730a 9951 arm_gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
9952 s->condjmp = 1;
9953
9954 /* offset[11:1] = insn[10:0] */
9955 offset = (insn & 0x7ff) << 1;
9956 /* offset[17:12] = insn[21:16]. */
9957 offset |= (insn & 0x003f0000) >> 4;
9958 /* offset[31:20] = insn[26]. */
9959 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
9960 /* offset[18] = insn[13]. */
9961 offset |= (insn & (1 << 13)) << 5;
9962 /* offset[19] = insn[11]. */
9963 offset |= (insn & (1 << 11)) << 8;
9964
9965 /* jump to the offset */
b0109805 9966 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
9967 }
9968 } else {
9969 /* Data processing immediate. */
9970 if (insn & (1 << 25)) {
9971 if (insn & (1 << 24)) {
9972 if (insn & (1 << 20))
9973 goto illegal_op;
9974 /* Bitfield/Saturate. */
9975 op = (insn >> 21) & 7;
9976 imm = insn & 0x1f;
9977 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4 9978 if (rn == 15) {
7d1b0095 9979 tmp = tcg_temp_new_i32();
6ddbc6e4
PB
9980 tcg_gen_movi_i32(tmp, 0);
9981 } else {
9982 tmp = load_reg(s, rn);
9983 }
9ee6e8bb
PB
9984 switch (op) {
9985 case 2: /* Signed bitfield extract. */
9986 imm++;
9987 if (shift + imm > 32)
9988 goto illegal_op;
9989 if (imm < 32)
6ddbc6e4 9990 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
9991 break;
9992 case 6: /* Unsigned bitfield extract. */
9993 imm++;
9994 if (shift + imm > 32)
9995 goto illegal_op;
9996 if (imm < 32)
6ddbc6e4 9997 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
9998 break;
9999 case 3: /* Bitfield insert/clear. */
10000 if (imm < shift)
10001 goto illegal_op;
10002 imm = imm + 1 - shift;
10003 if (imm != 32) {
6ddbc6e4 10004 tmp2 = load_reg(s, rd);
d593c48e 10005 tcg_gen_deposit_i32(tmp, tmp2, tmp, shift, imm);
7d1b0095 10006 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10007 }
10008 break;
10009 case 7:
10010 goto illegal_op;
10011 default: /* Saturate. */
9ee6e8bb
PB
10012 if (shift) {
10013 if (op & 1)
6ddbc6e4 10014 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 10015 else
6ddbc6e4 10016 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 10017 }
6ddbc6e4 10018 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
10019 if (op & 4) {
10020 /* Unsigned. */
9ee6e8bb 10021 if ((op & 1) && shift == 0)
9ef39277 10022 gen_helper_usat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10023 else
9ef39277 10024 gen_helper_usat(tmp, cpu_env, tmp, tmp2);
2c0262af 10025 } else {
9ee6e8bb 10026 /* Signed. */
9ee6e8bb 10027 if ((op & 1) && shift == 0)
9ef39277 10028 gen_helper_ssat16(tmp, cpu_env, tmp, tmp2);
9ee6e8bb 10029 else
9ef39277 10030 gen_helper_ssat(tmp, cpu_env, tmp, tmp2);
2c0262af 10031 }
b75263d6 10032 tcg_temp_free_i32(tmp2);
9ee6e8bb 10033 break;
2c0262af 10034 }
6ddbc6e4 10035 store_reg(s, rd, tmp);
9ee6e8bb
PB
10036 } else {
10037 imm = ((insn & 0x04000000) >> 15)
10038 | ((insn & 0x7000) >> 4) | (insn & 0xff);
10039 if (insn & (1 << 22)) {
10040 /* 16-bit immediate. */
10041 imm |= (insn >> 4) & 0xf000;
10042 if (insn & (1 << 23)) {
10043 /* movt */
5e3f878a 10044 tmp = load_reg(s, rd);
86831435 10045 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 10046 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 10047 } else {
9ee6e8bb 10048 /* movw */
7d1b0095 10049 tmp = tcg_temp_new_i32();
5e3f878a 10050 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
10051 }
10052 } else {
9ee6e8bb
PB
10053 /* Add/sub 12-bit immediate. */
10054 if (rn == 15) {
b0109805 10055 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 10056 if (insn & (1 << 23))
b0109805 10057 offset -= imm;
9ee6e8bb 10058 else
b0109805 10059 offset += imm;
7d1b0095 10060 tmp = tcg_temp_new_i32();
5e3f878a 10061 tcg_gen_movi_i32(tmp, offset);
2c0262af 10062 } else {
5e3f878a 10063 tmp = load_reg(s, rn);
9ee6e8bb 10064 if (insn & (1 << 23))
5e3f878a 10065 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 10066 else
5e3f878a 10067 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 10068 }
9ee6e8bb 10069 }
5e3f878a 10070 store_reg(s, rd, tmp);
191abaa2 10071 }
9ee6e8bb
PB
10072 } else {
10073 int shifter_out = 0;
10074 /* modified 12-bit immediate. */
10075 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
10076 imm = (insn & 0xff);
10077 switch (shift) {
10078 case 0: /* XY */
10079 /* Nothing to do. */
10080 break;
10081 case 1: /* 00XY00XY */
10082 imm |= imm << 16;
10083 break;
10084 case 2: /* XY00XY00 */
10085 imm |= imm << 16;
10086 imm <<= 8;
10087 break;
10088 case 3: /* XYXYXYXY */
10089 imm |= imm << 16;
10090 imm |= imm << 8;
10091 break;
10092 default: /* Rotated constant. */
10093 shift = (shift << 1) | (imm >> 7);
10094 imm |= 0x80;
10095 imm = imm << (32 - shift);
10096 shifter_out = 1;
10097 break;
b5ff1b31 10098 }
7d1b0095 10099 tmp2 = tcg_temp_new_i32();
3174f8e9 10100 tcg_gen_movi_i32(tmp2, imm);
9ee6e8bb 10101 rn = (insn >> 16) & 0xf;
3174f8e9 10102 if (rn == 15) {
7d1b0095 10103 tmp = tcg_temp_new_i32();
3174f8e9
FN
10104 tcg_gen_movi_i32(tmp, 0);
10105 } else {
10106 tmp = load_reg(s, rn);
10107 }
9ee6e8bb
PB
10108 op = (insn >> 21) & 0xf;
10109 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
3174f8e9 10110 shifter_out, tmp, tmp2))
9ee6e8bb 10111 goto illegal_op;
7d1b0095 10112 tcg_temp_free_i32(tmp2);
9ee6e8bb
PB
10113 rd = (insn >> 8) & 0xf;
10114 if (rd != 15) {
3174f8e9
FN
10115 store_reg(s, rd, tmp);
10116 } else {
7d1b0095 10117 tcg_temp_free_i32(tmp);
2c0262af 10118 }
2c0262af 10119 }
9ee6e8bb
PB
10120 }
10121 break;
10122 case 12: /* Load/store single data item. */
10123 {
10124 int postinc = 0;
10125 int writeback = 0;
a99caa48 10126 int memidx;
9ee6e8bb 10127 if ((insn & 0x01100000) == 0x01000000) {
7dcc1f89 10128 if (disas_neon_ls_insn(s, insn)) {
c1713132 10129 goto illegal_op;
7dcc1f89 10130 }
9ee6e8bb
PB
10131 break;
10132 }
a2fdc890
PM
10133 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
10134 if (rs == 15) {
10135 if (!(insn & (1 << 20))) {
10136 goto illegal_op;
10137 }
10138 if (op != 2) {
10139 /* Byte or halfword load space with dest == r15 : memory hints.
10140 * Catch them early so we don't emit pointless addressing code.
10141 * This space is a mix of:
10142 * PLD/PLDW/PLI, which we implement as NOPs (note that unlike
10143 * the ARM encodings, PLDW space doesn't UNDEF for non-v7MP
10144 * cores)
10145 * unallocated hints, which must be treated as NOPs
10146 * UNPREDICTABLE space, which we NOP or UNDEF depending on
10147 * which is easiest for the decoding logic
10148 * Some space which must UNDEF
10149 */
10150 int op1 = (insn >> 23) & 3;
10151 int op2 = (insn >> 6) & 0x3f;
10152 if (op & 2) {
10153 goto illegal_op;
10154 }
10155 if (rn == 15) {
02afbf64
PM
10156 /* UNPREDICTABLE, unallocated hint or
10157 * PLD/PLDW/PLI (literal)
10158 */
a2fdc890
PM
10159 return 0;
10160 }
10161 if (op1 & 1) {
02afbf64 10162 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10163 }
10164 if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
02afbf64 10165 return 0; /* PLD/PLDW/PLI or unallocated hint */
a2fdc890
PM
10166 }
10167 /* UNDEF space, or an UNPREDICTABLE */
10168 return 1;
10169 }
10170 }
a99caa48 10171 memidx = get_mem_index(s);
9ee6e8bb 10172 if (rn == 15) {
7d1b0095 10173 addr = tcg_temp_new_i32();
9ee6e8bb
PB
10174 /* PC relative. */
10175 /* s->pc has already been incremented by 4. */
10176 imm = s->pc & 0xfffffffc;
10177 if (insn & (1 << 23))
10178 imm += insn & 0xfff;
10179 else
10180 imm -= insn & 0xfff;
b0109805 10181 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 10182 } else {
b0109805 10183 addr = load_reg(s, rn);
9ee6e8bb
PB
10184 if (insn & (1 << 23)) {
10185 /* Positive offset. */
10186 imm = insn & 0xfff;
b0109805 10187 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb 10188 } else {
9ee6e8bb 10189 imm = insn & 0xff;
2a0308c5
PM
10190 switch ((insn >> 8) & 0xf) {
10191 case 0x0: /* Shifted Register. */
9ee6e8bb 10192 shift = (insn >> 4) & 0xf;
2a0308c5
PM
10193 if (shift > 3) {
10194 tcg_temp_free_i32(addr);
18c9b560 10195 goto illegal_op;
2a0308c5 10196 }
b26eefb6 10197 tmp = load_reg(s, rm);
9ee6e8bb 10198 if (shift)
b26eefb6 10199 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 10200 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10201 tcg_temp_free_i32(tmp);
9ee6e8bb 10202 break;
2a0308c5 10203 case 0xc: /* Negative offset. */
b0109805 10204 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb 10205 break;
2a0308c5 10206 case 0xe: /* User privilege. */
b0109805 10207 tcg_gen_addi_i32(addr, addr, imm);
579d21cc 10208 memidx = get_a32_user_mem_index(s);
9ee6e8bb 10209 break;
2a0308c5 10210 case 0x9: /* Post-decrement. */
9ee6e8bb
PB
10211 imm = -imm;
10212 /* Fall through. */
2a0308c5 10213 case 0xb: /* Post-increment. */
9ee6e8bb
PB
10214 postinc = 1;
10215 writeback = 1;
10216 break;
2a0308c5 10217 case 0xd: /* Pre-decrement. */
9ee6e8bb
PB
10218 imm = -imm;
10219 /* Fall through. */
2a0308c5 10220 case 0xf: /* Pre-increment. */
b0109805 10221 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
10222 writeback = 1;
10223 break;
10224 default:
2a0308c5 10225 tcg_temp_free_i32(addr);
b7bcbe95 10226 goto illegal_op;
9ee6e8bb
PB
10227 }
10228 }
10229 }
9ee6e8bb
PB
10230 if (insn & (1 << 20)) {
10231 /* Load. */
5a839c0d 10232 tmp = tcg_temp_new_i32();
a2fdc890 10233 switch (op) {
5a839c0d 10234 case 0:
a99caa48 10235 gen_aa32_ld8u(tmp, addr, memidx);
5a839c0d
PM
10236 break;
10237 case 4:
a99caa48 10238 gen_aa32_ld8s(tmp, addr, memidx);
5a839c0d
PM
10239 break;
10240 case 1:
a99caa48 10241 gen_aa32_ld16u(tmp, addr, memidx);
5a839c0d
PM
10242 break;
10243 case 5:
a99caa48 10244 gen_aa32_ld16s(tmp, addr, memidx);
5a839c0d
PM
10245 break;
10246 case 2:
a99caa48 10247 gen_aa32_ld32u(tmp, addr, memidx);
5a839c0d 10248 break;
2a0308c5 10249 default:
5a839c0d 10250 tcg_temp_free_i32(tmp);
2a0308c5
PM
10251 tcg_temp_free_i32(addr);
10252 goto illegal_op;
a2fdc890
PM
10253 }
10254 if (rs == 15) {
10255 gen_bx(s, tmp);
9ee6e8bb 10256 } else {
a2fdc890 10257 store_reg(s, rs, tmp);
9ee6e8bb
PB
10258 }
10259 } else {
10260 /* Store. */
b0109805 10261 tmp = load_reg(s, rs);
9ee6e8bb 10262 switch (op) {
5a839c0d 10263 case 0:
a99caa48 10264 gen_aa32_st8(tmp, addr, memidx);
5a839c0d
PM
10265 break;
10266 case 1:
a99caa48 10267 gen_aa32_st16(tmp, addr, memidx);
5a839c0d
PM
10268 break;
10269 case 2:
a99caa48 10270 gen_aa32_st32(tmp, addr, memidx);
5a839c0d 10271 break;
2a0308c5 10272 default:
5a839c0d 10273 tcg_temp_free_i32(tmp);
2a0308c5
PM
10274 tcg_temp_free_i32(addr);
10275 goto illegal_op;
b7bcbe95 10276 }
5a839c0d 10277 tcg_temp_free_i32(tmp);
2c0262af 10278 }
9ee6e8bb 10279 if (postinc)
b0109805
PB
10280 tcg_gen_addi_i32(addr, addr, imm);
10281 if (writeback) {
10282 store_reg(s, rn, addr);
10283 } else {
7d1b0095 10284 tcg_temp_free_i32(addr);
b0109805 10285 }
9ee6e8bb
PB
10286 }
10287 break;
10288 default:
10289 goto illegal_op;
2c0262af 10290 }
9ee6e8bb
PB
10291 return 0;
10292illegal_op:
10293 return 1;
2c0262af
FB
10294}
10295
0ecb72a5 10296static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
99c475ab
FB
10297{
10298 uint32_t val, insn, op, rm, rn, rd, shift, cond;
10299 int32_t offset;
10300 int i;
39d5492a
PM
10301 TCGv_i32 tmp;
10302 TCGv_i32 tmp2;
10303 TCGv_i32 addr;
99c475ab 10304
9ee6e8bb
PB
10305 if (s->condexec_mask) {
10306 cond = s->condexec_cond;
bedd2912
JB
10307 if (cond != 0x0e) { /* Skip conditional when condition is AL. */
10308 s->condlabel = gen_new_label();
39fb730a 10309 arm_gen_test_cc(cond ^ 1, s->condlabel);
bedd2912
JB
10310 s->condjmp = 1;
10311 }
9ee6e8bb
PB
10312 }
10313
d31dd73e 10314 insn = arm_lduw_code(env, s->pc, s->bswap_code);
99c475ab 10315 s->pc += 2;
b5ff1b31 10316
99c475ab
FB
10317 switch (insn >> 12) {
10318 case 0: case 1:
396e467c 10319
99c475ab
FB
10320 rd = insn & 7;
10321 op = (insn >> 11) & 3;
10322 if (op == 3) {
10323 /* add/subtract */
10324 rn = (insn >> 3) & 7;
396e467c 10325 tmp = load_reg(s, rn);
99c475ab
FB
10326 if (insn & (1 << 10)) {
10327 /* immediate */
7d1b0095 10328 tmp2 = tcg_temp_new_i32();
396e467c 10329 tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
99c475ab
FB
10330 } else {
10331 /* reg */
10332 rm = (insn >> 6) & 7;
396e467c 10333 tmp2 = load_reg(s, rm);
99c475ab 10334 }
9ee6e8bb
PB
10335 if (insn & (1 << 9)) {
10336 if (s->condexec_mask)
396e467c 10337 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 10338 else
72485ec4 10339 gen_sub_CC(tmp, tmp, tmp2);
9ee6e8bb
PB
10340 } else {
10341 if (s->condexec_mask)
396e467c 10342 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 10343 else
72485ec4 10344 gen_add_CC(tmp, tmp, tmp2);
9ee6e8bb 10345 }
7d1b0095 10346 tcg_temp_free_i32(tmp2);
396e467c 10347 store_reg(s, rd, tmp);
99c475ab
FB
10348 } else {
10349 /* shift immediate */
10350 rm = (insn >> 3) & 7;
10351 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
10352 tmp = load_reg(s, rm);
10353 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
10354 if (!s->condexec_mask)
10355 gen_logic_CC(tmp);
10356 store_reg(s, rd, tmp);
99c475ab
FB
10357 }
10358 break;
10359 case 2: case 3:
10360 /* arithmetic large immediate */
10361 op = (insn >> 11) & 3;
10362 rd = (insn >> 8) & 0x7;
396e467c 10363 if (op == 0) { /* mov */
7d1b0095 10364 tmp = tcg_temp_new_i32();
396e467c 10365 tcg_gen_movi_i32(tmp, insn & 0xff);
9ee6e8bb 10366 if (!s->condexec_mask)
396e467c
FN
10367 gen_logic_CC(tmp);
10368 store_reg(s, rd, tmp);
10369 } else {
10370 tmp = load_reg(s, rd);
7d1b0095 10371 tmp2 = tcg_temp_new_i32();
396e467c
FN
10372 tcg_gen_movi_i32(tmp2, insn & 0xff);
10373 switch (op) {
10374 case 1: /* cmp */
72485ec4 10375 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10376 tcg_temp_free_i32(tmp);
10377 tcg_temp_free_i32(tmp2);
396e467c
FN
10378 break;
10379 case 2: /* add */
10380 if (s->condexec_mask)
10381 tcg_gen_add_i32(tmp, tmp, tmp2);
10382 else
72485ec4 10383 gen_add_CC(tmp, tmp, tmp2);
7d1b0095 10384 tcg_temp_free_i32(tmp2);
396e467c
FN
10385 store_reg(s, rd, tmp);
10386 break;
10387 case 3: /* sub */
10388 if (s->condexec_mask)
10389 tcg_gen_sub_i32(tmp, tmp, tmp2);
10390 else
72485ec4 10391 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095 10392 tcg_temp_free_i32(tmp2);
396e467c
FN
10393 store_reg(s, rd, tmp);
10394 break;
10395 }
99c475ab 10396 }
99c475ab
FB
10397 break;
10398 case 4:
10399 if (insn & (1 << 11)) {
10400 rd = (insn >> 8) & 7;
5899f386
FB
10401 /* load pc-relative. Bit 1 of PC is ignored. */
10402 val = s->pc + 2 + ((insn & 0xff) * 4);
10403 val &= ~(uint32_t)2;
7d1b0095 10404 addr = tcg_temp_new_i32();
b0109805 10405 tcg_gen_movi_i32(addr, val);
c40c8556 10406 tmp = tcg_temp_new_i32();
6ce2faf4 10407 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
7d1b0095 10408 tcg_temp_free_i32(addr);
b0109805 10409 store_reg(s, rd, tmp);
99c475ab
FB
10410 break;
10411 }
10412 if (insn & (1 << 10)) {
10413 /* data processing extended or blx */
10414 rd = (insn & 7) | ((insn >> 4) & 8);
10415 rm = (insn >> 3) & 0xf;
10416 op = (insn >> 8) & 3;
10417 switch (op) {
10418 case 0: /* add */
396e467c
FN
10419 tmp = load_reg(s, rd);
10420 tmp2 = load_reg(s, rm);
10421 tcg_gen_add_i32(tmp, tmp, tmp2);
7d1b0095 10422 tcg_temp_free_i32(tmp2);
396e467c 10423 store_reg(s, rd, tmp);
99c475ab
FB
10424 break;
10425 case 1: /* cmp */
396e467c
FN
10426 tmp = load_reg(s, rd);
10427 tmp2 = load_reg(s, rm);
72485ec4 10428 gen_sub_CC(tmp, tmp, tmp2);
7d1b0095
PM
10429 tcg_temp_free_i32(tmp2);
10430 tcg_temp_free_i32(tmp);
99c475ab
FB
10431 break;
10432 case 2: /* mov/cpy */
396e467c
FN
10433 tmp = load_reg(s, rm);
10434 store_reg(s, rd, tmp);
99c475ab
FB
10435 break;
10436 case 3:/* branch [and link] exchange thumb register */
b0109805 10437 tmp = load_reg(s, rm);
99c475ab 10438 if (insn & (1 << 7)) {
be5e7a76 10439 ARCH(5);
99c475ab 10440 val = (uint32_t)s->pc | 1;
7d1b0095 10441 tmp2 = tcg_temp_new_i32();
b0109805
PB
10442 tcg_gen_movi_i32(tmp2, val);
10443 store_reg(s, 14, tmp2);
99c475ab 10444 }
be5e7a76 10445 /* already thumb, no need to check */
d9ba4830 10446 gen_bx(s, tmp);
99c475ab
FB
10447 break;
10448 }
10449 break;
10450 }
10451
10452 /* data processing register */
10453 rd = insn & 7;
10454 rm = (insn >> 3) & 7;
10455 op = (insn >> 6) & 0xf;
10456 if (op == 2 || op == 3 || op == 4 || op == 7) {
10457 /* the shift/rotate ops want the operands backwards */
10458 val = rm;
10459 rm = rd;
10460 rd = val;
10461 val = 1;
10462 } else {
10463 val = 0;
10464 }
10465
396e467c 10466 if (op == 9) { /* neg */
7d1b0095 10467 tmp = tcg_temp_new_i32();
396e467c
FN
10468 tcg_gen_movi_i32(tmp, 0);
10469 } else if (op != 0xf) { /* mvn doesn't read its first operand */
10470 tmp = load_reg(s, rd);
10471 } else {
39d5492a 10472 TCGV_UNUSED_I32(tmp);
396e467c 10473 }
99c475ab 10474
396e467c 10475 tmp2 = load_reg(s, rm);
5899f386 10476 switch (op) {
99c475ab 10477 case 0x0: /* and */
396e467c 10478 tcg_gen_and_i32(tmp, tmp, tmp2);
9ee6e8bb 10479 if (!s->condexec_mask)
396e467c 10480 gen_logic_CC(tmp);
99c475ab
FB
10481 break;
10482 case 0x1: /* eor */
396e467c 10483 tcg_gen_xor_i32(tmp, tmp, tmp2);
9ee6e8bb 10484 if (!s->condexec_mask)
396e467c 10485 gen_logic_CC(tmp);
99c475ab
FB
10486 break;
10487 case 0x2: /* lsl */
9ee6e8bb 10488 if (s->condexec_mask) {
365af80e 10489 gen_shl(tmp2, tmp2, tmp);
9ee6e8bb 10490 } else {
9ef39277 10491 gen_helper_shl_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10492 gen_logic_CC(tmp2);
9ee6e8bb 10493 }
99c475ab
FB
10494 break;
10495 case 0x3: /* lsr */
9ee6e8bb 10496 if (s->condexec_mask) {
365af80e 10497 gen_shr(tmp2, tmp2, tmp);
9ee6e8bb 10498 } else {
9ef39277 10499 gen_helper_shr_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10500 gen_logic_CC(tmp2);
9ee6e8bb 10501 }
99c475ab
FB
10502 break;
10503 case 0x4: /* asr */
9ee6e8bb 10504 if (s->condexec_mask) {
365af80e 10505 gen_sar(tmp2, tmp2, tmp);
9ee6e8bb 10506 } else {
9ef39277 10507 gen_helper_sar_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10508 gen_logic_CC(tmp2);
9ee6e8bb 10509 }
99c475ab
FB
10510 break;
10511 case 0x5: /* adc */
49b4c31e 10512 if (s->condexec_mask) {
396e467c 10513 gen_adc(tmp, tmp2);
49b4c31e
RH
10514 } else {
10515 gen_adc_CC(tmp, tmp, tmp2);
10516 }
99c475ab
FB
10517 break;
10518 case 0x6: /* sbc */
2de68a49 10519 if (s->condexec_mask) {
396e467c 10520 gen_sub_carry(tmp, tmp, tmp2);
2de68a49
RH
10521 } else {
10522 gen_sbc_CC(tmp, tmp, tmp2);
10523 }
99c475ab
FB
10524 break;
10525 case 0x7: /* ror */
9ee6e8bb 10526 if (s->condexec_mask) {
f669df27
AJ
10527 tcg_gen_andi_i32(tmp, tmp, 0x1f);
10528 tcg_gen_rotr_i32(tmp2, tmp2, tmp);
9ee6e8bb 10529 } else {
9ef39277 10530 gen_helper_ror_cc(tmp2, cpu_env, tmp2, tmp);
396e467c 10531 gen_logic_CC(tmp2);
9ee6e8bb 10532 }
99c475ab
FB
10533 break;
10534 case 0x8: /* tst */
396e467c
FN
10535 tcg_gen_and_i32(tmp, tmp, tmp2);
10536 gen_logic_CC(tmp);
99c475ab 10537 rd = 16;
5899f386 10538 break;
99c475ab 10539 case 0x9: /* neg */
9ee6e8bb 10540 if (s->condexec_mask)
396e467c 10541 tcg_gen_neg_i32(tmp, tmp2);
9ee6e8bb 10542 else
72485ec4 10543 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10544 break;
10545 case 0xa: /* cmp */
72485ec4 10546 gen_sub_CC(tmp, tmp, tmp2);
99c475ab
FB
10547 rd = 16;
10548 break;
10549 case 0xb: /* cmn */
72485ec4 10550 gen_add_CC(tmp, tmp, tmp2);
99c475ab
FB
10551 rd = 16;
10552 break;
10553 case 0xc: /* orr */
396e467c 10554 tcg_gen_or_i32(tmp, tmp, tmp2);
9ee6e8bb 10555 if (!s->condexec_mask)
396e467c 10556 gen_logic_CC(tmp);
99c475ab
FB
10557 break;
10558 case 0xd: /* mul */
7b2919a0 10559 tcg_gen_mul_i32(tmp, tmp, tmp2);
9ee6e8bb 10560 if (!s->condexec_mask)
396e467c 10561 gen_logic_CC(tmp);
99c475ab
FB
10562 break;
10563 case 0xe: /* bic */
f669df27 10564 tcg_gen_andc_i32(tmp, tmp, tmp2);
9ee6e8bb 10565 if (!s->condexec_mask)
396e467c 10566 gen_logic_CC(tmp);
99c475ab
FB
10567 break;
10568 case 0xf: /* mvn */
396e467c 10569 tcg_gen_not_i32(tmp2, tmp2);
9ee6e8bb 10570 if (!s->condexec_mask)
396e467c 10571 gen_logic_CC(tmp2);
99c475ab 10572 val = 1;
5899f386 10573 rm = rd;
99c475ab
FB
10574 break;
10575 }
10576 if (rd != 16) {
396e467c
FN
10577 if (val) {
10578 store_reg(s, rm, tmp2);
10579 if (op != 0xf)
7d1b0095 10580 tcg_temp_free_i32(tmp);
396e467c
FN
10581 } else {
10582 store_reg(s, rd, tmp);
7d1b0095 10583 tcg_temp_free_i32(tmp2);
396e467c
FN
10584 }
10585 } else {
7d1b0095
PM
10586 tcg_temp_free_i32(tmp);
10587 tcg_temp_free_i32(tmp2);
99c475ab
FB
10588 }
10589 break;
10590
10591 case 5:
10592 /* load/store register offset. */
10593 rd = insn & 7;
10594 rn = (insn >> 3) & 7;
10595 rm = (insn >> 6) & 7;
10596 op = (insn >> 9) & 7;
b0109805 10597 addr = load_reg(s, rn);
b26eefb6 10598 tmp = load_reg(s, rm);
b0109805 10599 tcg_gen_add_i32(addr, addr, tmp);
7d1b0095 10600 tcg_temp_free_i32(tmp);
99c475ab 10601
c40c8556 10602 if (op < 3) { /* store */
b0109805 10603 tmp = load_reg(s, rd);
c40c8556
PM
10604 } else {
10605 tmp = tcg_temp_new_i32();
10606 }
99c475ab
FB
10607
10608 switch (op) {
10609 case 0: /* str */
6ce2faf4 10610 gen_aa32_st32(tmp, addr, get_mem_index(s));
99c475ab
FB
10611 break;
10612 case 1: /* strh */
6ce2faf4 10613 gen_aa32_st16(tmp, addr, get_mem_index(s));
99c475ab
FB
10614 break;
10615 case 2: /* strb */
6ce2faf4 10616 gen_aa32_st8(tmp, addr, get_mem_index(s));
99c475ab
FB
10617 break;
10618 case 3: /* ldrsb */
6ce2faf4 10619 gen_aa32_ld8s(tmp, addr, get_mem_index(s));
99c475ab
FB
10620 break;
10621 case 4: /* ldr */
6ce2faf4 10622 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10623 break;
10624 case 5: /* ldrh */
6ce2faf4 10625 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
99c475ab
FB
10626 break;
10627 case 6: /* ldrb */
6ce2faf4 10628 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
99c475ab
FB
10629 break;
10630 case 7: /* ldrsh */
6ce2faf4 10631 gen_aa32_ld16s(tmp, addr, get_mem_index(s));
99c475ab
FB
10632 break;
10633 }
c40c8556 10634 if (op >= 3) { /* load */
b0109805 10635 store_reg(s, rd, tmp);
c40c8556
PM
10636 } else {
10637 tcg_temp_free_i32(tmp);
10638 }
7d1b0095 10639 tcg_temp_free_i32(addr);
99c475ab
FB
10640 break;
10641
10642 case 6:
10643 /* load/store word immediate offset */
10644 rd = insn & 7;
10645 rn = (insn >> 3) & 7;
b0109805 10646 addr = load_reg(s, rn);
99c475ab 10647 val = (insn >> 4) & 0x7c;
b0109805 10648 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10649
10650 if (insn & (1 << 11)) {
10651 /* load */
c40c8556 10652 tmp = tcg_temp_new_i32();
6ce2faf4 10653 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10654 store_reg(s, rd, tmp);
99c475ab
FB
10655 } else {
10656 /* store */
b0109805 10657 tmp = load_reg(s, rd);
6ce2faf4 10658 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10659 tcg_temp_free_i32(tmp);
99c475ab 10660 }
7d1b0095 10661 tcg_temp_free_i32(addr);
99c475ab
FB
10662 break;
10663
10664 case 7:
10665 /* load/store byte immediate offset */
10666 rd = insn & 7;
10667 rn = (insn >> 3) & 7;
b0109805 10668 addr = load_reg(s, rn);
99c475ab 10669 val = (insn >> 6) & 0x1f;
b0109805 10670 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10671
10672 if (insn & (1 << 11)) {
10673 /* load */
c40c8556 10674 tmp = tcg_temp_new_i32();
6ce2faf4 10675 gen_aa32_ld8u(tmp, addr, get_mem_index(s));
b0109805 10676 store_reg(s, rd, tmp);
99c475ab
FB
10677 } else {
10678 /* store */
b0109805 10679 tmp = load_reg(s, rd);
6ce2faf4 10680 gen_aa32_st8(tmp, addr, get_mem_index(s));
c40c8556 10681 tcg_temp_free_i32(tmp);
99c475ab 10682 }
7d1b0095 10683 tcg_temp_free_i32(addr);
99c475ab
FB
10684 break;
10685
10686 case 8:
10687 /* load/store halfword immediate offset */
10688 rd = insn & 7;
10689 rn = (insn >> 3) & 7;
b0109805 10690 addr = load_reg(s, rn);
99c475ab 10691 val = (insn >> 5) & 0x3e;
b0109805 10692 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10693
10694 if (insn & (1 << 11)) {
10695 /* load */
c40c8556 10696 tmp = tcg_temp_new_i32();
6ce2faf4 10697 gen_aa32_ld16u(tmp, addr, get_mem_index(s));
b0109805 10698 store_reg(s, rd, tmp);
99c475ab
FB
10699 } else {
10700 /* store */
b0109805 10701 tmp = load_reg(s, rd);
6ce2faf4 10702 gen_aa32_st16(tmp, addr, get_mem_index(s));
c40c8556 10703 tcg_temp_free_i32(tmp);
99c475ab 10704 }
7d1b0095 10705 tcg_temp_free_i32(addr);
99c475ab
FB
10706 break;
10707
10708 case 9:
10709 /* load/store from stack */
10710 rd = (insn >> 8) & 7;
b0109805 10711 addr = load_reg(s, 13);
99c475ab 10712 val = (insn & 0xff) * 4;
b0109805 10713 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
10714
10715 if (insn & (1 << 11)) {
10716 /* load */
c40c8556 10717 tmp = tcg_temp_new_i32();
6ce2faf4 10718 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10719 store_reg(s, rd, tmp);
99c475ab
FB
10720 } else {
10721 /* store */
b0109805 10722 tmp = load_reg(s, rd);
6ce2faf4 10723 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10724 tcg_temp_free_i32(tmp);
99c475ab 10725 }
7d1b0095 10726 tcg_temp_free_i32(addr);
99c475ab
FB
10727 break;
10728
10729 case 10:
10730 /* add to high reg */
10731 rd = (insn >> 8) & 7;
5899f386
FB
10732 if (insn & (1 << 11)) {
10733 /* SP */
5e3f878a 10734 tmp = load_reg(s, 13);
5899f386
FB
10735 } else {
10736 /* PC. bit 1 is ignored. */
7d1b0095 10737 tmp = tcg_temp_new_i32();
5e3f878a 10738 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 10739 }
99c475ab 10740 val = (insn & 0xff) * 4;
5e3f878a
PB
10741 tcg_gen_addi_i32(tmp, tmp, val);
10742 store_reg(s, rd, tmp);
99c475ab
FB
10743 break;
10744
10745 case 11:
10746 /* misc */
10747 op = (insn >> 8) & 0xf;
10748 switch (op) {
10749 case 0:
10750 /* adjust stack pointer */
b26eefb6 10751 tmp = load_reg(s, 13);
99c475ab
FB
10752 val = (insn & 0x7f) * 4;
10753 if (insn & (1 << 7))
6a0d8a1d 10754 val = -(int32_t)val;
b26eefb6
PB
10755 tcg_gen_addi_i32(tmp, tmp, val);
10756 store_reg(s, 13, tmp);
99c475ab
FB
10757 break;
10758
9ee6e8bb
PB
10759 case 2: /* sign/zero extend. */
10760 ARCH(6);
10761 rd = insn & 7;
10762 rm = (insn >> 3) & 7;
b0109805 10763 tmp = load_reg(s, rm);
9ee6e8bb 10764 switch ((insn >> 6) & 3) {
b0109805
PB
10765 case 0: gen_sxth(tmp); break;
10766 case 1: gen_sxtb(tmp); break;
10767 case 2: gen_uxth(tmp); break;
10768 case 3: gen_uxtb(tmp); break;
9ee6e8bb 10769 }
b0109805 10770 store_reg(s, rd, tmp);
9ee6e8bb 10771 break;
99c475ab
FB
10772 case 4: case 5: case 0xc: case 0xd:
10773 /* push/pop */
b0109805 10774 addr = load_reg(s, 13);
5899f386
FB
10775 if (insn & (1 << 8))
10776 offset = 4;
99c475ab 10777 else
5899f386
FB
10778 offset = 0;
10779 for (i = 0; i < 8; i++) {
10780 if (insn & (1 << i))
10781 offset += 4;
10782 }
10783 if ((insn & (1 << 11)) == 0) {
b0109805 10784 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10785 }
99c475ab
FB
10786 for (i = 0; i < 8; i++) {
10787 if (insn & (1 << i)) {
10788 if (insn & (1 << 11)) {
10789 /* pop */
c40c8556 10790 tmp = tcg_temp_new_i32();
6ce2faf4 10791 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
b0109805 10792 store_reg(s, i, tmp);
99c475ab
FB
10793 } else {
10794 /* push */
b0109805 10795 tmp = load_reg(s, i);
6ce2faf4 10796 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10797 tcg_temp_free_i32(tmp);
99c475ab 10798 }
5899f386 10799 /* advance to the next address. */
b0109805 10800 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10801 }
10802 }
39d5492a 10803 TCGV_UNUSED_I32(tmp);
99c475ab
FB
10804 if (insn & (1 << 8)) {
10805 if (insn & (1 << 11)) {
10806 /* pop pc */
c40c8556 10807 tmp = tcg_temp_new_i32();
6ce2faf4 10808 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
99c475ab
FB
10809 /* don't set the pc until the rest of the instruction
10810 has completed */
10811 } else {
10812 /* push lr */
b0109805 10813 tmp = load_reg(s, 14);
6ce2faf4 10814 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10815 tcg_temp_free_i32(tmp);
99c475ab 10816 }
b0109805 10817 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 10818 }
5899f386 10819 if ((insn & (1 << 11)) == 0) {
b0109805 10820 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 10821 }
99c475ab 10822 /* write back the new stack pointer */
b0109805 10823 store_reg(s, 13, addr);
99c475ab 10824 /* set the new PC value */
be5e7a76 10825 if ((insn & 0x0900) == 0x0900) {
7dcc1f89 10826 store_reg_from_load(s, 15, tmp);
be5e7a76 10827 }
99c475ab
FB
10828 break;
10829
9ee6e8bb
PB
10830 case 1: case 3: case 9: case 11: /* czb */
10831 rm = insn & 7;
d9ba4830 10832 tmp = load_reg(s, rm);
9ee6e8bb
PB
10833 s->condlabel = gen_new_label();
10834 s->condjmp = 1;
10835 if (insn & (1 << 11))
cb63669a 10836 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 10837 else
cb63669a 10838 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
7d1b0095 10839 tcg_temp_free_i32(tmp);
9ee6e8bb
PB
10840 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
10841 val = (uint32_t)s->pc + 2;
10842 val += offset;
10843 gen_jmp(s, val);
10844 break;
10845
10846 case 15: /* IT, nop-hint. */
10847 if ((insn & 0xf) == 0) {
10848 gen_nop_hint(s, (insn >> 4) & 0xf);
10849 break;
10850 }
10851 /* If Then. */
10852 s->condexec_cond = (insn >> 4) & 0xe;
10853 s->condexec_mask = insn & 0x1f;
10854 /* No actual code generated for this insn, just setup state. */
10855 break;
10856
06c949e6 10857 case 0xe: /* bkpt */
d4a2dc67
PM
10858 {
10859 int imm8 = extract32(insn, 0, 8);
be5e7a76 10860 ARCH(5);
d4a2dc67 10861 gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
06c949e6 10862 break;
d4a2dc67 10863 }
06c949e6 10864
9ee6e8bb
PB
10865 case 0xa: /* rev */
10866 ARCH(6);
10867 rn = (insn >> 3) & 0x7;
10868 rd = insn & 0x7;
b0109805 10869 tmp = load_reg(s, rn);
9ee6e8bb 10870 switch ((insn >> 6) & 3) {
66896cb8 10871 case 0: tcg_gen_bswap32_i32(tmp, tmp); break;
b0109805
PB
10872 case 1: gen_rev16(tmp); break;
10873 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
10874 default: goto illegal_op;
10875 }
b0109805 10876 store_reg(s, rd, tmp);
9ee6e8bb
PB
10877 break;
10878
d9e028c1
PM
10879 case 6:
10880 switch ((insn >> 5) & 7) {
10881 case 2:
10882 /* setend */
10883 ARCH(6);
10962fd5
PM
10884 if (((insn >> 3) & 1) != s->bswap_code) {
10885 /* Dynamic endianness switching not implemented. */
e0c270d9 10886 qemu_log_mask(LOG_UNIMP, "arm: unimplemented setend\n");
d9e028c1
PM
10887 goto illegal_op;
10888 }
9ee6e8bb 10889 break;
d9e028c1
PM
10890 case 3:
10891 /* cps */
10892 ARCH(6);
10893 if (IS_USER(s)) {
10894 break;
8984bd2e 10895 }
b53d8923 10896 if (arm_dc_feature(s, ARM_FEATURE_M)) {
d9e028c1
PM
10897 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
10898 /* FAULTMASK */
10899 if (insn & 1) {
10900 addr = tcg_const_i32(19);
10901 gen_helper_v7m_msr(cpu_env, addr, tmp);
10902 tcg_temp_free_i32(addr);
10903 }
10904 /* PRIMASK */
10905 if (insn & 2) {
10906 addr = tcg_const_i32(16);
10907 gen_helper_v7m_msr(cpu_env, addr, tmp);
10908 tcg_temp_free_i32(addr);
10909 }
10910 tcg_temp_free_i32(tmp);
10911 gen_lookup_tb(s);
10912 } else {
10913 if (insn & (1 << 4)) {
10914 shift = CPSR_A | CPSR_I | CPSR_F;
10915 } else {
10916 shift = 0;
10917 }
10918 gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
8984bd2e 10919 }
d9e028c1
PM
10920 break;
10921 default:
10922 goto undef;
9ee6e8bb
PB
10923 }
10924 break;
10925
99c475ab
FB
10926 default:
10927 goto undef;
10928 }
10929 break;
10930
10931 case 12:
a7d3970d 10932 {
99c475ab 10933 /* load/store multiple */
39d5492a
PM
10934 TCGv_i32 loaded_var;
10935 TCGV_UNUSED_I32(loaded_var);
99c475ab 10936 rn = (insn >> 8) & 0x7;
b0109805 10937 addr = load_reg(s, rn);
99c475ab
FB
10938 for (i = 0; i < 8; i++) {
10939 if (insn & (1 << i)) {
99c475ab
FB
10940 if (insn & (1 << 11)) {
10941 /* load */
c40c8556 10942 tmp = tcg_temp_new_i32();
6ce2faf4 10943 gen_aa32_ld32u(tmp, addr, get_mem_index(s));
a7d3970d
PM
10944 if (i == rn) {
10945 loaded_var = tmp;
10946 } else {
10947 store_reg(s, i, tmp);
10948 }
99c475ab
FB
10949 } else {
10950 /* store */
b0109805 10951 tmp = load_reg(s, i);
6ce2faf4 10952 gen_aa32_st32(tmp, addr, get_mem_index(s));
c40c8556 10953 tcg_temp_free_i32(tmp);
99c475ab 10954 }
5899f386 10955 /* advance to the next address */
b0109805 10956 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
10957 }
10958 }
b0109805 10959 if ((insn & (1 << rn)) == 0) {
a7d3970d 10960 /* base reg not in list: base register writeback */
b0109805
PB
10961 store_reg(s, rn, addr);
10962 } else {
a7d3970d
PM
10963 /* base reg in list: if load, complete it now */
10964 if (insn & (1 << 11)) {
10965 store_reg(s, rn, loaded_var);
10966 }
7d1b0095 10967 tcg_temp_free_i32(addr);
b0109805 10968 }
99c475ab 10969 break;
a7d3970d 10970 }
99c475ab
FB
10971 case 13:
10972 /* conditional branch or swi */
10973 cond = (insn >> 8) & 0xf;
10974 if (cond == 0xe)
10975 goto undef;
10976
10977 if (cond == 0xf) {
10978 /* swi */
eaed129d 10979 gen_set_pc_im(s, s->pc);
d4a2dc67 10980 s->svc_imm = extract32(insn, 0, 8);
9ee6e8bb 10981 s->is_jmp = DISAS_SWI;
99c475ab
FB
10982 break;
10983 }
10984 /* generate a conditional jump to next instruction */
e50e6a20 10985 s->condlabel = gen_new_label();
39fb730a 10986 arm_gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 10987 s->condjmp = 1;
99c475ab
FB
10988
10989 /* jump to the offset */
5899f386 10990 val = (uint32_t)s->pc + 2;
99c475ab 10991 offset = ((int32_t)insn << 24) >> 24;
5899f386 10992 val += offset << 1;
8aaca4c0 10993 gen_jmp(s, val);
99c475ab
FB
10994 break;
10995
10996 case 14:
358bf29e 10997 if (insn & (1 << 11)) {
9ee6e8bb
PB
10998 if (disas_thumb2_insn(env, s, insn))
10999 goto undef32;
358bf29e
PB
11000 break;
11001 }
9ee6e8bb 11002 /* unconditional branch */
99c475ab
FB
11003 val = (uint32_t)s->pc;
11004 offset = ((int32_t)insn << 21) >> 21;
11005 val += (offset << 1) + 2;
8aaca4c0 11006 gen_jmp(s, val);
99c475ab
FB
11007 break;
11008
11009 case 15:
9ee6e8bb 11010 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 11011 goto undef32;
9ee6e8bb 11012 break;
99c475ab
FB
11013 }
11014 return;
9ee6e8bb 11015undef32:
d4a2dc67 11016 gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
9ee6e8bb
PB
11017 return;
11018illegal_op:
99c475ab 11019undef:
d4a2dc67 11020 gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
99c475ab
FB
11021}
11022
2c0262af
FB
11023/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
11024 basic block 'tb'. If search_pc is TRUE, also generate PC
11025 information for each intermediate instruction. */
5639c3f2 11026static inline void gen_intermediate_code_internal(ARMCPU *cpu,
2cfc5f17 11027 TranslationBlock *tb,
5639c3f2 11028 bool search_pc)
2c0262af 11029{
ed2803da 11030 CPUState *cs = CPU(cpu);
5639c3f2 11031 CPUARMState *env = &cpu->env;
2c0262af 11032 DisasContext dc1, *dc = &dc1;
a1d1bb31 11033 CPUBreakpoint *bp;
2c0262af 11034 int j, lj;
0fa85d43 11035 target_ulong pc_start;
0a2461fa 11036 target_ulong next_page_start;
2e70f6ef
PB
11037 int num_insns;
11038 int max_insns;
3b46e624 11039
2c0262af 11040 /* generate intermediate code */
40f860cd
PM
11041
11042 /* The A64 decoder has its own top level loop, because it doesn't need
11043 * the A32/T32 complexity to do with conditional execution/IT blocks/etc.
11044 */
11045 if (ARM_TBFLAG_AARCH64_STATE(tb->flags)) {
11046 gen_intermediate_code_internal_a64(cpu, tb, search_pc);
11047 return;
11048 }
11049
0fa85d43 11050 pc_start = tb->pc;
3b46e624 11051
2c0262af
FB
11052 dc->tb = tb;
11053
2c0262af
FB
11054 dc->is_jmp = DISAS_NEXT;
11055 dc->pc = pc_start;
ed2803da 11056 dc->singlestep_enabled = cs->singlestep_enabled;
e50e6a20 11057 dc->condjmp = 0;
3926cc84 11058
40f860cd
PM
11059 dc->aarch64 = 0;
11060 dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
11061 dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
11062 dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
11063 dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
c1e37810
PM
11064 dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
11065 dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
3926cc84 11066#if !defined(CONFIG_USER_ONLY)
c1e37810 11067 dc->user = (dc->current_el == 0);
3926cc84 11068#endif
3f342b9e 11069 dc->ns = ARM_TBFLAG_NS(tb->flags);
2c7ffc41 11070 dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
40f860cd
PM
11071 dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
11072 dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
11073 dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
c0f4af17 11074 dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
60322b39 11075 dc->cp_regs = cpu->cp_regs;
a984e42c 11076 dc->features = env->features;
40f860cd 11077
50225ad0
PM
11078 /* Single step state. The code-generation logic here is:
11079 * SS_ACTIVE == 0:
11080 * generate code with no special handling for single-stepping (except
11081 * that anything that can make us go to SS_ACTIVE == 1 must end the TB;
11082 * this happens anyway because those changes are all system register or
11083 * PSTATE writes).
11084 * SS_ACTIVE == 1, PSTATE.SS == 1: (active-not-pending)
11085 * emit code for one insn
11086 * emit code to clear PSTATE.SS
11087 * emit code to generate software step exception for completed step
11088 * end TB (as usual for having generated an exception)
11089 * SS_ACTIVE == 1, PSTATE.SS == 0: (active-pending)
11090 * emit code to generate a software step exception
11091 * end the TB
11092 */
11093 dc->ss_active = ARM_TBFLAG_SS_ACTIVE(tb->flags);
11094 dc->pstate_ss = ARM_TBFLAG_PSTATE_SS(tb->flags);
11095 dc->is_ldex = false;
11096 dc->ss_same_el = false; /* Can't be true since EL_d must be AArch64 */
11097
a7812ae4
PB
11098 cpu_F0s = tcg_temp_new_i32();
11099 cpu_F1s = tcg_temp_new_i32();
11100 cpu_F0d = tcg_temp_new_i64();
11101 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
11102 cpu_V0 = cpu_F0d;
11103 cpu_V1 = cpu_F1d;
e677137d 11104 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 11105 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 11106 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 11107 lj = -1;
2e70f6ef
PB
11108 num_insns = 0;
11109 max_insns = tb->cflags & CF_COUNT_MASK;
11110 if (max_insns == 0)
11111 max_insns = CF_COUNT_MASK;
11112
cd42d5b2 11113 gen_tb_start(tb);
e12ce78d 11114
3849902c
PM
11115 tcg_clear_temp_count();
11116
e12ce78d
PM
11117 /* A note on handling of the condexec (IT) bits:
11118 *
11119 * We want to avoid the overhead of having to write the updated condexec
0ecb72a5 11120 * bits back to the CPUARMState for every instruction in an IT block. So:
e12ce78d 11121 * (1) if the condexec bits are not already zero then we write
0ecb72a5 11122 * zero back into the CPUARMState now. This avoids complications trying
e12ce78d
PM
11123 * to do it at the end of the block. (For example if we don't do this
11124 * it's hard to identify whether we can safely skip writing condexec
11125 * at the end of the TB, which we definitely want to do for the case
11126 * where a TB doesn't do anything with the IT state at all.)
11127 * (2) if we are going to leave the TB then we call gen_set_condexec()
0ecb72a5 11128 * which will write the correct value into CPUARMState if zero is wrong.
e12ce78d
PM
11129 * This is done both for leaving the TB at the end, and for leaving
11130 * it because of an exception we know will happen, which is done in
11131 * gen_exception_insn(). The latter is necessary because we need to
11132 * leave the TB with the PC/IT state just prior to execution of the
11133 * instruction which caused the exception.
11134 * (3) if we leave the TB unexpectedly (eg a data abort on a load)
0ecb72a5 11135 * then the CPUARMState will be wrong and we need to reset it.
e12ce78d
PM
11136 * This is handled in the same way as restoration of the
11137 * PC in these situations: we will be called again with search_pc=1
11138 * and generate a mapping of the condexec bits for each PC in
e87b7cb0
SW
11139 * gen_opc_condexec_bits[]. restore_state_to_opc() then uses
11140 * this to restore the condexec bits.
e12ce78d
PM
11141 *
11142 * Note that there are no instructions which can read the condexec
11143 * bits, and none which can write non-static values to them, so
0ecb72a5 11144 * we don't need to care about whether CPUARMState is correct in the
e12ce78d
PM
11145 * middle of a TB.
11146 */
11147
9ee6e8bb
PB
11148 /* Reset the conditional execution bits immediately. This avoids
11149 complications trying to do it at the end of the block. */
98eac7ca 11150 if (dc->condexec_mask || dc->condexec_cond)
8f01245e 11151 {
39d5492a 11152 TCGv_i32 tmp = tcg_temp_new_i32();
8f01245e 11153 tcg_gen_movi_i32(tmp, 0);
d9ba4830 11154 store_cpu_field(tmp, condexec_bits);
8f01245e 11155 }
2c0262af 11156 do {
fbb4a2e3
PB
11157#ifdef CONFIG_USER_ONLY
11158 /* Intercept jump to the magic kernel page. */
40f860cd 11159 if (dc->pc >= 0xffff0000) {
fbb4a2e3
PB
11160 /* We always get here via a jump, so know we are not in a
11161 conditional execution block. */
d4a2dc67 11162 gen_exception_internal(EXCP_KERNEL_TRAP);
fbb4a2e3
PB
11163 dc->is_jmp = DISAS_UPDATE;
11164 break;
11165 }
11166#else
b53d8923 11167 if (dc->pc >= 0xfffffff0 && arm_dc_feature(dc, ARM_FEATURE_M)) {
9ee6e8bb
PB
11168 /* We always get here via a jump, so know we are not in a
11169 conditional execution block. */
d4a2dc67 11170 gen_exception_internal(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
11171 dc->is_jmp = DISAS_UPDATE;
11172 break;
9ee6e8bb
PB
11173 }
11174#endif
11175
f0c3c505
AF
11176 if (unlikely(!QTAILQ_EMPTY(&cs->breakpoints))) {
11177 QTAILQ_FOREACH(bp, &cs->breakpoints, entry) {
a1d1bb31 11178 if (bp->pc == dc->pc) {
d4a2dc67 11179 gen_exception_internal_insn(dc, 0, EXCP_DEBUG);
9ee6e8bb
PB
11180 /* Advance PC so that clearing the breakpoint will
11181 invalidate this TB. */
11182 dc->pc += 2;
11183 goto done_generating;
1fddef4b
FB
11184 }
11185 }
11186 }
2c0262af 11187 if (search_pc) {
fe700adb 11188 j = tcg_op_buf_count();
2c0262af
FB
11189 if (lj < j) {
11190 lj++;
11191 while (lj < j)
ab1103de 11192 tcg_ctx.gen_opc_instr_start[lj++] = 0;
2c0262af 11193 }
25983cad 11194 tcg_ctx.gen_opc_pc[lj] = dc->pc;
e12ce78d 11195 gen_opc_condexec_bits[lj] = (dc->condexec_cond << 4) | (dc->condexec_mask >> 1);
ab1103de 11196 tcg_ctx.gen_opc_instr_start[lj] = 1;
c9c99c22 11197 tcg_ctx.gen_opc_icount[lj] = num_insns;
2c0262af 11198 }
e50e6a20 11199
2e70f6ef
PB
11200 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
11201 gen_io_start();
11202
fdefe51c 11203 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
5642463a
PM
11204 tcg_gen_debug_insn_start(dc->pc);
11205 }
11206
50225ad0
PM
11207 if (dc->ss_active && !dc->pstate_ss) {
11208 /* Singlestep state is Active-pending.
11209 * If we're in this state at the start of a TB then either
11210 * a) we just took an exception to an EL which is being debugged
11211 * and this is the first insn in the exception handler
11212 * b) debug exceptions were masked and we just unmasked them
11213 * without changing EL (eg by clearing PSTATE.D)
11214 * In either case we're going to take a swstep exception in the
11215 * "did not step an insn" case, and so the syndrome ISV and EX
11216 * bits should be zero.
11217 */
11218 assert(num_insns == 0);
11219 gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
11220 goto done_generating;
11221 }
11222
40f860cd 11223 if (dc->thumb) {
9ee6e8bb
PB
11224 disas_thumb_insn(env, dc);
11225 if (dc->condexec_mask) {
11226 dc->condexec_cond = (dc->condexec_cond & 0xe)
11227 | ((dc->condexec_mask >> 4) & 1);
11228 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
11229 if (dc->condexec_mask == 0) {
11230 dc->condexec_cond = 0;
11231 }
11232 }
11233 } else {
f4df2210
PM
11234 unsigned int insn = arm_ldl_code(env, dc->pc, dc->bswap_code);
11235 dc->pc += 4;
11236 disas_arm_insn(dc, insn);
9ee6e8bb 11237 }
e50e6a20
FB
11238
11239 if (dc->condjmp && !dc->is_jmp) {
11240 gen_set_label(dc->condlabel);
11241 dc->condjmp = 0;
11242 }
3849902c
PM
11243
11244 if (tcg_check_temp_count()) {
0a2461fa
AG
11245 fprintf(stderr, "TCG temporary leak before "TARGET_FMT_lx"\n",
11246 dc->pc);
3849902c
PM
11247 }
11248
aaf2d97d 11249 /* Translation stops when a conditional branch is encountered.
e50e6a20 11250 * Otherwise the subsequent code could get translated several times.
b5ff1b31 11251 * Also stop translation when a page boundary is reached. This
bf20dc07 11252 * ensures prefetch aborts occur at the right place. */
2e70f6ef 11253 num_insns ++;
fe700adb 11254 } while (!dc->is_jmp && !tcg_op_buf_full() &&
ed2803da 11255 !cs->singlestep_enabled &&
1b530a6d 11256 !singlestep &&
50225ad0 11257 !dc->ss_active &&
2e70f6ef
PB
11258 dc->pc < next_page_start &&
11259 num_insns < max_insns);
11260
11261 if (tb->cflags & CF_LAST_IO) {
11262 if (dc->condjmp) {
11263 /* FIXME: This can theoretically happen with self-modifying
11264 code. */
a47dddd7 11265 cpu_abort(cs, "IO on conditional branch instruction");
2e70f6ef
PB
11266 }
11267 gen_io_end();
11268 }
9ee6e8bb 11269
b5ff1b31 11270 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
11271 instruction was a conditional branch or trap, and the PC has
11272 already been written. */
50225ad0 11273 if (unlikely(cs->singlestep_enabled || dc->ss_active)) {
8aaca4c0 11274 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 11275 if (dc->condjmp) {
9ee6e8bb
PB
11276 gen_set_condexec(dc);
11277 if (dc->is_jmp == DISAS_SWI) {
50225ad0 11278 gen_ss_advance(dc);
d4a2dc67 11279 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
37e6456e
PM
11280 } else if (dc->is_jmp == DISAS_HVC) {
11281 gen_ss_advance(dc);
11282 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11283 } else if (dc->is_jmp == DISAS_SMC) {
11284 gen_ss_advance(dc);
11285 gen_exception(EXCP_SMC, syn_aa32_smc());
50225ad0
PM
11286 } else if (dc->ss_active) {
11287 gen_step_complete_exception(dc);
9ee6e8bb 11288 } else {
d4a2dc67 11289 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11290 }
e50e6a20
FB
11291 gen_set_label(dc->condlabel);
11292 }
11293 if (dc->condjmp || !dc->is_jmp) {
eaed129d 11294 gen_set_pc_im(dc, dc->pc);
e50e6a20 11295 dc->condjmp = 0;
8aaca4c0 11296 }
9ee6e8bb
PB
11297 gen_set_condexec(dc);
11298 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
50225ad0 11299 gen_ss_advance(dc);
d4a2dc67 11300 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
37e6456e
PM
11301 } else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
11302 gen_ss_advance(dc);
11303 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11304 } else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
11305 gen_ss_advance(dc);
11306 gen_exception(EXCP_SMC, syn_aa32_smc());
50225ad0
PM
11307 } else if (dc->ss_active) {
11308 gen_step_complete_exception(dc);
9ee6e8bb
PB
11309 } else {
11310 /* FIXME: Single stepping a WFI insn will not halt
11311 the CPU. */
d4a2dc67 11312 gen_exception_internal(EXCP_DEBUG);
9ee6e8bb 11313 }
8aaca4c0 11314 } else {
9ee6e8bb
PB
11315 /* While branches must always occur at the end of an IT block,
11316 there are a few other things that can cause us to terminate
65626741 11317 the TB in the middle of an IT block:
9ee6e8bb
PB
11318 - Exception generating instructions (bkpt, swi, undefined).
11319 - Page boundaries.
11320 - Hardware watchpoints.
11321 Hardware breakpoints have already been handled and skip this code.
11322 */
11323 gen_set_condexec(dc);
8aaca4c0 11324 switch(dc->is_jmp) {
8aaca4c0 11325 case DISAS_NEXT:
6e256c93 11326 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
11327 break;
11328 default:
11329 case DISAS_JUMP:
11330 case DISAS_UPDATE:
11331 /* indicate that the hash table must be used to find the next TB */
57fec1fe 11332 tcg_gen_exit_tb(0);
8aaca4c0
FB
11333 break;
11334 case DISAS_TB_JUMP:
11335 /* nothing more to generate */
11336 break;
9ee6e8bb 11337 case DISAS_WFI:
1ce94f81 11338 gen_helper_wfi(cpu_env);
9ee6e8bb 11339 break;
72c1d3af
PM
11340 case DISAS_WFE:
11341 gen_helper_wfe(cpu_env);
11342 break;
9ee6e8bb 11343 case DISAS_SWI:
d4a2dc67 11344 gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
9ee6e8bb 11345 break;
37e6456e
PM
11346 case DISAS_HVC:
11347 gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
11348 break;
11349 case DISAS_SMC:
11350 gen_exception(EXCP_SMC, syn_aa32_smc());
11351 break;
8aaca4c0 11352 }
e50e6a20
FB
11353 if (dc->condjmp) {
11354 gen_set_label(dc->condlabel);
9ee6e8bb 11355 gen_set_condexec(dc);
6e256c93 11356 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
11357 dc->condjmp = 0;
11358 }
2c0262af 11359 }
2e70f6ef 11360
9ee6e8bb 11361done_generating:
806f352d 11362 gen_tb_end(tb, num_insns);
2c0262af
FB
11363
11364#ifdef DEBUG_DISAS
8fec2b8c 11365 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
93fcfe39
AL
11366 qemu_log("----------------\n");
11367 qemu_log("IN: %s\n", lookup_symbol(pc_start));
f4359b9f 11368 log_target_disas(env, pc_start, dc->pc - pc_start,
d8fd2954 11369 dc->thumb | (dc->bswap_code << 1));
93fcfe39 11370 qemu_log("\n");
2c0262af
FB
11371 }
11372#endif
b5ff1b31 11373 if (search_pc) {
fe700adb 11374 j = tcg_op_buf_count();
b5ff1b31
FB
11375 lj++;
11376 while (lj <= j)
ab1103de 11377 tcg_ctx.gen_opc_instr_start[lj++] = 0;
b5ff1b31 11378 } else {
2c0262af 11379 tb->size = dc->pc - pc_start;
2e70f6ef 11380 tb->icount = num_insns;
b5ff1b31 11381 }
2c0262af
FB
11382}
11383
0ecb72a5 11384void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
2c0262af 11385{
5639c3f2 11386 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, false);
2c0262af
FB
11387}
11388
0ecb72a5 11389void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
2c0262af 11390{
5639c3f2 11391 gen_intermediate_code_internal(arm_env_get_cpu(env), tb, true);
2c0262af
FB
11392}
11393
b5ff1b31 11394static const char *cpu_mode_names[16] = {
28c9457d
EI
11395 "usr", "fiq", "irq", "svc", "???", "???", "mon", "abt",
11396 "???", "???", "hyp", "und", "???", "???", "???", "sys"
b5ff1b31 11397};
9ee6e8bb 11398
878096ee
AF
11399void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
11400 int flags)
2c0262af 11401{
878096ee
AF
11402 ARMCPU *cpu = ARM_CPU(cs);
11403 CPUARMState *env = &cpu->env;
2c0262af 11404 int i;
b5ff1b31 11405 uint32_t psr;
2c0262af 11406
17731115
PM
11407 if (is_a64(env)) {
11408 aarch64_cpu_dump_state(cs, f, cpu_fprintf, flags);
11409 return;
11410 }
11411
2c0262af 11412 for(i=0;i<16;i++) {
7fe48483 11413 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 11414 if ((i % 4) == 3)
7fe48483 11415 cpu_fprintf(f, "\n");
2c0262af 11416 else
7fe48483 11417 cpu_fprintf(f, " ");
2c0262af 11418 }
b5ff1b31 11419 psr = cpsr_read(env);
687fa640
TS
11420 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
11421 psr,
b5ff1b31
FB
11422 psr & (1 << 31) ? 'N' : '-',
11423 psr & (1 << 30) ? 'Z' : '-',
11424 psr & (1 << 29) ? 'C' : '-',
11425 psr & (1 << 28) ? 'V' : '-',
5fafdf24 11426 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 11427 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 11428
f2617cfc
PM
11429 if (flags & CPU_DUMP_FPU) {
11430 int numvfpregs = 0;
11431 if (arm_feature(env, ARM_FEATURE_VFP)) {
11432 numvfpregs += 16;
11433 }
11434 if (arm_feature(env, ARM_FEATURE_VFP3)) {
11435 numvfpregs += 16;
11436 }
11437 for (i = 0; i < numvfpregs; i++) {
11438 uint64_t v = float64_val(env->vfp.regs[i]);
11439 cpu_fprintf(f, "s%02d=%08x s%02d=%08x d%02d=%016" PRIx64 "\n",
11440 i * 2, (uint32_t)v,
11441 i * 2 + 1, (uint32_t)(v >> 32),
11442 i, v);
11443 }
11444 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
b7bcbe95 11445 }
2c0262af 11446}
a6b025d3 11447
0ecb72a5 11448void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
d2856f1a 11449{
3926cc84
AG
11450 if (is_a64(env)) {
11451 env->pc = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11452 env->condexec_bits = 0;
3926cc84
AG
11453 } else {
11454 env->regs[15] = tcg_ctx.gen_opc_pc[pc_pos];
40f860cd 11455 env->condexec_bits = gen_opc_condexec_bits[pc_pos];
3926cc84 11456 }
d2856f1a 11457}