]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
omap1: fix uart3 init (Jean-Christophe PLAGNIOL-VILLARD).
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
79383c9c 32#include "qemu-log.h"
1497c961 33
a7812ae4 34#include "helpers.h"
1497c961 35#define GEN_HELPER 1
b26eefb6 36#include "helpers.h"
2c0262af 37
9ee6e8bb
PB
38#define ENABLE_ARCH_5J 0
39#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
40#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
41#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
42#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31 43
86753403 44#define ARCH(x) do { if (!ENABLE_ARCH_##x) goto illegal_op; } while(0)
b5ff1b31 45
2c0262af
FB
/* internal defines */

/* Per-translation-block decoder state, threaded through all the
   disas_* routines while one TB is translated. */
typedef struct DisasContext {
    target_ulong pc;           /* address of the next instruction to decode */
    int is_jmp;                /* DISAS_* code describing how the TB ends */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;
    int singlestep_enabled;
    int thumb;                 /* nonzero when decoding Thumb instructions */
    int is_mem;                /* nonzero once the TB contains a memory access */
#if !defined(CONFIG_USER_ONLY)
    int user;                  /* nonzero when translating user-mode code */
#endif
} DisasContext;
65
b5ff1b31
FB
66#if defined(CONFIG_USER_ONLY)
67#define IS_USER(s) 1
68#else
69#define IS_USER(s) (s->user)
70#endif
71
9ee6e8bb
PB
72/* These instructions trap after executing, so defer them until after the
73 conditional executions state has been updated. */
74#define DISAS_WFI 4
75#define DISAS_SWI 5
2c0262af 76
a7812ae4 77static TCGv_ptr cpu_env;
ad69471c 78/* We reuse the same 64-bit temporaries for efficiency. */
a7812ae4 79static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
ad69471c 80
b26eefb6 81/* FIXME: These should be removed. */
8f8e3aa4 82static TCGv cpu_T[2];
a7812ae4
PB
83static TCGv cpu_F0s, cpu_F1s;
84static TCGv_i64 cpu_F0d, cpu_F1d;
b26eefb6 85
2e70f6ef
PB
86#define ICOUNT_TEMP cpu_T[0]
87#include "gen-icount.h"
88
b26eefb6
PB
/* initialize TCG globals. */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");

    /* T0/T1 live in fixed host registers. */
    cpu_T[0] = tcg_global_reg_new_i32(TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new_i32(TCG_AREG2, "T1");

    /* Re-expanding helpers.h with GEN_HELPER == 2 registers every
       helper with TCG (the GEN_HELPER == 1 expansion at the top of the
       file declared the gen_helper_* wrappers). */
#define GEN_HELPER 2
#include "helpers.h"
}
100
101/* The code generator doesn't like lots of temporaries, so maintain our own
102 cache for reuse within a function. */
103#define MAX_TEMPS 8
104static int num_temps;
105static TCGv temps[MAX_TEMPS];
106
/* Allocate a temporary variable. */
static TCGv_i32 new_tmp(void)
{
    TCGv tmp;
    /* The cache can hold at most MAX_TEMPS live temporaries. */
    if (num_temps == MAX_TEMPS)
        abort();

    /* Reuse a previously-created temporary if one is cached in this slot. */
    if (GET_TCGV_I32(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new_i32();
    temps[num_temps++] = tmp;
    return tmp;
}
121
/* Release a temporary variable.  The temp must have been handed out by
   new_tmp(); it is moved to the first free slot of the cache so it can
   be reused. */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    /* Fast path: the released temp already sits in the last live slot. */
    if (TCGV_EQUAL(temps[i], tmp))
        return;

    /* Shuffle this temp to the last slot.  */
    while (!TCGV_EQUAL(temps[i], tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
140
d9ba4830
PB
141static inline TCGv load_cpu_offset(int offset)
142{
143 TCGv tmp = new_tmp();
144 tcg_gen_ld_i32(tmp, cpu_env, offset);
145 return tmp;
146}
147
148#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
149
/* Write 'var' to the CPU state structure at byte offset 'offset' and
   release the temporary. */
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}
155
156#define store_cpu_field(var, name) \
157 store_cpu_offset(var, offsetof(CPUState, name))
158
b26eefb6
PB
/* Set a variable to the value of a CPU register.  Reading r15 yields
   the architectural PC: the instruction address plus 2 (Thumb) or
   4 (ARM). */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}
174
175/* Create a new temporary and set it to the value of a CPU register. */
176static inline TCGv load_reg(DisasContext *s, int reg)
177{
178 TCGv tmp = new_tmp();
179 load_reg_var(s, tmp, reg);
180 return tmp;
181}
182
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        /* A write to PC ends the TB; bit 0 is forced clear. */
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
194
195
196/* Basic operations. */
197#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6 198#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
b26eefb6
PB
199#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
200#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
201
202#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
203#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
204#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
205#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
206
8984bd2e
PB
207#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
208#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
209#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
210#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
211#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
212#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
213
b26eefb6
PB
214#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
215#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
216#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
217#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
218#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
219#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
220#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
221
222#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
223#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
224#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
225#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
226#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
227
228/* Value extensions. */
86831435
PB
229#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
230#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
231#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
232#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
233
1497c961
PB
234#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
235#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
236
237#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 238
d9ba4830
PB
239#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
240/* Set NZCV flags from the high 4 bits of var. */
241#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
242
/* Emit code that raises exception number 'excp' at runtime. */
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
250
3670669c
PB
/* Dual signed 16x16->32 multiply: on return a = lo16(a) * lo16(b) and
   b = hi16(a) * hi16(b), operands sign-extended. */
static void gen_smul_dual(TCGv a, TCGv b)
{
    TCGv tmp1 = new_tmp();
    TCGv tmp2 = new_tmp();
    tcg_gen_ext16s_i32(tmp1, a);
    tcg_gen_ext16s_i32(tmp2, b);
    tcg_gen_mul_i32(tmp1, tmp1, tmp2);
    dead_tmp(tmp2);
    /* High halves: arithmetic shift sign-extends them in place. */
    tcg_gen_sari_i32(a, a, 16);
    tcg_gen_sari_i32(b, b, 16);
    tcg_gen_mul_i32(b, b, a);
    tcg_gen_mov_i32(a, tmp1);
    dead_tmp(tmp1);
}
265
/* Byteswap each halfword. */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    /* tmp = odd bytes moved down, var = even bytes moved up; OR merges. */
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
277
/* Byteswap low halfword and sign extend. */
static void gen_revsh(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff);
    tcg_gen_shli_i32(var, var, 8);
    /* Sign-extend from the (former) low byte, now in bits 8-15. */
    tcg_gen_ext8s_i32(var, var);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
289
/* Unsigned bitfield extract.  'mask' is the already-shifted field mask
   ((1 << width) - 1). */
static void gen_ubfx(TCGv var, int shift, uint32_t mask)
{
    if (shift)
        tcg_gen_shri_i32(var, var, shift);
    tcg_gen_andi_i32(var, var, mask);
}
297
/* Signed bitfield extract. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend the 'width'-bit field via the xor/sub trick. */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
312
/* Bitfield insertion.  Insert val into base.  Clobbers base and val.
   'mask' is the unshifted field mask; 'shift' positions the field. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(base, base, ~(mask << shift));
    tcg_gen_or_i32(dest, base, val);
}
321
d9ba4830
PB
/* Round the top 32 bits of a 64-bit value: a holds the low word and b
   the high word; a becomes b plus bit 31 of the low word (round to
   nearest). */
static void gen_roundqd(TCGv a, TCGv b)
{
    tcg_gen_shri_i32(a, a, 31);
    tcg_gen_add_i32(a, a, b);
}
328
8f01245e
PB
/* FIXME: Most targets have native widening multiplication.
   It would be good to use that instead of a full wide multiply.  */
/* Unsigned 32x32->64 multiply.  Marks inputs as dead. */
static TCGv_i64 gen_mulu_i64_i32(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, a);
    dead_tmp(a);
    tcg_gen_extu_i32_i64(tmp2, b);
    dead_tmp(b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    return tmp1;
}
344
a7812ae4 345static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
5e3f878a 346{
a7812ae4
PB
347 TCGv_i64 tmp1 = tcg_temp_new_i64();
348 TCGv_i64 tmp2 = tcg_temp_new_i64();
5e3f878a
PB
349
350 tcg_gen_ext_i32_i64(tmp1, a);
351 dead_tmp(a);
352 tcg_gen_ext_i32_i64(tmp2, b);
353 dead_tmp(b);
354 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
355 return tmp1;
356}
357
8f01245e
PB
/* Unsigned 32x32->64 multiply.  Low word of the product goes to T0,
   high word to T1. */
static void gen_op_mull_T0_T1(void)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
    tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
}
371
/* Signed 32x32->64 multiply.  Low word of the product goes to a, high
   word to b; inputs are overwritten but not released. */
static void gen_imull(TCGv a, TCGv b)
{
    TCGv_i64 tmp1 = tcg_temp_new_i64();
    TCGv_i64 tmp2 = tcg_temp_new_i64();

    tcg_gen_ext_i32_i64(tmp1, a);
    tcg_gen_ext_i32_i64(tmp2, b);
    tcg_gen_mul_i64(tmp1, tmp1, tmp2);
    tcg_gen_trunc_i64_i32(a, tmp1);
    tcg_gen_shri_i64(tmp1, tmp1, 32);
    tcg_gen_trunc_i64_i32(b, tmp1);
}
#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
386
8f01245e
PB
/* Swap low and high halfwords. */
static void gen_swap_half(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 16);
    tcg_gen_shli_i32(var, var, 16);
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
396
b26eefb6
PB
/* Dual 16-bit add.  Result placed in t0 and t1 is marked as dead.
   tmp = (t0 ^ t1) & 0x8000;
   t0 &= ~0x8000;
   t1 &= ~0x8000;
   t0 = (t0 + t1) ^ tmp;
   (masking the halfword sign bits out first stops the carry from the
   low halfword propagating into the high one)
 */

static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
416
9a119ff6
PB
417#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
418
b26eefb6
PB
419/* Set CF to the top bit of var. */
420static void gen_set_CF_bit31(TCGv var)
421{
422 TCGv tmp = new_tmp();
423 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 424 gen_set_CF(var);
b26eefb6
PB
425 dead_tmp(tmp);
426}
427
/* Set N and Z flags from var.  Both NF and ZF store the full value:
   NF's sign bit is N, and Z is "ZF == 0". */
static inline void gen_logic_CC(TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
}
434
/* T0 += T1 + CF. */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}
444
3670669c
PB
445/* dest = T0 - T1 + CF - 1. */
446static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
447{
d9ba4830 448 TCGv tmp;
3670669c 449 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 450 tmp = load_cpu_field(CF);
3670669c
PB
451 tcg_gen_add_i32(dest, dest, tmp);
452 tcg_gen_subi_i32(dest, dest, 1);
453 dead_tmp(tmp);
454}
455
456#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
457#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
458
b26eefb6
PB
459/* T0 &= ~T1. Clobbers T1. */
460/* FIXME: Implement bic natively. */
8f8e3aa4
PB
461static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
462{
463 TCGv tmp = new_tmp();
464 tcg_gen_not_i32(tmp, t1);
465 tcg_gen_and_i32(dest, t0, tmp);
466 dead_tmp(tmp);
467}
b26eefb6
PB
468static inline void gen_op_bicl_T0_T1(void)
469{
470 gen_op_notl_T1();
471 gen_op_andl_T0_T1();
472}
473
ad69471c
PB
474/* FIXME: Implement this natively. */
475#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
476
b26eefb6
PB
/* Rotate t1 right by i bits into t0.  i must be in 0..31; i == 0 is a
   no-op (and avoids the undefined 32-bit shift below).  Clobbers t1. */
/* FIXME: Implement this natively.  */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
491
9a119ff6 492static void shifter_out_im(TCGv var, int shift)
b26eefb6 493{
9a119ff6
PB
494 TCGv tmp = new_tmp();
495 if (shift == 0) {
496 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 497 } else {
9a119ff6
PB
498 tcg_gen_shri_i32(tmp, var, shift);
499 if (shift != 31);
500 tcg_gen_andi_i32(tmp, tmp, 1);
501 }
502 gen_set_CF(tmp);
503 dead_tmp(tmp);
504}
b26eefb6 505
9a119ff6
PB
506/* Shift by immediate. Includes special handling for shift == 0. */
507static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
508{
509 switch (shiftop) {
510 case 0: /* LSL */
511 if (shift != 0) {
512 if (flags)
513 shifter_out_im(var, 32 - shift);
514 tcg_gen_shli_i32(var, var, shift);
515 }
516 break;
517 case 1: /* LSR */
518 if (shift == 0) {
519 if (flags) {
520 tcg_gen_shri_i32(var, var, 31);
521 gen_set_CF(var);
522 }
523 tcg_gen_movi_i32(var, 0);
524 } else {
525 if (flags)
526 shifter_out_im(var, shift - 1);
527 tcg_gen_shri_i32(var, var, shift);
528 }
529 break;
530 case 2: /* ASR */
531 if (shift == 0)
532 shift = 32;
533 if (flags)
534 shifter_out_im(var, shift - 1);
535 if (shift == 32)
536 shift = 31;
537 tcg_gen_sari_i32(var, var, shift);
538 break;
539 case 3: /* ROR/RRX */
540 if (shift != 0) {
541 if (flags)
542 shifter_out_im(var, shift - 1);
543 tcg_gen_rori_i32(var, var, shift); break;
544 } else {
d9ba4830 545 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
546 if (flags)
547 shifter_out_im(var, 0);
548 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
549 tcg_gen_shli_i32(tmp, tmp, 31);
550 tcg_gen_or_i32(var, var, tmp);
551 dead_tmp(tmp);
b26eefb6
PB
552 }
553 }
554};
555
8984bd2e
PB
/* Shift 'var' by the value in 'shift' (register-specified shift).
   With 'flags' set the *_cc helper variants also update CF.  The
   shift temporary is released. */
static inline void gen_arm_shift_reg(TCGv var, int shiftop,
                                     TCGv shift, int flags)
{
    if (flags) {
        switch (shiftop) {
        case 0: gen_helper_shl_cc(var, var, shift); break;
        case 1: gen_helper_shr_cc(var, var, shift); break;
        case 2: gen_helper_sar_cc(var, var, shift); break;
        case 3: gen_helper_ror_cc(var, var, shift); break;
        }
    } else {
        switch (shiftop) {
        case 0: gen_helper_shl(var, var, shift); break;
        case 1: gen_helper_shr(var, var, shift); break;
        case 2: gen_helper_sar(var, var, shift); break;
        case 3: gen_helper_ror(var, var, shift); break;
        }
    }
    dead_tmp(shift);
}
576
6ddbc6e4
PB
/* Dispatch the ARM-encoding parallel add/subtract (SADD16 etc.) to the
   matching helper.  op2 selects the operation, op1 the prefix
   (s/q/sh/u/uq/uh).  The s/u variants take an extra pointer to the GE
   flags in the CPU state. */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add16)); break; \
    case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
    case 3: gen_pas_helper(glue(pfx,sub16)); break; \
    case 4: gen_pas_helper(glue(pfx,add8)); break; \
    case 7: gen_pas_helper(glue(pfx,sub8)); break; \
    }
static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 1:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 5:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 2:
        PAS_OP(q);
        break;
    case 3:
        PAS_OP(sh);
        break;
    case 6:
        PAS_OP(uq);
        break;
    case 7:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
620
6ddbc6e4
PB
/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
/* Same dispatch as gen_arm_parallel_addsub, but with the Thumb-2 op1/op2
   numbering. */
#define PAS_OP(pfx) \
    switch (op2) {  \
    case 0: gen_pas_helper(glue(pfx,add8)); break; \
    case 1: gen_pas_helper(glue(pfx,add16)); break; \
    case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
    case 4: gen_pas_helper(glue(pfx,sub8)); break; \
    case 5: gen_pas_helper(glue(pfx,sub16)); break; \
    case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
    }
static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
{
    TCGv_ptr tmp;

    switch (op1) {
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
    case 0:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(s)
        break;
    case 4:
        tmp = tcg_temp_new_ptr();
        tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
        PAS_OP(u)
        break;
#undef gen_pas_helper
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
    case 1:
        PAS_OP(q);
        break;
    case 2:
        PAS_OP(sh);
        break;
    case 5:
        PAS_OP(uq);
        break;
    case 6:
        PAS_OP(uh);
        break;
#undef gen_pas_helper
    }
}
#undef PAS_OP
665
d9ba4830
PB
/* Emit a branch to 'label' taken when ARM condition code 'cc' (0..13)
   holds, reading the lazily-stored flags NF/ZF/CF/VF from the CPU
   state.  Compound conditions (hi, gt) branch over an 'inv' label for
   the short-circuit case. */
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    int inv;

    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
2c0262af 763
b1d8e52e 764static const uint8_t table_logic_cc[16] = {
2c0262af
FB
765 1, /* and */
766 1, /* xor */
767 0, /* sub */
768 0, /* rsb */
769 0, /* add */
770 0, /* adc */
771 0, /* sbc */
772 0, /* rsc */
773 1, /* andl */
774 1, /* xorl */
775 0, /* cmp */
776 0, /* cmn */
777 1, /* orr */
778 1, /* mov */
779 1, /* bic */
780 1, /* mvn */
781};
3b46e624 782
d9ba4830
PB
/* Set PC and Thumb state from an immediate address.  */
static inline void gen_bx_im(DisasContext *s, uint32_t addr)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    /* Only write the thumb flag when it actually changes; the same
       temporary is then reused for the PC value. */
    if (s->thumb != (addr & 1)) {
        tcg_gen_movi_i32(tmp, addr & 1);
        tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
    }
    tcg_gen_movi_i32(tmp, addr & ~1);
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
    dead_tmp(tmp);
}
798
/* Set PC and Thumb state from var.  var is marked as dead.  Bit 0 of
   var selects the Thumb state; the remaining bits form the PC. */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, var, 1);
    store_cpu_field(tmp, thumb);
    tcg_gen_andi_i32(var, var, ~1);
    store_cpu_field(var, regs[15]);
}
811
812/* TODO: This should be removed. Use gen_bx instead. */
813static inline void gen_bx_T0(DisasContext *s)
814{
815 TCGv tmp = new_tmp();
816 tcg_gen_mov_i32(tmp, cpu_T[0]);
817 gen_bx(s, tmp);
b26eefb6 818}
b5ff1b31
FB
819
820#if defined(CONFIG_USER_ONLY)
821#define gen_ldst(name, s) gen_op_##name##_raw()
822#else
823#define gen_ldst(name, s) do { \
6658ffb8 824 s->is_mem = 1; \
b5ff1b31
FB
825 if (IS_USER(s)) \
826 gen_op_##name##_user(); \
827 else \
828 gen_op_##name##_kernel(); \
829 } while (0)
830#endif
b0109805
PB
/* Guest memory access helpers.  The gen_ldNN variants allocate a new
   temporary and load from 'addr' using MMU index 'index'; the gen_stNN
   variants store 'val' and release it. */
static inline TCGv gen_ld8s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld8u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld8u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16s(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16s(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld16u(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld16u(tmp, addr, index);
    return tmp;
}
static inline TCGv gen_ld32(TCGv addr, int index)
{
    TCGv tmp = new_tmp();
    tcg_gen_qemu_ld32u(tmp, addr, index);
    return tmp;
}
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
b5ff1b31 876
2c0262af
FB
/* Load CPU register 'reg' into the T0 global. */
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[0], reg);
}

/* Load CPU register 'reg' into the T1 global. */
static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    load_reg_var(s, cpu_T[1], reg);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    /* NOTE(review): cpu_T is declared as cpu_T[2] above, so cpu_T[2]
       is out of bounds here — confirm whether the array should be [3]
       or this function is dead code. */
    load_reg_var(s, cpu_T[2], reg);
}
891
5e3f878a
PB
892static inline void gen_set_pc_im(uint32_t val)
893{
894 TCGv tmp = new_tmp();
895 tcg_gen_movi_i32(tmp, val);
896 store_cpu_field(tmp, regs[15]);
897}
898
2c0262af
FB
/* Store T0 or T1 (selected by t) into CPU register 'reg'.  Writing r15
   clears bit 0 and ends the TB. */
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}
914
/* Store T0 into CPU register 'reg'. */
static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

/* Store T1 into CPU register 'reg'. */
static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
924
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
932
b0109805
PB
/* Apply the addressing-mode offset of a single data transfer
   instruction to address 'var': either a 12-bit immediate or a
   shifted register, added or subtracted per bit 23. */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
                                       TCGv var)
{
    int val, rm, shift, shiftop;
    TCGv offset;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;         /* bit 23 clear: subtract */
        if (val != 0)
            tcg_gen_addi_i32(var, var, val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        shiftop = (insn >> 5) & 3;
        offset = load_reg(s, rm);
        gen_arm_shift_im(offset, shiftop, shift, 0);
        if (!(insn & (1 << 23)))
            tcg_gen_sub_i32(var, var, offset);
        else
            tcg_gen_add_i32(var, var, offset);
        dead_tmp(offset);
    }
}
960
191f9a93 961static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 962 int extra, TCGv var)
2c0262af
FB
963{
964 int val, rm;
b26eefb6 965 TCGv offset;
3b46e624 966
2c0262af
FB
967 if (insn & (1 << 22)) {
968 /* immediate */
969 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
970 if (!(insn & (1 << 23)))
971 val = -val;
18acad92 972 val += extra;
537730b9 973 if (val != 0)
b0109805 974 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
975 } else {
976 /* register */
191f9a93 977 if (extra)
b0109805 978 tcg_gen_addi_i32(var, var, extra);
2c0262af 979 rm = (insn) & 0xf;
b26eefb6 980 offset = load_reg(s, rm);
2c0262af 981 if (!(insn & (1 << 23)))
b0109805 982 tcg_gen_sub_i32(var, var, offset);
2c0262af 983 else
b0109805 984 tcg_gen_add_i32(var, var, offset);
b26eefb6 985 dead_tmp(offset);
2c0262af
FB
986 }
987}
988
4373f3ce
PB
989#define VFP_OP2(name) \
990static inline void gen_vfp_##name(int dp) \
991{ \
992 if (dp) \
993 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
994 else \
995 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
996}
997
5b340b51 998#define VFP_OP1(name) \
9ee6e8bb
PB
999static inline void gen_vfp_##name(int dp, int arg) \
1000{ \
1001 if (dp) \
1002 gen_op_vfp_##name##d(arg); \
1003 else \
1004 gen_op_vfp_##name##s(arg); \
1005}
1006
4373f3ce
PB
1007VFP_OP2(add)
1008VFP_OP2(sub)
1009VFP_OP2(mul)
1010VFP_OP2(div)
1011
1012#undef VFP_OP2
1013
1014static inline void gen_vfp_abs(int dp)
1015{
1016 if (dp)
1017 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1018 else
1019 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1020}
1021
1022static inline void gen_vfp_neg(int dp)
1023{
1024 if (dp)
1025 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1026 else
1027 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1028}
1029
1030static inline void gen_vfp_sqrt(int dp)
1031{
1032 if (dp)
1033 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1034 else
1035 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1036}
1037
1038static inline void gen_vfp_cmp(int dp)
1039{
1040 if (dp)
1041 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1042 else
1043 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1044}
1045
1046static inline void gen_vfp_cmpe(int dp)
1047{
1048 if (dp)
1049 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1050 else
1051 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1052}
1053
1054static inline void gen_vfp_F1_ld0(int dp)
1055{
1056 if (dp)
5b340b51 1057 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1058 else
5b340b51 1059 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1060}
1061
/* F0 = (float)(unsigned int)F0s.  The integer source is always read
   from the single-precision register cpu_F0s, even when the result is
   double precision (cpu_F0d).  */
static inline void gen_vfp_uito(int dp)
{
    if (dp)
        gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
}

/* F0 = (float)(signed int)F0s; see gen_vfp_uito for the register use.  */
static inline void gen_vfp_sito(int dp)
{
    if (dp)
        gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
    else
        gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
}
1077
/* F0s = (unsigned int)F0.  The integer result always lands in the
   single-precision register cpu_F0s; the source is cpu_F0d when dp.  */
static inline void gen_vfp_toui(int dp)
{
    if (dp)
        gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
}

/* As gen_vfp_toui but using the "z" (round-toward-zero) helpers.  */
static inline void gen_vfp_touiz(int dp)
{
    if (dp)
        gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
}

/* F0s = (signed int)F0; result in cpu_F0s as above.  */
static inline void gen_vfp_tosi(int dp)
{
    if (dp)
        gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
}

/* As gen_vfp_tosi but using the "z" (round-toward-zero) helpers.  */
static inline void gen_vfp_tosiz(int dp)
{
    if (dp)
        gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
    else
        gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
}
1109
/* Emit a VFP fixed-point conversion: F0 = name(F0, #shift) at the
   precision selected by dp; the helpers take the shift amount as an
   i32 and cpu_env for FP status.
   The shift constant is a TCG temporary and must be released after the
   call: the previous version passed tcg_const_i32() inline and leaked
   one i32 temp per translated instruction.  */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    TCGv tmp_shift = tcg_const_i32(shift); \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env); \
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env); \
    tcg_temp_free_i32(tmp_shift); \
}
4373f3ce
PB
/* Fixed-point conversions: to/from signed (s) and unsigned (u)
   16-bit (h) and 32-bit (l) fixed-point values.  */
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1127
b5ff1b31
FB
/* Load F0 from the guest address in T1: a 64-bit load into cpu_F0d
   when dp, else a zero-extending 32-bit load into cpu_F0s.  */
static inline void gen_vfp_ld(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
}

/* Store F0 to the guest address in T1 (64-bit if dp, else 32-bit).  */
static inline void gen_vfp_st(DisasContext *s, int dp)
{
    if (dp)
        tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
    else
        tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
}
1143
8e96005d
FB
1144static inline long
1145vfp_reg_offset (int dp, int reg)
1146{
1147 if (dp)
1148 return offsetof(CPUARMState, vfp.regs[reg]);
1149 else if (reg & 1) {
1150 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1151 + offsetof(CPU_DoubleU, l.upper);
1152 } else {
1153 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1154 + offsetof(CPU_DoubleU, l.lower);
1155 }
1156}
9ee6e8bb
PB
1157
/* Byte offset of a 32-bit piece of a NEON register within CPUARMState;
   n == 0 is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1167
ad69471c
PB
/* FIXME: Remove these. */
/* Legacy aliases: access the fixed T0/T1 temporaries as NEON operands,
   and load/store the 32-bit piece (reg, n) of a NEON register into them.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1175
8f8e3aa4
PB
1176static TCGv neon_load_reg(int reg, int pass)
1177{
1178 TCGv tmp = new_tmp();
1179 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1180 return tmp;
1181}
1182
1183static void neon_store_reg(int reg, int pass, TCGv var)
1184{
1185 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1186 dead_tmp(var);
1187}
1188
/* Load a whole 64-bit NEON (D) register from CPUARMState into var.  */
static inline void neon_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}

/* Store var into the 64-bit NEON (D) register in CPUARMState.  */
static inline void neon_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1198
4373f3ce
PB
/* Float register loads/stores are plain integer loads/stores of the
   same width; alias the f32/f64 names to the i32/i64 TCG ops.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1203
b7bcbe95
FB
1204static inline void gen_mov_F0_vreg(int dp, int reg)
1205{
1206 if (dp)
4373f3ce 1207 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1208 else
4373f3ce 1209 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1210}
1211
1212static inline void gen_mov_F1_vreg(int dp, int reg)
1213{
1214 if (dp)
4373f3ce 1215 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1216 else
4373f3ce 1217 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1218}
1219
1220static inline void gen_mov_vreg_F0(int dp, int reg)
1221{
1222 if (dp)
4373f3ce 1223 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1224 else
4373f3ce 1225 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1226}
1227
18c9b560
AZ
/* Coprocessor instruction direction bit (bit 20): set for loads
   (register reads from the coprocessor), clear for stores.  */
#define ARM_CP_RW_BIT (1 << 20)
/* Copy 64-bit iwMMXt data register wR<reg> into a TCG temporary.  */
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

/* Copy a TCG temporary into iwMMXt data register wR<reg>.  */
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}

/* Move between the fixed T0/T1 temporaries and iwMMXt control
   register wC<reg>.  */
static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}

/* Move the 64-bit working register M0 to/from data register wR<rn>.  */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}

static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1264
/* 64-bit bitwise ops on the working register: M0 = M0 <op> wRn.
   cpu_V1 is used as a scratch register for the second operand.  */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}

static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1282
/* Binary iwMMXt helper without env: M0 = helper(M0, wRn).  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (it accesses CPU
   state, e.g. flags or saturation bits).  */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate the byte/word/long (8/16/32-bit element) variants.  */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Unary env helper on the working register: M0 = helper(M0).  */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1307
/* Multiply / multiply-accumulate / sum-of-absolute-differences.  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Two-operand and one-operand (zero/sign-extending) unpacks.  */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise comparisons.  */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max.  */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract (n = no saturation flag variant,
   u = unsigned saturate, s = signed saturate).  */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averaging (WAVG2) with round bit 0/1.  */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack with unsigned/signed saturation.  */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1364
/* Multiply-accumulate variants taking the scalar operands from the
   fixed T0/T1 temporaries: M0 = muladd*(M0, T0, T1).  */
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}

/* Align M0 against wRn by the amount in T0 (WALIGNR support);
   exact byte semantics live in the iwmmxt_align helper.  */
static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}
1385
1386static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1387{
1388 TCGv tmp = tcg_const_i32(shift);
1389 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1390}
1391
/* T0 = sign-extended 8-bit field of M0 at bit offset 'shift'.
   Note: shifts M0 in place, clobbering the working register.  */
static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}

/* T0 = sign-extended 16-bit field of M0 at bit offset 'shift';
   clobbers M0 like gen_op_iwmmxt_extrsb_T0_M0.  */
static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}

/* T0 = unsigned field of M0 at bit offset 'shift', masked by 'mask'.
   A mask of ~0u means the full 32 bits, so the AND is skipped.
   Clobbers M0.  */
static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}
1413
1414static void gen_op_iwmmxt_set_mup(void)
1415{
1416 TCGv tmp;
1417 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1418 tcg_gen_ori_i32(tmp, tmp, 2);
1419 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1420}
1421
1422static void gen_op_iwmmxt_set_cup(void)
1423{
1424 TCGv tmp;
1425 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1426 tcg_gen_ori_i32(tmp, tmp, 1);
1427 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1428}
1429
/* Compute the N/Z flag values from M0 (via helper) and store them
   into the wCASF flags register.  */
static void gen_op_iwmmxt_setpsr_nz(void)
{
    TCGv tmp = new_tmp();
    gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
    store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
}

/* M0 += zero-extended low 32 bits of wRn (uses cpu_V1 as scratch).  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1443
1444
/* Split 64-bit wRn into the fixed temporaries: T0 = low word,
   T1 = high word.  cpu_V0 is used (and clobbered) as scratch.  */
static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}

/* wRn = T1:T0 (T0 is the low word); clobbers cpu_V0.  */
static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
{
    tcg_gen_concat_i32_i64(cpu_V0, cpu_T[0], cpu_T[1]);
    iwmmxt_store_reg(cpu_V0, rn);
}
1458
18c9b560
AZ
1459static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1460{
1461 int rd;
1462 uint32_t offset;
1463
1464 rd = (insn >> 16) & 0xf;
1465 gen_movl_T1_reg(s, rd);
1466
1467 offset = (insn & 0xff) << ((insn >> 7) & 2);
1468 if (insn & (1 << 24)) {
1469 /* Pre indexed */
1470 if (insn & (1 << 23))
1471 gen_op_addl_T1_im(offset);
1472 else
1473 gen_op_addl_T1_im(-offset);
1474
1475 if (insn & (1 << 21))
1476 gen_movl_reg_T1(s, rd);
1477 } else if (insn & (1 << 21)) {
1478 /* Post indexed */
1479 if (insn & (1 << 23))
1480 gen_op_movl_T0_im(offset);
1481 else
1482 gen_op_movl_T0_im(- offset);
1483 gen_op_addl_T0_T1();
1484 gen_movl_reg_T0(s, rd);
1485 } else if (!(insn & (1 << 23)))
1486 return 1;
1487 return 0;
1488}
1489
1490static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1491{
1492 int rd = (insn >> 0) & 0xf;
1493
1494 if (insn & (1 << 8))
1495 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1496 return 1;
1497 else
1498 gen_op_iwmmxt_movl_T0_wCx(rd);
1499 else
e677137d 1500 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1501
1502 gen_op_movl_T1_im(mask);
1503 gen_op_andl_T0_T1();
1504 return 0;
1505}
1506
1507/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1508 (ie. an undefined instruction). */
1509static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1510{
1511 int rd, wrd;
1512 int rdhi, rdlo, rd0, rd1, i;
b0109805 1513 TCGv tmp;
18c9b560
AZ
1514
1515 if ((insn & 0x0e000e00) == 0x0c000000) {
1516 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1517 wrd = insn & 0xf;
1518 rdlo = (insn >> 12) & 0xf;
1519 rdhi = (insn >> 16) & 0xf;
1520 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1521 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1522 gen_movl_reg_T0(s, rdlo);
1523 gen_movl_reg_T1(s, rdhi);
1524 } else { /* TMCRR */
1525 gen_movl_T0_reg(s, rdlo);
1526 gen_movl_T1_reg(s, rdhi);
e677137d 1527 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1528 gen_op_iwmmxt_set_mup();
1529 }
1530 return 0;
1531 }
1532
1533 wrd = (insn >> 12) & 0xf;
1534 if (gen_iwmmxt_address(s, insn))
1535 return 1;
1536 if (insn & ARM_CP_RW_BIT) {
1537 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1538 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1539 tcg_gen_mov_i32(cpu_T[0], tmp);
1540 dead_tmp(tmp);
18c9b560
AZ
1541 gen_op_iwmmxt_movl_wCx_T0(wrd);
1542 } else {
e677137d
PB
1543 i = 1;
1544 if (insn & (1 << 8)) {
1545 if (insn & (1 << 22)) { /* WLDRD */
1546 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1547 i = 0;
1548 } else { /* WLDRW wRd */
1549 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1550 }
1551 } else {
1552 if (insn & (1 << 22)) { /* WLDRH */
1553 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1554 } else { /* WLDRB */
1555 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1556 }
1557 }
1558 if (i) {
1559 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1560 dead_tmp(tmp);
1561 }
18c9b560
AZ
1562 gen_op_iwmmxt_movq_wRn_M0(wrd);
1563 }
1564 } else {
1565 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1566 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1567 tmp = new_tmp();
1568 tcg_gen_mov_i32(tmp, cpu_T[0]);
1569 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1570 } else {
1571 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1572 tmp = new_tmp();
1573 if (insn & (1 << 8)) {
1574 if (insn & (1 << 22)) { /* WSTRD */
1575 dead_tmp(tmp);
1576 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1577 } else { /* WSTRW wRd */
1578 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1579 gen_st32(tmp, cpu_T[1], IS_USER(s));
1580 }
1581 } else {
1582 if (insn & (1 << 22)) { /* WSTRH */
1583 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1584 gen_st16(tmp, cpu_T[1], IS_USER(s));
1585 } else { /* WSTRB */
1586 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1587 gen_st8(tmp, cpu_T[1], IS_USER(s));
1588 }
1589 }
18c9b560
AZ
1590 }
1591 }
1592 return 0;
1593 }
1594
1595 if ((insn & 0x0f000000) != 0x0e000000)
1596 return 1;
1597
1598 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1599 case 0x000: /* WOR */
1600 wrd = (insn >> 12) & 0xf;
1601 rd0 = (insn >> 0) & 0xf;
1602 rd1 = (insn >> 16) & 0xf;
1603 gen_op_iwmmxt_movq_M0_wRn(rd0);
1604 gen_op_iwmmxt_orq_M0_wRn(rd1);
1605 gen_op_iwmmxt_setpsr_nz();
1606 gen_op_iwmmxt_movq_wRn_M0(wrd);
1607 gen_op_iwmmxt_set_mup();
1608 gen_op_iwmmxt_set_cup();
1609 break;
1610 case 0x011: /* TMCR */
1611 if (insn & 0xf)
1612 return 1;
1613 rd = (insn >> 12) & 0xf;
1614 wrd = (insn >> 16) & 0xf;
1615 switch (wrd) {
1616 case ARM_IWMMXT_wCID:
1617 case ARM_IWMMXT_wCASF:
1618 break;
1619 case ARM_IWMMXT_wCon:
1620 gen_op_iwmmxt_set_cup();
1621 /* Fall through. */
1622 case ARM_IWMMXT_wCSSF:
1623 gen_op_iwmmxt_movl_T0_wCx(wrd);
1624 gen_movl_T1_reg(s, rd);
1625 gen_op_bicl_T0_T1();
1626 gen_op_iwmmxt_movl_wCx_T0(wrd);
1627 break;
1628 case ARM_IWMMXT_wCGR0:
1629 case ARM_IWMMXT_wCGR1:
1630 case ARM_IWMMXT_wCGR2:
1631 case ARM_IWMMXT_wCGR3:
1632 gen_op_iwmmxt_set_cup();
1633 gen_movl_reg_T0(s, rd);
1634 gen_op_iwmmxt_movl_wCx_T0(wrd);
1635 break;
1636 default:
1637 return 1;
1638 }
1639 break;
1640 case 0x100: /* WXOR */
1641 wrd = (insn >> 12) & 0xf;
1642 rd0 = (insn >> 0) & 0xf;
1643 rd1 = (insn >> 16) & 0xf;
1644 gen_op_iwmmxt_movq_M0_wRn(rd0);
1645 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1646 gen_op_iwmmxt_setpsr_nz();
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 gen_op_iwmmxt_set_cup();
1650 break;
1651 case 0x111: /* TMRC */
1652 if (insn & 0xf)
1653 return 1;
1654 rd = (insn >> 12) & 0xf;
1655 wrd = (insn >> 16) & 0xf;
1656 gen_op_iwmmxt_movl_T0_wCx(wrd);
1657 gen_movl_reg_T0(s, rd);
1658 break;
1659 case 0x300: /* WANDN */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 0) & 0xf;
1662 rd1 = (insn >> 16) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1664 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1665 gen_op_iwmmxt_andq_M0_wRn(rd1);
1666 gen_op_iwmmxt_setpsr_nz();
1667 gen_op_iwmmxt_movq_wRn_M0(wrd);
1668 gen_op_iwmmxt_set_mup();
1669 gen_op_iwmmxt_set_cup();
1670 break;
1671 case 0x200: /* WAND */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 0) & 0xf;
1674 rd1 = (insn >> 16) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 gen_op_iwmmxt_andq_M0_wRn(rd1);
1677 gen_op_iwmmxt_setpsr_nz();
1678 gen_op_iwmmxt_movq_wRn_M0(wrd);
1679 gen_op_iwmmxt_set_mup();
1680 gen_op_iwmmxt_set_cup();
1681 break;
1682 case 0x810: case 0xa10: /* WMADD */
1683 wrd = (insn >> 12) & 0xf;
1684 rd0 = (insn >> 0) & 0xf;
1685 rd1 = (insn >> 16) & 0xf;
1686 gen_op_iwmmxt_movq_M0_wRn(rd0);
1687 if (insn & (1 << 21))
1688 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1689 else
1690 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1691 gen_op_iwmmxt_movq_wRn_M0(wrd);
1692 gen_op_iwmmxt_set_mup();
1693 break;
1694 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1695 wrd = (insn >> 12) & 0xf;
1696 rd0 = (insn >> 16) & 0xf;
1697 rd1 = (insn >> 0) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0);
1699 switch ((insn >> 22) & 3) {
1700 case 0:
1701 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1702 break;
1703 case 1:
1704 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1705 break;
1706 case 2:
1707 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1708 break;
1709 case 3:
1710 return 1;
1711 }
1712 gen_op_iwmmxt_movq_wRn_M0(wrd);
1713 gen_op_iwmmxt_set_mup();
1714 gen_op_iwmmxt_set_cup();
1715 break;
1716 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1717 wrd = (insn >> 12) & 0xf;
1718 rd0 = (insn >> 16) & 0xf;
1719 rd1 = (insn >> 0) & 0xf;
1720 gen_op_iwmmxt_movq_M0_wRn(rd0);
1721 switch ((insn >> 22) & 3) {
1722 case 0:
1723 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1724 break;
1725 case 1:
1726 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1727 break;
1728 case 2:
1729 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1730 break;
1731 case 3:
1732 return 1;
1733 }
1734 gen_op_iwmmxt_movq_wRn_M0(wrd);
1735 gen_op_iwmmxt_set_mup();
1736 gen_op_iwmmxt_set_cup();
1737 break;
1738 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1739 wrd = (insn >> 12) & 0xf;
1740 rd0 = (insn >> 16) & 0xf;
1741 rd1 = (insn >> 0) & 0xf;
1742 gen_op_iwmmxt_movq_M0_wRn(rd0);
1743 if (insn & (1 << 22))
1744 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1745 else
1746 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1747 if (!(insn & (1 << 20)))
1748 gen_op_iwmmxt_addl_M0_wRn(wrd);
1749 gen_op_iwmmxt_movq_wRn_M0(wrd);
1750 gen_op_iwmmxt_set_mup();
1751 break;
1752 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1753 wrd = (insn >> 12) & 0xf;
1754 rd0 = (insn >> 16) & 0xf;
1755 rd1 = (insn >> 0) & 0xf;
1756 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1757 if (insn & (1 << 21)) {
1758 if (insn & (1 << 20))
1759 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1760 else
1761 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1762 } else {
1763 if (insn & (1 << 20))
1764 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1765 else
1766 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1767 }
18c9b560
AZ
1768 gen_op_iwmmxt_movq_wRn_M0(wrd);
1769 gen_op_iwmmxt_set_mup();
1770 break;
1771 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1772 wrd = (insn >> 12) & 0xf;
1773 rd0 = (insn >> 16) & 0xf;
1774 rd1 = (insn >> 0) & 0xf;
1775 gen_op_iwmmxt_movq_M0_wRn(rd0);
1776 if (insn & (1 << 21))
1777 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1778 else
1779 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1780 if (!(insn & (1 << 20))) {
e677137d
PB
1781 iwmmxt_load_reg(cpu_V1, wrd);
1782 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1783 }
1784 gen_op_iwmmxt_movq_wRn_M0(wrd);
1785 gen_op_iwmmxt_set_mup();
1786 break;
1787 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1788 wrd = (insn >> 12) & 0xf;
1789 rd0 = (insn >> 16) & 0xf;
1790 rd1 = (insn >> 0) & 0xf;
1791 gen_op_iwmmxt_movq_M0_wRn(rd0);
1792 switch ((insn >> 22) & 3) {
1793 case 0:
1794 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1795 break;
1796 case 1:
1797 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1798 break;
1799 case 2:
1800 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1801 break;
1802 case 3:
1803 return 1;
1804 }
1805 gen_op_iwmmxt_movq_wRn_M0(wrd);
1806 gen_op_iwmmxt_set_mup();
1807 gen_op_iwmmxt_set_cup();
1808 break;
1809 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 rd1 = (insn >> 0) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1814 if (insn & (1 << 22)) {
1815 if (insn & (1 << 20))
1816 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1817 else
1818 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1819 } else {
1820 if (insn & (1 << 20))
1821 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1822 else
1823 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1824 }
18c9b560
AZ
1825 gen_op_iwmmxt_movq_wRn_M0(wrd);
1826 gen_op_iwmmxt_set_mup();
1827 gen_op_iwmmxt_set_cup();
1828 break;
1829 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1830 wrd = (insn >> 12) & 0xf;
1831 rd0 = (insn >> 16) & 0xf;
1832 rd1 = (insn >> 0) & 0xf;
1833 gen_op_iwmmxt_movq_M0_wRn(rd0);
1834 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1835 gen_op_movl_T1_im(7);
1836 gen_op_andl_T0_T1();
1837 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1838 gen_op_iwmmxt_movq_wRn_M0(wrd);
1839 gen_op_iwmmxt_set_mup();
1840 break;
1841 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1842 rd = (insn >> 12) & 0xf;
1843 wrd = (insn >> 16) & 0xf;
1844 gen_movl_T0_reg(s, rd);
1845 gen_op_iwmmxt_movq_M0_wRn(wrd);
1846 switch ((insn >> 6) & 3) {
1847 case 0:
1848 gen_op_movl_T1_im(0xff);
1849 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1850 break;
1851 case 1:
1852 gen_op_movl_T1_im(0xffff);
1853 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1854 break;
1855 case 2:
1856 gen_op_movl_T1_im(0xffffffff);
1857 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1858 break;
1859 case 3:
1860 return 1;
1861 }
1862 gen_op_iwmmxt_movq_wRn_M0(wrd);
1863 gen_op_iwmmxt_set_mup();
1864 break;
1865 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1866 rd = (insn >> 12) & 0xf;
1867 wrd = (insn >> 16) & 0xf;
1868 if (rd == 15)
1869 return 1;
1870 gen_op_iwmmxt_movq_M0_wRn(wrd);
1871 switch ((insn >> 22) & 3) {
1872 case 0:
1873 if (insn & 8)
1874 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1875 else {
e677137d 1876 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1877 }
1878 break;
1879 case 1:
1880 if (insn & 8)
1881 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1882 else {
e677137d 1883 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1884 }
1885 break;
1886 case 2:
e677137d 1887 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1888 break;
1889 case 3:
1890 return 1;
1891 }
b26eefb6 1892 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1893 break;
1894 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1895 if ((insn & 0x000ff008) != 0x0003f000)
1896 return 1;
1897 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1898 switch ((insn >> 22) & 3) {
1899 case 0:
1900 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1901 break;
1902 case 1:
1903 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1904 break;
1905 case 2:
1906 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1907 break;
1908 case 3:
1909 return 1;
1910 }
1911 gen_op_shll_T1_im(28);
d9ba4830 1912 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1913 break;
1914 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1915 rd = (insn >> 12) & 0xf;
1916 wrd = (insn >> 16) & 0xf;
1917 gen_movl_T0_reg(s, rd);
1918 switch ((insn >> 6) & 3) {
1919 case 0:
e677137d 1920 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1921 break;
1922 case 1:
e677137d 1923 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1924 break;
1925 case 2:
e677137d 1926 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 break;
1934 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1935 if ((insn & 0x000ff00f) != 0x0003f000)
1936 return 1;
1937 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1938 switch ((insn >> 22) & 3) {
1939 case 0:
1940 for (i = 0; i < 7; i ++) {
1941 gen_op_shll_T1_im(4);
1942 gen_op_andl_T0_T1();
1943 }
1944 break;
1945 case 1:
1946 for (i = 0; i < 3; i ++) {
1947 gen_op_shll_T1_im(8);
1948 gen_op_andl_T0_T1();
1949 }
1950 break;
1951 case 2:
1952 gen_op_shll_T1_im(16);
1953 gen_op_andl_T0_T1();
1954 break;
1955 case 3:
1956 return 1;
1957 }
d9ba4830 1958 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1959 break;
1960 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1961 wrd = (insn >> 12) & 0xf;
1962 rd0 = (insn >> 16) & 0xf;
1963 gen_op_iwmmxt_movq_M0_wRn(rd0);
1964 switch ((insn >> 22) & 3) {
1965 case 0:
e677137d 1966 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1967 break;
1968 case 1:
e677137d 1969 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1970 break;
1971 case 2:
e677137d 1972 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1973 break;
1974 case 3:
1975 return 1;
1976 }
1977 gen_op_iwmmxt_movq_wRn_M0(wrd);
1978 gen_op_iwmmxt_set_mup();
1979 break;
1980 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1981 if ((insn & 0x000ff00f) != 0x0003f000)
1982 return 1;
1983 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1984 switch ((insn >> 22) & 3) {
1985 case 0:
1986 for (i = 0; i < 7; i ++) {
1987 gen_op_shll_T1_im(4);
1988 gen_op_orl_T0_T1();
1989 }
1990 break;
1991 case 1:
1992 for (i = 0; i < 3; i ++) {
1993 gen_op_shll_T1_im(8);
1994 gen_op_orl_T0_T1();
1995 }
1996 break;
1997 case 2:
1998 gen_op_shll_T1_im(16);
1999 gen_op_orl_T0_T1();
2000 break;
2001 case 3:
2002 return 1;
2003 }
d9ba4830 2004 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
2005 break;
2006 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2007 rd = (insn >> 12) & 0xf;
2008 rd0 = (insn >> 16) & 0xf;
2009 if ((insn & 0xf) != 0)
2010 return 1;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0);
2012 switch ((insn >> 22) & 3) {
2013 case 0:
e677137d 2014 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
2015 break;
2016 case 1:
e677137d 2017 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
2018 break;
2019 case 2:
e677137d 2020 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
2021 break;
2022 case 3:
2023 return 1;
2024 }
2025 gen_movl_reg_T0(s, rd);
2026 break;
2027 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2028 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2029 wrd = (insn >> 12) & 0xf;
2030 rd0 = (insn >> 16) & 0xf;
2031 rd1 = (insn >> 0) & 0xf;
2032 gen_op_iwmmxt_movq_M0_wRn(rd0);
2033 switch ((insn >> 22) & 3) {
2034 case 0:
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2039 break;
2040 case 1:
2041 if (insn & (1 << 21))
2042 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2043 else
2044 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2045 break;
2046 case 2:
2047 if (insn & (1 << 21))
2048 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2049 else
2050 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2051 break;
2052 case 3:
2053 return 1;
2054 }
2055 gen_op_iwmmxt_movq_wRn_M0(wrd);
2056 gen_op_iwmmxt_set_mup();
2057 gen_op_iwmmxt_set_cup();
2058 break;
2059 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2060 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2061 wrd = (insn >> 12) & 0xf;
2062 rd0 = (insn >> 16) & 0xf;
2063 gen_op_iwmmxt_movq_M0_wRn(rd0);
2064 switch ((insn >> 22) & 3) {
2065 case 0:
2066 if (insn & (1 << 21))
2067 gen_op_iwmmxt_unpacklsb_M0();
2068 else
2069 gen_op_iwmmxt_unpacklub_M0();
2070 break;
2071 case 1:
2072 if (insn & (1 << 21))
2073 gen_op_iwmmxt_unpacklsw_M0();
2074 else
2075 gen_op_iwmmxt_unpackluw_M0();
2076 break;
2077 case 2:
2078 if (insn & (1 << 21))
2079 gen_op_iwmmxt_unpacklsl_M0();
2080 else
2081 gen_op_iwmmxt_unpacklul_M0();
2082 break;
2083 case 3:
2084 return 1;
2085 }
2086 gen_op_iwmmxt_movq_wRn_M0(wrd);
2087 gen_op_iwmmxt_set_mup();
2088 gen_op_iwmmxt_set_cup();
2089 break;
2090 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2091 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
2095 switch ((insn >> 22) & 3) {
2096 case 0:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_unpackhsb_M0();
2099 else
2100 gen_op_iwmmxt_unpackhub_M0();
2101 break;
2102 case 1:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_unpackhsw_M0();
2105 else
2106 gen_op_iwmmxt_unpackhuw_M0();
2107 break;
2108 case 2:
2109 if (insn & (1 << 21))
2110 gen_op_iwmmxt_unpackhsl_M0();
2111 else
2112 gen_op_iwmmxt_unpackhul_M0();
2113 break;
2114 case 3:
2115 return 1;
2116 }
2117 gen_op_iwmmxt_movq_wRn_M0(wrd);
2118 gen_op_iwmmxt_set_mup();
2119 gen_op_iwmmxt_set_cup();
2120 break;
2121 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2122 case 0x214: case 0x614: case 0xa14: case 0xe14:
2123 wrd = (insn >> 12) & 0xf;
2124 rd0 = (insn >> 16) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 if (gen_iwmmxt_shift(insn, 0xff))
2127 return 1;
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 return 1;
2131 case 1:
e677137d 2132 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2133 break;
2134 case 2:
e677137d 2135 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2136 break;
2137 case 3:
e677137d 2138 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2139 break;
2140 }
2141 gen_op_iwmmxt_movq_wRn_M0(wrd);
2142 gen_op_iwmmxt_set_mup();
2143 gen_op_iwmmxt_set_cup();
2144 break;
2145 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2146 case 0x014: case 0x414: case 0x814: case 0xc14:
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 gen_op_iwmmxt_movq_M0_wRn(rd0);
2150 if (gen_iwmmxt_shift(insn, 0xff))
2151 return 1;
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 return 1;
2155 case 1:
e677137d 2156 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2157 break;
2158 case 2:
e677137d 2159 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2160 break;
2161 case 3:
e677137d 2162 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2163 break;
2164 }
2165 gen_op_iwmmxt_movq_wRn_M0(wrd);
2166 gen_op_iwmmxt_set_mup();
2167 gen_op_iwmmxt_set_cup();
2168 break;
2169 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2170 case 0x114: case 0x514: case 0x914: case 0xd14:
2171 wrd = (insn >> 12) & 0xf;
2172 rd0 = (insn >> 16) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
2174 if (gen_iwmmxt_shift(insn, 0xff))
2175 return 1;
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 return 1;
2179 case 1:
e677137d 2180 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2181 break;
2182 case 2:
e677137d 2183 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2184 break;
2185 case 3:
e677137d 2186 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2187 break;
2188 }
2189 gen_op_iwmmxt_movq_wRn_M0(wrd);
2190 gen_op_iwmmxt_set_mup();
2191 gen_op_iwmmxt_set_cup();
2192 break;
2193 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2194 case 0x314: case 0x714: case 0xb14: case 0xf14:
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
2198 switch ((insn >> 22) & 3) {
2199 case 0:
2200 return 1;
2201 case 1:
2202 if (gen_iwmmxt_shift(insn, 0xf))
2203 return 1;
e677137d 2204 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2205 break;
2206 case 2:
2207 if (gen_iwmmxt_shift(insn, 0x1f))
2208 return 1;
e677137d 2209 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2210 break;
2211 case 3:
2212 if (gen_iwmmxt_shift(insn, 0x3f))
2213 return 1;
e677137d 2214 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2215 break;
2216 }
2217 gen_op_iwmmxt_movq_wRn_M0(wrd);
2218 gen_op_iwmmxt_set_mup();
2219 gen_op_iwmmxt_set_cup();
2220 break;
2221 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2222 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2223 wrd = (insn >> 12) & 0xf;
2224 rd0 = (insn >> 16) & 0xf;
2225 rd1 = (insn >> 0) & 0xf;
2226 gen_op_iwmmxt_movq_M0_wRn(rd0);
2227 switch ((insn >> 22) & 3) {
2228 case 0:
2229 if (insn & (1 << 21))
2230 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_minub_M0_wRn(rd1);
2233 break;
2234 case 1:
2235 if (insn & (1 << 21))
2236 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2237 else
2238 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2239 break;
2240 case 2:
2241 if (insn & (1 << 21))
2242 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2243 else
2244 gen_op_iwmmxt_minul_M0_wRn(rd1);
2245 break;
2246 case 3:
2247 return 1;
2248 }
2249 gen_op_iwmmxt_movq_wRn_M0(wrd);
2250 gen_op_iwmmxt_set_mup();
2251 break;
2252 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2253 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2254 wrd = (insn >> 12) & 0xf;
2255 rd0 = (insn >> 16) & 0xf;
2256 rd1 = (insn >> 0) & 0xf;
2257 gen_op_iwmmxt_movq_M0_wRn(rd0);
2258 switch ((insn >> 22) & 3) {
2259 case 0:
2260 if (insn & (1 << 21))
2261 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2262 else
2263 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2264 break;
2265 case 1:
2266 if (insn & (1 << 21))
2267 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2268 else
2269 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2270 break;
2271 case 2:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2276 break;
2277 case 3:
2278 return 1;
2279 }
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 break;
2283 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2284 case 0x402: case 0x502: case 0x602: case 0x702:
2285 wrd = (insn >> 12) & 0xf;
2286 rd0 = (insn >> 16) & 0xf;
2287 rd1 = (insn >> 0) & 0xf;
2288 gen_op_iwmmxt_movq_M0_wRn(rd0);
2289 gen_op_movl_T0_im((insn >> 20) & 3);
2290 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 break;
2294 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2295 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2296 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2297 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2298 wrd = (insn >> 12) & 0xf;
2299 rd0 = (insn >> 16) & 0xf;
2300 rd1 = (insn >> 0) & 0xf;
2301 gen_op_iwmmxt_movq_M0_wRn(rd0);
2302 switch ((insn >> 20) & 0xf) {
2303 case 0x0:
2304 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2305 break;
2306 case 0x1:
2307 gen_op_iwmmxt_subub_M0_wRn(rd1);
2308 break;
2309 case 0x3:
2310 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2311 break;
2312 case 0x4:
2313 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2314 break;
2315 case 0x5:
2316 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2317 break;
2318 case 0x7:
2319 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2320 break;
2321 case 0x8:
2322 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2323 break;
2324 case 0x9:
2325 gen_op_iwmmxt_subul_M0_wRn(rd1);
2326 break;
2327 case 0xb:
2328 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2329 break;
2330 default:
2331 return 1;
2332 }
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 gen_op_iwmmxt_set_cup();
2336 break;
2337 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2338 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2339 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2340 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2341 wrd = (insn >> 12) & 0xf;
2342 rd0 = (insn >> 16) & 0xf;
2343 gen_op_iwmmxt_movq_M0_wRn(rd0);
2344 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2345 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2346 gen_op_iwmmxt_movq_wRn_M0(wrd);
2347 gen_op_iwmmxt_set_mup();
2348 gen_op_iwmmxt_set_cup();
2349 break;
2350 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2351 case 0x418: case 0x518: case 0x618: case 0x718:
2352 case 0x818: case 0x918: case 0xa18: case 0xb18:
2353 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2354 wrd = (insn >> 12) & 0xf;
2355 rd0 = (insn >> 16) & 0xf;
2356 rd1 = (insn >> 0) & 0xf;
2357 gen_op_iwmmxt_movq_M0_wRn(rd0);
2358 switch ((insn >> 20) & 0xf) {
2359 case 0x0:
2360 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2361 break;
2362 case 0x1:
2363 gen_op_iwmmxt_addub_M0_wRn(rd1);
2364 break;
2365 case 0x3:
2366 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2367 break;
2368 case 0x4:
2369 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2370 break;
2371 case 0x5:
2372 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2373 break;
2374 case 0x7:
2375 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2376 break;
2377 case 0x8:
2378 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2379 break;
2380 case 0x9:
2381 gen_op_iwmmxt_addul_M0_wRn(rd1);
2382 break;
2383 case 0xb:
2384 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2385 break;
2386 default:
2387 return 1;
2388 }
2389 gen_op_iwmmxt_movq_wRn_M0(wrd);
2390 gen_op_iwmmxt_set_mup();
2391 gen_op_iwmmxt_set_cup();
2392 break;
2393 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2394 case 0x408: case 0x508: case 0x608: case 0x708:
2395 case 0x808: case 0x908: case 0xa08: case 0xb08:
2396 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2397 wrd = (insn >> 12) & 0xf;
2398 rd0 = (insn >> 16) & 0xf;
2399 rd1 = (insn >> 0) & 0xf;
2400 gen_op_iwmmxt_movq_M0_wRn(rd0);
2401 if (!(insn & (1 << 20)))
2402 return 1;
2403 switch ((insn >> 22) & 3) {
2404 case 0:
2405 return 1;
2406 case 1:
2407 if (insn & (1 << 21))
2408 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2409 else
2410 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2411 break;
2412 case 2:
2413 if (insn & (1 << 21))
2414 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2415 else
2416 gen_op_iwmmxt_packul_M0_wRn(rd1);
2417 break;
2418 case 3:
2419 if (insn & (1 << 21))
2420 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2421 else
2422 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2423 break;
2424 }
2425 gen_op_iwmmxt_movq_wRn_M0(wrd);
2426 gen_op_iwmmxt_set_mup();
2427 gen_op_iwmmxt_set_cup();
2428 break;
2429 case 0x201: case 0x203: case 0x205: case 0x207:
2430 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2431 case 0x211: case 0x213: case 0x215: case 0x217:
2432 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2433 wrd = (insn >> 5) & 0xf;
2434 rd0 = (insn >> 12) & 0xf;
2435 rd1 = (insn >> 0) & 0xf;
2436 if (rd0 == 0xf || rd1 == 0xf)
2437 return 1;
2438 gen_op_iwmmxt_movq_M0_wRn(wrd);
2439 switch ((insn >> 16) & 0xf) {
2440 case 0x0: /* TMIA */
b26eefb6
PB
2441 gen_movl_T0_reg(s, rd0);
2442 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2443 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2444 break;
2445 case 0x8: /* TMIAPH */
b26eefb6
PB
2446 gen_movl_T0_reg(s, rd0);
2447 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2448 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2449 break;
2450 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2451 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2452 if (insn & (1 << 16))
2453 gen_op_shrl_T1_im(16);
2454 gen_op_movl_T0_T1();
b26eefb6 2455 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2456 if (insn & (1 << 17))
2457 gen_op_shrl_T1_im(16);
2458 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2459 break;
2460 default:
2461 return 1;
2462 }
2463 gen_op_iwmmxt_movq_wRn_M0(wrd);
2464 gen_op_iwmmxt_set_mup();
2465 break;
2466 default:
2467 return 1;
2468 }
2469
2470 return 0;
2471}
2472
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  Covers the MIA* multiply-accumulate
   family and the MAR/MRA accumulator transfer instructions; the 40-bit
   DSP accumulator is modelled on top of the iwmmxt M0 register.  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 is implemented.  */
        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                       /* MIAPH */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            /* Bits 16 and 17 select the top (shift right 16) or bottom
               halfword of each source operand.  */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        /* Only accumulator 0 is implemented.  */
        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            /* Accumulator -> register pair.  The accumulator is 40 bits
               wide, so the high word is masked down to bits 39:32.  */
            gen_iwmmxt_movl_T0_T1_wRn(acc);
            gen_movl_reg_T0(s, rdlo);
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_movl_reg_T0(s, rdhi);
        } else {                                        /* MAR */
            /* Register pair -> accumulator.  */
            gen_movl_T0_reg(s, rdlo);
            gen_movl_T1_reg(s, rdhi);
            gen_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
2545
c1713132
AZ
/* Disassemble system coprocessor instruction.  Return nonzero if the
   instruction is not defined.  Generic (board-registered) coprocessors
   are dispatched through the cp_read/cp_write hooks in env->cp[].  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    /* Generic coprocessor accesses are privileged-only.  */
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* Coprocessor read: result is stored to register rd.  Undefined
           if the board installed no read hook for this coprocessor.  */
        if (!env->cp[cp].cp_read)
            return 1;
        /* Sync PC before calling out so the helper sees the correct
           instruction address.  */
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
        store_reg(s, rd, tmp);
    } else {
        /* Coprocessor write: value comes from register rd.  */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
    }
    return 0;
}
2574
9ee6e8bb
PB
2575static int cp15_user_ok(uint32_t insn)
2576{
2577 int cpn = (insn >> 16) & 0xf;
2578 int cpm = insn & 0xf;
2579 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2580
2581 if (cpn == 13 && cpm == 0) {
2582 /* TLS register. */
2583 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2584 return 1;
2585 }
2586 if (cpn == 7) {
2587 /* ISB, DSB, DMB. */
2588 if ((cpm == 5 && op == 4)
2589 || (cpm == 10 && (op == 4 || op == 5)))
2590 return 1;
2591 }
2592 return 0;
2593}
2594
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  Handles WFI specially; all other accesses
   go through the get_cp15/set_cp15 helpers.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    /* User-mode access only for the few registers cp15_user_ok allows.  */
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  Sync PC and leave the translation loop.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        /* cp15 read.  */
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        /* cp15 write.  A write may change the MMU/translation state, so
           the TB normally ends here via gen_lookup_tb.  */
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
2650
9ee6e8bb
PB
/* Shift right for positive n, left for negative n: lets one macro handle
   register fields on either side of their final bit position.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: 4-bit field at `bigbit`
   plus an extra low-order bit at `smallbit`.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into `reg`.  On VFP3 the
   extra bit extends the range to 32 registers; pre-VFP3 that bit must be
   zero or the access is undefined.  NOTE: expands to a `return 1`, so it
   is only usable inside the disas_* functions.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* The D, N and M operand fields of a VFP instruction.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2670
4373f3ce
PB
2671/* Move between integer and VFP cores. */
2672static TCGv gen_vfp_mrs(void)
2673{
2674 TCGv tmp = new_tmp();
2675 tcg_gen_mov_i32(tmp, cpu_F0s);
2676 return tmp;
2677}
2678
2679static void gen_vfp_msr(TCGv tmp)
2680{
2681 tcg_gen_mov_i32(cpu_F0s, tmp);
2682 dead_tmp(tmp);
2683}
2684
9ee6e8bb
PB
2685static inline int
2686vfp_enabled(CPUState * env)
2687{
2688 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2689}
2690
ad69471c
PB
2691static void gen_neon_dup_u8(TCGv var, int shift)
2692{
2693 TCGv tmp = new_tmp();
2694 if (shift)
2695 tcg_gen_shri_i32(var, var, shift);
86831435 2696 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2697 tcg_gen_shli_i32(tmp, var, 8);
2698 tcg_gen_or_i32(var, var, tmp);
2699 tcg_gen_shli_i32(tmp, var, 16);
2700 tcg_gen_or_i32(var, var, tmp);
2701 dead_tmp(tmp);
2702}
2703
2704static void gen_neon_dup_low16(TCGv var)
2705{
2706 TCGv tmp = new_tmp();
86831435 2707 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2708 tcg_gen_shli_i32(tmp, var, 16);
2709 tcg_gen_or_i32(var, var, tmp);
2710 dead_tmp(tmp);
2711}
2712
2713static void gen_neon_dup_high16(TCGv var)
2714{
2715 TCGv tmp = new_tmp();
2716 tcg_gen_andi_i32(var, var, 0xffff0000);
2717 tcg_gen_shri_i32(tmp, var, 16);
2718 tcg_gen_or_i32(var, var, tmp);
2719 dead_tmp(tmp);
2720}
2721
b7bcbe95
FB
/* Disassemble a VFP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  Covers single-register and
   two-register transfers, data-processing (including short-vector
   operation via vec_len/vec_stride), and load/store (single and
   multiple).  `dp' is set for double-precision encodings.  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;
    TCGv tmp;
    TCGv tmp2;

    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    /* Double precision encodings have 0xb in bits 11:8.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                /* Element-size / lane variants require Neon.  */
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                /* Decode element size and bit offset within the 32-bit
                   word; `pass' selects which half of the D register.  */
                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm: extract one element and sign/zero extend
                       it (bit 23 selects unsigned).  */
                    tmp = neon_load_reg(rn, pass);
                    switch (size) {
                    case 0:
                        if (offset)
                            tcg_gen_shri_i32(tmp, tmp, offset);
                        if (insn & (1 << 23))
                            gen_uxtb(tmp);
                        else
                            gen_sxtb(tmp);
                        break;
                    case 1:
                        if (insn & (1 << 23)) {
                            if (offset) {
                                tcg_gen_shri_i32(tmp, tmp, 16);
                            } else {
                                gen_uxth(tmp);
                            }
                        } else {
                            if (offset) {
                                tcg_gen_sari_i32(tmp, tmp, 16);
                            } else {
                                gen_sxth(tmp);
                            }
                        }
                        break;
                    case 2:
                        /* Whole word: no extension needed.  */
                        break;
                    }
                    store_reg(s, rd, tmp);
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP: replicate the value into both words of
                           the D register.  */
                        if (size == 0) {
                            gen_neon_dup_u8(tmp, 0);
                        } else if (size == 1) {
                            gen_neon_dup_low16(tmp);
                        }
                        tmp2 = new_tmp();
                        tcg_gen_mov_i32(tmp2, tmp);
                        neon_store_reg(rn, 0, tmp2);
                        neon_store_reg(rn, 1, tmp);
                    } else {
                        /* VMOV: insert one element, preserving the rest
                           of the word via bitfield insert.  */
                        switch (size) {
                        case 0:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xff);
                            dead_tmp(tmp2);
                            break;
                        case 1:
                            tmp2 = neon_load_reg(rn, pass);
                            gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
                            dead_tmp(tmp2);
                            break;
                        case 2:
                            break;
                        }
                        neon_store_reg(rn, pass, tmp);
                    }
                }
            } else { /* !dp */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                /* FMSTAT: only the flag bits are read.  */
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = new_tmp();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(tmp);
                        dead_tmp(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    tmp = load_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            /* FPSCR changes rounding/vector state, so
                               end the TB.  */
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            dead_tmp(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector length; extension-space ops other than
               cpy/abs/neg/sqrt are always scalar.  */
            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop over the vector elements (a scalar op runs the body
               once and breaks out when veclen == 0).  */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst (VFP3 immediate constant) */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    /* Expand the 8-bit immediate (sign, 3-bit exponent
                       fragment, 4-bit mantissa) into a full float.  */
                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                        tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                        tcg_gen_movi_i32(cpu_F0s, n);
                    }
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
                        else
                            gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto (VFP3 fixed-point conversions) */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion: destination precision is the opposite
                       of the source's.  */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many: store the same result to
                       each remaining destination register.  */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  Register numbers advance by
                   the stride but wrap within their 4/8-register bank.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(0, rm * 2);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                    gen_mov_F0_vreg(0, rm * 2 + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rn, tmp);
                    gen_mov_F0_vreg(0, rm + 1);
                    tmp = gen_vfp_mrs();
                    store_reg(s, rd, tmp);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2);
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm * 2 + 1);
                } else {
                    tmp = load_reg(s, rn);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm);
                    tmp = load_reg(s, rd);
                    gen_vfp_msr(tmp);
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                /* PC-relative in Thumb: the base is the word-aligned PC.  */
                gen_op_movl_T1_im(s->pc & ~2);
            } else {
                gen_movl_T1_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                /* Bytes per register transferred.  */
                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
3377
6e256c93 3378static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3379{
6e256c93
FB
3380 TranslationBlock *tb;
3381
3382 tb = s->tb;
3383 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3384 tcg_gen_goto_tb(n);
8984bd2e 3385 gen_set_pc_im(dest);
57fec1fe 3386 tcg_gen_exit_tb((long)tb + n);
6e256c93 3387 } else {
8984bd2e 3388 gen_set_pc_im(dest);
57fec1fe 3389 tcg_gen_exit_tb(0);
6e256c93 3390 }
c53be334
FB
3391}
3392
8aaca4c0
FB
3393static inline void gen_jmp (DisasContext *s, uint32_t dest)
3394{
551bd27f 3395 if (unlikely(s->singlestep_enabled)) {
8aaca4c0 3396 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3397 if (s->thumb)
d9ba4830
PB
3398 dest |= 1;
3399 gen_bx_im(s, dest);
8aaca4c0 3400 } else {
6e256c93 3401 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3402 s->is_jmp = DISAS_TB_JUMP;
3403 }
3404}
3405
d9ba4830 3406static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3407{
ee097184 3408 if (x)
d9ba4830 3409 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3410 else
d9ba4830 3411 gen_sxth(t0);
ee097184 3412 if (y)
d9ba4830 3413 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3414 else
d9ba4830
PB
3415 gen_sxth(t1);
3416 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3417}
3418
3419/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3420static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3421 uint32_t mask;
3422
3423 mask = 0;
3424 if (flags & (1 << 0))
3425 mask |= 0xff;
3426 if (flags & (1 << 1))
3427 mask |= 0xff00;
3428 if (flags & (1 << 2))
3429 mask |= 0xff0000;
3430 if (flags & (1 << 3))
3431 mask |= 0xff000000;
9ee6e8bb 3432
2ae23e75 3433 /* Mask out undefined bits. */
9ee6e8bb
PB
3434 mask &= ~CPSR_RESERVED;
3435 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3436 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3437 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3438 mask &= ~CPSR_IT;
9ee6e8bb 3439 /* Mask out execution state bits. */
2ae23e75 3440 if (!spsr)
e160c51c 3441 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3442 /* Mask out privileged bits. */
3443 if (IS_USER(s))
9ee6e8bb 3444 mask &= CPSR_USER;
b5ff1b31
FB
3445 return mask;
3446}
3447
3448/* Returns nonzero if access to the PSR is not permitted. */
3449static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3450{
d9ba4830 3451 TCGv tmp;
b5ff1b31
FB
3452 if (spsr) {
3453 /* ??? This is also undefined in system mode. */
3454 if (IS_USER(s))
3455 return 1;
d9ba4830
PB
3456
3457 tmp = load_cpu_field(spsr);
3458 tcg_gen_andi_i32(tmp, tmp, ~mask);
3459 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3460 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3461 store_cpu_field(tmp, spsr);
b5ff1b31 3462 } else {
d9ba4830 3463 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3464 }
3465 gen_lookup_tb(s);
3466 return 0;
3467}
3468
/* Generate an old-style exception return: branch to the address in T0 and
   restore the CPSR from the current mode's SPSR.  */
static void gen_exception_return(DisasContext *s)
{
    TCGv tmp;
    /* The return address was placed in T0 by the caller.  */
    gen_movl_reg_T0(s, 15);
    /* Restore every CPSR bit (mask 0xffffffff), including the mode bits,
       from the SPSR.  */
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    /* CPU state changed in a way the translator cannot track: end the TB.  */
    s->is_jmp = DISAS_UPDATE;
}
3479
b0109805
PB
/* Generate a v6 exception return.  Marks both values as dead.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    /* Restore the full CPSR first (this may switch mode and banked
       registers), then branch to the restored return address.  */
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    /* CPU state changed: end the TB and re-evaluate.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3488
9ee6e8bb
PB
3489static inline void
3490gen_set_condexec (DisasContext *s)
3491{
3492 if (s->condexec_mask) {
8f01245e
PB
3493 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3494 TCGv tmp = new_tmp();
3495 tcg_gen_movi_i32(tmp, val);
d9ba4830 3496 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3497 }
3498}
3b46e624 3499
9ee6e8bb
PB
3500static void gen_nop_hint(DisasContext *s, int val)
3501{
3502 switch (val) {
3503 case 3: /* wfi */
8984bd2e 3504 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3505 s->is_jmp = DISAS_WFI;
3506 break;
3507 case 2: /* wfe */
3508 case 4: /* sev */
3509 /* TODO: Implement SEV and WFE. May help SMP performance. */
3510 default: /* nop */
3511 break;
3512 }
3513}
99c475ab 3514
ad69471c
PB
3515/* These macros help make the code more readable when migrating from the
3516 old dyngen helpers. They should probably be removed when
3517 T0/T1 are removed. */
3518#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3519#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
9ee6e8bb 3520
ad69471c 3521#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3522
3523static inline int gen_neon_add(int size)
3524{
3525 switch (size) {
ad69471c
PB
3526 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3527 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3528 case 2: gen_op_addl_T0_T1(); break;
3529 default: return 1;
3530 }
3531 return 0;
3532}
3533
ad69471c
PB
3534static inline void gen_neon_rsb(int size)
3535{
3536 switch (size) {
3537 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3538 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3539 case 2: gen_op_rsbl_T0_T1(); break;
3540 default: return;
3541 }
3542}
3543
3544/* 32-bit pairwise ops end up the same as the elementwise versions. */
3545#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3546#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3547#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3548#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3549
3550/* FIXME: This is wrong. They set the wrong overflow bit. */
3551#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3552#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3553#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3554#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3555
/* Dispatch a two-operand Neon integer helper that takes cpu_env, selecting
   the s8/u8/s16/u16/s32/u32 variant from (size << 1) | u:
       T0 = helper(env, T0, T1)
   The default case expands to "return 1" (undefined instruction), so this
   macro may only be used inside functions returning int.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)

/* Same size/signedness dispatch for helpers that do not take cpu_env:
       T0 = helper(T0, T1)
   Also expands "default" to "return 1" -- int-returning functions only.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
3601
3602static inline void
3603gen_neon_movl_scratch_T0(int scratch)
3604{
3605 uint32_t offset;
3606
3607 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3608 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3609}
3610
3611static inline void
3612gen_neon_movl_scratch_T1(int scratch)
3613{
3614 uint32_t offset;
3615
3616 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3617 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3618}
3619
3620static inline void
3621gen_neon_movl_T0_scratch(int scratch)
3622{
3623 uint32_t offset;
3624
3625 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3626 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3627}
3628
3629static inline void
3630gen_neon_movl_T1_scratch(int scratch)
3631{
3632 uint32_t offset;
3633
3634 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3635 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3636}
3637
/* Load a Neon scalar operand into T0, replicated across the 32-bit word.
   REG packs both the D register number and the element index.
   NOTE(review): in the 16-bit path, which half (low/high) corresponds to
   reg & 1 depends on how the two-register-scalar decoder packs the index
   into REG -- confirm against the caller before changing either branch.  */
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        /* 32-bit scalar: one word of the D register, no duplication.  */
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        /* 16-bit scalar: pick the word, then broadcast one halfword.  */
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1)
            gen_neon_dup_low16(cpu_T[0]);
        else
            gen_neon_dup_high16(cpu_T[0]);
    }
}
3650
3651static void gen_neon_unzip(int reg, int q, int tmp, int size)
3652{
3653 int n;
3654
3655 for (n = 0; n < q + 1; n += 2) {
3656 NEON_GET_REG(T0, reg, n);
3657 NEON_GET_REG(T0, reg, n + n);
3658 switch (size) {
ad69471c
PB
3659 case 0: gen_helper_neon_unzip_u8(); break;
3660 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3661 case 2: /* no-op */; break;
3662 default: abort();
3663 }
3664 gen_neon_movl_scratch_T0(tmp + n);
3665 gen_neon_movl_scratch_T1(tmp + n + 1);
3666 }
3667}
3668
/* Per-opcode parameters for the VLDn/VSTn "load/store all elements"
   forms, indexed by the instruction's op field (0..10).  */
static struct {
    int nregs;      /* number of D registers transferred */
    int interleave; /* element interleave factor */
    int spacing;    /* register spacing: 1 = Dn,Dn+1..., 2 = Dn,Dn+2... */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3686
3687/* Translate a NEON load/store element instruction. Return nonzero if the
3688 instruction is invalid. */
3689static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3690{
3691 int rd, rn, rm;
3692 int op;
3693 int nregs;
3694 int interleave;
3695 int stride;
3696 int size;
3697 int reg;
3698 int pass;
3699 int load;
3700 int shift;
9ee6e8bb 3701 int n;
b0109805 3702 TCGv tmp;
8f8e3aa4 3703 TCGv tmp2;
9ee6e8bb
PB
3704
3705 if (!vfp_enabled(env))
3706 return 1;
3707 VFP_DREG_D(rd, insn);
3708 rn = (insn >> 16) & 0xf;
3709 rm = insn & 0xf;
3710 load = (insn & (1 << 21)) != 0;
3711 if ((insn & (1 << 23)) == 0) {
3712 /* Load store all elements. */
3713 op = (insn >> 8) & 0xf;
3714 size = (insn >> 6) & 3;
3715 if (op > 10 || size == 3)
3716 return 1;
3717 nregs = neon_ls_element_type[op].nregs;
3718 interleave = neon_ls_element_type[op].interleave;
3719 gen_movl_T1_reg(s, rn);
3720 stride = (1 << size) * interleave;
3721 for (reg = 0; reg < nregs; reg++) {
3722 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3723 gen_movl_T1_reg(s, rn);
3724 gen_op_addl_T1_im((1 << size) * reg);
3725 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3726 gen_movl_T1_reg(s, rn);
3727 gen_op_addl_T1_im(1 << size);
3728 }
3729 for (pass = 0; pass < 2; pass++) {
3730 if (size == 2) {
3731 if (load) {
b0109805 3732 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3733 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3734 } else {
ad69471c 3735 tmp = neon_load_reg(rd, pass);
b0109805 3736 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3737 }
3738 gen_op_addl_T1_im(stride);
3739 } else if (size == 1) {
3740 if (load) {
b0109805 3741 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3742 gen_op_addl_T1_im(stride);
8f8e3aa4 3743 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3744 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3745 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3746 dead_tmp(tmp2);
3747 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3748 } else {
8f8e3aa4
PB
3749 tmp = neon_load_reg(rd, pass);
3750 tmp2 = new_tmp();
3751 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3752 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3753 gen_op_addl_T1_im(stride);
8f8e3aa4 3754 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3755 gen_op_addl_T1_im(stride);
3756 }
3757 } else /* size == 0 */ {
3758 if (load) {
a50f5b91 3759 TCGV_UNUSED(tmp2);
9ee6e8bb 3760 for (n = 0; n < 4; n++) {
b0109805 3761 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3762 gen_op_addl_T1_im(stride);
3763 if (n == 0) {
8f8e3aa4 3764 tmp2 = tmp;
9ee6e8bb 3765 } else {
8f8e3aa4
PB
3766 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3767 dead_tmp(tmp);
9ee6e8bb 3768 }
9ee6e8bb 3769 }
8f8e3aa4 3770 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3771 } else {
8f8e3aa4 3772 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3773 for (n = 0; n < 4; n++) {
8f8e3aa4 3774 tmp = new_tmp();
9ee6e8bb 3775 if (n == 0) {
8f8e3aa4 3776 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3777 } else {
8f8e3aa4 3778 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3779 }
b0109805 3780 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3781 gen_op_addl_T1_im(stride);
9ee6e8bb 3782 }
8f8e3aa4 3783 dead_tmp(tmp2);
9ee6e8bb
PB
3784 }
3785 }
3786 }
3787 rd += neon_ls_element_type[op].spacing;
3788 }
3789 stride = nregs * 8;
3790 } else {
3791 size = (insn >> 10) & 3;
3792 if (size == 3) {
3793 /* Load single element to all lanes. */
3794 if (!load)
3795 return 1;
3796 size = (insn >> 6) & 3;
3797 nregs = ((insn >> 8) & 3) + 1;
3798 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3799 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3800 for (reg = 0; reg < nregs; reg++) {
3801 switch (size) {
3802 case 0:
b0109805 3803 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3804 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3805 break;
3806 case 1:
b0109805 3807 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3808 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3809 break;
3810 case 2:
b0109805 3811 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3812 break;
3813 case 3:
3814 return 1;
a50f5b91
PB
3815 default: /* Avoid compiler warnings. */
3816 abort();
99c475ab 3817 }
9ee6e8bb 3818 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3819 tmp2 = new_tmp();
3820 tcg_gen_mov_i32(tmp2, tmp);
3821 neon_store_reg(rd, 0, tmp2);
3018f259 3822 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
3823 rd += stride;
3824 }
3825 stride = (1 << size) * nregs;
3826 } else {
3827 /* Single element. */
3828 pass = (insn >> 7) & 1;
3829 switch (size) {
3830 case 0:
3831 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3832 stride = 1;
3833 break;
3834 case 1:
3835 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3836 stride = (insn & (1 << 5)) ? 2 : 1;
3837 break;
3838 case 2:
3839 shift = 0;
9ee6e8bb
PB
3840 stride = (insn & (1 << 6)) ? 2 : 1;
3841 break;
3842 default:
3843 abort();
3844 }
3845 nregs = ((insn >> 8) & 3) + 1;
3846 gen_movl_T1_reg(s, rn);
3847 for (reg = 0; reg < nregs; reg++) {
3848 if (load) {
9ee6e8bb
PB
3849 switch (size) {
3850 case 0:
b0109805 3851 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3852 break;
3853 case 1:
b0109805 3854 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3855 break;
3856 case 2:
b0109805 3857 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb 3858 break;
a50f5b91
PB
3859 default: /* Avoid compiler warnings. */
3860 abort();
9ee6e8bb
PB
3861 }
3862 if (size != 2) {
8f8e3aa4
PB
3863 tmp2 = neon_load_reg(rd, pass);
3864 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3865 dead_tmp(tmp2);
9ee6e8bb 3866 }
8f8e3aa4 3867 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3868 } else { /* Store */
8f8e3aa4
PB
3869 tmp = neon_load_reg(rd, pass);
3870 if (shift)
3871 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3872 switch (size) {
3873 case 0:
b0109805 3874 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3875 break;
3876 case 1:
b0109805 3877 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3878 break;
3879 case 2:
b0109805 3880 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3881 break;
99c475ab 3882 }
99c475ab 3883 }
9ee6e8bb
PB
3884 rd += stride;
3885 gen_op_addl_T1_im(1 << size);
99c475ab 3886 }
9ee6e8bb 3887 stride = nregs * (1 << size);
99c475ab 3888 }
9ee6e8bb
PB
3889 }
3890 if (rm != 15) {
b26eefb6
PB
3891 TCGv base;
3892
3893 base = load_reg(s, rn);
9ee6e8bb 3894 if (rm == 13) {
b26eefb6 3895 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3896 } else {
b26eefb6
PB
3897 TCGv index;
3898 index = load_reg(s, rm);
3899 tcg_gen_add_i32(base, base, index);
3900 dead_tmp(index);
9ee6e8bb 3901 }
b26eefb6 3902 store_reg(s, rn, base);
9ee6e8bb
PB
3903 }
3904 return 0;
3905}
3b46e624 3906
8f8e3aa4
PB
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    /* Computed in place as dest = (t & c) | (f & ~c); the intermediate
       results overwrite t and f, so callers must pass dead temps.  */
    tcg_gen_and_i32(t, t, c);
    tcg_gen_bic_i32(f, f, c);
    tcg_gen_or_i32(dest, t, f);
}
3914
a7812ae4 3915static inline void gen_neon_narrow(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3916{
3917 switch (size) {
3918 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3919 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3920 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3921 default: abort();
3922 }
3923}
3924
a7812ae4 3925static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3926{
3927 switch (size) {
3928 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3929 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3930 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3931 default: abort();
3932 }
3933}
3934
a7812ae4 3935static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
ad69471c
PB
3936{
3937 switch (size) {
3938 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3939 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3940 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3941 default: abort();
3942 }
3943}
3944
3945static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3946 int q, int u)
3947{
3948 if (q) {
3949 if (u) {
3950 switch (size) {
3951 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3952 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3953 default: abort();
3954 }
3955 } else {
3956 switch (size) {
3957 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3958 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3959 default: abort();
3960 }
3961 }
3962 } else {
3963 if (u) {
3964 switch (size) {
3965 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3966 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3967 default: abort();
3968 }
3969 } else {
3970 switch (size) {
3971 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3972 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3973 default: abort();
3974 }
3975 }
3976 }
3977}
3978
a7812ae4 3979static inline void gen_neon_widen(TCGv_i64 dest, TCGv src, int size, int u)
ad69471c
PB
3980{
3981 if (u) {
3982 switch (size) {
3983 case 0: gen_helper_neon_widen_u8(dest, src); break;
3984 case 1: gen_helper_neon_widen_u16(dest, src); break;
3985 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3986 default: abort();
3987 }
3988 } else {
3989 switch (size) {
3990 case 0: gen_helper_neon_widen_s8(dest, src); break;
3991 case 1: gen_helper_neon_widen_s16(dest, src); break;
3992 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3993 default: abort();
3994 }
3995 }
3996 dead_tmp(src);
3997}
3998
3999static inline void gen_neon_addl(int size)
4000{
4001 switch (size) {
4002 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4003 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4004 case 2: tcg_gen_add_i64(CPU_V001); break;
4005 default: abort();
4006 }
4007}
4008
4009static inline void gen_neon_subl(int size)
4010{
4011 switch (size) {
4012 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4013 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4014 case 2: tcg_gen_sub_i64(CPU_V001); break;
4015 default: abort();
4016 }
4017}
4018
a7812ae4 4019static inline void gen_neon_negl(TCGv_i64 var, int size)
ad69471c
PB
4020{
4021 switch (size) {
4022 case 0: gen_helper_neon_negl_u16(var, var); break;
4023 case 1: gen_helper_neon_negl_u32(var, var); break;
4024 case 2: gen_helper_neon_negl_u64(var, var); break;
4025 default: abort();
4026 }
4027}
4028
a7812ae4 4029static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
ad69471c
PB
4030{
4031 switch (size) {
4032 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4033 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4034 default: abort();
4035 }
4036}
4037
/* Widening multiply: DEST (64-bit) = A * B, with signedness/width chosen
   by (size << 1) | u.  For the 8/16-bit cases the inputs are freed here;
   the 32-bit cases go through the generic gen_mul[su]_i64_i32 helpers.  */
static inline void gen_neon_mull(TCGv_i64 dest, TCGv a, TCGv b, int size, int u)
{
    TCGv_i64 tmp;

    switch ((size << 1) | u) {
    case 0: gen_helper_neon_mull_s8(dest, a, b); break;
    case 1: gen_helper_neon_mull_u8(dest, a, b); break;
    case 2: gen_helper_neon_mull_s16(dest, a, b); break;
    case 3: gen_helper_neon_mull_u16(dest, a, b); break;
    case 4:
        /* 32x32->64 signed via the scalar helper, then copy to dest.  */
        tmp = gen_muls_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    case 5:
        /* 32x32->64 unsigned.  */
        tmp = gen_mulu_i64_i32(a, b);
        tcg_gen_mov_i64(dest, tmp);
        break;
    default: abort();
    }
    /* The 32-bit multiply helpers consume their operands themselves.  */
    if (size < 2) {
        dead_tmp(b);
        dead_tmp(a);
    }
}
4062
9ee6e8bb
PB
4063/* Translate a NEON data processing instruction. Return nonzero if the
4064 instruction is invalid.
ad69471c
PB
4065 We process data in a mixture of 32-bit and 64-bit chunks.
4066 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4067
9ee6e8bb
PB
4068static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4069{
4070 int op;
4071 int q;
4072 int rd, rn, rm;
4073 int size;
4074 int shift;
4075 int pass;
4076 int count;
4077 int pairwise;
4078 int u;
4079 int n;
4080 uint32_t imm;
8f8e3aa4
PB
4081 TCGv tmp;
4082 TCGv tmp2;
4083 TCGv tmp3;
a7812ae4 4084 TCGv_i64 tmp64;
9ee6e8bb
PB
4085
4086 if (!vfp_enabled(env))
4087 return 1;
4088 q = (insn & (1 << 6)) != 0;
4089 u = (insn >> 24) & 1;
4090 VFP_DREG_D(rd, insn);
4091 VFP_DREG_N(rn, insn);
4092 VFP_DREG_M(rm, insn);
4093 size = (insn >> 20) & 3;
4094 if ((insn & (1 << 23)) == 0) {
4095 /* Three register same length. */
4096 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4097 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4098 || op == 10 || op == 11 || op == 16)) {
4099 /* 64-bit element instructions. */
9ee6e8bb 4100 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4101 neon_load_reg64(cpu_V0, rn + pass);
4102 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4103 switch (op) {
4104 case 1: /* VQADD */
4105 if (u) {
ad69471c 4106 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4107 } else {
ad69471c 4108 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4109 }
9ee6e8bb
PB
4110 break;
4111 case 5: /* VQSUB */
4112 if (u) {
ad69471c
PB
4113 gen_helper_neon_sub_saturate_u64(CPU_V001);
4114 } else {
4115 gen_helper_neon_sub_saturate_s64(CPU_V001);
4116 }
4117 break;
4118 case 8: /* VSHL */
4119 if (u) {
4120 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4121 } else {
4122 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4123 }
4124 break;
4125 case 9: /* VQSHL */
4126 if (u) {
4127 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4128 cpu_V0, cpu_V0);
4129 } else {
4130 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4131 cpu_V1, cpu_V0);
4132 }
4133 break;
4134 case 10: /* VRSHL */
4135 if (u) {
4136 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4137 } else {
ad69471c
PB
4138 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4139 }
4140 break;
4141 case 11: /* VQRSHL */
4142 if (u) {
4143 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4144 cpu_V1, cpu_V0);
4145 } else {
4146 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4147 cpu_V1, cpu_V0);
1e8d4eec 4148 }
9ee6e8bb
PB
4149 break;
4150 case 16:
4151 if (u) {
ad69471c 4152 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4153 } else {
ad69471c 4154 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4155 }
4156 break;
4157 default:
4158 abort();
2c0262af 4159 }
ad69471c 4160 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4161 }
9ee6e8bb 4162 return 0;
2c0262af 4163 }
9ee6e8bb
PB
4164 switch (op) {
4165 case 8: /* VSHL */
4166 case 9: /* VQSHL */
4167 case 10: /* VRSHL */
ad69471c 4168 case 11: /* VQRSHL */
9ee6e8bb 4169 {
ad69471c
PB
4170 int rtmp;
4171 /* Shift instruction operands are reversed. */
4172 rtmp = rn;
9ee6e8bb 4173 rn = rm;
ad69471c 4174 rm = rtmp;
9ee6e8bb
PB
4175 pairwise = 0;
4176 }
2c0262af 4177 break;
9ee6e8bb
PB
4178 case 20: /* VPMAX */
4179 case 21: /* VPMIN */
4180 case 23: /* VPADD */
4181 pairwise = 1;
2c0262af 4182 break;
9ee6e8bb
PB
4183 case 26: /* VPADD (float) */
4184 pairwise = (u && size < 2);
2c0262af 4185 break;
9ee6e8bb
PB
4186 case 30: /* VPMIN/VPMAX (float) */
4187 pairwise = u;
2c0262af 4188 break;
9ee6e8bb
PB
4189 default:
4190 pairwise = 0;
2c0262af 4191 break;
9ee6e8bb
PB
4192 }
4193 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4194
4195 if (pairwise) {
4196 /* Pairwise. */
4197 if (q)
4198 n = (pass & 1) * 2;
2c0262af 4199 else
9ee6e8bb
PB
4200 n = 0;
4201 if (pass < q + 1) {
4202 NEON_GET_REG(T0, rn, n);
4203 NEON_GET_REG(T1, rn, n + 1);
4204 } else {
4205 NEON_GET_REG(T0, rm, n);
4206 NEON_GET_REG(T1, rm, n + 1);
4207 }
4208 } else {
4209 /* Elementwise. */
4210 NEON_GET_REG(T0, rn, pass);
4211 NEON_GET_REG(T1, rm, pass);
4212 }
4213 switch (op) {
4214 case 0: /* VHADD */
4215 GEN_NEON_INTEGER_OP(hadd);
4216 break;
4217 case 1: /* VQADD */
ad69471c 4218 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4219 break;
9ee6e8bb
PB
4220 case 2: /* VRHADD */
4221 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4222 break;
9ee6e8bb
PB
4223 case 3: /* Logic ops. */
4224 switch ((u << 2) | size) {
4225 case 0: /* VAND */
2c0262af 4226 gen_op_andl_T0_T1();
9ee6e8bb
PB
4227 break;
4228 case 1: /* BIC */
4229 gen_op_bicl_T0_T1();
4230 break;
4231 case 2: /* VORR */
4232 gen_op_orl_T0_T1();
4233 break;
4234 case 3: /* VORN */
4235 gen_op_notl_T1();
4236 gen_op_orl_T0_T1();
4237 break;
4238 case 4: /* VEOR */
4239 gen_op_xorl_T0_T1();
4240 break;
4241 case 5: /* VBSL */
8f8e3aa4
PB
4242 tmp = neon_load_reg(rd, pass);
4243 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4244 dead_tmp(tmp);
9ee6e8bb
PB
4245 break;
4246 case 6: /* VBIT */
8f8e3aa4
PB
4247 tmp = neon_load_reg(rd, pass);
4248 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4249 dead_tmp(tmp);
9ee6e8bb
PB
4250 break;
4251 case 7: /* VBIF */
8f8e3aa4
PB
4252 tmp = neon_load_reg(rd, pass);
4253 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4254 dead_tmp(tmp);
9ee6e8bb 4255 break;
2c0262af
FB
4256 }
4257 break;
9ee6e8bb
PB
4258 case 4: /* VHSUB */
4259 GEN_NEON_INTEGER_OP(hsub);
4260 break;
4261 case 5: /* VQSUB */
ad69471c 4262 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4263 break;
9ee6e8bb
PB
4264 case 6: /* VCGT */
4265 GEN_NEON_INTEGER_OP(cgt);
4266 break;
4267 case 7: /* VCGE */
4268 GEN_NEON_INTEGER_OP(cge);
4269 break;
4270 case 8: /* VSHL */
ad69471c 4271 GEN_NEON_INTEGER_OP(shl);
2c0262af 4272 break;
9ee6e8bb 4273 case 9: /* VQSHL */
ad69471c 4274 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4275 break;
9ee6e8bb 4276 case 10: /* VRSHL */
ad69471c 4277 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4278 break;
9ee6e8bb 4279 case 11: /* VQRSHL */
ad69471c 4280 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4281 break;
4282 case 12: /* VMAX */
4283 GEN_NEON_INTEGER_OP(max);
4284 break;
4285 case 13: /* VMIN */
4286 GEN_NEON_INTEGER_OP(min);
4287 break;
4288 case 14: /* VABD */
4289 GEN_NEON_INTEGER_OP(abd);
4290 break;
4291 case 15: /* VABA */
4292 GEN_NEON_INTEGER_OP(abd);
4293 NEON_GET_REG(T1, rd, pass);
4294 gen_neon_add(size);
4295 break;
4296 case 16:
4297 if (!u) { /* VADD */
4298 if (gen_neon_add(size))
4299 return 1;
4300 } else { /* VSUB */
4301 switch (size) {
ad69471c
PB
4302 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4303 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4304 case 2: gen_op_subl_T0_T1(); break;
4305 default: return 1;
4306 }
4307 }
4308 break;
4309 case 17:
4310 if (!u) { /* VTST */
4311 switch (size) {
ad69471c
PB
4312 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4313 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4314 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4315 default: return 1;
4316 }
4317 } else { /* VCEQ */
4318 switch (size) {
ad69471c
PB
4319 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4320 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4321 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4322 default: return 1;
4323 }
4324 }
4325 break;
4326 case 18: /* Multiply. */
4327 switch (size) {
ad69471c
PB
4328 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4329 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4330 case 2: gen_op_mul_T0_T1(); break;
4331 default: return 1;
4332 }
4333 NEON_GET_REG(T1, rd, pass);
4334 if (u) { /* VMLS */
ad69471c 4335 gen_neon_rsb(size);
9ee6e8bb
PB
4336 } else { /* VMLA */
4337 gen_neon_add(size);
4338 }
4339 break;
4340 case 19: /* VMUL */
4341 if (u) { /* polynomial */
ad69471c 4342 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4343 } else { /* Integer */
4344 switch (size) {
ad69471c
PB
4345 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4346 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4347 case 2: gen_op_mul_T0_T1(); break;
4348 default: return 1;
4349 }
4350 }
4351 break;
4352 case 20: /* VPMAX */
4353 GEN_NEON_INTEGER_OP(pmax);
4354 break;
4355 case 21: /* VPMIN */
4356 GEN_NEON_INTEGER_OP(pmin);
4357 break;
4358 case 22: /* Hultiply high. */
4359 if (!u) { /* VQDMULH */
4360 switch (size) {
ad69471c
PB
4361 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4362 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4363 default: return 1;
4364 }
4365 } else { /* VQRDHMUL */
4366 switch (size) {
ad69471c
PB
4367 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4368 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4369 default: return 1;
4370 }
4371 }
4372 break;
4373 case 23: /* VPADD */
4374 if (u)
4375 return 1;
4376 switch (size) {
ad69471c
PB
4377 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4378 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4379 case 2: gen_op_addl_T0_T1(); break;
4380 default: return 1;
4381 }
4382 break;
4383 case 26: /* Floating point arithnetic. */
4384 switch ((u << 2) | size) {
4385 case 0: /* VADD */
ad69471c 4386 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4387 break;
4388 case 2: /* VSUB */
ad69471c 4389 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4390 break;
4391 case 4: /* VPADD */
ad69471c 4392 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4393 break;
4394 case 6: /* VABD */
ad69471c 4395 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4396 break;
4397 default:
4398 return 1;
4399 }
4400 break;
4401 case 27: /* Float multiply. */
ad69471c 4402 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4403 if (!u) {
4404 NEON_GET_REG(T1, rd, pass);
4405 if (size == 0) {
ad69471c 4406 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4407 } else {
ad69471c 4408 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4409 }
4410 }
4411 break;
4412 case 28: /* Float compare. */
4413 if (!u) {
ad69471c 4414 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4415 } else {
9ee6e8bb 4416 if (size == 0)
ad69471c 4417 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4418 else
ad69471c 4419 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4420 }
2c0262af 4421 break;
9ee6e8bb
PB
4422 case 29: /* Float compare absolute. */
4423 if (!u)
4424 return 1;
4425 if (size == 0)
ad69471c 4426 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4427 else
ad69471c 4428 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4429 break;
9ee6e8bb
PB
4430 case 30: /* Float min/max. */
4431 if (size == 0)
ad69471c 4432 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4433 else
ad69471c 4434 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4435 break;
4436 case 31:
4437 if (size == 0)
4373f3ce 4438 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4439 else
4373f3ce 4440 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4441 break;
9ee6e8bb
PB
4442 default:
4443 abort();
2c0262af 4444 }
9ee6e8bb
PB
4445 /* Save the result. For elementwise operations we can put it
4446 straight into the destination register. For pairwise operations
4447 we have to be careful to avoid clobbering the source operands. */
4448 if (pairwise && rd == rm) {
4449 gen_neon_movl_scratch_T0(pass);
4450 } else {
4451 NEON_SET_REG(T0, rd, pass);
4452 }
4453
4454 } /* for pass */
4455 if (pairwise && rd == rm) {
4456 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4457 gen_neon_movl_T0_scratch(pass);
4458 NEON_SET_REG(T0, rd, pass);
4459 }
4460 }
ad69471c 4461 /* End of 3 register same size operations. */
9ee6e8bb
PB
4462 } else if (insn & (1 << 4)) {
4463 if ((insn & 0x00380080) != 0) {
4464 /* Two registers and shift. */
4465 op = (insn >> 8) & 0xf;
4466 if (insn & (1 << 7)) {
4467 /* 64-bit shift. */
4468 size = 3;
4469 } else {
4470 size = 2;
4471 while ((insn & (1 << (size + 19))) == 0)
4472 size--;
4473 }
4474 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4475 /* To avoid excessive dumplication of ops we implement shift
4476 by immediate using the variable shift operations. */
4477 if (op < 8) {
4478 /* Shift by immediate:
4479 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4480 /* Right shifts are encoded as N - shift, where N is the
4481 element size in bits. */
4482 if (op <= 4)
4483 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4484 if (size == 3) {
4485 count = q + 1;
4486 } else {
4487 count = q ? 4: 2;
4488 }
4489 switch (size) {
4490 case 0:
4491 imm = (uint8_t) shift;
4492 imm |= imm << 8;
4493 imm |= imm << 16;
4494 break;
4495 case 1:
4496 imm = (uint16_t) shift;
4497 imm |= imm << 16;
4498 break;
4499 case 2:
4500 case 3:
4501 imm = shift;
4502 break;
4503 default:
4504 abort();
4505 }
4506
4507 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4508 if (size == 3) {
4509 neon_load_reg64(cpu_V0, rm + pass);
4510 tcg_gen_movi_i64(cpu_V1, imm);
4511 switch (op) {
4512 case 0: /* VSHR */
4513 case 1: /* VSRA */
4514 if (u)
4515 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4516 else
ad69471c 4517 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4518 break;
ad69471c
PB
4519 case 2: /* VRSHR */
4520 case 3: /* VRSRA */
4521 if (u)
4522 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4523 else
ad69471c 4524 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4525 break;
ad69471c
PB
4526 case 4: /* VSRI */
4527 if (!u)
4528 return 1;
4529 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4530 break;
4531 case 5: /* VSHL, VSLI */
4532 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4533 break;
4534 case 6: /* VQSHL */
4535 if (u)
4536 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4537 else
ad69471c
PB
4538 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4539 break;
4540 case 7: /* VQSHLU */
4541 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4542 break;
9ee6e8bb 4543 }
ad69471c
PB
4544 if (op == 1 || op == 3) {
4545 /* Accumulate. */
4546 neon_load_reg64(cpu_V0, rd + pass);
4547 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4548 } else if (op == 4 || (op == 5 && u)) {
4549 /* Insert */
4550 cpu_abort(env, "VS[LR]I.64 not implemented");
4551 }
4552 neon_store_reg64(cpu_V0, rd + pass);
4553 } else { /* size < 3 */
4554 /* Operands in T0 and T1. */
4555 gen_op_movl_T1_im(imm);
4556 NEON_GET_REG(T0, rm, pass);
4557 switch (op) {
4558 case 0: /* VSHR */
4559 case 1: /* VSRA */
4560 GEN_NEON_INTEGER_OP(shl);
4561 break;
4562 case 2: /* VRSHR */
4563 case 3: /* VRSRA */
4564 GEN_NEON_INTEGER_OP(rshl);
4565 break;
4566 case 4: /* VSRI */
4567 if (!u)
4568 return 1;
4569 GEN_NEON_INTEGER_OP(shl);
4570 break;
4571 case 5: /* VSHL, VSLI */
4572 switch (size) {
4573 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4574 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4575 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4576 default: return 1;
4577 }
4578 break;
4579 case 6: /* VQSHL */
4580 GEN_NEON_INTEGER_OP_ENV(qshl);
4581 break;
4582 case 7: /* VQSHLU */
4583 switch (size) {
4584 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4585 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4586 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4587 default: return 1;
4588 }
4589 break;
4590 }
4591
4592 if (op == 1 || op == 3) {
4593 /* Accumulate. */
4594 NEON_GET_REG(T1, rd, pass);
4595 gen_neon_add(size);
4596 } else if (op == 4 || (op == 5 && u)) {
4597 /* Insert */
4598 switch (size) {
4599 case 0:
4600 if (op == 4)
4601 imm = 0xff >> -shift;
4602 else
4603 imm = (uint8_t)(0xff << shift);
4604 imm |= imm << 8;
4605 imm |= imm << 16;
4606 break;
4607 case 1:
4608 if (op == 4)
4609 imm = 0xffff >> -shift;
4610 else
4611 imm = (uint16_t)(0xffff << shift);
4612 imm |= imm << 16;
4613 break;
4614 case 2:
4615 if (op == 4)
4616 imm = 0xffffffffu >> -shift;
4617 else
4618 imm = 0xffffffffu << shift;
4619 break;
4620 default:
4621 abort();
4622 }
4623 tmp = neon_load_reg(rd, pass);
4624 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4625 tcg_gen_andi_i32(tmp, tmp, ~imm);
4626 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4627 }
9ee6e8bb
PB
4628 NEON_SET_REG(T0, rd, pass);
4629 }
4630 } /* for pass */
4631 } else if (op < 10) {
ad69471c 4632 /* Shift by immediate and narrow:
9ee6e8bb
PB
4633 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4634 shift = shift - (1 << (size + 3));
4635 size++;
9ee6e8bb
PB
4636 switch (size) {
4637 case 1:
ad69471c 4638 imm = (uint16_t)shift;
9ee6e8bb 4639 imm |= imm << 16;
ad69471c 4640 tmp2 = tcg_const_i32(imm);
a7812ae4 4641 TCGV_UNUSED_I64(tmp64);
9ee6e8bb
PB
4642 break;
4643 case 2:
ad69471c
PB
4644 imm = (uint32_t)shift;
4645 tmp2 = tcg_const_i32(imm);
a7812ae4 4646 TCGV_UNUSED_I64(tmp64);
9ee6e8bb 4647 case 3:
a7812ae4
PB
4648 tmp64 = tcg_const_i64(shift);
4649 TCGV_UNUSED(tmp2);
9ee6e8bb
PB
4650 break;
4651 default:
4652 abort();
4653 }
4654
ad69471c
PB
4655 for (pass = 0; pass < 2; pass++) {
4656 if (size == 3) {
4657 neon_load_reg64(cpu_V0, rm + pass);
4658 if (q) {
4659 if (u)
a7812ae4 4660 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4661 else
a7812ae4 4662 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c
PB
4663 } else {
4664 if (u)
a7812ae4 4665 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp64);
ad69471c 4666 else
a7812ae4 4667 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp64);
ad69471c 4668 }
2c0262af 4669 } else {
ad69471c
PB
4670 tmp = neon_load_reg(rm + pass, 0);
4671 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
36aa55dc
PB
4672 tmp3 = neon_load_reg(rm + pass, 1);
4673 gen_neon_shift_narrow(size, tmp3, tmp2, q, u);
4674 tcg_gen_concat_i32_i64(cpu_V0, tmp, tmp3);
ad69471c 4675 dead_tmp(tmp);
36aa55dc 4676 dead_tmp(tmp3);
9ee6e8bb 4677 }
ad69471c
PB
4678 tmp = new_tmp();
4679 if (op == 8 && !u) {
4680 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4681 } else {
ad69471c
PB
4682 if (op == 8)
4683 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4684 else
ad69471c
PB
4685 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4686 }
4687 if (pass == 0) {
4688 tmp2 = tmp;
4689 } else {
4690 neon_store_reg(rd, 0, tmp2);
4691 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4692 }
4693 } /* for pass */
4694 } else if (op == 10) {
4695 /* VSHLL */
ad69471c 4696 if (q || size == 3)
9ee6e8bb 4697 return 1;
ad69471c
PB
4698 tmp = neon_load_reg(rm, 0);
4699 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4700 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4701 if (pass == 1)
4702 tmp = tmp2;
4703
4704 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4705
9ee6e8bb
PB
4706 if (shift != 0) {
4707 /* The shift is less than the width of the source
ad69471c
PB
4708 type, so we can just shift the whole register. */
4709 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4710 if (size < 2 || !u) {
4711 uint64_t imm64;
4712 if (size == 0) {
4713 imm = (0xffu >> (8 - shift));
4714 imm |= imm << 16;
4715 } else {
4716 imm = 0xffff >> (16 - shift);
9ee6e8bb 4717 }
ad69471c
PB
4718 imm64 = imm | (((uint64_t)imm) << 32);
4719 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4720 }
4721 }
ad69471c 4722 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4723 }
4724 } else if (op == 15 || op == 16) {
4725 /* VCVT fixed-point. */
4726 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4727 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4728 if (op & 1) {
4729 if (u)
4373f3ce 4730 gen_vfp_ulto(0, shift);
9ee6e8bb 4731 else
4373f3ce 4732 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4733 } else {
4734 if (u)
4373f3ce 4735 gen_vfp_toul(0, shift);
9ee6e8bb 4736 else
4373f3ce 4737 gen_vfp_tosl(0, shift);
2c0262af 4738 }
4373f3ce 4739 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4740 }
4741 } else {
9ee6e8bb
PB
4742 return 1;
4743 }
4744 } else { /* (insn & 0x00380080) == 0 */
4745 int invert;
4746
4747 op = (insn >> 8) & 0xf;
4748 /* One register and immediate. */
4749 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4750 invert = (insn & (1 << 5)) != 0;
4751 switch (op) {
4752 case 0: case 1:
4753 /* no-op */
4754 break;
4755 case 2: case 3:
4756 imm <<= 8;
4757 break;
4758 case 4: case 5:
4759 imm <<= 16;
4760 break;
4761 case 6: case 7:
4762 imm <<= 24;
4763 break;
4764 case 8: case 9:
4765 imm |= imm << 16;
4766 break;
4767 case 10: case 11:
4768 imm = (imm << 8) | (imm << 24);
4769 break;
4770 case 12:
4771 imm = (imm < 8) | 0xff;
4772 break;
4773 case 13:
4774 imm = (imm << 16) | 0xffff;
4775 break;
4776 case 14:
4777 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4778 if (invert)
4779 imm = ~imm;
4780 break;
4781 case 15:
4782 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4783 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4784 break;
4785 }
4786 if (invert)
4787 imm = ~imm;
4788
4789 if (op != 14 || !invert)
4790 gen_op_movl_T1_im(imm);
4791
4792 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4793 if (op & 1 && op < 12) {
ad69471c 4794 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4795 if (invert) {
4796 /* The immediate value has already been inverted, so
4797 BIC becomes AND. */
ad69471c 4798 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4799 } else {
ad69471c 4800 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4801 }
9ee6e8bb 4802 } else {
ad69471c
PB
4803 /* VMOV, VMVN. */
4804 tmp = new_tmp();
9ee6e8bb 4805 if (op == 14 && invert) {
ad69471c
PB
4806 uint32_t val;
4807 val = 0;
9ee6e8bb
PB
4808 for (n = 0; n < 4; n++) {
4809 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4810 val |= 0xff << (n * 8);
9ee6e8bb 4811 }
ad69471c
PB
4812 tcg_gen_movi_i32(tmp, val);
4813 } else {
4814 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4815 }
9ee6e8bb 4816 }
ad69471c 4817 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4818 }
4819 }
e4b3861d 4820 } else { /* (insn & 0x00800010 == 0x00800000) */
9ee6e8bb
PB
4821 if (size != 3) {
4822 op = (insn >> 8) & 0xf;
4823 if ((insn & (1 << 6)) == 0) {
4824 /* Three registers of different lengths. */
4825 int src1_wide;
4826 int src2_wide;
4827 int prewiden;
4828 /* prewiden, src1_wide, src2_wide */
4829 static const int neon_3reg_wide[16][3] = {
4830 {1, 0, 0}, /* VADDL */
4831 {1, 1, 0}, /* VADDW */
4832 {1, 0, 0}, /* VSUBL */
4833 {1, 1, 0}, /* VSUBW */
4834 {0, 1, 1}, /* VADDHN */
4835 {0, 0, 0}, /* VABAL */
4836 {0, 1, 1}, /* VSUBHN */
4837 {0, 0, 0}, /* VABDL */
4838 {0, 0, 0}, /* VMLAL */
4839 {0, 0, 0}, /* VQDMLAL */
4840 {0, 0, 0}, /* VMLSL */
4841 {0, 0, 0}, /* VQDMLSL */
4842 {0, 0, 0}, /* Integer VMULL */
4843 {0, 0, 0}, /* VQDMULL */
4844 {0, 0, 0} /* Polynomial VMULL */
4845 };
4846
4847 prewiden = neon_3reg_wide[op][0];
4848 src1_wide = neon_3reg_wide[op][1];
4849 src2_wide = neon_3reg_wide[op][2];
4850
ad69471c
PB
4851 if (size == 0 && (op == 9 || op == 11 || op == 13))
4852 return 1;
4853
9ee6e8bb
PB
4854 /* Avoid overlapping operands. Wide source operands are
4855 always aligned so will never overlap with wide
4856 destinations in problematic ways. */
8f8e3aa4
PB
4857 if (rd == rm && !src2_wide) {
4858 NEON_GET_REG(T0, rm, 1);
4859 gen_neon_movl_scratch_T0(2);
4860 } else if (rd == rn && !src1_wide) {
4861 NEON_GET_REG(T0, rn, 1);
4862 gen_neon_movl_scratch_T0(2);
9ee6e8bb 4863 }
a50f5b91 4864 TCGV_UNUSED(tmp3);
9ee6e8bb 4865 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4866 if (src1_wide) {
4867 neon_load_reg64(cpu_V0, rn + pass);
a50f5b91 4868 TCGV_UNUSED(tmp);
9ee6e8bb 4869 } else {
ad69471c
PB
4870 if (pass == 1 && rd == rn) {
4871 gen_neon_movl_T0_scratch(2);
4872 tmp = new_tmp();
4873 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4874 } else {
ad69471c
PB
4875 tmp = neon_load_reg(rn, pass);
4876 }
4877 if (prewiden) {
4878 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4879 }
4880 }
ad69471c
PB
4881 if (src2_wide) {
4882 neon_load_reg64(cpu_V1, rm + pass);
a50f5b91 4883 TCGV_UNUSED(tmp2);
9ee6e8bb 4884 } else {
ad69471c 4885 if (pass == 1 && rd == rm) {
8f8e3aa4 4886 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4887 tmp2 = new_tmp();
4888 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4889 } else {
ad69471c
PB
4890 tmp2 = neon_load_reg(rm, pass);
4891 }
4892 if (prewiden) {
4893 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4894 }
9ee6e8bb
PB
4895 }
4896 switch (op) {
4897 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4898 gen_neon_addl(size);
9ee6e8bb
PB
4899 break;
4900 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
ad69471c 4901 gen_neon_subl(size);
9ee6e8bb
PB
4902 break;
4903 case 5: case 7: /* VABAL, VABDL */
4904 switch ((size << 1) | u) {
ad69471c
PB
4905 case 0:
4906 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4907 break;
4908 case 1:
4909 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4910 break;
4911 case 2:
4912 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4913 break;
4914 case 3:
4915 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4916 break;
4917 case 4:
4918 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4919 break;
4920 case 5:
4921 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4922 break;
9ee6e8bb
PB
4923 default: abort();
4924 }
ad69471c
PB
4925 dead_tmp(tmp2);
4926 dead_tmp(tmp);
9ee6e8bb
PB
4927 break;
4928 case 8: case 9: case 10: case 11: case 12: case 13:
4929 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4930 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4931 break;
4932 case 14: /* Polynomial VMULL */
4933 cpu_abort(env, "Polynomial VMULL not implemented");
4934
4935 default: /* 15 is RESERVED. */
4936 return 1;
4937 }
4938 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4939 /* Accumulate. */
4940 if (op == 10 || op == 11) {
ad69471c 4941 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4942 }
4943
9ee6e8bb 4944 if (op != 13) {
ad69471c 4945 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4946 }
4947
4948 switch (op) {
4949 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4950 gen_neon_addl(size);
9ee6e8bb
PB
4951 break;
4952 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4953 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4954 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4955 break;
9ee6e8bb
PB
4956 /* Fall through. */
4957 case 13: /* VQDMULL */
ad69471c 4958 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4959 break;
4960 default:
4961 abort();
4962 }
ad69471c 4963 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4964 } else if (op == 4 || op == 6) {
4965 /* Narrowing operation. */
ad69471c 4966 tmp = new_tmp();
9ee6e8bb
PB
4967 if (u) {
4968 switch (size) {
ad69471c
PB
4969 case 0:
4970 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4971 break;
4972 case 1:
4973 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4974 break;
4975 case 2:
4976 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4977 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4978 break;
9ee6e8bb
PB
4979 default: abort();
4980 }
4981 } else {
4982 switch (size) {
ad69471c
PB
4983 case 0:
4984 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4985 break;
4986 case 1:
4987 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4988 break;
4989 case 2:
4990 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4991 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4992 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4993 break;
9ee6e8bb
PB
4994 default: abort();
4995 }
4996 }
ad69471c
PB
4997 if (pass == 0) {
4998 tmp3 = tmp;
4999 } else {
5000 neon_store_reg(rd, 0, tmp3);
5001 neon_store_reg(rd, 1, tmp);
5002 }
9ee6e8bb
PB
5003 } else {
5004 /* Write back the result. */
ad69471c 5005 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5006 }
5007 }
5008 } else {
5009 /* Two registers and a scalar. */
5010 switch (op) {
5011 case 0: /* Integer VMLA scalar */
5012 case 1: /* Float VMLA scalar */
5013 case 4: /* Integer VMLS scalar */
5014 case 5: /* Floating point VMLS scalar */
5015 case 8: /* Integer VMUL scalar */
5016 case 9: /* Floating point VMUL scalar */
5017 case 12: /* VQDMULH scalar */
5018 case 13: /* VQRDMULH scalar */
5019 gen_neon_get_scalar(size, rm);
8f8e3aa4 5020 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5021 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5022 if (pass != 0)
8f8e3aa4 5023 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5024 NEON_GET_REG(T1, rn, pass);
5025 if (op == 12) {
5026 if (size == 1) {
ad69471c 5027 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5028 } else {
ad69471c 5029 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5030 }
5031 } else if (op == 13) {
5032 if (size == 1) {
ad69471c 5033 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5034 } else {
ad69471c 5035 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5036 }
5037 } else if (op & 1) {
ad69471c 5038 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5039 } else {
5040 switch (size) {
ad69471c
PB
5041 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5042 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5043 case 2: gen_op_mul_T0_T1(); break;
5044 default: return 1;
5045 }
5046 }
5047 if (op < 8) {
5048 /* Accumulate. */
5049 NEON_GET_REG(T1, rd, pass);
5050 switch (op) {
5051 case 0:
5052 gen_neon_add(size);
5053 break;
5054 case 1:
ad69471c 5055 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5056 break;
5057 case 4:
ad69471c 5058 gen_neon_rsb(size);
9ee6e8bb
PB
5059 break;
5060 case 5:
ad69471c 5061 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5062 break;
5063 default:
5064 abort();
5065 }
5066 }
5067 NEON_SET_REG(T0, rd, pass);
5068 }
5069 break;
5070 case 2: /* VMLAL sclar */
5071 case 3: /* VQDMLAL scalar */
5072 case 6: /* VMLSL scalar */
5073 case 7: /* VQDMLSL scalar */
5074 case 10: /* VMULL scalar */
5075 case 11: /* VQDMULL scalar */
ad69471c
PB
5076 if (size == 0 && (op == 3 || op == 7 || op == 11))
5077 return 1;
5078
9ee6e8bb 5079 gen_neon_get_scalar(size, rm);
ad69471c
PB
5080 NEON_GET_REG(T1, rn, 1);
5081
9ee6e8bb 5082 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5083 if (pass == 0) {
5084 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5085 } else {
ad69471c
PB
5086 tmp = new_tmp();
5087 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5088 }
ad69471c
PB
5089 tmp2 = new_tmp();
5090 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5091 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5092 if (op == 6 || op == 7) {
ad69471c
PB
5093 gen_neon_negl(cpu_V0, size);
5094 }
5095 if (op != 11) {
5096 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5097 }
9ee6e8bb
PB
5098 switch (op) {
5099 case 2: case 6:
ad69471c 5100 gen_neon_addl(size);
9ee6e8bb
PB
5101 break;
5102 case 3: case 7:
ad69471c
PB
5103 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5104 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5105 break;
5106 case 10:
5107 /* no-op */
5108 break;
5109 case 11:
ad69471c 5110 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5111 break;
5112 default:
5113 abort();
5114 }
ad69471c 5115 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5116 }
5117 break;
5118 default: /* 14 and 15 are RESERVED */
5119 return 1;
5120 }
5121 }
5122 } else { /* size == 3 */
5123 if (!u) {
5124 /* Extract. */
9ee6e8bb 5125 imm = (insn >> 8) & 0xf;
ad69471c
PB
5126 count = q + 1;
5127
5128 if (imm > 7 && !q)
5129 return 1;
5130
5131 if (imm == 0) {
5132 neon_load_reg64(cpu_V0, rn);
5133 if (q) {
5134 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5135 }
ad69471c
PB
5136 } else if (imm == 8) {
5137 neon_load_reg64(cpu_V0, rn + 1);
5138 if (q) {
5139 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5140 }
ad69471c 5141 } else if (q) {
a7812ae4 5142 tmp64 = tcg_temp_new_i64();
ad69471c
PB
5143 if (imm < 8) {
5144 neon_load_reg64(cpu_V0, rn);
a7812ae4 5145 neon_load_reg64(tmp64, rn + 1);
ad69471c
PB
5146 } else {
5147 neon_load_reg64(cpu_V0, rn + 1);
a7812ae4 5148 neon_load_reg64(tmp64, rm);
ad69471c
PB
5149 }
5150 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
a7812ae4 5151 tcg_gen_shli_i64(cpu_V1, tmp64, 64 - ((imm & 7) * 8));
ad69471c
PB
5152 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5153 if (imm < 8) {
5154 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5155 } else {
ad69471c
PB
5156 neon_load_reg64(cpu_V1, rm + 1);
5157 imm -= 8;
9ee6e8bb 5158 }
ad69471c 5159 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
a7812ae4
PB
5160 tcg_gen_shri_i64(tmp64, tmp64, imm * 8);
5161 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp64);
ad69471c 5162 } else {
a7812ae4 5163 /* BUGFIX */
ad69471c 5164 neon_load_reg64(cpu_V0, rn);
a7812ae4 5165 tcg_gen_shri_i64(cpu_V0, cpu_V0, imm * 8);
ad69471c 5166 neon_load_reg64(cpu_V1, rm);
a7812ae4 5167 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
ad69471c
PB
5168 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5169 }
5170 neon_store_reg64(cpu_V0, rd);
5171 if (q) {
5172 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5173 }
5174 } else if ((insn & (1 << 11)) == 0) {
5175 /* Two register misc. */
5176 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5177 size = (insn >> 18) & 3;
5178 switch (op) {
5179 case 0: /* VREV64 */
5180 if (size == 3)
5181 return 1;
5182 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5183 NEON_GET_REG(T0, rm, pass * 2);
5184 NEON_GET_REG(T1, rm, pass * 2 + 1);
5185 switch (size) {
b0109805 5186 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5187 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5188 case 2: /* no-op */ break;
5189 default: abort();
5190 }
5191 NEON_SET_REG(T0, rd, pass * 2 + 1);
5192 if (size == 2) {
5193 NEON_SET_REG(T1, rd, pass * 2);
5194 } else {
5195 gen_op_movl_T0_T1();
5196 switch (size) {
b0109805 5197 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5198 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5199 default: abort();
5200 }
5201 NEON_SET_REG(T0, rd, pass * 2);
5202 }
5203 }
5204 break;
5205 case 4: case 5: /* VPADDL */
5206 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5207 if (size == 3)
5208 return 1;
ad69471c
PB
5209 for (pass = 0; pass < q + 1; pass++) {
5210 tmp = neon_load_reg(rm, pass * 2);
5211 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5212 tmp = neon_load_reg(rm, pass * 2 + 1);
5213 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5214 switch (size) {
5215 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5216 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5217 case 2: tcg_gen_add_i64(CPU_V001); break;
5218 default: abort();
5219 }
9ee6e8bb
PB
5220 if (op >= 12) {
5221 /* Accumulate. */
ad69471c
PB
5222 neon_load_reg64(cpu_V1, rd + pass);
5223 gen_neon_addl(size);
9ee6e8bb 5224 }
ad69471c 5225 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5226 }
5227 break;
5228 case 33: /* VTRN */
5229 if (size == 2) {
5230 for (n = 0; n < (q ? 4 : 2); n += 2) {
5231 NEON_GET_REG(T0, rm, n);
5232 NEON_GET_REG(T1, rd, n + 1);
5233 NEON_SET_REG(T1, rm, n);
5234 NEON_SET_REG(T0, rd, n + 1);
5235 }
5236 } else {
5237 goto elementwise;
5238 }
5239 break;
5240 case 34: /* VUZP */
5241 /* Reg Before After
5242 Rd A3 A2 A1 A0 B2 B0 A2 A0
5243 Rm B3 B2 B1 B0 B3 B1 A3 A1
5244 */
5245 if (size == 3)
5246 return 1;
5247 gen_neon_unzip(rd, q, 0, size);
5248 gen_neon_unzip(rm, q, 4, size);
5249 if (q) {
5250 static int unzip_order_q[8] =
5251 {0, 2, 4, 6, 1, 3, 5, 7};
5252 for (n = 0; n < 8; n++) {
5253 int reg = (n < 4) ? rd : rm;
5254 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5255 NEON_SET_REG(T0, reg, n % 4);
5256 }
5257 } else {
5258 static int unzip_order[4] =
5259 {0, 4, 1, 5};
5260 for (n = 0; n < 4; n++) {
5261 int reg = (n < 2) ? rd : rm;
5262 gen_neon_movl_T0_scratch(unzip_order[n]);
5263 NEON_SET_REG(T0, reg, n % 2);
5264 }
5265 }
5266 break;
5267 case 35: /* VZIP */
5268 /* Reg Before After
5269 Rd A3 A2 A1 A0 B1 A1 B0 A0
5270 Rm B3 B2 B1 B0 B3 A3 B2 A2
5271 */
5272 if (size == 3)
5273 return 1;
5274 count = (q ? 4 : 2);
5275 for (n = 0; n < count; n++) {
5276 NEON_GET_REG(T0, rd, n);
5277 NEON_GET_REG(T1, rd, n);
5278 switch (size) {
ad69471c
PB
5279 case 0: gen_helper_neon_zip_u8(); break;
5280 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5281 case 2: /* no-op */; break;
5282 default: abort();
5283 }
5284 gen_neon_movl_scratch_T0(n * 2);
5285 gen_neon_movl_scratch_T1(n * 2 + 1);
5286 }
5287 for (n = 0; n < count * 2; n++) {
5288 int reg = (n < count) ? rd : rm;
5289 gen_neon_movl_T0_scratch(n);
5290 NEON_SET_REG(T0, reg, n % count);
5291 }
5292 break;
5293 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5294 if (size == 3)
5295 return 1;
a50f5b91 5296 TCGV_UNUSED(tmp2);
9ee6e8bb 5297 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5298 neon_load_reg64(cpu_V0, rm + pass);
5299 tmp = new_tmp();
9ee6e8bb 5300 if (op == 36 && q == 0) {
ad69471c 5301 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5302 } else if (q) {
ad69471c 5303 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5304 } else {
ad69471c
PB
5305 gen_neon_narrow_sats(size, tmp, cpu_V0);
5306 }
5307 if (pass == 0) {
5308 tmp2 = tmp;
5309 } else {
5310 neon_store_reg(rd, 0, tmp2);
5311 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5312 }
9ee6e8bb
PB
5313 }
5314 break;
5315 case 38: /* VSHLL */
ad69471c 5316 if (q || size == 3)
9ee6e8bb 5317 return 1;
ad69471c
PB
5318 tmp = neon_load_reg(rm, 0);
5319 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5320 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5321 if (pass == 1)
5322 tmp = tmp2;
5323 gen_neon_widen(cpu_V0, tmp, size, 1);
5324 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5325 }
5326 break;
5327 default:
5328 elementwise:
5329 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5330 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5331 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5332 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5333 } else {
5334 NEON_GET_REG(T0, rm, pass);
5335 }
5336 switch (op) {
5337 case 1: /* VREV32 */
5338 switch (size) {
b0109805 5339 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5340 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5341 default: return 1;
5342 }
5343 break;
5344 case 2: /* VREV16 */
5345 if (size != 0)
5346 return 1;
3670669c 5347 gen_rev16(cpu_T[0]);
9ee6e8bb 5348 break;
9ee6e8bb
PB
5349 case 8: /* CLS */
5350 switch (size) {
ad69471c
PB
5351 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5352 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5353 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5354 default: return 1;
5355 }
5356 break;
5357 case 9: /* CLZ */
5358 switch (size) {
ad69471c
PB
5359 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5360 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5361 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5362 default: return 1;
5363 }
5364 break;
5365 case 10: /* CNT */
5366 if (size != 0)
5367 return 1;
ad69471c 5368 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5369 break;
5370 case 11: /* VNOT */
5371 if (size != 0)
5372 return 1;
5373 gen_op_notl_T0();
5374 break;
5375 case 14: /* VQABS */
5376 switch (size) {
ad69471c
PB
5377 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5378 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5379 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5380 default: return 1;
5381 }
5382 break;
5383 case 15: /* VQNEG */
5384 switch (size) {
ad69471c
PB
5385 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5386 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5387 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5388 default: return 1;
5389 }
5390 break;
5391 case 16: case 19: /* VCGT #0, VCLE #0 */
5392 gen_op_movl_T1_im(0);
5393 switch(size) {
ad69471c
PB
5394 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5395 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5396 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5397 default: return 1;
5398 }
5399 if (op == 19)
5400 gen_op_notl_T0();
5401 break;
5402 case 17: case 20: /* VCGE #0, VCLT #0 */
5403 gen_op_movl_T1_im(0);
5404 switch(size) {
ad69471c
PB
5405 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5406 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5407 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5408 default: return 1;
5409 }
5410 if (op == 20)
5411 gen_op_notl_T0();
5412 break;
5413 case 18: /* VCEQ #0 */
5414 gen_op_movl_T1_im(0);
5415 switch(size) {
ad69471c
PB
5416 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5417 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5418 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5419 default: return 1;
5420 }
5421 break;
5422 case 22: /* VABS */
5423 switch(size) {
ad69471c
PB
5424 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5425 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5426 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5427 default: return 1;
5428 }
5429 break;
5430 case 23: /* VNEG */
5431 gen_op_movl_T1_im(0);
ad69471c
PB
5432 if (size == 3)
5433 return 1;
5434 gen_neon_rsb(size);
9ee6e8bb
PB
5435 break;
5436 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5437 gen_op_movl_T1_im(0);
ad69471c 5438 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5439 if (op == 27)
5440 gen_op_notl_T0();
5441 break;
5442 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5443 gen_op_movl_T1_im(0);
ad69471c 5444 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5445 if (op == 28)
5446 gen_op_notl_T0();
5447 break;
5448 case 26: /* Float VCEQ #0 */
5449 gen_op_movl_T1_im(0);
ad69471c 5450 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5451 break;
5452 case 30: /* Float VABS */
4373f3ce 5453 gen_vfp_abs(0);
9ee6e8bb
PB
5454 break;
5455 case 31: /* Float VNEG */
4373f3ce 5456 gen_vfp_neg(0);
9ee6e8bb
PB
5457 break;
5458 case 32: /* VSWP */
5459 NEON_GET_REG(T1, rd, pass);
5460 NEON_SET_REG(T1, rm, pass);
5461 break;
5462 case 33: /* VTRN */
5463 NEON_GET_REG(T1, rd, pass);
5464 switch (size) {
ad69471c
PB
5465 case 0: gen_helper_neon_trn_u8(); break;
5466 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5467 case 2: abort();
5468 default: return 1;
5469 }
5470 NEON_SET_REG(T1, rm, pass);
5471 break;
5472 case 56: /* Integer VRECPE */
4373f3ce 5473 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5474 break;
5475 case 57: /* Integer VRSQRTE */
4373f3ce 5476 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5477 break;
5478 case 58: /* Float VRECPE */
4373f3ce 5479 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5480 break;
5481 case 59: /* Float VRSQRTE */
4373f3ce 5482 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5483 break;
5484 case 60: /* VCVT.F32.S32 */
4373f3ce 5485 gen_vfp_tosiz(0);
9ee6e8bb
PB
5486 break;
5487 case 61: /* VCVT.F32.U32 */
4373f3ce 5488 gen_vfp_touiz(0);
9ee6e8bb
PB
5489 break;
5490 case 62: /* VCVT.S32.F32 */
4373f3ce 5491 gen_vfp_sito(0);
9ee6e8bb
PB
5492 break;
5493 case 63: /* VCVT.U32.F32 */
4373f3ce 5494 gen_vfp_uito(0);
9ee6e8bb
PB
5495 break;
5496 default:
5497 /* Reserved: 21, 29, 39-56 */
5498 return 1;
5499 }
5500 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5501 tcg_gen_st_f32(cpu_F0s, cpu_env,
5502 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5503 } else {
5504 NEON_SET_REG(T0, rd, pass);
5505 }
5506 }
5507 break;
5508 }
5509 } else if ((insn & (1 << 10)) == 0) {
5510 /* VTBL, VTBX. */
3018f259 5511 n = ((insn >> 5) & 0x18) + 8;
9ee6e8bb 5512 if (insn & (1 << 6)) {
8f8e3aa4 5513 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5514 } else {
8f8e3aa4
PB
5515 tmp = new_tmp();
5516 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5517 }
8f8e3aa4
PB
5518 tmp2 = neon_load_reg(rm, 0);
5519 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5520 tcg_const_i32(n));
3018f259 5521 dead_tmp(tmp);
9ee6e8bb 5522 if (insn & (1 << 6)) {
8f8e3aa4 5523 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5524 } else {
8f8e3aa4
PB
5525 tmp = new_tmp();
5526 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5527 }
8f8e3aa4
PB
5528 tmp3 = neon_load_reg(rm, 1);
5529 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5530 tcg_const_i32(n));
5531 neon_store_reg(rd, 0, tmp2);
3018f259
PB
5532 neon_store_reg(rd, 1, tmp3);
5533 dead_tmp(tmp);
9ee6e8bb
PB
5534 } else if ((insn & 0x380) == 0) {
5535 /* VDUP */
5536 if (insn & (1 << 19)) {
5537 NEON_SET_REG(T0, rm, 1);
5538 } else {
5539 NEON_SET_REG(T0, rm, 0);
5540 }
5541 if (insn & (1 << 16)) {
ad69471c 5542 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5543 } else if (insn & (1 << 17)) {
5544 if ((insn >> 18) & 1)
ad69471c 5545 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5546 else
ad69471c 5547 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5548 }
5549 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5550 NEON_SET_REG(T0, rd, pass);
5551 }
5552 } else {
5553 return 1;
5554 }
5555 }
5556 }
5557 return 0;
5558}
5559
5560static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5561{
5562 int cpnum;
5563
5564 cpnum = (insn >> 8) & 0xf;
5565 if (arm_feature(env, ARM_FEATURE_XSCALE)
5566 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5567 return 1;
5568
5569 switch (cpnum) {
5570 case 0:
5571 case 1:
5572 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5573 return disas_iwmmxt_insn(env, s, insn);
5574 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5575 return disas_dsp_insn(env, s, insn);
5576 }
5577 return 1;
5578 case 10:
5579 case 11:
5580 return disas_vfp_insn (env, s, insn);
5581 case 15:
5582 return disas_cp15_insn (env, s, insn);
5583 default:
5584 /* Unknown coprocessor. See if the board has hooked it. */
5585 return disas_cp_insn (env, s, insn);
5586 }
5587}
5588
5e3f878a
PB
5589
5590/* Store a 64-bit value to a register pair. Clobbers val. */
a7812ae4 5591static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv_i64 val)
5e3f878a
PB
5592{
5593 TCGv tmp;
5594 tmp = new_tmp();
5595 tcg_gen_trunc_i64_i32(tmp, val);
5596 store_reg(s, rlow, tmp);
5597 tmp = new_tmp();
5598 tcg_gen_shri_i64(val, val, 32);
5599 tcg_gen_trunc_i64_i32(tmp, val);
5600 store_reg(s, rhigh, tmp);
5601}
5602
5603/* load a 32-bit value from a register and perform a 64-bit accumulate. */
a7812ae4 5604static void gen_addq_lo(DisasContext *s, TCGv_i64 val, int rlow)
5e3f878a 5605{
a7812ae4 5606 TCGv_i64 tmp;
5e3f878a
PB
5607 TCGv tmp2;
5608
36aa55dc 5609 /* Load value and extend to 64 bits. */
a7812ae4 5610 tmp = tcg_temp_new_i64();
5e3f878a
PB
5611 tmp2 = load_reg(s, rlow);
5612 tcg_gen_extu_i32_i64(tmp, tmp2);
5613 dead_tmp(tmp2);
5614 tcg_gen_add_i64(val, val, tmp);
5615}
5616
5617/* load and add a 64-bit value from a register pair. */
a7812ae4 5618static void gen_addq(DisasContext *s, TCGv_i64 val, int rlow, int rhigh)
5e3f878a 5619{
a7812ae4 5620 TCGv_i64 tmp;
36aa55dc
PB
5621 TCGv tmpl;
5622 TCGv tmph;
5e3f878a
PB
5623
5624 /* Load 64-bit value rd:rn. */
36aa55dc
PB
5625 tmpl = load_reg(s, rlow);
5626 tmph = load_reg(s, rhigh);
a7812ae4 5627 tmp = tcg_temp_new_i64();
36aa55dc
PB
5628 tcg_gen_concat_i32_i64(tmp, tmpl, tmph);
5629 dead_tmp(tmpl);
5630 dead_tmp(tmph);
5e3f878a
PB
5631 tcg_gen_add_i64(val, val, tmp);
5632}
5633
5634/* Set N and Z flags from a 64-bit value. */
a7812ae4 5635static void gen_logicq_cc(TCGv_i64 val)
5e3f878a
PB
5636{
5637 TCGv tmp = new_tmp();
5638 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5639 gen_logic_CC(tmp);
5640 dead_tmp(tmp);
5e3f878a
PB
5641}
5642
9ee6e8bb
PB
5643static void disas_arm_insn(CPUState * env, DisasContext *s)
5644{
5645 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5646 TCGv tmp;
3670669c 5647 TCGv tmp2;
6ddbc6e4 5648 TCGv tmp3;
b0109805 5649 TCGv addr;
a7812ae4 5650 TCGv_i64 tmp64;
9ee6e8bb
PB
5651
5652 insn = ldl_code(s->pc);
5653 s->pc += 4;
5654
5655 /* M variants do not implement ARM mode. */
5656 if (IS_M(env))
5657 goto illegal_op;
5658 cond = insn >> 28;
5659 if (cond == 0xf){
5660 /* Unconditional instructions. */
5661 if (((insn >> 25) & 7) == 1) {
5662 /* NEON Data processing. */
5663 if (!arm_feature(env, ARM_FEATURE_NEON))
5664 goto illegal_op;
5665
5666 if (disas_neon_data_insn(env, s, insn))
5667 goto illegal_op;
5668 return;
5669 }
5670 if ((insn & 0x0f100000) == 0x04000000) {
5671 /* NEON load/store. */
5672 if (!arm_feature(env, ARM_FEATURE_NEON))
5673 goto illegal_op;
5674
5675 if (disas_neon_ls_insn(env, s, insn))
5676 goto illegal_op;
5677 return;
5678 }
5679 if ((insn & 0x0d70f000) == 0x0550f000)
5680 return; /* PLD */
5681 else if ((insn & 0x0ffffdff) == 0x01010000) {
5682 ARCH(6);
5683 /* setend */
5684 if (insn & (1 << 9)) {
5685 /* BE8 mode not implemented. */
5686 goto illegal_op;
5687 }
5688 return;
5689 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5690 switch ((insn >> 4) & 0xf) {
5691 case 1: /* clrex */
5692 ARCH(6K);
8f8e3aa4 5693 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5694 return;
5695 case 4: /* dsb */
5696 case 5: /* dmb */
5697 case 6: /* isb */
5698 ARCH(7);
5699 /* We don't emulate caches so these are a no-op. */
5700 return;
5701 default:
5702 goto illegal_op;
5703 }
5704 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5705 /* srs */
5706 uint32_t offset;
5707 if (IS_USER(s))
5708 goto illegal_op;
5709 ARCH(6);
5710 op1 = (insn & 0x1f);
5711 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5712 addr = load_reg(s, 13);
9ee6e8bb 5713 } else {
b0109805
PB
5714 addr = new_tmp();
5715 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5716 }
5717 i = (insn >> 23) & 3;
5718 switch (i) {
5719 case 0: offset = -4; break; /* DA */
5720 case 1: offset = -8; break; /* DB */
5721 case 2: offset = 0; break; /* IA */
5722 case 3: offset = 4; break; /* IB */
5723 default: abort();
5724 }
5725 if (offset)
b0109805
PB
5726 tcg_gen_addi_i32(addr, addr, offset);
5727 tmp = load_reg(s, 14);
5728 gen_st32(tmp, addr, 0);
5729 tmp = new_tmp();
5730 gen_helper_cpsr_read(tmp);
5731 tcg_gen_addi_i32(addr, addr, 4);
5732 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5733 if (insn & (1 << 21)) {
5734 /* Base writeback. */
5735 switch (i) {
5736 case 0: offset = -8; break;
5737 case 1: offset = -4; break;
5738 case 2: offset = 4; break;
5739 case 3: offset = 0; break;
5740 default: abort();
5741 }
5742 if (offset)
b0109805 5743 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5744 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5745 gen_movl_reg_T1(s, 13);
5746 } else {
b0109805 5747 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5748 }
b0109805
PB
5749 } else {
5750 dead_tmp(addr);
9ee6e8bb
PB
5751 }
5752 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5753 /* rfe */
5754 uint32_t offset;
5755 if (IS_USER(s))
5756 goto illegal_op;
5757 ARCH(6);
5758 rn = (insn >> 16) & 0xf;
b0109805 5759 addr = load_reg(s, rn);
9ee6e8bb
PB
5760 i = (insn >> 23) & 3;
5761 switch (i) {
b0109805
PB
5762 case 0: offset = -4; break; /* DA */
5763 case 1: offset = -8; break; /* DB */
5764 case 2: offset = 0; break; /* IA */
5765 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5766 default: abort();
5767 }
5768 if (offset)
b0109805
PB
5769 tcg_gen_addi_i32(addr, addr, offset);
5770 /* Load PC into tmp and CPSR into tmp2. */
5771 tmp = gen_ld32(addr, 0);
5772 tcg_gen_addi_i32(addr, addr, 4);
5773 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5774 if (insn & (1 << 21)) {
5775 /* Base writeback. */
5776 switch (i) {
b0109805
PB
5777 case 0: offset = -8; break;
5778 case 1: offset = -4; break;
5779 case 2: offset = 4; break;
5780 case 3: offset = 0; break;
9ee6e8bb
PB
5781 default: abort();
5782 }
5783 if (offset)
b0109805
PB
5784 tcg_gen_addi_i32(addr, addr, offset);
5785 store_reg(s, rn, addr);
5786 } else {
5787 dead_tmp(addr);
9ee6e8bb 5788 }
b0109805 5789 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5790 } else if ((insn & 0x0e000000) == 0x0a000000) {
5791 /* branch link and change to thumb (blx <offset>) */
5792 int32_t offset;
5793
5794 val = (uint32_t)s->pc;
d9ba4830
PB
5795 tmp = new_tmp();
5796 tcg_gen_movi_i32(tmp, val);
5797 store_reg(s, 14, tmp);
9ee6e8bb
PB
5798 /* Sign-extend the 24-bit offset */
5799 offset = (((int32_t)insn) << 8) >> 8;
5800 /* offset * 4 + bit24 * 2 + (thumb bit) */
5801 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5802 /* pipeline offset */
5803 val += 4;
d9ba4830 5804 gen_bx_im(s, val);
9ee6e8bb
PB
5805 return;
5806 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5807 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5808 /* iWMMXt register transfer. */
5809 if (env->cp15.c15_cpar & (1 << 1))
5810 if (!disas_iwmmxt_insn(env, s, insn))
5811 return;
5812 }
5813 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5814 /* Coprocessor double register transfer. */
5815 } else if ((insn & 0x0f000010) == 0x0e000010) {
5816 /* Additional coprocessor register transfer. */
7997d92f 5817 } else if ((insn & 0x0ff10020) == 0x01000000) {
9ee6e8bb
PB
5818 uint32_t mask;
5819 uint32_t val;
5820 /* cps (privileged) */
5821 if (IS_USER(s))
5822 return;
5823 mask = val = 0;
5824 if (insn & (1 << 19)) {
5825 if (insn & (1 << 8))
5826 mask |= CPSR_A;
5827 if (insn & (1 << 7))
5828 mask |= CPSR_I;
5829 if (insn & (1 << 6))
5830 mask |= CPSR_F;
5831 if (insn & (1 << 18))
5832 val |= mask;
5833 }
7997d92f 5834 if (insn & (1 << 17)) {
9ee6e8bb
PB
5835 mask |= CPSR_M;
5836 val |= (insn & 0x1f);
5837 }
5838 if (mask) {
5839 gen_op_movl_T0_im(val);
5840 gen_set_psr_T0(s, mask, 0);
5841 }
5842 return;
5843 }
5844 goto illegal_op;
5845 }
5846 if (cond != 0xe) {
5847 /* if not always execute, we generate a conditional jump to
5848 next instruction */
5849 s->condlabel = gen_new_label();
d9ba4830 5850 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5851 s->condjmp = 1;
5852 }
5853 if ((insn & 0x0f900000) == 0x03000000) {
5854 if ((insn & (1 << 21)) == 0) {
5855 ARCH(6T2);
5856 rd = (insn >> 12) & 0xf;
5857 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5858 if ((insn & (1 << 22)) == 0) {
5859 /* MOVW */
5e3f878a
PB
5860 tmp = new_tmp();
5861 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5862 } else {
5863 /* MOVT */
5e3f878a 5864 tmp = load_reg(s, rd);
86831435 5865 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5866 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5867 }
5e3f878a 5868 store_reg(s, rd, tmp);
9ee6e8bb
PB
5869 } else {
5870 if (((insn >> 12) & 0xf) != 0xf)
5871 goto illegal_op;
5872 if (((insn >> 16) & 0xf) == 0) {
5873 gen_nop_hint(s, insn & 0xff);
5874 } else {
5875 /* CPSR = immediate */
5876 val = insn & 0xff;
5877 shift = ((insn >> 8) & 0xf) * 2;
5878 if (shift)
5879 val = (val >> shift) | (val << (32 - shift));
5880 gen_op_movl_T0_im(val);
5881 i = ((insn & (1 << 22)) != 0);
5882 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5883 goto illegal_op;
5884 }
5885 }
5886 } else if ((insn & 0x0f900000) == 0x01000000
5887 && (insn & 0x00000090) != 0x00000090) {
5888 /* miscellaneous instructions */
5889 op1 = (insn >> 21) & 3;
5890 sh = (insn >> 4) & 0xf;
5891 rm = insn & 0xf;
5892 switch (sh) {
5893 case 0x0: /* move program status register */
5894 if (op1 & 1) {
5895 /* PSR = reg */
5896 gen_movl_T0_reg(s, rm);
5897 i = ((op1 & 2) != 0);
5898 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5899 goto illegal_op;
5900 } else {
5901 /* reg = PSR */
5902 rd = (insn >> 12) & 0xf;
5903 if (op1 & 2) {
5904 if (IS_USER(s))
5905 goto illegal_op;
d9ba4830 5906 tmp = load_cpu_field(spsr);
9ee6e8bb 5907 } else {
d9ba4830
PB
5908 tmp = new_tmp();
5909 gen_helper_cpsr_read(tmp);
9ee6e8bb 5910 }
d9ba4830 5911 store_reg(s, rd, tmp);
9ee6e8bb
PB
5912 }
5913 break;
5914 case 0x1:
5915 if (op1 == 1) {
5916 /* branch/exchange thumb (bx). */
d9ba4830
PB
5917 tmp = load_reg(s, rm);
5918 gen_bx(s, tmp);
9ee6e8bb
PB
5919 } else if (op1 == 3) {
5920 /* clz */
5921 rd = (insn >> 12) & 0xf;
1497c961
PB
5922 tmp = load_reg(s, rm);
5923 gen_helper_clz(tmp, tmp);
5924 store_reg(s, rd, tmp);
9ee6e8bb
PB
5925 } else {
5926 goto illegal_op;
5927 }
5928 break;
5929 case 0x2:
5930 if (op1 == 1) {
5931 ARCH(5J); /* bxj */
5932 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5933 tmp = load_reg(s, rm);
5934 gen_bx(s, tmp);
9ee6e8bb
PB
5935 } else {
5936 goto illegal_op;
5937 }
5938 break;
5939 case 0x3:
5940 if (op1 != 1)
5941 goto illegal_op;
5942
5943 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5944 tmp = load_reg(s, rm);
5945 tmp2 = new_tmp();
5946 tcg_gen_movi_i32(tmp2, s->pc);
5947 store_reg(s, 14, tmp2);
5948 gen_bx(s, tmp);
9ee6e8bb
PB
5949 break;
5950 case 0x5: /* saturating add/subtract */
5951 rd = (insn >> 12) & 0xf;
5952 rn = (insn >> 16) & 0xf;
b40d0353 5953 tmp = load_reg(s, rm);
5e3f878a 5954 tmp2 = load_reg(s, rn);
9ee6e8bb 5955 if (op1 & 2)
5e3f878a 5956 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 5957 if (op1 & 1)
5e3f878a 5958 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 5959 else
5e3f878a
PB
5960 gen_helper_add_saturate(tmp, tmp, tmp2);
5961 dead_tmp(tmp2);
5962 store_reg(s, rd, tmp);
9ee6e8bb
PB
5963 break;
5964 case 7: /* bkpt */
5965 gen_set_condexec(s);
5e3f878a 5966 gen_set_pc_im(s->pc - 4);
d9ba4830 5967 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5968 s->is_jmp = DISAS_JUMP;
5969 break;
5970 case 0x8: /* signed multiply */
5971 case 0xa:
5972 case 0xc:
5973 case 0xe:
5974 rs = (insn >> 8) & 0xf;
5975 rn = (insn >> 12) & 0xf;
5976 rd = (insn >> 16) & 0xf;
5977 if (op1 == 1) {
5978 /* (32 * 16) >> 16 */
5e3f878a
PB
5979 tmp = load_reg(s, rm);
5980 tmp2 = load_reg(s, rs);
9ee6e8bb 5981 if (sh & 4)
5e3f878a 5982 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 5983 else
5e3f878a 5984 gen_sxth(tmp2);
a7812ae4
PB
5985 tmp64 = gen_muls_i64_i32(tmp, tmp2);
5986 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 5987 tmp = new_tmp();
a7812ae4 5988 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 5989 if ((sh & 2) == 0) {
5e3f878a
PB
5990 tmp2 = load_reg(s, rn);
5991 gen_helper_add_setq(tmp, tmp, tmp2);
5992 dead_tmp(tmp2);
9ee6e8bb 5993 }
5e3f878a 5994 store_reg(s, rd, tmp);
9ee6e8bb
PB
5995 } else {
5996 /* 16 * 16 */
5e3f878a
PB
5997 tmp = load_reg(s, rm);
5998 tmp2 = load_reg(s, rs);
5999 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6000 dead_tmp(tmp2);
9ee6e8bb 6001 if (op1 == 2) {
a7812ae4
PB
6002 tmp64 = tcg_temp_new_i64();
6003 tcg_gen_ext_i32_i64(tmp64, tmp);
22478e79 6004 dead_tmp(tmp);
a7812ae4
PB
6005 gen_addq(s, tmp64, rn, rd);
6006 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6007 } else {
6008 if (op1 == 0) {
5e3f878a
PB
6009 tmp2 = load_reg(s, rn);
6010 gen_helper_add_setq(tmp, tmp, tmp2);
6011 dead_tmp(tmp2);
9ee6e8bb 6012 }
5e3f878a 6013 store_reg(s, rd, tmp);
9ee6e8bb
PB
6014 }
6015 }
6016 break;
6017 default:
6018 goto illegal_op;
6019 }
6020 } else if (((insn & 0x0e000000) == 0 &&
6021 (insn & 0x00000090) != 0x90) ||
6022 ((insn & 0x0e000000) == (1 << 25))) {
6023 int set_cc, logic_cc, shiftop;
6024
6025 op1 = (insn >> 21) & 0xf;
6026 set_cc = (insn >> 20) & 1;
6027 logic_cc = table_logic_cc[op1] & set_cc;
6028
6029 /* data processing instruction */
6030 if (insn & (1 << 25)) {
6031 /* immediate operand */
6032 val = insn & 0xff;
6033 shift = ((insn >> 8) & 0xf) * 2;
6034 if (shift)
6035 val = (val >> shift) | (val << (32 - shift));
6036 gen_op_movl_T1_im(val);
6037 if (logic_cc && shift)
b26eefb6 6038 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6039 } else {
6040 /* register */
6041 rm = (insn) & 0xf;
6042 gen_movl_T1_reg(s, rm);
6043 shiftop = (insn >> 5) & 3;
6044 if (!(insn & (1 << 4))) {
6045 shift = (insn >> 7) & 0x1f;
9a119ff6 6046 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6047 } else {
6048 rs = (insn >> 8) & 0xf;
8984bd2e
PB
6049 tmp = load_reg(s, rs);
6050 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
9ee6e8bb
PB
6051 }
6052 }
6053 if (op1 != 0x0f && op1 != 0x0d) {
6054 rn = (insn >> 16) & 0xf;
6055 gen_movl_T0_reg(s, rn);
6056 }
6057 rd = (insn >> 12) & 0xf;
6058 switch(op1) {
6059 case 0x00:
6060 gen_op_andl_T0_T1();
6061 gen_movl_reg_T0(s, rd);
6062 if (logic_cc)
6063 gen_op_logic_T0_cc();
6064 break;
6065 case 0x01:
6066 gen_op_xorl_T0_T1();
6067 gen_movl_reg_T0(s, rd);
6068 if (logic_cc)
6069 gen_op_logic_T0_cc();
6070 break;
6071 case 0x02:
6072 if (set_cc && rd == 15) {
6073 /* SUBS r15, ... is used for exception return. */
6074 if (IS_USER(s))
6075 goto illegal_op;
6076 gen_op_subl_T0_T1_cc();
6077 gen_exception_return(s);
6078 } else {
6079 if (set_cc)
6080 gen_op_subl_T0_T1_cc();
6081 else
6082 gen_op_subl_T0_T1();
6083 gen_movl_reg_T0(s, rd);
6084 }
6085 break;
6086 case 0x03:
6087 if (set_cc)
6088 gen_op_rsbl_T0_T1_cc();
6089 else
6090 gen_op_rsbl_T0_T1();
6091 gen_movl_reg_T0(s, rd);
6092 break;
6093 case 0x04:
6094 if (set_cc)
6095 gen_op_addl_T0_T1_cc();
6096 else
6097 gen_op_addl_T0_T1();
6098 gen_movl_reg_T0(s, rd);
6099 break;
6100 case 0x05:
6101 if (set_cc)
6102 gen_op_adcl_T0_T1_cc();
6103 else
b26eefb6 6104 gen_adc_T0_T1();
9ee6e8bb
PB
6105 gen_movl_reg_T0(s, rd);
6106 break;
6107 case 0x06:
6108 if (set_cc)
6109 gen_op_sbcl_T0_T1_cc();
6110 else
3670669c 6111 gen_sbc_T0_T1();
9ee6e8bb
PB
6112 gen_movl_reg_T0(s, rd);
6113 break;
6114 case 0x07:
6115 if (set_cc)
6116 gen_op_rscl_T0_T1_cc();
6117 else
3670669c 6118 gen_rsc_T0_T1();
9ee6e8bb
PB
6119 gen_movl_reg_T0(s, rd);
6120 break;
6121 case 0x08:
6122 if (set_cc) {
6123 gen_op_andl_T0_T1();
6124 gen_op_logic_T0_cc();
6125 }
6126 break;
6127 case 0x09:
6128 if (set_cc) {
6129 gen_op_xorl_T0_T1();
6130 gen_op_logic_T0_cc();
6131 }
6132 break;
6133 case 0x0a:
6134 if (set_cc) {
6135 gen_op_subl_T0_T1_cc();
6136 }
6137 break;
6138 case 0x0b:
6139 if (set_cc) {
6140 gen_op_addl_T0_T1_cc();
6141 }
6142 break;
6143 case 0x0c:
6144 gen_op_orl_T0_T1();
6145 gen_movl_reg_T0(s, rd);
6146 if (logic_cc)
6147 gen_op_logic_T0_cc();
6148 break;
6149 case 0x0d:
6150 if (logic_cc && rd == 15) {
6151 /* MOVS r15, ... is used for exception return. */
6152 if (IS_USER(s))
6153 goto illegal_op;
6154 gen_op_movl_T0_T1();
6155 gen_exception_return(s);
6156 } else {
6157 gen_movl_reg_T1(s, rd);
6158 if (logic_cc)
6159 gen_op_logic_T1_cc();
6160 }
6161 break;
6162 case 0x0e:
6163 gen_op_bicl_T0_T1();
6164 gen_movl_reg_T0(s, rd);
6165 if (logic_cc)
6166 gen_op_logic_T0_cc();
6167 break;
6168 default:
6169 case 0x0f:
6170 gen_op_notl_T1();
6171 gen_movl_reg_T1(s, rd);
6172 if (logic_cc)
6173 gen_op_logic_T1_cc();
6174 break;
6175 }
6176 } else {
6177 /* other instructions */
6178 op1 = (insn >> 24) & 0xf;
6179 switch(op1) {
6180 case 0x0:
6181 case 0x1:
6182 /* multiplies, extra load/stores */
6183 sh = (insn >> 5) & 3;
6184 if (sh == 0) {
6185 if (op1 == 0x0) {
6186 rd = (insn >> 16) & 0xf;
6187 rn = (insn >> 12) & 0xf;
6188 rs = (insn >> 8) & 0xf;
6189 rm = (insn) & 0xf;
6190 op1 = (insn >> 20) & 0xf;
6191 switch (op1) {
6192 case 0: case 1: case 2: case 3: case 6:
6193 /* 32 bit mul */
5e3f878a
PB
6194 tmp = load_reg(s, rs);
6195 tmp2 = load_reg(s, rm);
6196 tcg_gen_mul_i32(tmp, tmp, tmp2);
6197 dead_tmp(tmp2);
9ee6e8bb
PB
6198 if (insn & (1 << 22)) {
6199 /* Subtract (mls) */
6200 ARCH(6T2);
5e3f878a
PB
6201 tmp2 = load_reg(s, rn);
6202 tcg_gen_sub_i32(tmp, tmp2, tmp);
6203 dead_tmp(tmp2);
9ee6e8bb
PB
6204 } else if (insn & (1 << 21)) {
6205 /* Add */
5e3f878a
PB
6206 tmp2 = load_reg(s, rn);
6207 tcg_gen_add_i32(tmp, tmp, tmp2);
6208 dead_tmp(tmp2);
9ee6e8bb
PB
6209 }
6210 if (insn & (1 << 20))
5e3f878a
PB
6211 gen_logic_CC(tmp);
6212 store_reg(s, rd, tmp);
9ee6e8bb
PB
6213 break;
6214 default:
6215 /* 64 bit mul */
5e3f878a
PB
6216 tmp = load_reg(s, rs);
6217 tmp2 = load_reg(s, rm);
9ee6e8bb 6218 if (insn & (1 << 22))
a7812ae4 6219 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6220 else
a7812ae4 6221 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6222 if (insn & (1 << 21)) /* mult accumulate */
a7812ae4 6223 gen_addq(s, tmp64, rn, rd);
9ee6e8bb
PB
6224 if (!(insn & (1 << 23))) { /* double accumulate */
6225 ARCH(6);
a7812ae4
PB
6226 gen_addq_lo(s, tmp64, rn);
6227 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
6228 }
6229 if (insn & (1 << 20))
a7812ae4
PB
6230 gen_logicq_cc(tmp64);
6231 gen_storeq_reg(s, rn, rd, tmp64);
9ee6e8bb
PB
6232 break;
6233 }
6234 } else {
6235 rn = (insn >> 16) & 0xf;
6236 rd = (insn >> 12) & 0xf;
6237 if (insn & (1 << 23)) {
6238 /* load/store exclusive */
86753403
PB
6239 op1 = (insn >> 21) & 0x3;
6240 if (op1)
a47f43d2 6241 ARCH(6K);
86753403
PB
6242 else
6243 ARCH(6);
9ee6e8bb 6244 gen_movl_T1_reg(s, rn);
72f1c62f 6245 addr = cpu_T[1];
9ee6e8bb 6246 if (insn & (1 << 20)) {
8f8e3aa4 6247 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
86753403
PB
6248 switch (op1) {
6249 case 0: /* ldrex */
6250 tmp = gen_ld32(addr, IS_USER(s));
6251 break;
6252 case 1: /* ldrexd */
6253 tmp = gen_ld32(addr, IS_USER(s));
6254 store_reg(s, rd, tmp);
6255 tcg_gen_addi_i32(addr, addr, 4);
6256 tmp = gen_ld32(addr, IS_USER(s));
6257 rd++;
6258 break;
6259 case 2: /* ldrexb */
6260 tmp = gen_ld8u(addr, IS_USER(s));
6261 break;
6262 case 3: /* ldrexh */
6263 tmp = gen_ld16u(addr, IS_USER(s));
6264 break;
6265 default:
6266 abort();
6267 }
8f8e3aa4 6268 store_reg(s, rd, tmp);
9ee6e8bb 6269 } else {
8f8e3aa4 6270 int label = gen_new_label();
9ee6e8bb 6271 rm = insn & 0xf;
8f8e3aa4 6272 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6273 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6274 0, label);
8f8e3aa4 6275 tmp = load_reg(s,rm);
86753403
PB
6276 switch (op1) {
6277 case 0: /* strex */
6278 gen_st32(tmp, addr, IS_USER(s));
6279 break;
6280 case 1: /* strexd */
6281 gen_st32(tmp, addr, IS_USER(s));
6282 tcg_gen_addi_i32(addr, addr, 4);
6283 tmp = load_reg(s, rm + 1);
6284 gen_st32(tmp, addr, IS_USER(s));
6285 break;
6286 case 2: /* strexb */
6287 gen_st8(tmp, addr, IS_USER(s));
6288 break;
6289 case 3: /* strexh */
6290 gen_st16(tmp, addr, IS_USER(s));
6291 break;
6292 default:
6293 abort();
6294 }
2637a3be 6295 gen_set_label(label);
8f8e3aa4 6296 gen_movl_reg_T0(s, rd);
9ee6e8bb 6297 }
9ee6e8bb
PB
6298 } else {
6299 /* SWP instruction */
6300 rm = (insn) & 0xf;
6301
8984bd2e
PB
6302 /* ??? This is not really atomic. However we know
6303 we never have multiple CPUs running in parallel,
6304 so it is good enough. */
6305 addr = load_reg(s, rn);
6306 tmp = load_reg(s, rm);
9ee6e8bb 6307 if (insn & (1 << 22)) {
8984bd2e
PB
6308 tmp2 = gen_ld8u(addr, IS_USER(s));
6309 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6310 } else {
8984bd2e
PB
6311 tmp2 = gen_ld32(addr, IS_USER(s));
6312 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6313 }
8984bd2e
PB
6314 dead_tmp(addr);
6315 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6316 }
6317 }
6318 } else {
6319 int address_offset;
6320 int load;
6321 /* Misc load/store */
6322 rn = (insn >> 16) & 0xf;
6323 rd = (insn >> 12) & 0xf;
b0109805 6324 addr = load_reg(s, rn);
9ee6e8bb 6325 if (insn & (1 << 24))
b0109805 6326 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6327 address_offset = 0;
6328 if (insn & (1 << 20)) {
6329 /* load */
6330 switch(sh) {
6331 case 1:
b0109805 6332 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6333 break;
6334 case 2:
b0109805 6335 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6336 break;
6337 default:
6338 case 3:
b0109805 6339 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6340 break;
6341 }
6342 load = 1;
6343 } else if (sh & 2) {
6344 /* doubleword */
6345 if (sh & 1) {
6346 /* store */
b0109805
PB
6347 tmp = load_reg(s, rd);
6348 gen_st32(tmp, addr, IS_USER(s));
6349 tcg_gen_addi_i32(addr, addr, 4);
6350 tmp = load_reg(s, rd + 1);
6351 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6352 load = 0;
6353 } else {
6354 /* load */
b0109805
PB
6355 tmp = gen_ld32(addr, IS_USER(s));
6356 store_reg(s, rd, tmp);
6357 tcg_gen_addi_i32(addr, addr, 4);
6358 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6359 rd++;
6360 load = 1;
6361 }
6362 address_offset = -4;
6363 } else {
6364 /* store */
b0109805
PB
6365 tmp = load_reg(s, rd);
6366 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6367 load = 0;
6368 }
6369 /* Perform base writeback before the loaded value to
6370 ensure correct behavior with overlapping index registers.
6371 ldrd with base writeback is is undefined if the
6372 destination and index registers overlap. */
6373 if (!(insn & (1 << 24))) {
b0109805
PB
6374 gen_add_datah_offset(s, insn, address_offset, addr);
6375 store_reg(s, rn, addr);
9ee6e8bb
PB
6376 } else if (insn & (1 << 21)) {
6377 if (address_offset)
b0109805
PB
6378 tcg_gen_addi_i32(addr, addr, address_offset);
6379 store_reg(s, rn, addr);
6380 } else {
6381 dead_tmp(addr);
9ee6e8bb
PB
6382 }
6383 if (load) {
6384 /* Complete the load. */
b0109805 6385 store_reg(s, rd, tmp);
9ee6e8bb
PB
6386 }
6387 }
6388 break;
6389 case 0x4:
6390 case 0x5:
6391 goto do_ldst;
6392 case 0x6:
6393 case 0x7:
6394 if (insn & (1 << 4)) {
6395 ARCH(6);
6396 /* Armv6 Media instructions. */
6397 rm = insn & 0xf;
6398 rn = (insn >> 16) & 0xf;
2c0262af 6399 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6400 rs = (insn >> 8) & 0xf;
6401 switch ((insn >> 23) & 3) {
6402 case 0: /* Parallel add/subtract. */
6403 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6404 tmp = load_reg(s, rn);
6405 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6406 sh = (insn >> 5) & 7;
6407 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6408 goto illegal_op;
6ddbc6e4
PB
6409 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6410 dead_tmp(tmp2);
6411 store_reg(s, rd, tmp);
9ee6e8bb
PB
6412 break;
6413 case 1:
6414 if ((insn & 0x00700020) == 0) {
6c95676b 6415 /* Halfword pack. */
3670669c
PB
6416 tmp = load_reg(s, rn);
6417 tmp2 = load_reg(s, rm);
9ee6e8bb 6418 shift = (insn >> 7) & 0x1f;
3670669c
PB
6419 if (insn & (1 << 6)) {
6420 /* pkhtb */
22478e79
AZ
6421 if (shift == 0)
6422 shift = 31;
6423 tcg_gen_sari_i32(tmp2, tmp2, shift);
3670669c 6424 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6425 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6426 } else {
6427 /* pkhbt */
22478e79
AZ
6428 if (shift)
6429 tcg_gen_shli_i32(tmp2, tmp2, shift);
86831435 6430 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6431 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6432 }
6433 tcg_gen_or_i32(tmp, tmp, tmp2);
22478e79 6434 dead_tmp(tmp2);
3670669c 6435 store_reg(s, rd, tmp);
9ee6e8bb
PB
6436 } else if ((insn & 0x00200020) == 0x00200000) {
6437 /* [us]sat */
6ddbc6e4 6438 tmp = load_reg(s, rm);
9ee6e8bb
PB
6439 shift = (insn >> 7) & 0x1f;
6440 if (insn & (1 << 6)) {
6441 if (shift == 0)
6442 shift = 31;
6ddbc6e4 6443 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6444 } else {
6ddbc6e4 6445 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6446 }
6447 sh = (insn >> 16) & 0x1f;
6448 if (sh != 0) {
6449 if (insn & (1 << 22))
6ddbc6e4 6450 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6451 else
6ddbc6e4 6452 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6453 }
6ddbc6e4 6454 store_reg(s, rd, tmp);
9ee6e8bb
PB
6455 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6456 /* [us]sat16 */
6ddbc6e4 6457 tmp = load_reg(s, rm);
9ee6e8bb
PB
6458 sh = (insn >> 16) & 0x1f;
6459 if (sh != 0) {
6460 if (insn & (1 << 22))
6ddbc6e4 6461 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6462 else
6ddbc6e4 6463 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6464 }
6ddbc6e4 6465 store_reg(s, rd, tmp);
9ee6e8bb
PB
6466 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6467 /* Select bytes. */
6ddbc6e4
PB
6468 tmp = load_reg(s, rn);
6469 tmp2 = load_reg(s, rm);
6470 tmp3 = new_tmp();
6471 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6472 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6473 dead_tmp(tmp3);
6474 dead_tmp(tmp2);
6475 store_reg(s, rd, tmp);
9ee6e8bb 6476 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6477 tmp = load_reg(s, rm);
9ee6e8bb
PB
6478 shift = (insn >> 10) & 3;
6479 /* ??? In many cases it's not neccessary to do a
6480 rotate, a shift is sufficient. */
6481 if (shift != 0)
5e3f878a 6482 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6483 op1 = (insn >> 20) & 7;
6484 switch (op1) {
5e3f878a
PB
6485 case 0: gen_sxtb16(tmp); break;
6486 case 2: gen_sxtb(tmp); break;
6487 case 3: gen_sxth(tmp); break;
6488 case 4: gen_uxtb16(tmp); break;
6489 case 6: gen_uxtb(tmp); break;
6490 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6491 default: goto illegal_op;
6492 }
6493 if (rn != 15) {
5e3f878a 6494 tmp2 = load_reg(s, rn);
9ee6e8bb 6495 if ((op1 & 3) == 0) {
5e3f878a 6496 gen_add16(tmp, tmp2);
9ee6e8bb 6497 } else {
5e3f878a
PB
6498 tcg_gen_add_i32(tmp, tmp, tmp2);
6499 dead_tmp(tmp2);
9ee6e8bb
PB
6500 }
6501 }
6c95676b 6502 store_reg(s, rd, tmp);
9ee6e8bb
PB
6503 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6504 /* rev */
b0109805 6505 tmp = load_reg(s, rm);
9ee6e8bb
PB
6506 if (insn & (1 << 22)) {
6507 if (insn & (1 << 7)) {
b0109805 6508 gen_revsh(tmp);
9ee6e8bb
PB
6509 } else {
6510 ARCH(6T2);
b0109805 6511 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6512 }
6513 } else {
6514 if (insn & (1 << 7))
b0109805 6515 gen_rev16(tmp);
9ee6e8bb 6516 else
b0109805 6517 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6518 }
b0109805 6519 store_reg(s, rd, tmp);
9ee6e8bb
PB
6520 } else {
6521 goto illegal_op;
6522 }
6523 break;
6524 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6525 tmp = load_reg(s, rm);
6526 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6527 if (insn & (1 << 20)) {
6528 /* Signed multiply most significant [accumulate]. */
a7812ae4 6529 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6530 if (insn & (1 << 5))
a7812ae4
PB
6531 tcg_gen_addi_i64(tmp64, tmp64, 0x80000000u);
6532 tcg_gen_shri_i64(tmp64, tmp64, 32);
5e3f878a 6533 tmp = new_tmp();
a7812ae4 6534 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb 6535 if (rn != 15) {
5e3f878a 6536 tmp2 = load_reg(s, rn);
9ee6e8bb 6537 if (insn & (1 << 6)) {
5e3f878a 6538 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6539 } else {
5e3f878a 6540 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6541 }
5e3f878a 6542 dead_tmp(tmp2);
9ee6e8bb 6543 }
5e3f878a 6544 store_reg(s, rd, tmp);
9ee6e8bb
PB
6545 } else {
6546 if (insn & (1 << 5))
5e3f878a
PB
6547 gen_swap_half(tmp2);
6548 gen_smul_dual(tmp, tmp2);
6549 /* This addition cannot overflow. */
6550 if (insn & (1 << 6)) {
6551 tcg_gen_sub_i32(tmp, tmp, tmp2);
6552 } else {
6553 tcg_gen_add_i32(tmp, tmp, tmp2);
6554 }
6555 dead_tmp(tmp2);
9ee6e8bb 6556 if (insn & (1 << 22)) {
5e3f878a 6557 /* smlald, smlsld */
a7812ae4
PB
6558 tmp64 = tcg_temp_new_i64();
6559 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 6560 dead_tmp(tmp);
a7812ae4
PB
6561 gen_addq(s, tmp64, rd, rn);
6562 gen_storeq_reg(s, rd, rn, tmp64);
9ee6e8bb 6563 } else {
5e3f878a 6564 /* smuad, smusd, smlad, smlsd */
22478e79 6565 if (rd != 15)
9ee6e8bb 6566 {
22478e79 6567 tmp2 = load_reg(s, rd);
5e3f878a
PB
6568 gen_helper_add_setq(tmp, tmp, tmp2);
6569 dead_tmp(tmp2);
9ee6e8bb 6570 }
22478e79 6571 store_reg(s, rn, tmp);
9ee6e8bb
PB
6572 }
6573 }
6574 break;
6575 case 3:
6576 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6577 switch (op1) {
6578 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6579 ARCH(6);
6580 tmp = load_reg(s, rm);
6581 tmp2 = load_reg(s, rs);
6582 gen_helper_usad8(tmp, tmp, tmp2);
6583 dead_tmp(tmp2);
9ee6e8bb 6584 if (rn != 15) {
6ddbc6e4
PB
6585 tmp2 = load_reg(s, rn);
6586 tcg_gen_add_i32(tmp, tmp, tmp2);
6587 dead_tmp(tmp2);
9ee6e8bb 6588 }
6ddbc6e4 6589 store_reg(s, rd, tmp);
9ee6e8bb
PB
6590 break;
6591 case 0x20: case 0x24: case 0x28: case 0x2c:
6592 /* Bitfield insert/clear. */
6593 ARCH(6T2);
6594 shift = (insn >> 7) & 0x1f;
6595 i = (insn >> 16) & 0x1f;
6596 i = i + 1 - shift;
6597 if (rm == 15) {
5e3f878a
PB
6598 tmp = new_tmp();
6599 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6600 } else {
5e3f878a 6601 tmp = load_reg(s, rm);
9ee6e8bb
PB
6602 }
6603 if (i != 32) {
5e3f878a 6604 tmp2 = load_reg(s, rd);
8f8e3aa4 6605 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6606 dead_tmp(tmp2);
9ee6e8bb 6607 }
5e3f878a 6608 store_reg(s, rd, tmp);
9ee6e8bb
PB
6609 break;
6610 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6611 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5e3f878a 6612 tmp = load_reg(s, rm);
9ee6e8bb
PB
6613 shift = (insn >> 7) & 0x1f;
6614 i = ((insn >> 16) & 0x1f) + 1;
6615 if (shift + i > 32)
6616 goto illegal_op;
6617 if (i < 32) {
6618 if (op1 & 0x20) {
5e3f878a 6619 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6620 } else {
5e3f878a 6621 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6622 }
6623 }
5e3f878a 6624 store_reg(s, rd, tmp);
9ee6e8bb
PB
6625 break;
6626 default:
6627 goto illegal_op;
6628 }
6629 break;
6630 }
6631 break;
6632 }
6633 do_ldst:
6634 /* Check for undefined extension instructions
6635 * per the ARM Bible IE:
6636 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6637 */
6638 sh = (0xf << 20) | (0xf << 4);
6639 if (op1 == 0x7 && ((insn & sh) == sh))
6640 {
6641 goto illegal_op;
6642 }
6643 /* load/store byte/word */
6644 rn = (insn >> 16) & 0xf;
6645 rd = (insn >> 12) & 0xf;
b0109805 6646 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6647 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6648 if (insn & (1 << 24))
b0109805 6649 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6650 if (insn & (1 << 20)) {
6651 /* load */
6652 s->is_mem = 1;
9ee6e8bb 6653 if (insn & (1 << 22)) {
b0109805 6654 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6655 } else {
b0109805 6656 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6657 }
9ee6e8bb
PB
6658 } else {
6659 /* store */
b0109805 6660 tmp = load_reg(s, rd);
9ee6e8bb 6661 if (insn & (1 << 22))
b0109805 6662 gen_st8(tmp, tmp2, i);
9ee6e8bb 6663 else
b0109805 6664 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6665 }
6666 if (!(insn & (1 << 24))) {
b0109805
PB
6667 gen_add_data_offset(s, insn, tmp2);
6668 store_reg(s, rn, tmp2);
6669 } else if (insn & (1 << 21)) {
6670 store_reg(s, rn, tmp2);
6671 } else {
6672 dead_tmp(tmp2);
9ee6e8bb
PB
6673 }
6674 if (insn & (1 << 20)) {
6675 /* Complete the load. */
6676 if (rd == 15)
b0109805 6677 gen_bx(s, tmp);
9ee6e8bb 6678 else
b0109805 6679 store_reg(s, rd, tmp);
9ee6e8bb
PB
6680 }
6681 break;
6682 case 0x08:
6683 case 0x09:
6684 {
6685 int j, n, user, loaded_base;
b0109805 6686 TCGv loaded_var;
9ee6e8bb
PB
6687 /* load/store multiple words */
6688 /* XXX: store correct base if write back */
6689 user = 0;
6690 if (insn & (1 << 22)) {
6691 if (IS_USER(s))
6692 goto illegal_op; /* only usable in supervisor mode */
6693
6694 if ((insn & (1 << 15)) == 0)
6695 user = 1;
6696 }
6697 rn = (insn >> 16) & 0xf;
b0109805 6698 addr = load_reg(s, rn);
9ee6e8bb
PB
6699
6700 /* compute total size */
6701 loaded_base = 0;
a50f5b91 6702 TCGV_UNUSED(loaded_var);
9ee6e8bb
PB
6703 n = 0;
6704 for(i=0;i<16;i++) {
6705 if (insn & (1 << i))
6706 n++;
6707 }
6708 /* XXX: test invalid n == 0 case ? */
6709 if (insn & (1 << 23)) {
6710 if (insn & (1 << 24)) {
6711 /* pre increment */
b0109805 6712 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6713 } else {
6714 /* post increment */
6715 }
6716 } else {
6717 if (insn & (1 << 24)) {
6718 /* pre decrement */
b0109805 6719 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6720 } else {
6721 /* post decrement */
6722 if (n != 1)
b0109805 6723 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6724 }
6725 }
6726 j = 0;
6727 for(i=0;i<16;i++) {
6728 if (insn & (1 << i)) {
6729 if (insn & (1 << 20)) {
6730 /* load */
b0109805 6731 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6732 if (i == 15) {
b0109805 6733 gen_bx(s, tmp);
9ee6e8bb 6734 } else if (user) {
b0109805
PB
6735 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6736 dead_tmp(tmp);
9ee6e8bb 6737 } else if (i == rn) {
b0109805 6738 loaded_var = tmp;
9ee6e8bb
PB
6739 loaded_base = 1;
6740 } else {
b0109805 6741 store_reg(s, i, tmp);
9ee6e8bb
PB
6742 }
6743 } else {
6744 /* store */
6745 if (i == 15) {
6746 /* special case: r15 = PC + 8 */
6747 val = (long)s->pc + 4;
b0109805
PB
6748 tmp = new_tmp();
6749 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6750 } else if (user) {
b0109805
PB
6751 tmp = new_tmp();
6752 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6753 } else {
b0109805 6754 tmp = load_reg(s, i);
9ee6e8bb 6755 }
b0109805 6756 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6757 }
6758 j++;
6759 /* no need to add after the last transfer */
6760 if (j != n)
b0109805 6761 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6762 }
6763 }
6764 if (insn & (1 << 21)) {
6765 /* write back */
6766 if (insn & (1 << 23)) {
6767 if (insn & (1 << 24)) {
6768 /* pre increment */
6769 } else {
6770 /* post increment */
b0109805 6771 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6772 }
6773 } else {
6774 if (insn & (1 << 24)) {
6775 /* pre decrement */
6776 if (n != 1)
b0109805 6777 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6778 } else {
6779 /* post decrement */
b0109805 6780 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6781 }
6782 }
b0109805
PB
6783 store_reg(s, rn, addr);
6784 } else {
6785 dead_tmp(addr);
9ee6e8bb
PB
6786 }
6787 if (loaded_base) {
b0109805 6788 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6789 }
6790 if ((insn & (1 << 22)) && !user) {
6791 /* Restore CPSR from SPSR. */
d9ba4830
PB
6792 tmp = load_cpu_field(spsr);
6793 gen_set_cpsr(tmp, 0xffffffff);
6794 dead_tmp(tmp);
9ee6e8bb
PB
6795 s->is_jmp = DISAS_UPDATE;
6796 }
6797 }
6798 break;
6799 case 0xa:
6800 case 0xb:
6801 {
6802 int32_t offset;
6803
6804 /* branch (and link) */
6805 val = (int32_t)s->pc;
6806 if (insn & (1 << 24)) {
5e3f878a
PB
6807 tmp = new_tmp();
6808 tcg_gen_movi_i32(tmp, val);
6809 store_reg(s, 14, tmp);
9ee6e8bb
PB
6810 }
6811 offset = (((int32_t)insn << 8) >> 8);
6812 val += (offset << 2) + 4;
6813 gen_jmp(s, val);
6814 }
6815 break;
6816 case 0xc:
6817 case 0xd:
6818 case 0xe:
6819 /* Coprocessor. */
6820 if (disas_coproc_insn(env, s, insn))
6821 goto illegal_op;
6822 break;
6823 case 0xf:
6824 /* swi */
5e3f878a 6825 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6826 s->is_jmp = DISAS_SWI;
6827 break;
6828 default:
6829 illegal_op:
6830 gen_set_condexec(s);
5e3f878a 6831 gen_set_pc_im(s->pc - 4);
d9ba4830 6832 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6833 s->is_jmp = DISAS_JUMP;
6834 break;
6835 }
6836 }
6837}
6838
/* Predicate: nonzero when OP is one of the Thumb-2 logical data-processing
   opcodes (the 0..7 group: and, bic, orr, orn, eor, ...).  These set the
   condition codes from the result instead of using a _cc op variant.  */
static int
thumb2_logic_op(int op)
{
    return op <= 7;
}
6845
6846/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6847 then set condition code flags based on the result of the operation.
6848 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6849 to the high bit of T1.
6850 Returns zero if the opcode is valid. */
6851
6852static int
6853gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6854{
6855 int logic_cc;
6856
6857 logic_cc = 0;
6858 switch (op) {
6859 case 0: /* and */
6860 gen_op_andl_T0_T1();
6861 logic_cc = conds;
6862 break;
6863 case 1: /* bic */
6864 gen_op_bicl_T0_T1();
6865 logic_cc = conds;
6866 break;
6867 case 2: /* orr */
6868 gen_op_orl_T0_T1();
6869 logic_cc = conds;
6870 break;
6871 case 3: /* orn */
6872 gen_op_notl_T1();
6873 gen_op_orl_T0_T1();
6874 logic_cc = conds;
6875 break;
6876 case 4: /* eor */
6877 gen_op_xorl_T0_T1();
6878 logic_cc = conds;
6879 break;
6880 case 8: /* add */
6881 if (conds)
6882 gen_op_addl_T0_T1_cc();
6883 else
6884 gen_op_addl_T0_T1();
6885 break;
6886 case 10: /* adc */
6887 if (conds)
6888 gen_op_adcl_T0_T1_cc();
6889 else
b26eefb6 6890 gen_adc_T0_T1();
9ee6e8bb
PB
6891 break;
6892 case 11: /* sbc */
6893 if (conds)
6894 gen_op_sbcl_T0_T1_cc();
6895 else
3670669c 6896 gen_sbc_T0_T1();
9ee6e8bb
PB
6897 break;
6898 case 13: /* sub */
6899 if (conds)
6900 gen_op_subl_T0_T1_cc();
6901 else
6902 gen_op_subl_T0_T1();
6903 break;
6904 case 14: /* rsb */
6905 if (conds)
6906 gen_op_rsbl_T0_T1_cc();
6907 else
6908 gen_op_rsbl_T0_T1();
6909 break;
6910 default: /* 5, 6, 7, 9, 12, 15. */
6911 return 1;
6912 }
6913 if (logic_cc) {
6914 gen_op_logic_T0_cc();
6915 if (shifter_out)
b26eefb6 6916 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6917 }
6918 return 0;
6919}
6920
6921/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6922 is not legal. */
6923static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6924{
b0109805 6925 uint32_t insn, imm, shift, offset;
9ee6e8bb 6926 uint32_t rd, rn, rm, rs;
b26eefb6 6927 TCGv tmp;
6ddbc6e4
PB
6928 TCGv tmp2;
6929 TCGv tmp3;
b0109805 6930 TCGv addr;
a7812ae4 6931 TCGv_i64 tmp64;
9ee6e8bb
PB
6932 int op;
6933 int shiftop;
6934 int conds;
6935 int logic_cc;
6936
6937 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6938 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 6939 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
6940 16-bit instructions to get correct prefetch abort behavior. */
6941 insn = insn_hw1;
6942 if ((insn & (1 << 12)) == 0) {
6943 /* Second half of blx. */
6944 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6945 tmp = load_reg(s, 14);
6946 tcg_gen_addi_i32(tmp, tmp, offset);
6947 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 6948
d9ba4830 6949 tmp2 = new_tmp();
b0109805 6950 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6951 store_reg(s, 14, tmp2);
6952 gen_bx(s, tmp);
9ee6e8bb
PB
6953 return 0;
6954 }
6955 if (insn & (1 << 11)) {
6956 /* Second half of bl. */
6957 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 6958 tmp = load_reg(s, 14);
6a0d8a1d 6959 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 6960
d9ba4830 6961 tmp2 = new_tmp();
b0109805 6962 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6963 store_reg(s, 14, tmp2);
6964 gen_bx(s, tmp);
9ee6e8bb
PB
6965 return 0;
6966 }
6967 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6968 /* Instruction spans a page boundary. Implement it as two
 6969 16-bit instructions in case the second half causes a
6970 prefetch abort. */
6971 offset = ((int32_t)insn << 21) >> 9;
b0109805 6972 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
6973 gen_movl_reg_T0(s, 14);
6974 return 0;
6975 }
6976 /* Fall through to 32-bit decode. */
6977 }
6978
6979 insn = lduw_code(s->pc);
6980 s->pc += 2;
6981 insn |= (uint32_t)insn_hw1 << 16;
6982
6983 if ((insn & 0xf800e800) != 0xf000e800) {
6984 ARCH(6T2);
6985 }
6986
6987 rn = (insn >> 16) & 0xf;
6988 rs = (insn >> 12) & 0xf;
6989 rd = (insn >> 8) & 0xf;
6990 rm = insn & 0xf;
6991 switch ((insn >> 25) & 0xf) {
6992 case 0: case 1: case 2: case 3:
6993 /* 16-bit instructions. Should never happen. */
6994 abort();
6995 case 4:
6996 if (insn & (1 << 22)) {
6997 /* Other load/store, table branch. */
6998 if (insn & 0x01200000) {
6999 /* Load/store doubleword. */
7000 if (rn == 15) {
b0109805
PB
7001 addr = new_tmp();
7002 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 7003 } else {
b0109805 7004 addr = load_reg(s, rn);
9ee6e8bb
PB
7005 }
7006 offset = (insn & 0xff) * 4;
7007 if ((insn & (1 << 23)) == 0)
7008 offset = -offset;
7009 if (insn & (1 << 24)) {
b0109805 7010 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
7011 offset = 0;
7012 }
7013 if (insn & (1 << 20)) {
7014 /* ldrd */
b0109805
PB
7015 tmp = gen_ld32(addr, IS_USER(s));
7016 store_reg(s, rs, tmp);
7017 tcg_gen_addi_i32(addr, addr, 4);
7018 tmp = gen_ld32(addr, IS_USER(s));
7019 store_reg(s, rd, tmp);
9ee6e8bb
PB
7020 } else {
7021 /* strd */
b0109805
PB
7022 tmp = load_reg(s, rs);
7023 gen_st32(tmp, addr, IS_USER(s));
7024 tcg_gen_addi_i32(addr, addr, 4);
7025 tmp = load_reg(s, rd);
7026 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7027 }
7028 if (insn & (1 << 21)) {
7029 /* Base writeback. */
7030 if (rn == 15)
7031 goto illegal_op;
b0109805
PB
7032 tcg_gen_addi_i32(addr, addr, offset - 4);
7033 store_reg(s, rn, addr);
7034 } else {
7035 dead_tmp(addr);
9ee6e8bb
PB
7036 }
7037 } else if ((insn & (1 << 23)) == 0) {
7038 /* Load/store exclusive word. */
2c0262af 7039 gen_movl_T1_reg(s, rn);
72f1c62f 7040 addr = cpu_T[1];
2c0262af 7041 if (insn & (1 << 20)) {
8f8e3aa4
PB
7042 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
7043 tmp = gen_ld32(addr, IS_USER(s));
7044 store_reg(s, rd, tmp);
9ee6e8bb 7045 } else {
8f8e3aa4
PB
7046 int label = gen_new_label();
7047 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
7048 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
7049 0, label);
8f8e3aa4
PB
7050 tmp = load_reg(s, rs);
7051 gen_st32(tmp, cpu_T[1], IS_USER(s));
7052 gen_set_label(label);
7053 gen_movl_reg_T0(s, rd);
9ee6e8bb 7054 }
9ee6e8bb
PB
7055 } else if ((insn & (1 << 6)) == 0) {
7056 /* Table Branch. */
7057 if (rn == 15) {
b0109805
PB
7058 addr = new_tmp();
7059 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7060 } else {
b0109805 7061 addr = load_reg(s, rn);
9ee6e8bb 7062 }
b26eefb6 7063 tmp = load_reg(s, rm);
b0109805 7064 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7065 if (insn & (1 << 4)) {
7066 /* tbh */
b0109805 7067 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7068 dead_tmp(tmp);
b0109805 7069 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7070 } else { /* tbb */
b26eefb6 7071 dead_tmp(tmp);
b0109805 7072 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7073 }
b0109805
PB
7074 dead_tmp(addr);
7075 tcg_gen_shli_i32(tmp, tmp, 1);
7076 tcg_gen_addi_i32(tmp, tmp, s->pc);
7077 store_reg(s, 15, tmp);
9ee6e8bb
PB
7078 } else {
7079 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7080 /* ??? These are not really atomic. However we know
7081 we never have multiple CPUs running in parallel,
7082 so it is good enough. */
9ee6e8bb 7083 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7084 /* Must use a global reg for the address because we have
7085 a conditional branch in the store instruction. */
9ee6e8bb 7086 gen_movl_T1_reg(s, rn);
8f8e3aa4 7087 addr = cpu_T[1];
9ee6e8bb 7088 if (insn & (1 << 20)) {
8f8e3aa4 7089 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7090 switch (op) {
7091 case 0:
8f8e3aa4 7092 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7093 break;
2c0262af 7094 case 1:
8f8e3aa4 7095 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7096 break;
9ee6e8bb 7097 case 3:
8f8e3aa4
PB
7098 tmp = gen_ld32(addr, IS_USER(s));
7099 tcg_gen_addi_i32(addr, addr, 4);
7100 tmp2 = gen_ld32(addr, IS_USER(s));
7101 store_reg(s, rd, tmp2);
2c0262af
FB
7102 break;
7103 default:
9ee6e8bb
PB
7104 goto illegal_op;
7105 }
8f8e3aa4 7106 store_reg(s, rs, tmp);
9ee6e8bb 7107 } else {
8f8e3aa4
PB
7108 int label = gen_new_label();
7109 /* Must use a global that is not killed by the branch. */
7110 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7111 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7112 tmp = load_reg(s, rs);
9ee6e8bb
PB
7113 switch (op) {
7114 case 0:
8f8e3aa4 7115 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7116 break;
7117 case 1:
8f8e3aa4 7118 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7119 break;
2c0262af 7120 case 3:
8f8e3aa4
PB
7121 gen_st32(tmp, addr, IS_USER(s));
7122 tcg_gen_addi_i32(addr, addr, 4);
7123 tmp = load_reg(s, rd);
7124 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7125 break;
9ee6e8bb
PB
7126 default:
7127 goto illegal_op;
2c0262af 7128 }
8f8e3aa4 7129 gen_set_label(label);
9ee6e8bb
PB
7130 gen_movl_reg_T0(s, rm);
7131 }
7132 }
7133 } else {
7134 /* Load/store multiple, RFE, SRS. */
7135 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7136 /* Not available in user mode. */
b0109805 7137 if (IS_USER(s))
9ee6e8bb
PB
7138 goto illegal_op;
7139 if (insn & (1 << 20)) {
7140 /* rfe */
b0109805
PB
7141 addr = load_reg(s, rn);
7142 if ((insn & (1 << 24)) == 0)
7143 tcg_gen_addi_i32(addr, addr, -8);
7144 /* Load PC into tmp and CPSR into tmp2. */
7145 tmp = gen_ld32(addr, 0);
7146 tcg_gen_addi_i32(addr, addr, 4);
7147 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7148 if (insn & (1 << 21)) {
7149 /* Base writeback. */
b0109805
PB
7150 if (insn & (1 << 24)) {
7151 tcg_gen_addi_i32(addr, addr, 4);
7152 } else {
7153 tcg_gen_addi_i32(addr, addr, -4);
7154 }
7155 store_reg(s, rn, addr);
7156 } else {
7157 dead_tmp(addr);
9ee6e8bb 7158 }
b0109805 7159 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7160 } else {
7161 /* srs */
7162 op = (insn & 0x1f);
7163 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7164 addr = load_reg(s, 13);
9ee6e8bb 7165 } else {
b0109805
PB
7166 addr = new_tmp();
7167 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7168 }
7169 if ((insn & (1 << 24)) == 0) {
b0109805 7170 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7171 }
b0109805
PB
7172 tmp = load_reg(s, 14);
7173 gen_st32(tmp, addr, 0);
7174 tcg_gen_addi_i32(addr, addr, 4);
7175 tmp = new_tmp();
7176 gen_helper_cpsr_read(tmp);
7177 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7178 if (insn & (1 << 21)) {
7179 if ((insn & (1 << 24)) == 0) {
b0109805 7180 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7181 } else {
b0109805 7182 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7183 }
7184 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7185 store_reg(s, 13, addr);
9ee6e8bb 7186 } else {
b0109805
PB
7187 gen_helper_set_r13_banked(cpu_env,
7188 tcg_const_i32(op), addr);
9ee6e8bb 7189 }
b0109805
PB
7190 } else {
7191 dead_tmp(addr);
9ee6e8bb
PB
7192 }
7193 }
7194 } else {
7195 int i;
7196 /* Load/store multiple. */
b0109805 7197 addr = load_reg(s, rn);
9ee6e8bb
PB
7198 offset = 0;
7199 for (i = 0; i < 16; i++) {
7200 if (insn & (1 << i))
7201 offset += 4;
7202 }
7203 if (insn & (1 << 24)) {
b0109805 7204 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7205 }
7206
7207 for (i = 0; i < 16; i++) {
7208 if ((insn & (1 << i)) == 0)
7209 continue;
7210 if (insn & (1 << 20)) {
7211 /* Load. */
b0109805 7212 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7213 if (i == 15) {
b0109805 7214 gen_bx(s, tmp);
9ee6e8bb 7215 } else {
b0109805 7216 store_reg(s, i, tmp);
9ee6e8bb
PB
7217 }
7218 } else {
7219 /* Store. */
b0109805
PB
7220 tmp = load_reg(s, i);
7221 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7222 }
b0109805 7223 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7224 }
7225 if (insn & (1 << 21)) {
7226 /* Base register writeback. */
7227 if (insn & (1 << 24)) {
b0109805 7228 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7229 }
7230 /* Fault if writeback register is in register list. */
7231 if (insn & (1 << rn))
7232 goto illegal_op;
b0109805
PB
7233 store_reg(s, rn, addr);
7234 } else {
7235 dead_tmp(addr);
9ee6e8bb
PB
7236 }
7237 }
7238 }
7239 break;
7240 case 5: /* Data processing register constant shift. */
7241 if (rn == 15)
7242 gen_op_movl_T0_im(0);
7243 else
7244 gen_movl_T0_reg(s, rn);
7245 gen_movl_T1_reg(s, rm);
7246 op = (insn >> 21) & 0xf;
7247 shiftop = (insn >> 4) & 3;
7248 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7249 conds = (insn & (1 << 20)) != 0;
7250 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7251 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7252 if (gen_thumb2_data_op(s, op, conds, 0))
7253 goto illegal_op;
7254 if (rd != 15)
7255 gen_movl_reg_T0(s, rd);
7256 break;
7257 case 13: /* Misc data processing. */
7258 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7259 if (op < 4 && (insn & 0xf000) != 0xf000)
7260 goto illegal_op;
7261 switch (op) {
7262 case 0: /* Register controlled shift. */
8984bd2e
PB
7263 tmp = load_reg(s, rn);
7264 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7265 if ((insn & 0x70) != 0)
7266 goto illegal_op;
7267 op = (insn >> 21) & 3;
8984bd2e
PB
7268 logic_cc = (insn & (1 << 20)) != 0;
7269 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7270 if (logic_cc)
7271 gen_logic_CC(tmp);
7272 store_reg(s, rd, tmp);
9ee6e8bb
PB
7273 break;
7274 case 1: /* Sign/zero extend. */
5e3f878a 7275 tmp = load_reg(s, rm);
9ee6e8bb
PB
7276 shift = (insn >> 4) & 3;
 7277 /* ??? In many cases it's not necessary to do a
7278 rotate, a shift is sufficient. */
7279 if (shift != 0)
5e3f878a 7280 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7281 op = (insn >> 20) & 7;
7282 switch (op) {
5e3f878a
PB
7283 case 0: gen_sxth(tmp); break;
7284 case 1: gen_uxth(tmp); break;
7285 case 2: gen_sxtb16(tmp); break;
7286 case 3: gen_uxtb16(tmp); break;
7287 case 4: gen_sxtb(tmp); break;
7288 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7289 default: goto illegal_op;
7290 }
7291 if (rn != 15) {
5e3f878a 7292 tmp2 = load_reg(s, rn);
9ee6e8bb 7293 if ((op >> 1) == 1) {
5e3f878a 7294 gen_add16(tmp, tmp2);
9ee6e8bb 7295 } else {
5e3f878a
PB
7296 tcg_gen_add_i32(tmp, tmp, tmp2);
7297 dead_tmp(tmp2);
9ee6e8bb
PB
7298 }
7299 }
5e3f878a 7300 store_reg(s, rd, tmp);
9ee6e8bb
PB
7301 break;
7302 case 2: /* SIMD add/subtract. */
7303 op = (insn >> 20) & 7;
7304 shift = (insn >> 4) & 7;
7305 if ((op & 3) == 3 || (shift & 3) == 3)
7306 goto illegal_op;
6ddbc6e4
PB
7307 tmp = load_reg(s, rn);
7308 tmp2 = load_reg(s, rm);
7309 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7310 dead_tmp(tmp2);
7311 store_reg(s, rd, tmp);
9ee6e8bb
PB
7312 break;
7313 case 3: /* Other data processing. */
7314 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7315 if (op < 4) {
7316 /* Saturating add/subtract. */
d9ba4830
PB
7317 tmp = load_reg(s, rn);
7318 tmp2 = load_reg(s, rm);
9ee6e8bb 7319 if (op & 2)
d9ba4830 7320 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7321 if (op & 1)
d9ba4830 7322 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7323 else
d9ba4830
PB
7324 gen_helper_add_saturate(tmp, tmp, tmp2);
7325 dead_tmp(tmp2);
9ee6e8bb 7326 } else {
d9ba4830 7327 tmp = load_reg(s, rn);
9ee6e8bb
PB
7328 switch (op) {
7329 case 0x0a: /* rbit */
d9ba4830 7330 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7331 break;
7332 case 0x08: /* rev */
d9ba4830 7333 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
7334 break;
7335 case 0x09: /* rev16 */
d9ba4830 7336 gen_rev16(tmp);
9ee6e8bb
PB
7337 break;
7338 case 0x0b: /* revsh */
d9ba4830 7339 gen_revsh(tmp);
9ee6e8bb
PB
7340 break;
7341 case 0x10: /* sel */
d9ba4830 7342 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7343 tmp3 = new_tmp();
7344 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7345 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7346 dead_tmp(tmp3);
d9ba4830 7347 dead_tmp(tmp2);
9ee6e8bb
PB
7348 break;
7349 case 0x18: /* clz */
d9ba4830 7350 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7351 break;
7352 default:
7353 goto illegal_op;
7354 }
7355 }
d9ba4830 7356 store_reg(s, rd, tmp);
9ee6e8bb
PB
7357 break;
7358 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7359 op = (insn >> 4) & 0xf;
d9ba4830
PB
7360 tmp = load_reg(s, rn);
7361 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7362 switch ((insn >> 20) & 7) {
7363 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7364 tcg_gen_mul_i32(tmp, tmp, tmp2);
7365 dead_tmp(tmp2);
9ee6e8bb 7366 if (rs != 15) {
d9ba4830 7367 tmp2 = load_reg(s, rs);
9ee6e8bb 7368 if (op)
d9ba4830 7369 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7370 else
d9ba4830
PB
7371 tcg_gen_add_i32(tmp, tmp, tmp2);
7372 dead_tmp(tmp2);
9ee6e8bb 7373 }
9ee6e8bb
PB
7374 break;
7375 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7376 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7377 dead_tmp(tmp2);
9ee6e8bb 7378 if (rs != 15) {
d9ba4830
PB
7379 tmp2 = load_reg(s, rs);
7380 gen_helper_add_setq(tmp, tmp, tmp2);
7381 dead_tmp(tmp2);
9ee6e8bb 7382 }
9ee6e8bb
PB
7383 break;
7384 case 2: /* Dual multiply add. */
7385 case 4: /* Dual multiply subtract. */
7386 if (op)
d9ba4830
PB
7387 gen_swap_half(tmp2);
7388 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7389 /* This addition cannot overflow. */
7390 if (insn & (1 << 22)) {
d9ba4830 7391 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7392 } else {
d9ba4830 7393 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7394 }
d9ba4830 7395 dead_tmp(tmp2);
9ee6e8bb
PB
7396 if (rs != 15)
7397 {
d9ba4830
PB
7398 tmp2 = load_reg(s, rs);
7399 gen_helper_add_setq(tmp, tmp, tmp2);
7400 dead_tmp(tmp2);
9ee6e8bb 7401 }
9ee6e8bb
PB
7402 break;
7403 case 3: /* 32 * 16 -> 32msb */
7404 if (op)
d9ba4830 7405 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7406 else
d9ba4830 7407 gen_sxth(tmp2);
a7812ae4
PB
7408 tmp64 = gen_muls_i64_i32(tmp, tmp2);
7409 tcg_gen_shri_i64(tmp64, tmp64, 16);
5e3f878a 7410 tmp = new_tmp();
a7812ae4 7411 tcg_gen_trunc_i64_i32(tmp, tmp64);
9ee6e8bb
PB
7412 if (rs != 15)
7413 {
d9ba4830
PB
7414 tmp2 = load_reg(s, rs);
7415 gen_helper_add_setq(tmp, tmp, tmp2);
7416 dead_tmp(tmp2);
9ee6e8bb 7417 }
9ee6e8bb
PB
7418 break;
7419 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7420 gen_imull(tmp, tmp2);
7421 if (insn & (1 << 5)) {
7422 gen_roundqd(tmp, tmp2);
7423 dead_tmp(tmp2);
7424 } else {
7425 dead_tmp(tmp);
7426 tmp = tmp2;
7427 }
9ee6e8bb 7428 if (rs != 15) {
d9ba4830 7429 tmp2 = load_reg(s, rs);
9ee6e8bb 7430 if (insn & (1 << 21)) {
d9ba4830 7431 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7432 } else {
d9ba4830 7433 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7434 }
d9ba4830 7435 dead_tmp(tmp2);
2c0262af 7436 }
9ee6e8bb
PB
7437 break;
7438 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7439 gen_helper_usad8(tmp, tmp, tmp2);
7440 dead_tmp(tmp2);
9ee6e8bb 7441 if (rs != 15) {
d9ba4830
PB
7442 tmp2 = load_reg(s, rs);
7443 tcg_gen_add_i32(tmp, tmp, tmp2);
7444 dead_tmp(tmp2);
5fd46862 7445 }
9ee6e8bb 7446 break;
2c0262af 7447 }
d9ba4830 7448 store_reg(s, rd, tmp);
2c0262af 7449 break;
9ee6e8bb
PB
7450 case 6: case 7: /* 64-bit multiply, Divide. */
7451 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7452 tmp = load_reg(s, rn);
7453 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7454 if ((op & 0x50) == 0x10) {
7455 /* sdiv, udiv */
7456 if (!arm_feature(env, ARM_FEATURE_DIV))
7457 goto illegal_op;
7458 if (op & 0x20)
5e3f878a 7459 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7460 else
5e3f878a
PB
7461 gen_helper_sdiv(tmp, tmp, tmp2);
7462 dead_tmp(tmp2);
7463 store_reg(s, rd, tmp);
9ee6e8bb
PB
7464 } else if ((op & 0xe) == 0xc) {
7465 /* Dual multiply accumulate long. */
7466 if (op & 1)
5e3f878a
PB
7467 gen_swap_half(tmp2);
7468 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7469 if (op & 0x10) {
5e3f878a 7470 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7471 } else {
5e3f878a 7472 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7473 }
5e3f878a 7474 dead_tmp(tmp2);
a7812ae4
PB
7475 /* BUGFIX */
7476 tmp64 = tcg_temp_new_i64();
7477 tcg_gen_ext_i32_i64(tmp64, tmp);
7478 dead_tmp(tmp);
7479 gen_addq(s, tmp64, rs, rd);
7480 gen_storeq_reg(s, rs, rd, tmp64);
2c0262af 7481 } else {
9ee6e8bb
PB
7482 if (op & 0x20) {
7483 /* Unsigned 64-bit multiply */
a7812ae4 7484 tmp64 = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7485 } else {
9ee6e8bb
PB
7486 if (op & 8) {
7487 /* smlalxy */
5e3f878a
PB
7488 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7489 dead_tmp(tmp2);
a7812ae4
PB
7490 tmp64 = tcg_temp_new_i64();
7491 tcg_gen_ext_i32_i64(tmp64, tmp);
5e3f878a 7492 dead_tmp(tmp);
9ee6e8bb
PB
7493 } else {
7494 /* Signed 64-bit multiply */
a7812ae4 7495 tmp64 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7496 }
b5ff1b31 7497 }
9ee6e8bb
PB
7498 if (op & 4) {
7499 /* umaal */
a7812ae4
PB
7500 gen_addq_lo(s, tmp64, rs);
7501 gen_addq_lo(s, tmp64, rd);
9ee6e8bb
PB
7502 } else if (op & 0x40) {
7503 /* 64-bit accumulate. */
a7812ae4 7504 gen_addq(s, tmp64, rs, rd);
9ee6e8bb 7505 }
a7812ae4 7506 gen_storeq_reg(s, rs, rd, tmp64);
5fd46862 7507 }
2c0262af 7508 break;
9ee6e8bb
PB
7509 }
7510 break;
7511 case 6: case 7: case 14: case 15:
7512 /* Coprocessor. */
7513 if (((insn >> 24) & 3) == 3) {
7514 /* Translate into the equivalent ARM encoding. */
7515 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7516 if (disas_neon_data_insn(env, s, insn))
7517 goto illegal_op;
7518 } else {
7519 if (insn & (1 << 28))
7520 goto illegal_op;
7521 if (disas_coproc_insn (env, s, insn))
7522 goto illegal_op;
7523 }
7524 break;
7525 case 8: case 9: case 10: case 11:
7526 if (insn & (1 << 15)) {
7527 /* Branches, misc control. */
7528 if (insn & 0x5000) {
7529 /* Unconditional branch. */
7530 /* signextend(hw1[10:0]) -> offset[:12]. */
7531 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7532 /* hw1[10:0] -> offset[11:1]. */
7533 offset |= (insn & 0x7ff) << 1;
7534 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7535 offset[24:22] already have the same value because of the
7536 sign extension above. */
7537 offset ^= ((~insn) & (1 << 13)) << 10;
7538 offset ^= ((~insn) & (1 << 11)) << 11;
7539
9ee6e8bb
PB
7540 if (insn & (1 << 14)) {
7541 /* Branch and link. */
b0109805 7542 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7543 gen_movl_reg_T1(s, 14);
b5ff1b31 7544 }
3b46e624 7545
b0109805 7546 offset += s->pc;
9ee6e8bb
PB
7547 if (insn & (1 << 12)) {
7548 /* b/bl */
b0109805 7549 gen_jmp(s, offset);
9ee6e8bb
PB
7550 } else {
7551 /* blx */
b0109805
PB
7552 offset &= ~(uint32_t)2;
7553 gen_bx_im(s, offset);
2c0262af 7554 }
9ee6e8bb
PB
7555 } else if (((insn >> 23) & 7) == 7) {
7556 /* Misc control */
7557 if (insn & (1 << 13))
7558 goto illegal_op;
7559
7560 if (insn & (1 << 26)) {
7561 /* Secure monitor call (v6Z) */
7562 goto illegal_op; /* not implemented. */
2c0262af 7563 } else {
9ee6e8bb
PB
7564 op = (insn >> 20) & 7;
7565 switch (op) {
7566 case 0: /* msr cpsr. */
7567 if (IS_M(env)) {
8984bd2e
PB
7568 tmp = load_reg(s, rn);
7569 addr = tcg_const_i32(insn & 0xff);
7570 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7571 gen_lookup_tb(s);
7572 break;
7573 }
7574 /* fall through */
7575 case 1: /* msr spsr. */
7576 if (IS_M(env))
7577 goto illegal_op;
7578 gen_movl_T0_reg(s, rn);
7579 if (gen_set_psr_T0(s,
7580 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7581 op == 1))
7582 goto illegal_op;
7583 break;
7584 case 2: /* cps, nop-hint. */
7585 if (((insn >> 8) & 7) == 0) {
7586 gen_nop_hint(s, insn & 0xff);
7587 }
7588 /* Implemented as NOP in user mode. */
7589 if (IS_USER(s))
7590 break;
7591 offset = 0;
7592 imm = 0;
7593 if (insn & (1 << 10)) {
7594 if (insn & (1 << 7))
7595 offset |= CPSR_A;
7596 if (insn & (1 << 6))
7597 offset |= CPSR_I;
7598 if (insn & (1 << 5))
7599 offset |= CPSR_F;
7600 if (insn & (1 << 9))
7601 imm = CPSR_A | CPSR_I | CPSR_F;
7602 }
7603 if (insn & (1 << 8)) {
7604 offset |= 0x1f;
7605 imm |= (insn & 0x1f);
7606 }
7607 if (offset) {
7608 gen_op_movl_T0_im(imm);
7609 gen_set_psr_T0(s, offset, 0);
7610 }
7611 break;
7612 case 3: /* Special control operations. */
7613 op = (insn >> 4) & 0xf;
7614 switch (op) {
7615 case 2: /* clrex */
8f8e3aa4 7616 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7617 break;
7618 case 4: /* dsb */
7619 case 5: /* dmb */
7620 case 6: /* isb */
7621 /* These execute as NOPs. */
7622 ARCH(7);
7623 break;
7624 default:
7625 goto illegal_op;
7626 }
7627 break;
7628 case 4: /* bxj */
7629 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7630 tmp = load_reg(s, rn);
7631 gen_bx(s, tmp);
9ee6e8bb
PB
7632 break;
7633 case 5: /* Exception return. */
7634 /* Unpredictable in user mode. */
7635 goto illegal_op;
7636 case 6: /* mrs cpsr. */
8984bd2e 7637 tmp = new_tmp();
9ee6e8bb 7638 if (IS_M(env)) {
8984bd2e
PB
7639 addr = tcg_const_i32(insn & 0xff);
7640 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7641 } else {
8984bd2e 7642 gen_helper_cpsr_read(tmp);
9ee6e8bb 7643 }
8984bd2e 7644 store_reg(s, rd, tmp);
9ee6e8bb
PB
7645 break;
7646 case 7: /* mrs spsr. */
7647 /* Not accessible in user mode. */
7648 if (IS_USER(s) || IS_M(env))
7649 goto illegal_op;
d9ba4830
PB
7650 tmp = load_cpu_field(spsr);
7651 store_reg(s, rd, tmp);
9ee6e8bb 7652 break;
2c0262af
FB
7653 }
7654 }
9ee6e8bb
PB
7655 } else {
7656 /* Conditional branch. */
7657 op = (insn >> 22) & 0xf;
7658 /* Generate a conditional jump to next instruction. */
7659 s->condlabel = gen_new_label();
d9ba4830 7660 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7661 s->condjmp = 1;
7662
7663 /* offset[11:1] = insn[10:0] */
7664 offset = (insn & 0x7ff) << 1;
7665 /* offset[17:12] = insn[21:16]. */
7666 offset |= (insn & 0x003f0000) >> 4;
7667 /* offset[31:20] = insn[26]. */
7668 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7669 /* offset[18] = insn[13]. */
7670 offset |= (insn & (1 << 13)) << 5;
7671 /* offset[19] = insn[11]. */
7672 offset |= (insn & (1 << 11)) << 8;
7673
7674 /* jump to the offset */
b0109805 7675 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7676 }
7677 } else {
7678 /* Data processing immediate. */
7679 if (insn & (1 << 25)) {
7680 if (insn & (1 << 24)) {
7681 if (insn & (1 << 20))
7682 goto illegal_op;
7683 /* Bitfield/Saturate. */
7684 op = (insn >> 21) & 7;
7685 imm = insn & 0x1f;
7686 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7687 if (rn == 15) {
7688 tmp = new_tmp();
7689 tcg_gen_movi_i32(tmp, 0);
7690 } else {
7691 tmp = load_reg(s, rn);
7692 }
9ee6e8bb
PB
7693 switch (op) {
7694 case 2: /* Signed bitfield extract. */
7695 imm++;
7696 if (shift + imm > 32)
7697 goto illegal_op;
7698 if (imm < 32)
6ddbc6e4 7699 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7700 break;
7701 case 6: /* Unsigned bitfield extract. */
7702 imm++;
7703 if (shift + imm > 32)
7704 goto illegal_op;
7705 if (imm < 32)
6ddbc6e4 7706 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7707 break;
7708 case 3: /* Bitfield insert/clear. */
7709 if (imm < shift)
7710 goto illegal_op;
7711 imm = imm + 1 - shift;
7712 if (imm != 32) {
6ddbc6e4 7713 tmp2 = load_reg(s, rd);
8f8e3aa4 7714 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7715 dead_tmp(tmp2);
9ee6e8bb
PB
7716 }
7717 break;
7718 case 7:
7719 goto illegal_op;
7720 default: /* Saturate. */
9ee6e8bb
PB
7721 if (shift) {
7722 if (op & 1)
6ddbc6e4 7723 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7724 else
6ddbc6e4 7725 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7726 }
6ddbc6e4 7727 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7728 if (op & 4) {
7729 /* Unsigned. */
9ee6e8bb 7730 if ((op & 1) && shift == 0)
6ddbc6e4 7731 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7732 else
6ddbc6e4 7733 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7734 } else {
9ee6e8bb 7735 /* Signed. */
9ee6e8bb 7736 if ((op & 1) && shift == 0)
6ddbc6e4 7737 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7738 else
6ddbc6e4 7739 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7740 }
9ee6e8bb 7741 break;
2c0262af 7742 }
6ddbc6e4 7743 store_reg(s, rd, tmp);
9ee6e8bb
PB
7744 } else {
7745 imm = ((insn & 0x04000000) >> 15)
7746 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7747 if (insn & (1 << 22)) {
7748 /* 16-bit immediate. */
7749 imm |= (insn >> 4) & 0xf000;
7750 if (insn & (1 << 23)) {
7751 /* movt */
5e3f878a 7752 tmp = load_reg(s, rd);
86831435 7753 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7754 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7755 } else {
9ee6e8bb 7756 /* movw */
5e3f878a
PB
7757 tmp = new_tmp();
7758 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7759 }
7760 } else {
9ee6e8bb
PB
7761 /* Add/sub 12-bit immediate. */
7762 if (rn == 15) {
b0109805 7763 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7764 if (insn & (1 << 23))
b0109805 7765 offset -= imm;
9ee6e8bb 7766 else
b0109805 7767 offset += imm;
5e3f878a
PB
7768 tmp = new_tmp();
7769 tcg_gen_movi_i32(tmp, offset);
2c0262af 7770 } else {
5e3f878a 7771 tmp = load_reg(s, rn);
9ee6e8bb 7772 if (insn & (1 << 23))
5e3f878a 7773 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7774 else
5e3f878a 7775 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7776 }
9ee6e8bb 7777 }
5e3f878a 7778 store_reg(s, rd, tmp);
191abaa2 7779 }
9ee6e8bb
PB
7780 } else {
7781 int shifter_out = 0;
7782 /* modified 12-bit immediate. */
7783 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7784 imm = (insn & 0xff);
7785 switch (shift) {
7786 case 0: /* XY */
7787 /* Nothing to do. */
7788 break;
7789 case 1: /* 00XY00XY */
7790 imm |= imm << 16;
7791 break;
7792 case 2: /* XY00XY00 */
7793 imm |= imm << 16;
7794 imm <<= 8;
7795 break;
7796 case 3: /* XYXYXYXY */
7797 imm |= imm << 16;
7798 imm |= imm << 8;
7799 break;
7800 default: /* Rotated constant. */
7801 shift = (shift << 1) | (imm >> 7);
7802 imm |= 0x80;
7803 imm = imm << (32 - shift);
7804 shifter_out = 1;
7805 break;
b5ff1b31 7806 }
9ee6e8bb
PB
7807 gen_op_movl_T1_im(imm);
7808 rn = (insn >> 16) & 0xf;
7809 if (rn == 15)
7810 gen_op_movl_T0_im(0);
7811 else
7812 gen_movl_T0_reg(s, rn);
7813 op = (insn >> 21) & 0xf;
7814 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7815 shifter_out))
7816 goto illegal_op;
7817 rd = (insn >> 8) & 0xf;
7818 if (rd != 15) {
7819 gen_movl_reg_T0(s, rd);
2c0262af 7820 }
2c0262af 7821 }
9ee6e8bb
PB
7822 }
7823 break;
7824 case 12: /* Load/store single data item. */
7825 {
7826 int postinc = 0;
7827 int writeback = 0;
b0109805 7828 int user;
9ee6e8bb
PB
7829 if ((insn & 0x01100000) == 0x01000000) {
7830 if (disas_neon_ls_insn(env, s, insn))
c1713132 7831 goto illegal_op;
9ee6e8bb
PB
7832 break;
7833 }
b0109805 7834 user = IS_USER(s);
9ee6e8bb 7835 if (rn == 15) {
b0109805 7836 addr = new_tmp();
9ee6e8bb
PB
7837 /* PC relative. */
7838 /* s->pc has already been incremented by 4. */
7839 imm = s->pc & 0xfffffffc;
7840 if (insn & (1 << 23))
7841 imm += insn & 0xfff;
7842 else
7843 imm -= insn & 0xfff;
b0109805 7844 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7845 } else {
b0109805 7846 addr = load_reg(s, rn);
9ee6e8bb
PB
7847 if (insn & (1 << 23)) {
7848 /* Positive offset. */
7849 imm = insn & 0xfff;
b0109805 7850 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7851 } else {
7852 op = (insn >> 8) & 7;
7853 imm = insn & 0xff;
7854 switch (op) {
7855 case 0: case 8: /* Shifted Register. */
7856 shift = (insn >> 4) & 0xf;
7857 if (shift > 3)
18c9b560 7858 goto illegal_op;
b26eefb6 7859 tmp = load_reg(s, rm);
9ee6e8bb 7860 if (shift)
b26eefb6 7861 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7862 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7863 dead_tmp(tmp);
9ee6e8bb
PB
7864 break;
7865 case 4: /* Negative offset. */
b0109805 7866 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7867 break;
7868 case 6: /* User privilege. */
b0109805
PB
7869 tcg_gen_addi_i32(addr, addr, imm);
7870 user = 1;
9ee6e8bb
PB
7871 break;
7872 case 1: /* Post-decrement. */
7873 imm = -imm;
7874 /* Fall through. */
7875 case 3: /* Post-increment. */
9ee6e8bb
PB
7876 postinc = 1;
7877 writeback = 1;
7878 break;
7879 case 5: /* Pre-decrement. */
7880 imm = -imm;
7881 /* Fall through. */
7882 case 7: /* Pre-increment. */
b0109805 7883 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7884 writeback = 1;
7885 break;
7886 default:
b7bcbe95 7887 goto illegal_op;
9ee6e8bb
PB
7888 }
7889 }
7890 }
7891 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7892 if (insn & (1 << 20)) {
7893 /* Load. */
7894 if (rs == 15 && op != 2) {
7895 if (op & 2)
b5ff1b31 7896 goto illegal_op;
9ee6e8bb
PB
7897 /* Memory hint. Implemented as NOP. */
7898 } else {
7899 switch (op) {
b0109805
PB
7900 case 0: tmp = gen_ld8u(addr, user); break;
7901 case 4: tmp = gen_ld8s(addr, user); break;
7902 case 1: tmp = gen_ld16u(addr, user); break;
7903 case 5: tmp = gen_ld16s(addr, user); break;
7904 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7905 default: goto illegal_op;
7906 }
7907 if (rs == 15) {
b0109805 7908 gen_bx(s, tmp);
9ee6e8bb 7909 } else {
b0109805 7910 store_reg(s, rs, tmp);
9ee6e8bb
PB
7911 }
7912 }
7913 } else {
7914 /* Store. */
7915 if (rs == 15)
b7bcbe95 7916 goto illegal_op;
b0109805 7917 tmp = load_reg(s, rs);
9ee6e8bb 7918 switch (op) {
b0109805
PB
7919 case 0: gen_st8(tmp, addr, user); break;
7920 case 1: gen_st16(tmp, addr, user); break;
7921 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7922 default: goto illegal_op;
b7bcbe95 7923 }
2c0262af 7924 }
9ee6e8bb 7925 if (postinc)
b0109805
PB
7926 tcg_gen_addi_i32(addr, addr, imm);
7927 if (writeback) {
7928 store_reg(s, rn, addr);
7929 } else {
7930 dead_tmp(addr);
7931 }
9ee6e8bb
PB
7932 }
7933 break;
7934 default:
7935 goto illegal_op;
2c0262af 7936 }
9ee6e8bb
PB
7937 return 0;
7938illegal_op:
7939 return 1;
2c0262af
FB
7940}
7941
/* Decode and translate a single 16-bit Thumb instruction.  Reads the
   insn at s->pc via lduw_code() and advances s->pc by 2.  32-bit
   Thumb-2 encodings (top halfword 0b111x with bits[12:11] != 00) are
   forwarded to disas_thumb2_insn().  If the insn sits inside an IT
   block (s->condexec_mask != 0) a conditional skip to s->condlabel is
   emitted first, and flag-setting is suppressed for the data-processing
   forms below.  */
static void disas_thumb_insn(CPUState *env, DisasContext *s)
{
    uint32_t val, insn, op, rm, rn, rd, shift, cond;
    int32_t offset;
    int i;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;

    if (s->condexec_mask) {
        /* Inside an IT block: branch over the insn if the condition
           fails.  cond ^ 1 is the inverse condition code.  */
        cond = s->condexec_cond;
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    insn = lduw_code(s->pc);
    s->pc += 2;

    switch (insn >> 12) {
    case 0: case 1:
        rd = insn & 7;
        op = (insn >> 11) & 3;
        if (op == 3) {
            /* add/subtract */
            rn = (insn >> 3) & 7;
            gen_movl_T0_reg(s, rn);
            if (insn & (1 << 10)) {
                /* immediate */
                gen_op_movl_T1_im((insn >> 6) & 7);
            } else {
                /* reg */
                rm = (insn >> 6) & 7;
                gen_movl_T1_reg(s, rm);
            }
            if (insn & (1 << 9)) {
                if (s->condexec_mask)
                    gen_op_subl_T0_T1();
                else
                    gen_op_subl_T0_T1_cc();
            } else {
                if (s->condexec_mask)
                    gen_op_addl_T0_T1();
                else
                    gen_op_addl_T0_T1_cc();
            }
            gen_movl_reg_T0(s, rd);
        } else {
            /* shift immediate */
            rm = (insn >> 3) & 7;
            shift = (insn >> 6) & 0x1f;
            tmp = load_reg(s, rm);
            gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
            if (!s->condexec_mask)
                gen_logic_CC(tmp);
            store_reg(s, rd, tmp);
        }
        break;
    case 2: case 3:
        /* arithmetic large immediate */
        op = (insn >> 11) & 3;
        rd = (insn >> 8) & 0x7;
        if (op == 0) {
            gen_op_movl_T0_im(insn & 0xff);
        } else {
            gen_movl_T0_reg(s, rd);
            gen_op_movl_T1_im(insn & 0xff);
        }
        switch (op) {
        case 0: /* mov */
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 1: /* cmp */
            gen_op_subl_T0_T1_cc();
            break;
        case 2: /* add */
            if (s->condexec_mask)
                gen_op_addl_T0_T1();
            else
                gen_op_addl_T0_T1_cc();
            break;
        case 3: /* sub */
            if (s->condexec_mask)
                gen_op_subl_T0_T1();
            else
                gen_op_subl_T0_T1_cc();
            break;
        }
        if (op != 1) /* cmp only sets flags */
            gen_movl_reg_T0(s, rd);
        break;
    case 4:
        if (insn & (1 << 11)) {
            rd = (insn >> 8) & 7;
            /* load pc-relative. Bit 1 of PC is ignored. */
            val = s->pc + 2 + ((insn & 0xff) * 4);
            val &= ~(uint32_t)2;
            addr = new_tmp();
            tcg_gen_movi_i32(addr, val);
            tmp = gen_ld32(addr, IS_USER(s));
            dead_tmp(addr);
            store_reg(s, rd, tmp);
            break;
        }
        if (insn & (1 << 10)) {
            /* data processing extended or blx */
            rd = (insn & 7) | ((insn >> 4) & 8);
            rm = (insn >> 3) & 0xf;
            op = (insn >> 8) & 3;
            switch (op) {
            case 0: /* add */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_addl_T0_T1();
                gen_movl_reg_T0(s, rd);
                break;
            case 1: /* cmp */
                gen_movl_T0_reg(s, rd);
                gen_movl_T1_reg(s, rm);
                gen_op_subl_T0_T1_cc();
                break;
            case 2: /* mov/cpy */
                gen_movl_T0_reg(s, rm);
                gen_movl_reg_T0(s, rd);
                break;
            case 3:/* branch [and link] exchange thumb register */
                tmp = load_reg(s, rm);
                if (insn & (1 << 7)) {
                    /* blx: LR := return address with Thumb bit set.  */
                    val = (uint32_t)s->pc | 1;
                    tmp2 = new_tmp();
                    tcg_gen_movi_i32(tmp2, val);
                    store_reg(s, 14, tmp2);
                }
                gen_bx(s, tmp);
                break;
            }
            break;
        }

        /* data processing register */
        rd = insn & 7;
        rm = (insn >> 3) & 7;
        op = (insn >> 6) & 0xf;
        if (op == 2 || op == 3 || op == 4 || op == 7) {
            /* the shift/rotate ops want the operands backwards */
            val = rm;
            rm = rd;
            rd = val;
            val = 1;
        } else {
            val = 0;
        }

        if (op == 9) /* neg */
            gen_op_movl_T0_im(0);
        else if (op != 0xf) /* mvn doesn't read its first operand */
            gen_movl_T0_reg(s, rd);

        gen_movl_T1_reg(s, rm);
        switch (op) {
        case 0x0: /* and */
            gen_op_andl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0x1: /* eor */
            gen_op_xorl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0x2: /* lsl */
            if (s->condexec_mask) {
                gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x3: /* lsr */
            if (s->condexec_mask) {
                gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x4: /* asr */
            if (s->condexec_mask) {
                gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x5: /* adc */
            if (s->condexec_mask)
                gen_adc_T0_T1();
            else
                gen_op_adcl_T0_T1_cc();
            break;
        case 0x6: /* sbc */
            if (s->condexec_mask)
                gen_sbc_T0_T1();
            else
                gen_op_sbcl_T0_T1_cc();
            break;
        case 0x7: /* ror */
            if (s->condexec_mask) {
                gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
            } else {
                gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
                gen_op_logic_T1_cc();
            }
            break;
        case 0x8: /* tst */
            gen_op_andl_T0_T1();
            gen_op_logic_T0_cc();
            rd = 16; /* rd = 16 marks "flags only, no writeback" below */
            break;
        case 0x9: /* neg */
            if (s->condexec_mask)
                tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
            else
                gen_op_subl_T0_T1_cc();
            break;
        case 0xa: /* cmp */
            gen_op_subl_T0_T1_cc();
            rd = 16;
            break;
        case 0xb: /* cmn */
            gen_op_addl_T0_T1_cc();
            rd = 16;
            break;
        case 0xc: /* orr */
            gen_op_orl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xd: /* mul */
            gen_op_mull_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xe: /* bic */
            gen_op_bicl_T0_T1();
            if (!s->condexec_mask)
                gen_op_logic_T0_cc();
            break;
        case 0xf: /* mvn */
            gen_op_notl_T1();
            if (!s->condexec_mask)
                gen_op_logic_T1_cc();
            val = 1; /* result lives in T1, not T0 */
            rm = rd;
            break;
        }
        if (rd != 16) {
            if (val)
                gen_movl_reg_T1(s, rm);
            else
                gen_movl_reg_T0(s, rd);
        }
        break;

    case 5:
        /* load/store register offset. */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        rm = (insn >> 6) & 7;
        op = (insn >> 9) & 7;
        addr = load_reg(s, rn);
        tmp = load_reg(s, rm);
        tcg_gen_add_i32(addr, addr, tmp);
        dead_tmp(tmp);

        if (op < 3) /* store */
            tmp = load_reg(s, rd);

        switch (op) {
        case 0: /* str */
            gen_st32(tmp, addr, IS_USER(s));
            break;
        case 1: /* strh */
            gen_st16(tmp, addr, IS_USER(s));
            break;
        case 2: /* strb */
            gen_st8(tmp, addr, IS_USER(s));
            break;
        case 3: /* ldrsb */
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        case 4: /* ldr */
            tmp = gen_ld32(addr, IS_USER(s));
            break;
        case 5: /* ldrh */
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 6: /* ldrb */
            tmp = gen_ld8u(addr, IS_USER(s));
            break;
        case 7: /* ldrsh */
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
        if (op >= 3) /* load */
            store_reg(s, rd, tmp);
        dead_tmp(addr);
        break;

    case 6:
        /* load/store word immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 4) & 0x7c; /* imm5 scaled by 4 */
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 7:
        /* load/store byte immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 6) & 0x1f;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld8u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st8(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 8:
        /* load/store halfword immediate offset */
        rd = insn & 7;
        rn = (insn >> 3) & 7;
        addr = load_reg(s, rn);
        val = (insn >> 5) & 0x3e; /* imm5 scaled by 2 */
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld16u(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st16(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 9:
        /* load/store from stack */
        rd = (insn >> 8) & 7;
        addr = load_reg(s, 13);
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(addr, addr, val);

        if (insn & (1 << 11)) {
            /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            store_reg(s, rd, tmp);
        } else {
            /* store */
            tmp = load_reg(s, rd);
            gen_st32(tmp, addr, IS_USER(s));
        }
        dead_tmp(addr);
        break;

    case 10:
        /* add to high reg */
        rd = (insn >> 8) & 7;
        if (insn & (1 << 11)) {
            /* SP */
            tmp = load_reg(s, 13);
        } else {
            /* PC. bit 1 is ignored. */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
        }
        val = (insn & 0xff) * 4;
        tcg_gen_addi_i32(tmp, tmp, val);
        store_reg(s, rd, tmp);
        break;

    case 11:
        /* misc */
        op = (insn >> 8) & 0xf;
        switch (op) {
        case 0:
            /* adjust stack pointer */
            tmp = load_reg(s, 13);
            val = (insn & 0x7f) * 4;
            if (insn & (1 << 7))
                val = -(int32_t)val;
            tcg_gen_addi_i32(tmp, tmp, val);
            store_reg(s, 13, tmp);
            break;

        case 2: /* sign/zero extend. */
            ARCH(6);
            rd = insn & 7;
            rm = (insn >> 3) & 7;
            tmp = load_reg(s, rm);
            switch ((insn >> 6) & 3) {
            case 0: gen_sxth(tmp); break;
            case 1: gen_sxtb(tmp); break;
            case 2: gen_uxth(tmp); break;
            case 3: gen_uxtb(tmp); break;
            }
            store_reg(s, rd, tmp);
            break;
        case 4: case 5: case 0xc: case 0xd:
            /* push/pop */
            addr = load_reg(s, 13);
            if (insn & (1 << 8))
                offset = 4; /* extra slot for LR (push) / PC (pop) */
            else
                offset = 0;
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i))
                    offset += 4;
            }
            if ((insn & (1 << 11)) == 0) {
                /* push: pre-decrement SP by the total size.  */
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            for (i = 0; i < 8; i++) {
                if (insn & (1 << i)) {
                    if (insn & (1 << 11)) {
                        /* pop */
                        tmp = gen_ld32(addr, IS_USER(s));
                        store_reg(s, i, tmp);
                    } else {
                        /* push */
                        tmp = load_reg(s, i);
                        gen_st32(tmp, addr, IS_USER(s));
                    }
                    /* advance to the next address. */
                    tcg_gen_addi_i32(addr, addr, 4);
                }
            }
            TCGV_UNUSED(tmp);
            if (insn & (1 << 8)) {
                if (insn & (1 << 11)) {
                    /* pop pc */
                    tmp = gen_ld32(addr, IS_USER(s));
                    /* don't set the pc until the rest of the instruction
                       has completed */
                } else {
                    /* push lr */
                    tmp = load_reg(s, 14);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                tcg_gen_addi_i32(addr, addr, 4);
            }
            if ((insn & (1 << 11)) == 0) {
                tcg_gen_addi_i32(addr, addr, -offset);
            }
            /* write back the new stack pointer */
            store_reg(s, 13, addr);
            /* set the new PC value */
            if ((insn & 0x0900) == 0x0900)
                gen_bx(s, tmp);
            break;

        case 1: case 3: case 9: case 11: /* czb */
            rm = insn & 7;
            tmp = load_reg(s, rm);
            s->condlabel = gen_new_label();
            s->condjmp = 1;
            if (insn & (1 << 11))
                tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
            else
                tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
            dead_tmp(tmp);
            offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
            val = (uint32_t)s->pc + 2;
            val += offset;
            gen_jmp(s, val);
            break;

        case 15: /* IT, nop-hint. */
            if ((insn & 0xf) == 0) {
                gen_nop_hint(s, (insn >> 4) & 0xf);
                break;
            }
            /* If Then. */
            s->condexec_cond = (insn >> 4) & 0xe;
            s->condexec_mask = insn & 0x1f;
            /* No actual code generated for this insn, just setup state. */
            break;

        case 0xe: /* bkpt */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc - 2);
            gen_exception(EXCP_BKPT);
            s->is_jmp = DISAS_JUMP;
            break;

        case 0xa: /* rev */
            ARCH(6);
            rn = (insn >> 3) & 0x7;
            rd = insn & 0x7;
            tmp = load_reg(s, rn);
            switch ((insn >> 6) & 3) {
            case 0: tcg_gen_bswap_i32(tmp, tmp); break;
            case 1: gen_rev16(tmp); break;
            case 3: gen_revsh(tmp); break;
            default: goto illegal_op;
            }
            store_reg(s, rd, tmp);
            break;

        case 6: /* cps */
            ARCH(6);
            if (IS_USER(s))
                break;
            if (IS_M(env)) {
                tmp = tcg_const_i32((insn & (1 << 4)) != 0);
                /* PRIMASK */
                if (insn & 1) {
                    addr = tcg_const_i32(16);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                }
                /* FAULTMASK */
                if (insn & 2) {
                    addr = tcg_const_i32(17);
                    gen_helper_v7m_msr(cpu_env, addr, tmp);
                }
                gen_lookup_tb(s);
            } else {
                if (insn & (1 << 4))
                    shift = CPSR_A | CPSR_I | CPSR_F;
                else
                    shift = 0;

                val = ((insn & 7) << 6) & shift;
                gen_op_movl_T0_im(val);
                gen_set_psr_T0(s, shift, 0);
            }
            break;

        default:
            goto undef;
        }
        break;

    case 12:
        /* load/store multiple */
        rn = (insn >> 8) & 0x7;
        addr = load_reg(s, rn);
        for (i = 0; i < 8; i++) {
            if (insn & (1 << i)) {
                if (insn & (1 << 11)) {
                    /* load */
                    tmp = gen_ld32(addr, IS_USER(s));
                    store_reg(s, i, tmp);
                } else {
                    /* store */
                    tmp = load_reg(s, i);
                    gen_st32(tmp, addr, IS_USER(s));
                }
                /* advance to the next address */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
        /* Base register writeback. */
        if ((insn & (1 << rn)) == 0) {
            store_reg(s, rn, addr);
        } else {
            dead_tmp(addr);
        }
        break;

    case 13:
        /* conditional branch or swi */
        cond = (insn >> 8) & 0xf;
        if (cond == 0xe)
            goto undef;

        if (cond == 0xf) {
            /* swi */
            gen_set_condexec(s);
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SWI;
            break;
        }
        /* generate a conditional jump to next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(cond ^ 1, s->condlabel);
        s->condjmp = 1;
        gen_movl_T1_reg(s, 15);

        /* jump to the offset */
        val = (uint32_t)s->pc + 2;
        offset = ((int32_t)insn << 24) >> 24; /* sign-extend imm8 */
        val += offset << 1;
        gen_jmp(s, val);
        break;

    case 14:
        if (insn & (1 << 11)) {
            if (disas_thumb2_insn(env, s, insn))
                goto undef32;
            break;
        }
        /* unconditional branch */
        val = (uint32_t)s->pc;
        offset = ((int32_t)insn << 21) >> 21; /* sign-extend imm11 */
        val += (offset << 1) + 2;
        gen_jmp(s, val);
        break;

    case 15:
        if (disas_thumb2_insn(env, s, insn))
            goto undef32;
        break;
    }
    return;
undef32:
    /* Undefined 32-bit encoding: PC backs up over both halfwords.  */
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 4);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
    return;
illegal_op:
undef:
    gen_set_condexec(s);
    gen_set_pc_im(s->pc - 2);
    gen_exception(EXCP_UDEF);
    s->is_jmp = DISAS_JUMP;
}
8594
2c0262af
FB
8595/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8596 basic block 'tb'. If search_pc is TRUE, also generate PC
8597 information for each intermediate instruction. */
2cfc5f17
TS
8598static inline void gen_intermediate_code_internal(CPUState *env,
8599 TranslationBlock *tb,
8600 int search_pc)
2c0262af
FB
8601{
8602 DisasContext dc1, *dc = &dc1;
a1d1bb31 8603 CPUBreakpoint *bp;
2c0262af
FB
8604 uint16_t *gen_opc_end;
8605 int j, lj;
0fa85d43 8606 target_ulong pc_start;
b5ff1b31 8607 uint32_t next_page_start;
2e70f6ef
PB
8608 int num_insns;
8609 int max_insns;
3b46e624 8610
2c0262af 8611 /* generate intermediate code */
b26eefb6
PB
8612 num_temps = 0;
8613 memset(temps, 0, sizeof(temps));
8614
0fa85d43 8615 pc_start = tb->pc;
3b46e624 8616
2c0262af
FB
8617 dc->tb = tb;
8618
2c0262af 8619 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8620
8621 dc->is_jmp = DISAS_NEXT;
8622 dc->pc = pc_start;
8aaca4c0 8623 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8624 dc->condjmp = 0;
5899f386 8625 dc->thumb = env->thumb;
9ee6e8bb
PB
8626 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8627 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 8628 dc->is_mem = 0;
b5ff1b31 8629#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8630 if (IS_M(env)) {
8631 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8632 } else {
8633 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8634 }
b5ff1b31 8635#endif
a7812ae4
PB
8636 cpu_F0s = tcg_temp_new_i32();
8637 cpu_F1s = tcg_temp_new_i32();
8638 cpu_F0d = tcg_temp_new_i64();
8639 cpu_F1d = tcg_temp_new_i64();
ad69471c
PB
8640 cpu_V0 = cpu_F0d;
8641 cpu_V1 = cpu_F1d;
e677137d 8642 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
a7812ae4 8643 cpu_M0 = tcg_temp_new_i64();
b5ff1b31 8644 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8645 lj = -1;
2e70f6ef
PB
8646 num_insns = 0;
8647 max_insns = tb->cflags & CF_COUNT_MASK;
8648 if (max_insns == 0)
8649 max_insns = CF_COUNT_MASK;
8650
8651 gen_icount_start();
9ee6e8bb
PB
8652 /* Reset the conditional execution bits immediately. This avoids
8653 complications trying to do it at the end of the block. */
8654 if (env->condexec_bits)
8f01245e
PB
8655 {
8656 TCGv tmp = new_tmp();
8657 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8658 store_cpu_field(tmp, condexec_bits);
8f01245e 8659 }
2c0262af 8660 do {
fbb4a2e3
PB
8661#ifdef CONFIG_USER_ONLY
8662 /* Intercept jump to the magic kernel page. */
8663 if (dc->pc >= 0xffff0000) {
8664 /* We always get here via a jump, so know we are not in a
8665 conditional execution block. */
8666 gen_exception(EXCP_KERNEL_TRAP);
8667 dc->is_jmp = DISAS_UPDATE;
8668 break;
8669 }
8670#else
9ee6e8bb
PB
8671 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8672 /* We always get here via a jump, so know we are not in a
8673 conditional execution block. */
d9ba4830 8674 gen_exception(EXCP_EXCEPTION_EXIT);
d60bb01c
PB
8675 dc->is_jmp = DISAS_UPDATE;
8676 break;
9ee6e8bb
PB
8677 }
8678#endif
8679
c0ce998e
AL
8680 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
8681 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
a1d1bb31 8682 if (bp->pc == dc->pc) {
9ee6e8bb 8683 gen_set_condexec(dc);
5e3f878a 8684 gen_set_pc_im(dc->pc);
d9ba4830 8685 gen_exception(EXCP_DEBUG);
1fddef4b 8686 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8687 /* Advance PC so that clearing the breakpoint will
8688 invalidate this TB. */
8689 dc->pc += 2;
8690 goto done_generating;
1fddef4b
FB
8691 break;
8692 }
8693 }
8694 }
2c0262af
FB
8695 if (search_pc) {
8696 j = gen_opc_ptr - gen_opc_buf;
8697 if (lj < j) {
8698 lj++;
8699 while (lj < j)
8700 gen_opc_instr_start[lj++] = 0;
8701 }
0fa85d43 8702 gen_opc_pc[lj] = dc->pc;
2c0262af 8703 gen_opc_instr_start[lj] = 1;
2e70f6ef 8704 gen_opc_icount[lj] = num_insns;
2c0262af 8705 }
e50e6a20 8706
2e70f6ef
PB
8707 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
8708 gen_io_start();
8709
9ee6e8bb
PB
8710 if (env->thumb) {
8711 disas_thumb_insn(env, dc);
8712 if (dc->condexec_mask) {
8713 dc->condexec_cond = (dc->condexec_cond & 0xe)
8714 | ((dc->condexec_mask >> 4) & 1);
8715 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8716 if (dc->condexec_mask == 0) {
8717 dc->condexec_cond = 0;
8718 }
8719 }
8720 } else {
8721 disas_arm_insn(env, dc);
8722 }
b26eefb6
PB
8723 if (num_temps) {
8724 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8725 num_temps = 0;
8726 }
e50e6a20
FB
8727
8728 if (dc->condjmp && !dc->is_jmp) {
8729 gen_set_label(dc->condlabel);
8730 dc->condjmp = 0;
8731 }
8732 /* Translation stops when a conditional branch is enoutered.
8733 * Otherwise the subsequent code could get translated several times.
b5ff1b31 8734 * Also stop translation when a page boundary is reached. This
bf20dc07 8735 * ensures prefetch aborts occur at the right place. */
2e70f6ef 8736 num_insns ++;
1fddef4b
FB
8737 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8738 !env->singlestep_enabled &&
2e70f6ef
PB
8739 dc->pc < next_page_start &&
8740 num_insns < max_insns);
8741
8742 if (tb->cflags & CF_LAST_IO) {
8743 if (dc->condjmp) {
8744 /* FIXME: This can theoretically happen with self-modifying
8745 code. */
8746 cpu_abort(env, "IO on conditional branch instruction");
8747 }
8748 gen_io_end();
8749 }
9ee6e8bb 8750
b5ff1b31 8751 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8752 instruction was a conditional branch or trap, and the PC has
8753 already been written. */
551bd27f 8754 if (unlikely(env->singlestep_enabled)) {
8aaca4c0 8755 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8756 if (dc->condjmp) {
9ee6e8bb
PB
8757 gen_set_condexec(dc);
8758 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8759 gen_exception(EXCP_SWI);
9ee6e8bb 8760 } else {
d9ba4830 8761 gen_exception(EXCP_DEBUG);
9ee6e8bb 8762 }
e50e6a20
FB
8763 gen_set_label(dc->condlabel);
8764 }
8765 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8766 gen_set_pc_im(dc->pc);
e50e6a20 8767 dc->condjmp = 0;
8aaca4c0 8768 }
9ee6e8bb
PB
8769 gen_set_condexec(dc);
8770 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8771 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8772 } else {
8773 /* FIXME: Single stepping a WFI insn will not halt
8774 the CPU. */
d9ba4830 8775 gen_exception(EXCP_DEBUG);
9ee6e8bb 8776 }
8aaca4c0 8777 } else {
9ee6e8bb
PB
8778 /* While branches must always occur at the end of an IT block,
8779 there are a few other things that can cause us to terminate
 8780 the TB in the middle of an IT block:
8781 - Exception generating instructions (bkpt, swi, undefined).
8782 - Page boundaries.
8783 - Hardware watchpoints.
8784 Hardware breakpoints have already been handled and skip this code.
8785 */
8786 gen_set_condexec(dc);
8aaca4c0 8787 switch(dc->is_jmp) {
8aaca4c0 8788 case DISAS_NEXT:
6e256c93 8789 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8790 break;
8791 default:
8792 case DISAS_JUMP:
8793 case DISAS_UPDATE:
8794 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8795 tcg_gen_exit_tb(0);
8aaca4c0
FB
8796 break;
8797 case DISAS_TB_JUMP:
8798 /* nothing more to generate */
8799 break;
9ee6e8bb 8800 case DISAS_WFI:
d9ba4830 8801 gen_helper_wfi();
9ee6e8bb
PB
8802 break;
8803 case DISAS_SWI:
d9ba4830 8804 gen_exception(EXCP_SWI);
9ee6e8bb 8805 break;
8aaca4c0 8806 }
e50e6a20
FB
8807 if (dc->condjmp) {
8808 gen_set_label(dc->condlabel);
9ee6e8bb 8809 gen_set_condexec(dc);
6e256c93 8810 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8811 dc->condjmp = 0;
8812 }
2c0262af 8813 }
2e70f6ef 8814
9ee6e8bb 8815done_generating:
2e70f6ef 8816 gen_icount_end(tb, num_insns);
2c0262af
FB
8817 *gen_opc_ptr = INDEX_op_end;
8818
8819#ifdef DEBUG_DISAS
e19e89a5 8820 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8821 fprintf(logfile, "----------------\n");
8822 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8823 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8824 fprintf(logfile, "\n");
8825 }
8826#endif
b5ff1b31
FB
8827 if (search_pc) {
8828 j = gen_opc_ptr - gen_opc_buf;
8829 lj++;
8830 while (lj <= j)
8831 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8832 } else {
2c0262af 8833 tb->size = dc->pc - pc_start;
2e70f6ef 8834 tb->icount = num_insns;
b5ff1b31 8835 }
2c0262af
FB
8836}
8837
2cfc5f17 8838void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2c0262af 8839{
2cfc5f17 8840 gen_intermediate_code_internal(env, tb, 0);
2c0262af
FB
8841}
8842
2cfc5f17 8843void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2c0262af 8844{
2cfc5f17 8845 gen_intermediate_code_internal(env, tb, 1);
2c0262af
FB
8846}
8847
/* Printable names for the low 4 bits of the CPSR mode field, indexed by
   (psr & 0xf); "???" marks encodings this table does not name. */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc",  /* 0x0 - 0x3 */
    "???", "???", "???", "abt",  /* 0x4 - 0x7 */
    "???", "???", "???", "und",  /* 0x8 - 0xb */
    "???", "???", "???", "sys"   /* 0xc - 0xf */
};
9ee6e8bb 8852
5fafdf24 8853void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8854 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8855 int flags)
2c0262af
FB
8856{
8857 int i;
06e80fc9 8858#if 0
bc380d17 8859 union {
b7bcbe95
FB
8860 uint32_t i;
8861 float s;
8862 } s0, s1;
8863 CPU_DoubleU d;
a94a6abf
PB
8864 /* ??? This assumes float64 and double have the same layout.
8865 Oh well, it's only debug dumps. */
8866 union {
8867 float64 f64;
8868 double d;
8869 } d0;
06e80fc9 8870#endif
b5ff1b31 8871 uint32_t psr;
2c0262af
FB
8872
8873 for(i=0;i<16;i++) {
7fe48483 8874 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8875 if ((i % 4) == 3)
7fe48483 8876 cpu_fprintf(f, "\n");
2c0262af 8877 else
7fe48483 8878 cpu_fprintf(f, " ");
2c0262af 8879 }
b5ff1b31 8880 psr = cpsr_read(env);
687fa640
TS
8881 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8882 psr,
b5ff1b31
FB
8883 psr & (1 << 31) ? 'N' : '-',
8884 psr & (1 << 30) ? 'Z' : '-',
8885 psr & (1 << 29) ? 'C' : '-',
8886 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8887 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8888 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8889
5e3f878a 8890#if 0
b7bcbe95 8891 for (i = 0; i < 16; i++) {
8e96005d
FB
8892 d.d = env->vfp.regs[i];
8893 s0.i = d.l.lower;
8894 s1.i = d.l.upper;
a94a6abf
PB
8895 d0.f64 = d.d;
8896 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8897 i * 2, (int)s0.i, s0.s,
a94a6abf 8898 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8899 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8900 d0.d);
b7bcbe95 8901 }
40f137e1 8902 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8903#endif
2c0262af 8904}
a6b025d3 8905
d2856f1a
AJ
8906void gen_pc_load(CPUState *env, TranslationBlock *tb,
8907 unsigned long searched_pc, int pc_pos, void *puc)
8908{
8909 env->regs[15] = gen_opc_pc[pc_pos];
8910}