]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
Fix keyboard emulation for ARM versatile board:
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
1497c961
PB
32
33#define GEN_HELPER 1
b26eefb6 34#include "helpers.h"
2c0262af 35
9ee6e8bb
PB
36#define ENABLE_ARCH_5J 0
37#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
38#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
39#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
40#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31
FB
41
42#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
43
2c0262af
FB
44/* internal defines */
45typedef struct DisasContext {
0fa85d43 46 target_ulong pc;
2c0262af 47 int is_jmp;
e50e6a20
FB
48 /* Nonzero if this instruction has been conditionally skipped. */
49 int condjmp;
50 /* The label that will be jumped to when the instruction is skipped. */
51 int condlabel;
9ee6e8bb
PB
52 /* Thumb-2 condtional execution bits. */
53 int condexec_mask;
54 int condexec_cond;
2c0262af 55 struct TranslationBlock *tb;
8aaca4c0 56 int singlestep_enabled;
5899f386 57 int thumb;
6658ffb8 58 int is_mem;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af
FB
74
75/* XXX: move that elsewhere */
2c0262af
FB
76extern FILE *logfile;
77extern int loglevel;
78
b26eefb6 79static TCGv cpu_env;
ad69471c 80/* We reuse the same 64-bit temporaries for efficiency. */
e677137d 81static TCGv cpu_V0, cpu_V1, cpu_M0;
ad69471c 82
b26eefb6 83/* FIXME: These should be removed. */
8f8e3aa4 84static TCGv cpu_T[2];
4373f3ce 85static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
b26eefb6
PB
86
87/* initialize TCG globals. */
88void arm_translate_init(void)
89{
90 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
91
92 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
93 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
b26eefb6
PB
94}
95
96/* The code generator doesn't like lots of temporaries, so maintain our own
97 cache for reuse within a function. */
98#define MAX_TEMPS 8
99static int num_temps;
100static TCGv temps[MAX_TEMPS];
101
102/* Allocate a temporary variable. */
103static TCGv new_tmp(void)
104{
105 TCGv tmp;
106 if (num_temps == MAX_TEMPS)
107 abort();
108
109 if (GET_TCGV(temps[num_temps]))
110 return temps[num_temps++];
111
112 tmp = tcg_temp_new(TCG_TYPE_I32);
113 temps[num_temps++] = tmp;
114 return tmp;
115}
116
117/* Release a temporary variable. */
118static void dead_tmp(TCGv tmp)
119{
120 int i;
121 num_temps--;
122 i = num_temps;
123 if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
124 return;
125
126 /* Shuffle this temp to the last slot. */
127 while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
128 i--;
129 while (i < num_temps) {
130 temps[i] = temps[i + 1];
131 i++;
132 }
133 temps[i] = tmp;
134}
135
d9ba4830
PB
136static inline TCGv load_cpu_offset(int offset)
137{
138 TCGv tmp = new_tmp();
139 tcg_gen_ld_i32(tmp, cpu_env, offset);
140 return tmp;
141}
142
143#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
144
145static inline void store_cpu_offset(TCGv var, int offset)
146{
147 tcg_gen_st_i32(var, cpu_env, offset);
148 dead_tmp(var);
149}
150
151#define store_cpu_field(var, name) \
152 store_cpu_offset(var, offsetof(CPUState, name))
153
b26eefb6
PB
154/* Set a variable to the value of a CPU register. */
155static void load_reg_var(DisasContext *s, TCGv var, int reg)
156{
157 if (reg == 15) {
158 uint32_t addr;
159 /* normaly, since we updated PC, we need only to add one insn */
160 if (s->thumb)
161 addr = (long)s->pc + 2;
162 else
163 addr = (long)s->pc + 4;
164 tcg_gen_movi_i32(var, addr);
165 } else {
166 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
167 }
168}
169
170/* Create a new temporary and set it to the value of a CPU register. */
171static inline TCGv load_reg(DisasContext *s, int reg)
172{
173 TCGv tmp = new_tmp();
174 load_reg_var(s, tmp, reg);
175 return tmp;
176}
177
178/* Set a CPU register. The source must be a temporary and will be
179 marked as dead. */
180static void store_reg(DisasContext *s, int reg, TCGv var)
181{
182 if (reg == 15) {
183 tcg_gen_andi_i32(var, var, ~1);
184 s->is_jmp = DISAS_JUMP;
185 }
186 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
187 dead_tmp(var);
188}
189
190
191/* Basic operations. */
192#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6 193#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
b26eefb6
PB
194#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
195#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
196
197#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
198#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
199#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
201
8984bd2e
PB
202#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
203#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
204#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
205#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
207#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
208
b26eefb6
PB
209#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
210#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
211#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
212#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
213#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
214#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
215#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
216
217#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
218#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
219#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
220#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
221#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
222
223/* Value extensions. */
224#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
225#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
226#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
227#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
228
1497c961
PB
229#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
230#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
231
232#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 233
d9ba4830
PB
234#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
235/* Set NZCV flags from the high 4 bits of var. */
236#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
237
238static void gen_exception(int excp)
239{
240 TCGv tmp = new_tmp();
241 tcg_gen_movi_i32(tmp, excp);
242 gen_helper_exception(tmp);
243 dead_tmp(tmp);
244}
245
3670669c
PB
246static void gen_smul_dual(TCGv a, TCGv b)
247{
248 TCGv tmp1 = new_tmp();
249 TCGv tmp2 = new_tmp();
3670669c
PB
250 tcg_gen_ext8s_i32(tmp1, a);
251 tcg_gen_ext8s_i32(tmp2, b);
252 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
253 dead_tmp(tmp2);
254 tcg_gen_sari_i32(a, a, 16);
255 tcg_gen_sari_i32(b, b, 16);
256 tcg_gen_mul_i32(b, b, a);
257 tcg_gen_mov_i32(a, tmp1);
258 dead_tmp(tmp1);
259}
260
261/* Byteswap each halfword. */
262static void gen_rev16(TCGv var)
263{
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_andi_i32(var, var, 0xff00ff00);
269 tcg_gen_or_i32(var, var, tmp);
270 dead_tmp(tmp);
271}
272
273/* Byteswap low halfword and sign extend. */
274static void gen_revsh(TCGv var)
275{
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_ext8s_i32(var, var);
281 tcg_gen_or_i32(var, var, tmp);
282 dead_tmp(tmp);
283}
284
285/* Unsigned bitfield extract. */
286static void gen_ubfx(TCGv var, int shift, uint32_t mask)
287{
288 if (shift)
289 tcg_gen_shri_i32(var, var, shift);
290 tcg_gen_andi_i32(var, var, mask);
291}
292
293/* Signed bitfield extract. */
294static void gen_sbfx(TCGv var, int shift, int width)
295{
296 uint32_t signbit;
297
298 if (shift)
299 tcg_gen_sari_i32(var, var, shift);
300 if (shift + width < 32) {
301 signbit = 1u << (width - 1);
302 tcg_gen_andi_i32(var, var, (1u << width) - 1);
303 tcg_gen_xori_i32(var, var, signbit);
304 tcg_gen_subi_i32(var, var, signbit);
305 }
306}
307
308/* Bitfield insertion. Insert val into base. Clobbers base and val. */
309static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
310{
3670669c 311 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
312 tcg_gen_shli_i32(val, val, shift);
313 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
314 tcg_gen_or_i32(dest, base, val);
315}
316
d9ba4830
PB
317/* Round the top 32 bits of a 64-bit value. */
318static void gen_roundqd(TCGv a, TCGv b)
3670669c 319{
d9ba4830
PB
320 tcg_gen_shri_i32(a, a, 31);
321 tcg_gen_add_i32(a, a, b);
3670669c
PB
322}
323
8f01245e
PB
324/* FIXME: Most targets have native widening multiplication.
325 It would be good to use that instead of a full wide multiply. */
5e3f878a
PB
326/* 32x32->64 multiply. Marks inputs as dead. */
327static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
328{
329 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
330 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
331
332 tcg_gen_extu_i32_i64(tmp1, a);
333 dead_tmp(a);
334 tcg_gen_extu_i32_i64(tmp2, b);
335 dead_tmp(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 return tmp1;
338}
339
340static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
341{
342 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
343 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 return tmp1;
351}
352
8f01245e
PB
353/* Unsigned 32x32->64 multiply. */
354static void gen_op_mull_T0_T1(void)
355{
356 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
357 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
358
359 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
361 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
362 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
363 tcg_gen_shri_i64(tmp1, tmp1, 32);
364 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
365}
366
367/* Signed 32x32->64 multiply. */
d9ba4830 368static void gen_imull(TCGv a, TCGv b)
8f01245e
PB
369{
370 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
371 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
372
d9ba4830
PB
373 tcg_gen_ext_i32_i64(tmp1, a);
374 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 375 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 376 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 377 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
378 tcg_gen_trunc_i64_i32(b, tmp1);
379}
380#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
381
8f01245e
PB
382/* Swap low and high halfwords. */
383static void gen_swap_half(TCGv var)
384{
385 TCGv tmp = new_tmp();
386 tcg_gen_shri_i32(tmp, var, 16);
387 tcg_gen_shli_i32(var, var, 16);
388 tcg_gen_or_i32(var, var, tmp);
3670669c 389 dead_tmp(tmp);
8f01245e
PB
390}
391
b26eefb6
PB
392/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
393 tmp = (t0 ^ t1) & 0x8000;
394 t0 &= ~0x8000;
395 t1 &= ~0x8000;
396 t0 = (t0 + t1) ^ tmp;
397 */
398
399static void gen_add16(TCGv t0, TCGv t1)
400{
401 TCGv tmp = new_tmp();
402 tcg_gen_xor_i32(tmp, t0, t1);
403 tcg_gen_andi_i32(tmp, tmp, 0x8000);
404 tcg_gen_andi_i32(t0, t0, ~0x8000);
405 tcg_gen_andi_i32(t1, t1, ~0x8000);
406 tcg_gen_add_i32(t0, t0, t1);
407 tcg_gen_xor_i32(t0, t0, tmp);
408 dead_tmp(tmp);
409 dead_tmp(t1);
410}
411
9a119ff6
PB
412#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
413
b26eefb6
PB
414/* Set CF to the top bit of var. */
415static void gen_set_CF_bit31(TCGv var)
416{
417 TCGv tmp = new_tmp();
418 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 419 gen_set_CF(var);
b26eefb6
PB
420 dead_tmp(tmp);
421}
422
423/* Set N and Z flags from var. */
424static inline void gen_logic_CC(TCGv var)
425{
6fbe23d5
PB
426 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
427 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
428}
429
430/* T0 += T1 + CF. */
431static void gen_adc_T0_T1(void)
432{
d9ba4830 433 TCGv tmp;
b26eefb6 434 gen_op_addl_T0_T1();
d9ba4830 435 tmp = load_cpu_field(CF);
b26eefb6
PB
436 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
437 dead_tmp(tmp);
438}
439
3670669c
PB
440/* dest = T0 - T1 + CF - 1. */
441static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
442{
d9ba4830 443 TCGv tmp;
3670669c 444 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 445 tmp = load_cpu_field(CF);
3670669c
PB
446 tcg_gen_add_i32(dest, dest, tmp);
447 tcg_gen_subi_i32(dest, dest, 1);
448 dead_tmp(tmp);
449}
450
451#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
452#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
453
b26eefb6
PB
454/* FIXME: Implement this natively. */
455static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
456{
457 tcg_gen_xori_i32(t0, t1, ~0);
458}
459
e677137d
PB
460/* FIXME: Implement this natively. */
461static inline void tcg_gen_neg_i64(TCGv dest, TCGv src)
462{
463 tcg_gen_sub_i64(dest, tcg_const_i64(0), src);
464}
465
b26eefb6
PB
466/* T0 &= ~T1. Clobbers T1. */
467/* FIXME: Implement bic natively. */
8f8e3aa4
PB
468static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
469{
470 TCGv tmp = new_tmp();
471 tcg_gen_not_i32(tmp, t1);
472 tcg_gen_and_i32(dest, t0, tmp);
473 dead_tmp(tmp);
474}
b26eefb6
PB
475static inline void gen_op_bicl_T0_T1(void)
476{
477 gen_op_notl_T1();
478 gen_op_andl_T0_T1();
479}
480
ad69471c
PB
481/* FIXME: Implement this natively. */
482#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
483
b26eefb6
PB
484/* FIXME: Implement this natively. */
485static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
486{
487 TCGv tmp;
488
489 if (i == 0)
490 return;
491
492 tmp = new_tmp();
493 tcg_gen_shri_i32(tmp, t1, i);
494 tcg_gen_shli_i32(t1, t1, 32 - i);
495 tcg_gen_or_i32(t0, t1, tmp);
496 dead_tmp(tmp);
497}
498
9a119ff6 499static void shifter_out_im(TCGv var, int shift)
b26eefb6 500{
9a119ff6
PB
501 TCGv tmp = new_tmp();
502 if (shift == 0) {
503 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 504 } else {
9a119ff6
PB
505 tcg_gen_shri_i32(tmp, var, shift);
506 if (shift != 31);
507 tcg_gen_andi_i32(tmp, tmp, 1);
508 }
509 gen_set_CF(tmp);
510 dead_tmp(tmp);
511}
b26eefb6 512
9a119ff6
PB
513/* Shift by immediate. Includes special handling for shift == 0. */
514static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
515{
516 switch (shiftop) {
517 case 0: /* LSL */
518 if (shift != 0) {
519 if (flags)
520 shifter_out_im(var, 32 - shift);
521 tcg_gen_shli_i32(var, var, shift);
522 }
523 break;
524 case 1: /* LSR */
525 if (shift == 0) {
526 if (flags) {
527 tcg_gen_shri_i32(var, var, 31);
528 gen_set_CF(var);
529 }
530 tcg_gen_movi_i32(var, 0);
531 } else {
532 if (flags)
533 shifter_out_im(var, shift - 1);
534 tcg_gen_shri_i32(var, var, shift);
535 }
536 break;
537 case 2: /* ASR */
538 if (shift == 0)
539 shift = 32;
540 if (flags)
541 shifter_out_im(var, shift - 1);
542 if (shift == 32)
543 shift = 31;
544 tcg_gen_sari_i32(var, var, shift);
545 break;
546 case 3: /* ROR/RRX */
547 if (shift != 0) {
548 if (flags)
549 shifter_out_im(var, shift - 1);
550 tcg_gen_rori_i32(var, var, shift); break;
551 } else {
d9ba4830 552 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
553 if (flags)
554 shifter_out_im(var, 0);
555 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
556 tcg_gen_shli_i32(tmp, tmp, 31);
557 tcg_gen_or_i32(var, var, tmp);
558 dead_tmp(tmp);
b26eefb6
PB
559 }
560 }
561};
562
8984bd2e
PB
563static inline void gen_arm_shift_reg(TCGv var, int shiftop,
564 TCGv shift, int flags)
565{
566 if (flags) {
567 switch (shiftop) {
568 case 0: gen_helper_shl_cc(var, var, shift); break;
569 case 1: gen_helper_shr_cc(var, var, shift); break;
570 case 2: gen_helper_sar_cc(var, var, shift); break;
571 case 3: gen_helper_ror_cc(var, var, shift); break;
572 }
573 } else {
574 switch (shiftop) {
575 case 0: gen_helper_shl(var, var, shift); break;
576 case 1: gen_helper_shr(var, var, shift); break;
577 case 2: gen_helper_sar(var, var, shift); break;
578 case 3: gen_helper_ror(var, var, shift); break;
579 }
580 }
581 dead_tmp(shift);
582}
583
6ddbc6e4
PB
584#define PAS_OP(pfx) \
585 switch (op2) { \
586 case 0: gen_pas_helper(glue(pfx,add16)); break; \
587 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
588 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
589 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
590 case 4: gen_pas_helper(glue(pfx,add8)); break; \
591 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
592 }
d9ba4830 593static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
594{
595 TCGv tmp;
596
597 switch (op1) {
598#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
599 case 1:
600 tmp = tcg_temp_new(TCG_TYPE_PTR);
601 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
602 PAS_OP(s)
603 break;
604 case 5:
605 tmp = tcg_temp_new(TCG_TYPE_PTR);
606 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
607 PAS_OP(u)
608 break;
609#undef gen_pas_helper
610#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
611 case 2:
612 PAS_OP(q);
613 break;
614 case 3:
615 PAS_OP(sh);
616 break;
617 case 6:
618 PAS_OP(uq);
619 break;
620 case 7:
621 PAS_OP(uh);
622 break;
623#undef gen_pas_helper
624 }
625}
9ee6e8bb
PB
626#undef PAS_OP
627
6ddbc6e4
PB
628/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
629#define PAS_OP(pfx) \
630 switch (op2) { \
631 case 0: gen_pas_helper(glue(pfx,add8)); break; \
632 case 1: gen_pas_helper(glue(pfx,add16)); break; \
633 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
634 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
635 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
636 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
637 }
d9ba4830 638static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
639{
640 TCGv tmp;
641
642 switch (op1) {
643#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
644 case 0:
645 tmp = tcg_temp_new(TCG_TYPE_PTR);
646 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
647 PAS_OP(s)
648 break;
649 case 4:
650 tmp = tcg_temp_new(TCG_TYPE_PTR);
651 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
652 PAS_OP(u)
653 break;
654#undef gen_pas_helper
655#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
656 case 1:
657 PAS_OP(q);
658 break;
659 case 2:
660 PAS_OP(sh);
661 break;
662 case 5:
663 PAS_OP(uq);
664 break;
665 case 6:
666 PAS_OP(uh);
667 break;
668#undef gen_pas_helper
669 }
670}
9ee6e8bb
PB
671#undef PAS_OP
672
d9ba4830
PB
673static void gen_test_cc(int cc, int label)
674{
675 TCGv tmp;
676 TCGv tmp2;
677 TCGv zero;
678 int inv;
679
680 zero = tcg_const_i32(0);
681 switch (cc) {
682 case 0: /* eq: Z */
6fbe23d5 683 tmp = load_cpu_field(ZF);
d9ba4830
PB
684 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
685 break;
686 case 1: /* ne: !Z */
6fbe23d5 687 tmp = load_cpu_field(ZF);
d9ba4830
PB
688 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
689 break;
690 case 2: /* cs: C */
691 tmp = load_cpu_field(CF);
692 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
693 break;
694 case 3: /* cc: !C */
695 tmp = load_cpu_field(CF);
696 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
697 break;
698 case 4: /* mi: N */
6fbe23d5 699 tmp = load_cpu_field(NF);
d9ba4830
PB
700 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
701 break;
702 case 5: /* pl: !N */
6fbe23d5 703 tmp = load_cpu_field(NF);
d9ba4830
PB
704 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
705 break;
706 case 6: /* vs: V */
707 tmp = load_cpu_field(VF);
708 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
709 break;
710 case 7: /* vc: !V */
711 tmp = load_cpu_field(VF);
712 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
713 break;
714 case 8: /* hi: C && !Z */
715 inv = gen_new_label();
716 tmp = load_cpu_field(CF);
717 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
718 dead_tmp(tmp);
6fbe23d5 719 tmp = load_cpu_field(ZF);
d9ba4830
PB
720 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
721 gen_set_label(inv);
722 break;
723 case 9: /* ls: !C || Z */
724 tmp = load_cpu_field(CF);
725 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
726 dead_tmp(tmp);
6fbe23d5 727 tmp = load_cpu_field(ZF);
d9ba4830
PB
728 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
729 break;
730 case 10: /* ge: N == V -> N ^ V == 0 */
731 tmp = load_cpu_field(VF);
6fbe23d5 732 tmp2 = load_cpu_field(NF);
d9ba4830
PB
733 tcg_gen_xor_i32(tmp, tmp, tmp2);
734 dead_tmp(tmp2);
735 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
736 break;
737 case 11: /* lt: N != V -> N ^ V != 0 */
738 tmp = load_cpu_field(VF);
6fbe23d5 739 tmp2 = load_cpu_field(NF);
d9ba4830
PB
740 tcg_gen_xor_i32(tmp, tmp, tmp2);
741 dead_tmp(tmp2);
742 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
743 break;
744 case 12: /* gt: !Z && N == V */
745 inv = gen_new_label();
6fbe23d5 746 tmp = load_cpu_field(ZF);
d9ba4830
PB
747 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
748 dead_tmp(tmp);
749 tmp = load_cpu_field(VF);
6fbe23d5 750 tmp2 = load_cpu_field(NF);
d9ba4830
PB
751 tcg_gen_xor_i32(tmp, tmp, tmp2);
752 dead_tmp(tmp2);
753 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
754 gen_set_label(inv);
755 break;
756 case 13: /* le: Z || N != V */
6fbe23d5 757 tmp = load_cpu_field(ZF);
d9ba4830
PB
758 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
759 dead_tmp(tmp);
760 tmp = load_cpu_field(VF);
6fbe23d5 761 tmp2 = load_cpu_field(NF);
d9ba4830
PB
762 tcg_gen_xor_i32(tmp, tmp, tmp2);
763 dead_tmp(tmp2);
764 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
765 break;
766 default:
767 fprintf(stderr, "Bad condition code 0x%x\n", cc);
768 abort();
769 }
770 dead_tmp(tmp);
771}
2c0262af
FB
772
773const uint8_t table_logic_cc[16] = {
774 1, /* and */
775 1, /* xor */
776 0, /* sub */
777 0, /* rsb */
778 0, /* add */
779 0, /* adc */
780 0, /* sbc */
781 0, /* rsc */
782 1, /* andl */
783 1, /* xorl */
784 0, /* cmp */
785 0, /* cmn */
786 1, /* orr */
787 1, /* mov */
788 1, /* bic */
789 1, /* mvn */
790};
3b46e624 791
d9ba4830
PB
792/* Set PC and Thumb state from an immediate address. */
793static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 794{
b26eefb6 795 TCGv tmp;
99c475ab 796
b26eefb6
PB
797 s->is_jmp = DISAS_UPDATE;
798 tmp = new_tmp();
d9ba4830
PB
799 if (s->thumb != (addr & 1)) {
800 tcg_gen_movi_i32(tmp, addr & 1);
801 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
802 }
803 tcg_gen_movi_i32(tmp, addr & ~1);
804 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 805 dead_tmp(tmp);
d9ba4830
PB
806}
807
808/* Set PC and Thumb state from var. var is marked as dead. */
809static inline void gen_bx(DisasContext *s, TCGv var)
810{
811 TCGv tmp;
812
813 s->is_jmp = DISAS_UPDATE;
814 tmp = new_tmp();
815 tcg_gen_andi_i32(tmp, var, 1);
816 store_cpu_field(tmp, thumb);
817 tcg_gen_andi_i32(var, var, ~1);
818 store_cpu_field(var, regs[15]);
819}
820
821/* TODO: This should be removed. Use gen_bx instead. */
822static inline void gen_bx_T0(DisasContext *s)
823{
824 TCGv tmp = new_tmp();
825 tcg_gen_mov_i32(tmp, cpu_T[0]);
826 gen_bx(s, tmp);
b26eefb6 827}
b5ff1b31
FB
828
829#if defined(CONFIG_USER_ONLY)
830#define gen_ldst(name, s) gen_op_##name##_raw()
831#else
832#define gen_ldst(name, s) do { \
6658ffb8 833 s->is_mem = 1; \
b5ff1b31
FB
834 if (IS_USER(s)) \
835 gen_op_##name##_user(); \
836 else \
837 gen_op_##name##_kernel(); \
838 } while (0)
839#endif
b0109805
PB
840static inline TCGv gen_ld8s(TCGv addr, int index)
841{
842 TCGv tmp = new_tmp();
843 tcg_gen_qemu_ld8s(tmp, addr, index);
844 return tmp;
845}
846static inline TCGv gen_ld8u(TCGv addr, int index)
847{
848 TCGv tmp = new_tmp();
849 tcg_gen_qemu_ld8u(tmp, addr, index);
850 return tmp;
851}
852static inline TCGv gen_ld16s(TCGv addr, int index)
853{
854 TCGv tmp = new_tmp();
855 tcg_gen_qemu_ld16s(tmp, addr, index);
856 return tmp;
857}
858static inline TCGv gen_ld16u(TCGv addr, int index)
859{
860 TCGv tmp = new_tmp();
861 tcg_gen_qemu_ld16u(tmp, addr, index);
862 return tmp;
863}
864static inline TCGv gen_ld32(TCGv addr, int index)
865{
866 TCGv tmp = new_tmp();
867 tcg_gen_qemu_ld32u(tmp, addr, index);
868 return tmp;
869}
870static inline void gen_st8(TCGv val, TCGv addr, int index)
871{
872 tcg_gen_qemu_st8(val, addr, index);
873 dead_tmp(val);
874}
875static inline void gen_st16(TCGv val, TCGv addr, int index)
876{
877 tcg_gen_qemu_st16(val, addr, index);
878 dead_tmp(val);
879}
880static inline void gen_st32(TCGv val, TCGv addr, int index)
881{
882 tcg_gen_qemu_st32(val, addr, index);
883 dead_tmp(val);
884}
b5ff1b31 885
2c0262af
FB
886static inline void gen_movl_T0_reg(DisasContext *s, int reg)
887{
b26eefb6 888 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
889}
890
891static inline void gen_movl_T1_reg(DisasContext *s, int reg)
892{
b26eefb6 893 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
894}
895
896static inline void gen_movl_T2_reg(DisasContext *s, int reg)
897{
b26eefb6
PB
898 load_reg_var(s, cpu_T[2], reg);
899}
900
5e3f878a
PB
901static inline void gen_set_pc_im(uint32_t val)
902{
903 TCGv tmp = new_tmp();
904 tcg_gen_movi_i32(tmp, val);
905 store_cpu_field(tmp, regs[15]);
906}
907
b26eefb6
PB
908static inline void gen_set_pc_T0(void)
909{
910 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
2c0262af
FB
911}
912
913static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
914{
b26eefb6
PB
915 TCGv tmp;
916 if (reg == 15) {
917 tmp = new_tmp();
918 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
919 } else {
920 tmp = cpu_T[t];
921 }
922 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
2c0262af 923 if (reg == 15) {
b26eefb6 924 dead_tmp(tmp);
2c0262af
FB
925 s->is_jmp = DISAS_JUMP;
926 }
927}
928
929static inline void gen_movl_reg_T0(DisasContext *s, int reg)
930{
931 gen_movl_reg_TN(s, reg, 0);
932}
933
934static inline void gen_movl_reg_T1(DisasContext *s, int reg)
935{
936 gen_movl_reg_TN(s, reg, 1);
937}
938
b5ff1b31
FB
939/* Force a TB lookup after an instruction that changes the CPU state. */
940static inline void gen_lookup_tb(DisasContext *s)
941{
942 gen_op_movl_T0_im(s->pc);
943 gen_movl_reg_T0(s, 15);
944 s->is_jmp = DISAS_UPDATE;
945}
946
b0109805
PB
947static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
948 TCGv var)
2c0262af 949{
1e8d4eec 950 int val, rm, shift, shiftop;
b26eefb6 951 TCGv offset;
2c0262af
FB
952
953 if (!(insn & (1 << 25))) {
954 /* immediate */
955 val = insn & 0xfff;
956 if (!(insn & (1 << 23)))
957 val = -val;
537730b9 958 if (val != 0)
b0109805 959 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
960 } else {
961 /* shift/register */
962 rm = (insn) & 0xf;
963 shift = (insn >> 7) & 0x1f;
1e8d4eec 964 shiftop = (insn >> 5) & 3;
b26eefb6 965 offset = load_reg(s, rm);
9a119ff6 966 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 967 if (!(insn & (1 << 23)))
b0109805 968 tcg_gen_sub_i32(var, var, offset);
2c0262af 969 else
b0109805 970 tcg_gen_add_i32(var, var, offset);
b26eefb6 971 dead_tmp(offset);
2c0262af
FB
972 }
973}
974
191f9a93 975static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 976 int extra, TCGv var)
2c0262af
FB
977{
978 int val, rm;
b26eefb6 979 TCGv offset;
3b46e624 980
2c0262af
FB
981 if (insn & (1 << 22)) {
982 /* immediate */
983 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
984 if (!(insn & (1 << 23)))
985 val = -val;
18acad92 986 val += extra;
537730b9 987 if (val != 0)
b0109805 988 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
989 } else {
990 /* register */
191f9a93 991 if (extra)
b0109805 992 tcg_gen_addi_i32(var, var, extra);
2c0262af 993 rm = (insn) & 0xf;
b26eefb6 994 offset = load_reg(s, rm);
2c0262af 995 if (!(insn & (1 << 23)))
b0109805 996 tcg_gen_sub_i32(var, var, offset);
2c0262af 997 else
b0109805 998 tcg_gen_add_i32(var, var, offset);
b26eefb6 999 dead_tmp(offset);
2c0262af
FB
1000 }
1001}
1002
4373f3ce
PB
1003#define VFP_OP2(name) \
1004static inline void gen_vfp_##name(int dp) \
1005{ \
1006 if (dp) \
1007 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
1008 else \
1009 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
1010}
1011
4373f3ce 1012#define VFP_OP1i(name) \
9ee6e8bb
PB
1013static inline void gen_vfp_##name(int dp, int arg) \
1014{ \
1015 if (dp) \
1016 gen_op_vfp_##name##d(arg); \
1017 else \
1018 gen_op_vfp_##name##s(arg); \
1019}
1020
4373f3ce
PB
1021VFP_OP2(add)
1022VFP_OP2(sub)
1023VFP_OP2(mul)
1024VFP_OP2(div)
1025
1026#undef VFP_OP2
1027
1028static inline void gen_vfp_abs(int dp)
1029{
1030 if (dp)
1031 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1032 else
1033 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1034}
1035
1036static inline void gen_vfp_neg(int dp)
1037{
1038 if (dp)
1039 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1040 else
1041 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1042}
1043
1044static inline void gen_vfp_sqrt(int dp)
1045{
1046 if (dp)
1047 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1048 else
1049 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1050}
1051
1052static inline void gen_vfp_cmp(int dp)
1053{
1054 if (dp)
1055 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1056 else
1057 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1058}
1059
1060static inline void gen_vfp_cmpe(int dp)
1061{
1062 if (dp)
1063 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1064 else
1065 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1066}
1067
1068static inline void gen_vfp_F1_ld0(int dp)
1069{
1070 if (dp)
1071 tcg_gen_movi_i64(cpu_F0d, 0);
1072 else
1073 tcg_gen_movi_i32(cpu_F0s, 0);
1074}
1075
1076static inline void gen_vfp_uito(int dp)
1077{
1078 if (dp)
1079 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1080 else
1081 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1082}
1083
1084static inline void gen_vfp_sito(int dp)
1085{
1086 if (dp)
1087 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1088 else
1089 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1090}
1091
1092static inline void gen_vfp_toui(int dp)
1093{
1094 if (dp)
1095 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1096 else
1097 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1098}
1099
1100static inline void gen_vfp_touiz(int dp)
1101{
1102 if (dp)
1103 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1104 else
1105 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1106}
1107
1108static inline void gen_vfp_tosi(int dp)
1109{
1110 if (dp)
1111 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1112 else
1113 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1114}
1115
1116static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1117{
1118 if (dp)
4373f3ce 1119 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1120 else
4373f3ce
PB
1121 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1122}
1123
/* Fixed-point <-> float conversions taking a shift (number of
   fraction bits), e.g. tosh = to signed halfword fixed-point.
   NOTE(review): the tcg_const_i32 temporary is never freed here —
   confirm whether this leaks a TCG temp per translation.  */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
4373f3ce
PB
/* to{s,u}{h,l}: float -> fixed-point; {s,u}{h,l}to: fixed-point -> float.  */
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1141
b5ff1b31
FB
1142static inline void gen_vfp_ld(DisasContext *s, int dp)
1143{
1144 if (dp)
4373f3ce 1145 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1146 else
4373f3ce 1147 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1148}
1149
1150static inline void gen_vfp_st(DisasContext *s, int dp)
1151{
1152 if (dp)
4373f3ce 1153 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1154 else
4373f3ce 1155 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1156}
1157
8e96005d
FB
/* Return the byte offset within CPUARMState of VFP register 'reg'.
   Double-precision registers index vfp.regs[] directly; a
   single-precision register selects one half of the containing
   double, with host endianness handled by the CPU_DoubleU
   l.upper / l.lower accessors.  */
static inline long
vfp_reg_offset (int dp, int reg)
{
    if (dp)
        return offsetof(CPUARMState, vfp.regs[reg]);
    else if (reg & 1) {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.upper);
    } else {
        return offsetof(CPUARMState, vfp.regs[reg >> 1])
          + offsetof(CPU_DoubleU, l.lower);
    }
}
9ee6e8bb
PB
1171
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  NEON register
   'reg' pass 'n' aliases single-precision register 2*reg + n.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1181
ad69471c
PB
/* FIXME: Remove these. */
/* Legacy accessors moving one 32-bit NEON register piece between
   CPU state and the global T0/T1 temporaries.  */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1189
8f8e3aa4
PB
1190static TCGv neon_load_reg(int reg, int pass)
1191{
1192 TCGv tmp = new_tmp();
1193 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1194 return tmp;
1195}
1196
/* Write 'var' into one 32-bit piece of a NEON register; consumes
   (frees) the temporary.  */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}
1202
ad69471c
PB
/* Load a full 64-bit NEON/VFP double register into 'var'.  */
static inline void neon_load_reg64(TCGv var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1207
/* Store 'var' into a full 64-bit NEON/VFP double register.  */
static inline void neon_store_reg64(TCGv var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1212
4373f3ce
PB
/* Float register loads/stores are raw bit-pattern moves, so alias
   them to the integer ld/st ops of the same width.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1217
b7bcbe95
FB
1218static inline void gen_mov_F0_vreg(int dp, int reg)
1219{
1220 if (dp)
4373f3ce 1221 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1222 else
4373f3ce 1223 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1224}
1225
1226static inline void gen_mov_F1_vreg(int dp, int reg)
1227{
1228 if (dp)
4373f3ce 1229 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1230 else
4373f3ce 1231 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1232}
1233
1234static inline void gen_mov_vreg_F0(int dp, int reg)
1235{
1236 if (dp)
4373f3ce 1237 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1238 else
4373f3ce 1239 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1240}
1241
18c9b560
AZ
1242#define ARM_CP_RW_BIT (1 << 20)
1243
e677137d
PB
/* var = 64-bit iwMMXt data register wRn.  */
static inline void iwmmxt_load_reg(TCGv var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1248
/* 64-bit iwMMXt data register wRn = var.  */
static inline void iwmmxt_store_reg(TCGv var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1253
/* 32-bit iwMMXt control register wCx = T0.  */
static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
1258
/* T0 = 32-bit iwMMXt control register wCx.  */
static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
1263
/* T1 = 32-bit iwMMXt control register wCx.  */
static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
1268
/* wRn = M0 (64-bit accumulator).  */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1273
/* M0 = wRn (64-bit accumulator).  */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1278
/* M0 |= wRn (clobbers cpu_V1 as scratch).  */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1284
/* M0 &= wRn (clobbers cpu_V1 as scratch).  */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1290
/* M0 ^= wRn (clobbers cpu_V1 as scratch).  */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1296
/* M0 = helper(M0, wRn) — helpers that need no CPU state.  */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also takes cpu_env (e.g. for flag or
   saturation state).  */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate an env-taking op at byte/word/long element sizes.  */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* Single-operand env-taking op: M0 = helper(env, M0).  */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
1321
/* Multiply-add, sum-of-absolute-differences and multiply variants
   (no CPU state needed by the helpers).  */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Two-operand interleave of low/high halves, per element size.  */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Single-operand unpack: low (l) or high (h) half, unsigned (u) or
   signed (s) extension, at byte/word/long element size.  */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise compares.  */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max, signed and unsigned.  */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract: no-saturate (n), unsigned-saturate (u),
   signed-saturate (s) variants.  */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averages (byte/word, round variant 0/1).  */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack with unsigned (u) or signed (s) saturation, by element size.  */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1378
/* M0 = iwmmxt_muladdsl(M0, T0, T1).  */
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
1383
/* M0 = iwmmxt_muladdsw(M0, T0, T1).  */
static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
1388
/* M0 = iwmmxt_muladdswl(M0, T0, T1).  */
static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
1393
/* M0 = align(M0, wRn, T0) — byte-align the M0:wRn pair by the
   amount in T0 (clobbers cpu_V1 as scratch).  */
static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}
1399
/* Insert T0 into M0 at bit position 'shift', masked by T1.
   NOTE(review): the tcg_const_i32 temporary 'tmp' is never freed —
   confirm against TCG temp ownership rules (possible leak).  */
static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}
1405
/* T0 = sign-extended byte extracted from M0 at bit offset 'shift'.
   Clobbers M0 (shifts it in place).  */
static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}
1412
/* T0 = sign-extended halfword extracted from M0 at bit offset
   'shift'.  Clobbers M0 (shifts it in place).  */
static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}
1419
/* T0 = unsigned field extracted from M0 at bit offset 'shift',
   masked by 'mask' (the andi is skipped for a full-width mask).
   Clobbers M0 (shifts it in place).  */
static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}
1427
1428static void gen_op_iwmmxt_set_mup(void)
1429{
1430 TCGv tmp;
1431 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1432 tcg_gen_ori_i32(tmp, tmp, 2);
1433 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1434}
1435
1436static void gen_op_iwmmxt_set_cup(void)
1437{
1438 TCGv tmp;
1439 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1440 tcg_gen_ori_i32(tmp, tmp, 1);
1441 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1442}
1443
1444static void gen_op_iwmmxt_setpsr_nz(void)
1445{
1446 TCGv tmp = new_tmp();
1447 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1448 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1449}
1450
/* M0 += low 32 bits of wRn, zero-extended to 64 bits (clobbers
   cpu_V1 as scratch).  */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_andi_i64(cpu_V1, cpu_V1, 0xffffffffu);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1457
1458
/* Split 64-bit wRn into T0 (low word) and T1 (high word); used by
   TMRRC-style register transfers.  Clobbers cpu_V0 as scratch.  */
static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V0, rn);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
    tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
    tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
}
1466
1467static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1468{
1469 tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
1470 tcg_gen_extu_i32_i64(cpu_V1, cpu_T[0]);
1471 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
1472 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
1473 iwmmxt_store_reg(cpu_V0, rn);
1474}
1475
18c9b560
AZ
/* Compute the effective address for an iwMMXt load/store into T1,
   handling pre-indexed, post-indexed and plain offset addressing
   with optional base-register writeback.  Returns nonzero for an
   invalid addressing-mode encoding.  */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    /* Base register.  */
    rd = (insn >> 16) & 0xf;
    gen_movl_T1_reg(s, rd);

    /* 8-bit immediate offset, scaled by 4 when bit 8 is set.  */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        /* Bit 21: write the updated address back to the base.  */
        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        /* Non-indexed form is only valid with bit 23 set.  */
        return 1;
    return 0;
}
1506
1507static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1508{
1509 int rd = (insn >> 0) & 0xf;
1510
1511 if (insn & (1 << 8))
1512 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1513 return 1;
1514 else
1515 gen_op_iwmmxt_movl_T0_wCx(rd);
1516 else
e677137d 1517 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1518
1519 gen_op_movl_T1_im(mask);
1520 gen_op_andl_T0_T1();
1521 return 0;
1522}
1523
1524/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1525 (ie. an undefined instruction). */
1526static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1527{
1528 int rd, wrd;
1529 int rdhi, rdlo, rd0, rd1, i;
b0109805 1530 TCGv tmp;
18c9b560
AZ
1531
1532 if ((insn & 0x0e000e00) == 0x0c000000) {
1533 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1534 wrd = insn & 0xf;
1535 rdlo = (insn >> 12) & 0xf;
1536 rdhi = (insn >> 16) & 0xf;
1537 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1538 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1539 gen_movl_reg_T0(s, rdlo);
1540 gen_movl_reg_T1(s, rdhi);
1541 } else { /* TMCRR */
1542 gen_movl_T0_reg(s, rdlo);
1543 gen_movl_T1_reg(s, rdhi);
e677137d 1544 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1545 gen_op_iwmmxt_set_mup();
1546 }
1547 return 0;
1548 }
1549
1550 wrd = (insn >> 12) & 0xf;
1551 if (gen_iwmmxt_address(s, insn))
1552 return 1;
1553 if (insn & ARM_CP_RW_BIT) {
1554 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1555 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1556 tcg_gen_mov_i32(cpu_T[0], tmp);
1557 dead_tmp(tmp);
18c9b560
AZ
1558 gen_op_iwmmxt_movl_wCx_T0(wrd);
1559 } else {
e677137d
PB
1560 i = 1;
1561 if (insn & (1 << 8)) {
1562 if (insn & (1 << 22)) { /* WLDRD */
1563 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1564 i = 0;
1565 } else { /* WLDRW wRd */
1566 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1567 }
1568 } else {
1569 if (insn & (1 << 22)) { /* WLDRH */
1570 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1571 } else { /* WLDRB */
1572 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1573 }
1574 }
1575 if (i) {
1576 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1577 dead_tmp(tmp);
1578 }
18c9b560
AZ
1579 gen_op_iwmmxt_movq_wRn_M0(wrd);
1580 }
1581 } else {
1582 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1583 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1584 tmp = new_tmp();
1585 tcg_gen_mov_i32(tmp, cpu_T[0]);
1586 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1587 } else {
1588 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1589 tmp = new_tmp();
1590 if (insn & (1 << 8)) {
1591 if (insn & (1 << 22)) { /* WSTRD */
1592 dead_tmp(tmp);
1593 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1594 } else { /* WSTRW wRd */
1595 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1596 gen_st32(tmp, cpu_T[1], IS_USER(s));
1597 }
1598 } else {
1599 if (insn & (1 << 22)) { /* WSTRH */
1600 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1601 gen_st16(tmp, cpu_T[1], IS_USER(s));
1602 } else { /* WSTRB */
1603 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1604 gen_st8(tmp, cpu_T[1], IS_USER(s));
1605 }
1606 }
18c9b560
AZ
1607 }
1608 }
1609 return 0;
1610 }
1611
1612 if ((insn & 0x0f000000) != 0x0e000000)
1613 return 1;
1614
1615 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1616 case 0x000: /* WOR */
1617 wrd = (insn >> 12) & 0xf;
1618 rd0 = (insn >> 0) & 0xf;
1619 rd1 = (insn >> 16) & 0xf;
1620 gen_op_iwmmxt_movq_M0_wRn(rd0);
1621 gen_op_iwmmxt_orq_M0_wRn(rd1);
1622 gen_op_iwmmxt_setpsr_nz();
1623 gen_op_iwmmxt_movq_wRn_M0(wrd);
1624 gen_op_iwmmxt_set_mup();
1625 gen_op_iwmmxt_set_cup();
1626 break;
1627 case 0x011: /* TMCR */
1628 if (insn & 0xf)
1629 return 1;
1630 rd = (insn >> 12) & 0xf;
1631 wrd = (insn >> 16) & 0xf;
1632 switch (wrd) {
1633 case ARM_IWMMXT_wCID:
1634 case ARM_IWMMXT_wCASF:
1635 break;
1636 case ARM_IWMMXT_wCon:
1637 gen_op_iwmmxt_set_cup();
1638 /* Fall through. */
1639 case ARM_IWMMXT_wCSSF:
1640 gen_op_iwmmxt_movl_T0_wCx(wrd);
1641 gen_movl_T1_reg(s, rd);
1642 gen_op_bicl_T0_T1();
1643 gen_op_iwmmxt_movl_wCx_T0(wrd);
1644 break;
1645 case ARM_IWMMXT_wCGR0:
1646 case ARM_IWMMXT_wCGR1:
1647 case ARM_IWMMXT_wCGR2:
1648 case ARM_IWMMXT_wCGR3:
1649 gen_op_iwmmxt_set_cup();
1650 gen_movl_reg_T0(s, rd);
1651 gen_op_iwmmxt_movl_wCx_T0(wrd);
1652 break;
1653 default:
1654 return 1;
1655 }
1656 break;
1657 case 0x100: /* WXOR */
1658 wrd = (insn >> 12) & 0xf;
1659 rd0 = (insn >> 0) & 0xf;
1660 rd1 = (insn >> 16) & 0xf;
1661 gen_op_iwmmxt_movq_M0_wRn(rd0);
1662 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1663 gen_op_iwmmxt_setpsr_nz();
1664 gen_op_iwmmxt_movq_wRn_M0(wrd);
1665 gen_op_iwmmxt_set_mup();
1666 gen_op_iwmmxt_set_cup();
1667 break;
1668 case 0x111: /* TMRC */
1669 if (insn & 0xf)
1670 return 1;
1671 rd = (insn >> 12) & 0xf;
1672 wrd = (insn >> 16) & 0xf;
1673 gen_op_iwmmxt_movl_T0_wCx(wrd);
1674 gen_movl_reg_T0(s, rd);
1675 break;
1676 case 0x300: /* WANDN */
1677 wrd = (insn >> 12) & 0xf;
1678 rd0 = (insn >> 0) & 0xf;
1679 rd1 = (insn >> 16) & 0xf;
1680 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1681 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1682 gen_op_iwmmxt_andq_M0_wRn(rd1);
1683 gen_op_iwmmxt_setpsr_nz();
1684 gen_op_iwmmxt_movq_wRn_M0(wrd);
1685 gen_op_iwmmxt_set_mup();
1686 gen_op_iwmmxt_set_cup();
1687 break;
1688 case 0x200: /* WAND */
1689 wrd = (insn >> 12) & 0xf;
1690 rd0 = (insn >> 0) & 0xf;
1691 rd1 = (insn >> 16) & 0xf;
1692 gen_op_iwmmxt_movq_M0_wRn(rd0);
1693 gen_op_iwmmxt_andq_M0_wRn(rd1);
1694 gen_op_iwmmxt_setpsr_nz();
1695 gen_op_iwmmxt_movq_wRn_M0(wrd);
1696 gen_op_iwmmxt_set_mup();
1697 gen_op_iwmmxt_set_cup();
1698 break;
1699 case 0x810: case 0xa10: /* WMADD */
1700 wrd = (insn >> 12) & 0xf;
1701 rd0 = (insn >> 0) & 0xf;
1702 rd1 = (insn >> 16) & 0xf;
1703 gen_op_iwmmxt_movq_M0_wRn(rd0);
1704 if (insn & (1 << 21))
1705 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1706 else
1707 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1708 gen_op_iwmmxt_movq_wRn_M0(wrd);
1709 gen_op_iwmmxt_set_mup();
1710 break;
1711 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1712 wrd = (insn >> 12) & 0xf;
1713 rd0 = (insn >> 16) & 0xf;
1714 rd1 = (insn >> 0) & 0xf;
1715 gen_op_iwmmxt_movq_M0_wRn(rd0);
1716 switch ((insn >> 22) & 3) {
1717 case 0:
1718 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1719 break;
1720 case 1:
1721 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1722 break;
1723 case 2:
1724 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1725 break;
1726 case 3:
1727 return 1;
1728 }
1729 gen_op_iwmmxt_movq_wRn_M0(wrd);
1730 gen_op_iwmmxt_set_mup();
1731 gen_op_iwmmxt_set_cup();
1732 break;
1733 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1734 wrd = (insn >> 12) & 0xf;
1735 rd0 = (insn >> 16) & 0xf;
1736 rd1 = (insn >> 0) & 0xf;
1737 gen_op_iwmmxt_movq_M0_wRn(rd0);
1738 switch ((insn >> 22) & 3) {
1739 case 0:
1740 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1741 break;
1742 case 1:
1743 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1744 break;
1745 case 2:
1746 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1747 break;
1748 case 3:
1749 return 1;
1750 }
1751 gen_op_iwmmxt_movq_wRn_M0(wrd);
1752 gen_op_iwmmxt_set_mup();
1753 gen_op_iwmmxt_set_cup();
1754 break;
1755 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1756 wrd = (insn >> 12) & 0xf;
1757 rd0 = (insn >> 16) & 0xf;
1758 rd1 = (insn >> 0) & 0xf;
1759 gen_op_iwmmxt_movq_M0_wRn(rd0);
1760 if (insn & (1 << 22))
1761 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1762 else
1763 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1764 if (!(insn & (1 << 20)))
1765 gen_op_iwmmxt_addl_M0_wRn(wrd);
1766 gen_op_iwmmxt_movq_wRn_M0(wrd);
1767 gen_op_iwmmxt_set_mup();
1768 break;
1769 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1770 wrd = (insn >> 12) & 0xf;
1771 rd0 = (insn >> 16) & 0xf;
1772 rd1 = (insn >> 0) & 0xf;
1773 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1774 if (insn & (1 << 21)) {
1775 if (insn & (1 << 20))
1776 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1777 else
1778 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1779 } else {
1780 if (insn & (1 << 20))
1781 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1782 else
1783 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1784 }
18c9b560
AZ
1785 gen_op_iwmmxt_movq_wRn_M0(wrd);
1786 gen_op_iwmmxt_set_mup();
1787 break;
1788 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1789 wrd = (insn >> 12) & 0xf;
1790 rd0 = (insn >> 16) & 0xf;
1791 rd1 = (insn >> 0) & 0xf;
1792 gen_op_iwmmxt_movq_M0_wRn(rd0);
1793 if (insn & (1 << 21))
1794 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1795 else
1796 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1797 if (!(insn & (1 << 20))) {
e677137d
PB
1798 iwmmxt_load_reg(cpu_V1, wrd);
1799 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1800 }
1801 gen_op_iwmmxt_movq_wRn_M0(wrd);
1802 gen_op_iwmmxt_set_mup();
1803 break;
1804 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1805 wrd = (insn >> 12) & 0xf;
1806 rd0 = (insn >> 16) & 0xf;
1807 rd1 = (insn >> 0) & 0xf;
1808 gen_op_iwmmxt_movq_M0_wRn(rd0);
1809 switch ((insn >> 22) & 3) {
1810 case 0:
1811 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1812 break;
1813 case 1:
1814 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1815 break;
1816 case 2:
1817 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1818 break;
1819 case 3:
1820 return 1;
1821 }
1822 gen_op_iwmmxt_movq_wRn_M0(wrd);
1823 gen_op_iwmmxt_set_mup();
1824 gen_op_iwmmxt_set_cup();
1825 break;
1826 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1827 wrd = (insn >> 12) & 0xf;
1828 rd0 = (insn >> 16) & 0xf;
1829 rd1 = (insn >> 0) & 0xf;
1830 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1831 if (insn & (1 << 22)) {
1832 if (insn & (1 << 20))
1833 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1834 else
1835 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1836 } else {
1837 if (insn & (1 << 20))
1838 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1839 else
1840 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1841 }
18c9b560
AZ
1842 gen_op_iwmmxt_movq_wRn_M0(wrd);
1843 gen_op_iwmmxt_set_mup();
1844 gen_op_iwmmxt_set_cup();
1845 break;
1846 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1847 wrd = (insn >> 12) & 0xf;
1848 rd0 = (insn >> 16) & 0xf;
1849 rd1 = (insn >> 0) & 0xf;
1850 gen_op_iwmmxt_movq_M0_wRn(rd0);
1851 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1852 gen_op_movl_T1_im(7);
1853 gen_op_andl_T0_T1();
1854 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1855 gen_op_iwmmxt_movq_wRn_M0(wrd);
1856 gen_op_iwmmxt_set_mup();
1857 break;
1858 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1859 rd = (insn >> 12) & 0xf;
1860 wrd = (insn >> 16) & 0xf;
1861 gen_movl_T0_reg(s, rd);
1862 gen_op_iwmmxt_movq_M0_wRn(wrd);
1863 switch ((insn >> 6) & 3) {
1864 case 0:
1865 gen_op_movl_T1_im(0xff);
1866 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1867 break;
1868 case 1:
1869 gen_op_movl_T1_im(0xffff);
1870 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1871 break;
1872 case 2:
1873 gen_op_movl_T1_im(0xffffffff);
1874 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1875 break;
1876 case 3:
1877 return 1;
1878 }
1879 gen_op_iwmmxt_movq_wRn_M0(wrd);
1880 gen_op_iwmmxt_set_mup();
1881 break;
1882 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1883 rd = (insn >> 12) & 0xf;
1884 wrd = (insn >> 16) & 0xf;
1885 if (rd == 15)
1886 return 1;
1887 gen_op_iwmmxt_movq_M0_wRn(wrd);
1888 switch ((insn >> 22) & 3) {
1889 case 0:
1890 if (insn & 8)
1891 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1892 else {
e677137d 1893 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1894 }
1895 break;
1896 case 1:
1897 if (insn & 8)
1898 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1899 else {
e677137d 1900 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1901 }
1902 break;
1903 case 2:
e677137d 1904 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1905 break;
1906 case 3:
1907 return 1;
1908 }
b26eefb6 1909 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1910 break;
1911 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1912 if ((insn & 0x000ff008) != 0x0003f000)
1913 return 1;
1914 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1915 switch ((insn >> 22) & 3) {
1916 case 0:
1917 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1918 break;
1919 case 1:
1920 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1921 break;
1922 case 2:
1923 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1924 break;
1925 case 3:
1926 return 1;
1927 }
1928 gen_op_shll_T1_im(28);
d9ba4830 1929 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1930 break;
1931 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1932 rd = (insn >> 12) & 0xf;
1933 wrd = (insn >> 16) & 0xf;
1934 gen_movl_T0_reg(s, rd);
1935 switch ((insn >> 6) & 3) {
1936 case 0:
e677137d 1937 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1938 break;
1939 case 1:
e677137d 1940 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1941 break;
1942 case 2:
e677137d 1943 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1944 break;
1945 case 3:
1946 return 1;
1947 }
1948 gen_op_iwmmxt_movq_wRn_M0(wrd);
1949 gen_op_iwmmxt_set_mup();
1950 break;
1951 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1952 if ((insn & 0x000ff00f) != 0x0003f000)
1953 return 1;
1954 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1955 switch ((insn >> 22) & 3) {
1956 case 0:
1957 for (i = 0; i < 7; i ++) {
1958 gen_op_shll_T1_im(4);
1959 gen_op_andl_T0_T1();
1960 }
1961 break;
1962 case 1:
1963 for (i = 0; i < 3; i ++) {
1964 gen_op_shll_T1_im(8);
1965 gen_op_andl_T0_T1();
1966 }
1967 break;
1968 case 2:
1969 gen_op_shll_T1_im(16);
1970 gen_op_andl_T0_T1();
1971 break;
1972 case 3:
1973 return 1;
1974 }
d9ba4830 1975 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1976 break;
1977 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1978 wrd = (insn >> 12) & 0xf;
1979 rd0 = (insn >> 16) & 0xf;
1980 gen_op_iwmmxt_movq_M0_wRn(rd0);
1981 switch ((insn >> 22) & 3) {
1982 case 0:
e677137d 1983 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1984 break;
1985 case 1:
e677137d 1986 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1987 break;
1988 case 2:
e677137d 1989 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1990 break;
1991 case 3:
1992 return 1;
1993 }
1994 gen_op_iwmmxt_movq_wRn_M0(wrd);
1995 gen_op_iwmmxt_set_mup();
1996 break;
1997 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1998 if ((insn & 0x000ff00f) != 0x0003f000)
1999 return 1;
2000 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
2001 switch ((insn >> 22) & 3) {
2002 case 0:
2003 for (i = 0; i < 7; i ++) {
2004 gen_op_shll_T1_im(4);
2005 gen_op_orl_T0_T1();
2006 }
2007 break;
2008 case 1:
2009 for (i = 0; i < 3; i ++) {
2010 gen_op_shll_T1_im(8);
2011 gen_op_orl_T0_T1();
2012 }
2013 break;
2014 case 2:
2015 gen_op_shll_T1_im(16);
2016 gen_op_orl_T0_T1();
2017 break;
2018 case 3:
2019 return 1;
2020 }
d9ba4830 2021 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
2022 break;
2023 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2024 rd = (insn >> 12) & 0xf;
2025 rd0 = (insn >> 16) & 0xf;
2026 if ((insn & 0xf) != 0)
2027 return 1;
2028 gen_op_iwmmxt_movq_M0_wRn(rd0);
2029 switch ((insn >> 22) & 3) {
2030 case 0:
e677137d 2031 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
2032 break;
2033 case 1:
e677137d 2034 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
2035 break;
2036 case 2:
e677137d 2037 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
2038 break;
2039 case 3:
2040 return 1;
2041 }
2042 gen_movl_reg_T0(s, rd);
2043 break;
2044 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2045 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2046 wrd = (insn >> 12) & 0xf;
2047 rd0 = (insn >> 16) & 0xf;
2048 rd1 = (insn >> 0) & 0xf;
2049 gen_op_iwmmxt_movq_M0_wRn(rd0);
2050 switch ((insn >> 22) & 3) {
2051 case 0:
2052 if (insn & (1 << 21))
2053 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2054 else
2055 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2056 break;
2057 case 1:
2058 if (insn & (1 << 21))
2059 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2060 else
2061 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2062 break;
2063 case 2:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2066 else
2067 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2068 break;
2069 case 3:
2070 return 1;
2071 }
2072 gen_op_iwmmxt_movq_wRn_M0(wrd);
2073 gen_op_iwmmxt_set_mup();
2074 gen_op_iwmmxt_set_cup();
2075 break;
2076 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2077 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2078 wrd = (insn >> 12) & 0xf;
2079 rd0 = (insn >> 16) & 0xf;
2080 gen_op_iwmmxt_movq_M0_wRn(rd0);
2081 switch ((insn >> 22) & 3) {
2082 case 0:
2083 if (insn & (1 << 21))
2084 gen_op_iwmmxt_unpacklsb_M0();
2085 else
2086 gen_op_iwmmxt_unpacklub_M0();
2087 break;
2088 case 1:
2089 if (insn & (1 << 21))
2090 gen_op_iwmmxt_unpacklsw_M0();
2091 else
2092 gen_op_iwmmxt_unpackluw_M0();
2093 break;
2094 case 2:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_unpacklsl_M0();
2097 else
2098 gen_op_iwmmxt_unpacklul_M0();
2099 break;
2100 case 3:
2101 return 1;
2102 }
2103 gen_op_iwmmxt_movq_wRn_M0(wrd);
2104 gen_op_iwmmxt_set_mup();
2105 gen_op_iwmmxt_set_cup();
2106 break;
2107 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2108 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2109 wrd = (insn >> 12) & 0xf;
2110 rd0 = (insn >> 16) & 0xf;
2111 gen_op_iwmmxt_movq_M0_wRn(rd0);
2112 switch ((insn >> 22) & 3) {
2113 case 0:
2114 if (insn & (1 << 21))
2115 gen_op_iwmmxt_unpackhsb_M0();
2116 else
2117 gen_op_iwmmxt_unpackhub_M0();
2118 break;
2119 case 1:
2120 if (insn & (1 << 21))
2121 gen_op_iwmmxt_unpackhsw_M0();
2122 else
2123 gen_op_iwmmxt_unpackhuw_M0();
2124 break;
2125 case 2:
2126 if (insn & (1 << 21))
2127 gen_op_iwmmxt_unpackhsl_M0();
2128 else
2129 gen_op_iwmmxt_unpackhul_M0();
2130 break;
2131 case 3:
2132 return 1;
2133 }
2134 gen_op_iwmmxt_movq_wRn_M0(wrd);
2135 gen_op_iwmmxt_set_mup();
2136 gen_op_iwmmxt_set_cup();
2137 break;
2138 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2139 case 0x214: case 0x614: case 0xa14: case 0xe14:
2140 wrd = (insn >> 12) & 0xf;
2141 rd0 = (insn >> 16) & 0xf;
2142 gen_op_iwmmxt_movq_M0_wRn(rd0);
2143 if (gen_iwmmxt_shift(insn, 0xff))
2144 return 1;
2145 switch ((insn >> 22) & 3) {
2146 case 0:
2147 return 1;
2148 case 1:
e677137d 2149 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2150 break;
2151 case 2:
e677137d 2152 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2153 break;
2154 case 3:
e677137d 2155 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2156 break;
2157 }
2158 gen_op_iwmmxt_movq_wRn_M0(wrd);
2159 gen_op_iwmmxt_set_mup();
2160 gen_op_iwmmxt_set_cup();
2161 break;
2162 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2163 case 0x014: case 0x414: case 0x814: case 0xc14:
2164 wrd = (insn >> 12) & 0xf;
2165 rd0 = (insn >> 16) & 0xf;
2166 gen_op_iwmmxt_movq_M0_wRn(rd0);
2167 if (gen_iwmmxt_shift(insn, 0xff))
2168 return 1;
2169 switch ((insn >> 22) & 3) {
2170 case 0:
2171 return 1;
2172 case 1:
e677137d 2173 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2174 break;
2175 case 2:
e677137d 2176 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2177 break;
2178 case 3:
e677137d 2179 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2180 break;
2181 }
2182 gen_op_iwmmxt_movq_wRn_M0(wrd);
2183 gen_op_iwmmxt_set_mup();
2184 gen_op_iwmmxt_set_cup();
2185 break;
2186 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2187 case 0x114: case 0x514: case 0x914: case 0xd14:
2188 wrd = (insn >> 12) & 0xf;
2189 rd0 = (insn >> 16) & 0xf;
2190 gen_op_iwmmxt_movq_M0_wRn(rd0);
2191 if (gen_iwmmxt_shift(insn, 0xff))
2192 return 1;
2193 switch ((insn >> 22) & 3) {
2194 case 0:
2195 return 1;
2196 case 1:
e677137d 2197 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2198 break;
2199 case 2:
e677137d 2200 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2201 break;
2202 case 3:
e677137d 2203 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2204 break;
2205 }
2206 gen_op_iwmmxt_movq_wRn_M0(wrd);
2207 gen_op_iwmmxt_set_mup();
2208 gen_op_iwmmxt_set_cup();
2209 break;
2210 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2211 case 0x314: case 0x714: case 0xb14: case 0xf14:
2212 wrd = (insn >> 12) & 0xf;
2213 rd0 = (insn >> 16) & 0xf;
2214 gen_op_iwmmxt_movq_M0_wRn(rd0);
2215 switch ((insn >> 22) & 3) {
2216 case 0:
2217 return 1;
2218 case 1:
2219 if (gen_iwmmxt_shift(insn, 0xf))
2220 return 1;
e677137d 2221 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2222 break;
2223 case 2:
2224 if (gen_iwmmxt_shift(insn, 0x1f))
2225 return 1;
e677137d 2226 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2227 break;
2228 case 3:
2229 if (gen_iwmmxt_shift(insn, 0x3f))
2230 return 1;
e677137d 2231 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2232 break;
2233 }
2234 gen_op_iwmmxt_movq_wRn_M0(wrd);
2235 gen_op_iwmmxt_set_mup();
2236 gen_op_iwmmxt_set_cup();
2237 break;
2238 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2239 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2240 wrd = (insn >> 12) & 0xf;
2241 rd0 = (insn >> 16) & 0xf;
2242 rd1 = (insn >> 0) & 0xf;
2243 gen_op_iwmmxt_movq_M0_wRn(rd0);
2244 switch ((insn >> 22) & 3) {
2245 case 0:
2246 if (insn & (1 << 21))
2247 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2248 else
2249 gen_op_iwmmxt_minub_M0_wRn(rd1);
2250 break;
2251 case 1:
2252 if (insn & (1 << 21))
2253 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2254 else
2255 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2256 break;
2257 case 2:
2258 if (insn & (1 << 21))
2259 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2260 else
2261 gen_op_iwmmxt_minul_M0_wRn(rd1);
2262 break;
2263 case 3:
2264 return 1;
2265 }
2266 gen_op_iwmmxt_movq_wRn_M0(wrd);
2267 gen_op_iwmmxt_set_mup();
2268 break;
2269 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2270 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2271 wrd = (insn >> 12) & 0xf;
2272 rd0 = (insn >> 16) & 0xf;
2273 rd1 = (insn >> 0) & 0xf;
2274 gen_op_iwmmxt_movq_M0_wRn(rd0);
2275 switch ((insn >> 22) & 3) {
2276 case 0:
2277 if (insn & (1 << 21))
2278 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2279 else
2280 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2281 break;
2282 case 1:
2283 if (insn & (1 << 21))
2284 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2285 else
2286 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2287 break;
2288 case 2:
2289 if (insn & (1 << 21))
2290 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2291 else
2292 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2293 break;
2294 case 3:
2295 return 1;
2296 }
2297 gen_op_iwmmxt_movq_wRn_M0(wrd);
2298 gen_op_iwmmxt_set_mup();
2299 break;
2300 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2301 case 0x402: case 0x502: case 0x602: case 0x702:
2302 wrd = (insn >> 12) & 0xf;
2303 rd0 = (insn >> 16) & 0xf;
2304 rd1 = (insn >> 0) & 0xf;
2305 gen_op_iwmmxt_movq_M0_wRn(rd0);
2306 gen_op_movl_T0_im((insn >> 20) & 3);
2307 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2308 gen_op_iwmmxt_movq_wRn_M0(wrd);
2309 gen_op_iwmmxt_set_mup();
2310 break;
2311 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2312 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2313 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2314 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2315 wrd = (insn >> 12) & 0xf;
2316 rd0 = (insn >> 16) & 0xf;
2317 rd1 = (insn >> 0) & 0xf;
2318 gen_op_iwmmxt_movq_M0_wRn(rd0);
2319 switch ((insn >> 20) & 0xf) {
2320 case 0x0:
2321 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2322 break;
2323 case 0x1:
2324 gen_op_iwmmxt_subub_M0_wRn(rd1);
2325 break;
2326 case 0x3:
2327 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2328 break;
2329 case 0x4:
2330 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2331 break;
2332 case 0x5:
2333 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2334 break;
2335 case 0x7:
2336 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2337 break;
2338 case 0x8:
2339 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2340 break;
2341 case 0x9:
2342 gen_op_iwmmxt_subul_M0_wRn(rd1);
2343 break;
2344 case 0xb:
2345 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2346 break;
2347 default:
2348 return 1;
2349 }
2350 gen_op_iwmmxt_movq_wRn_M0(wrd);
2351 gen_op_iwmmxt_set_mup();
2352 gen_op_iwmmxt_set_cup();
2353 break;
2354 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2355 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2356 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2357 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2358 wrd = (insn >> 12) & 0xf;
2359 rd0 = (insn >> 16) & 0xf;
2360 gen_op_iwmmxt_movq_M0_wRn(rd0);
2361 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2362 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2363 gen_op_iwmmxt_movq_wRn_M0(wrd);
2364 gen_op_iwmmxt_set_mup();
2365 gen_op_iwmmxt_set_cup();
2366 break;
2367 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2368 case 0x418: case 0x518: case 0x618: case 0x718:
2369 case 0x818: case 0x918: case 0xa18: case 0xb18:
2370 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2371 wrd = (insn >> 12) & 0xf;
2372 rd0 = (insn >> 16) & 0xf;
2373 rd1 = (insn >> 0) & 0xf;
2374 gen_op_iwmmxt_movq_M0_wRn(rd0);
2375 switch ((insn >> 20) & 0xf) {
2376 case 0x0:
2377 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2378 break;
2379 case 0x1:
2380 gen_op_iwmmxt_addub_M0_wRn(rd1);
2381 break;
2382 case 0x3:
2383 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2384 break;
2385 case 0x4:
2386 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2387 break;
2388 case 0x5:
2389 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2390 break;
2391 case 0x7:
2392 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2393 break;
2394 case 0x8:
2395 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2396 break;
2397 case 0x9:
2398 gen_op_iwmmxt_addul_M0_wRn(rd1);
2399 break;
2400 case 0xb:
2401 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2402 break;
2403 default:
2404 return 1;
2405 }
2406 gen_op_iwmmxt_movq_wRn_M0(wrd);
2407 gen_op_iwmmxt_set_mup();
2408 gen_op_iwmmxt_set_cup();
2409 break;
2410 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2411 case 0x408: case 0x508: case 0x608: case 0x708:
2412 case 0x808: case 0x908: case 0xa08: case 0xb08:
2413 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2414 wrd = (insn >> 12) & 0xf;
2415 rd0 = (insn >> 16) & 0xf;
2416 rd1 = (insn >> 0) & 0xf;
2417 gen_op_iwmmxt_movq_M0_wRn(rd0);
2418 if (!(insn & (1 << 20)))
2419 return 1;
2420 switch ((insn >> 22) & 3) {
2421 case 0:
2422 return 1;
2423 case 1:
2424 if (insn & (1 << 21))
2425 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2426 else
2427 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2428 break;
2429 case 2:
2430 if (insn & (1 << 21))
2431 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2432 else
2433 gen_op_iwmmxt_packul_M0_wRn(rd1);
2434 break;
2435 case 3:
2436 if (insn & (1 << 21))
2437 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2438 else
2439 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2440 break;
2441 }
2442 gen_op_iwmmxt_movq_wRn_M0(wrd);
2443 gen_op_iwmmxt_set_mup();
2444 gen_op_iwmmxt_set_cup();
2445 break;
2446 case 0x201: case 0x203: case 0x205: case 0x207:
2447 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2448 case 0x211: case 0x213: case 0x215: case 0x217:
2449 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2450 wrd = (insn >> 5) & 0xf;
2451 rd0 = (insn >> 12) & 0xf;
2452 rd1 = (insn >> 0) & 0xf;
2453 if (rd0 == 0xf || rd1 == 0xf)
2454 return 1;
2455 gen_op_iwmmxt_movq_M0_wRn(wrd);
2456 switch ((insn >> 16) & 0xf) {
2457 case 0x0: /* TMIA */
b26eefb6
PB
2458 gen_movl_T0_reg(s, rd0);
2459 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2460 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2461 break;
2462 case 0x8: /* TMIAPH */
b26eefb6
PB
2463 gen_movl_T0_reg(s, rd0);
2464 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2465 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2466 break;
2467 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2468 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2469 if (insn & (1 << 16))
2470 gen_op_shrl_T1_im(16);
2471 gen_op_movl_T0_T1();
b26eefb6 2472 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2473 if (insn & (1 << 17))
2474 gen_op_shrl_T1_im(16);
2475 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2476 break;
2477 default:
2478 return 1;
2479 }
2480 gen_op_iwmmxt_movq_wRn_M0(wrd);
2481 gen_op_iwmmxt_set_mup();
2482 break;
2483 default:
2484 return 1;
2485 }
2486
2487 return 0;
2488}
2489
2490/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2491 (ie. an undefined instruction). */
2492static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2493{
2494 int acc, rd0, rd1, rdhi, rdlo;
2495
2496 if ((insn & 0x0ff00f10) == 0x0e200010) {
2497 /* Multiply with Internal Accumulate Format */
2498 rd0 = (insn >> 12) & 0xf;
2499 rd1 = insn & 0xf;
2500 acc = (insn >> 5) & 7;
2501
2502 if (acc != 0)
2503 return 1;
2504
2505 switch ((insn >> 16) & 0xf) {
2506 case 0x0: /* MIA */
b26eefb6
PB
2507 gen_movl_T0_reg(s, rd0);
2508 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2509 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2510 break;
2511 case 0x8: /* MIAPH */
b26eefb6
PB
2512 gen_movl_T0_reg(s, rd0);
2513 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2514 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2515 break;
2516 case 0xc: /* MIABB */
2517 case 0xd: /* MIABT */
2518 case 0xe: /* MIATB */
2519 case 0xf: /* MIATT */
b26eefb6 2520 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2521 if (insn & (1 << 16))
2522 gen_op_shrl_T1_im(16);
2523 gen_op_movl_T0_T1();
b26eefb6 2524 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2525 if (insn & (1 << 17))
2526 gen_op_shrl_T1_im(16);
2527 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2528 break;
2529 default:
2530 return 1;
2531 }
2532
2533 gen_op_iwmmxt_movq_wRn_M0(acc);
2534 return 0;
2535 }
2536
2537 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2538 /* Internal Accumulator Access Format */
2539 rdhi = (insn >> 16) & 0xf;
2540 rdlo = (insn >> 12) & 0xf;
2541 acc = insn & 7;
2542
2543 if (acc != 0)
2544 return 1;
2545
2546 if (insn & ARM_CP_RW_BIT) { /* MRA */
e677137d 2547 gen_iwmmxt_movl_T0_T1_wRn(acc);
b26eefb6 2548 gen_movl_reg_T0(s, rdlo);
18c9b560
AZ
2549 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2550 gen_op_andl_T0_T1();
b26eefb6 2551 gen_movl_reg_T0(s, rdhi);
18c9b560 2552 } else { /* MAR */
b26eefb6
PB
2553 gen_movl_T0_reg(s, rdlo);
2554 gen_movl_T1_reg(s, rdhi);
e677137d 2555 gen_iwmmxt_movl_wRn_T0_T1(acc);
18c9b560
AZ
2556 }
2557 return 0;
2558 }
2559
2560 return 1;
2561}
2562
c1713132
AZ
2563/* Disassemble system coprocessor instruction. Return nonzero if
2564 instruction is not defined. */
2565static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2566{
8984bd2e 2567 TCGv tmp;
c1713132
AZ
2568 uint32_t rd = (insn >> 12) & 0xf;
2569 uint32_t cp = (insn >> 8) & 0xf;
2570 if (IS_USER(s)) {
2571 return 1;
2572 }
2573
18c9b560 2574 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2575 if (!env->cp[cp].cp_read)
2576 return 1;
8984bd2e
PB
2577 gen_set_pc_im(s->pc);
2578 tmp = new_tmp();
2579 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2580 store_reg(s, rd, tmp);
c1713132
AZ
2581 } else {
2582 if (!env->cp[cp].cp_write)
2583 return 1;
8984bd2e
PB
2584 gen_set_pc_im(s->pc);
2585 tmp = load_reg(s, rd);
2586 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
c1713132
AZ
2587 }
2588 return 0;
2589}
2590
9ee6e8bb
PB
2591static int cp15_user_ok(uint32_t insn)
2592{
2593 int cpn = (insn >> 16) & 0xf;
2594 int cpm = insn & 0xf;
2595 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2596
2597 if (cpn == 13 && cpm == 0) {
2598 /* TLS register. */
2599 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2600 return 1;
2601 }
2602 if (cpn == 7) {
2603 /* ISB, DSB, DMB. */
2604 if ((cpm == 5 && op == 4)
2605 || (cpm == 10 && (op == 4 || op == 5)))
2606 return 1;
2607 }
2608 return 0;
2609}
2610
b5ff1b31
FB
2611/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2612 instruction is not defined. */
a90b7318 2613static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2614{
2615 uint32_t rd;
8984bd2e 2616 TCGv tmp;
b5ff1b31 2617
9ee6e8bb
PB
2618 /* M profile cores use memory mapped registers instead of cp15. */
2619 if (arm_feature(env, ARM_FEATURE_M))
2620 return 1;
2621
2622 if ((insn & (1 << 25)) == 0) {
2623 if (insn & (1 << 20)) {
2624 /* mrrc */
2625 return 1;
2626 }
2627 /* mcrr. Used for block cache operations, so implement as no-op. */
2628 return 0;
2629 }
2630 if ((insn & (1 << 4)) == 0) {
2631 /* cdp */
2632 return 1;
2633 }
2634 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2635 return 1;
2636 }
9332f9da
FB
2637 if ((insn & 0x0fff0fff) == 0x0e070f90
2638 || (insn & 0x0fff0fff) == 0x0e070f58) {
2639 /* Wait for interrupt. */
8984bd2e 2640 gen_set_pc_im(s->pc);
9ee6e8bb 2641 s->is_jmp = DISAS_WFI;
9332f9da
FB
2642 return 0;
2643 }
b5ff1b31 2644 rd = (insn >> 12) & 0xf;
18c9b560 2645 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2646 tmp = new_tmp();
2647 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2648 /* If the destination register is r15 then sets condition codes. */
2649 if (rd != 15)
8984bd2e
PB
2650 store_reg(s, rd, tmp);
2651 else
2652 dead_tmp(tmp);
b5ff1b31 2653 } else {
8984bd2e
PB
2654 tmp = load_reg(s, rd);
2655 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2656 dead_tmp(tmp);
a90b7318
AZ
2657 /* Normally we would always end the TB here, but Linux
2658 * arch/arm/mach-pxa/sleep.S expects two instructions following
2659 * an MMU enable to execute from cache. Imitate this behaviour. */
2660 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2661 (insn & 0x0fff0fff) != 0x0e010f10)
2662 gen_lookup_tb(s);
b5ff1b31 2663 }
b5ff1b31
FB
2664 return 0;
2665}
2666
9ee6e8bb
PB
2667#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2668#define VFP_SREG(insn, bigbit, smallbit) \
2669 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2670#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2671 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2672 reg = (((insn) >> (bigbit)) & 0x0f) \
2673 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2674 } else { \
2675 if (insn & (1 << (smallbit))) \
2676 return 1; \
2677 reg = ((insn) >> (bigbit)) & 0x0f; \
2678 }} while (0)
2679
2680#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2681#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2682#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2683#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2684#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2685#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2686
4373f3ce
PB
2687/* Move between integer and VFP cores. */
2688static TCGv gen_vfp_mrs(void)
2689{
2690 TCGv tmp = new_tmp();
2691 tcg_gen_mov_i32(tmp, cpu_F0s);
2692 return tmp;
2693}
2694
2695static void gen_vfp_msr(TCGv tmp)
2696{
2697 tcg_gen_mov_i32(cpu_F0s, tmp);
2698 dead_tmp(tmp);
2699}
2700
9ee6e8bb
PB
2701static inline int
2702vfp_enabled(CPUState * env)
2703{
2704 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2705}
2706
ad69471c
PB
2707static void gen_neon_dup_u8(TCGv var, int shift)
2708{
2709 TCGv tmp = new_tmp();
2710 if (shift)
2711 tcg_gen_shri_i32(var, var, shift);
2712 tcg_gen_andi_i32(var, var, 0xff);
2713 tcg_gen_shli_i32(tmp, var, 8);
2714 tcg_gen_or_i32(var, var, tmp);
2715 tcg_gen_shli_i32(tmp, var, 16);
2716 tcg_gen_or_i32(var, var, tmp);
2717 dead_tmp(tmp);
2718}
2719
2720static void gen_neon_dup_low16(TCGv var)
2721{
2722 TCGv tmp = new_tmp();
2723 tcg_gen_andi_i32(var, var, 0xffff);
2724 tcg_gen_shli_i32(tmp, var, 16);
2725 tcg_gen_or_i32(var, var, tmp);
2726 dead_tmp(tmp);
2727}
2728
2729static void gen_neon_dup_high16(TCGv var)
2730{
2731 TCGv tmp = new_tmp();
2732 tcg_gen_andi_i32(var, var, 0xffff0000);
2733 tcg_gen_shri_i32(tmp, var, 16);
2734 tcg_gen_or_i32(var, var, tmp);
2735 dead_tmp(tmp);
2736}
2737
b7bcbe95
FB
2738/* Disassemble a VFP instruction. Returns nonzero if an error occured
2739 (ie. an undefined instruction). */
2740static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2741{
2742 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2743 int dp, veclen;
4373f3ce 2744 TCGv tmp;
ad69471c 2745 TCGv tmp2;
b7bcbe95 2746
40f137e1
PB
2747 if (!arm_feature(env, ARM_FEATURE_VFP))
2748 return 1;
2749
9ee6e8bb
PB
2750 if (!vfp_enabled(env)) {
2751 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2752 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2753 return 1;
2754 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2755 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2756 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2757 return 1;
2758 }
b7bcbe95
FB
2759 dp = ((insn & 0xf00) == 0xb00);
2760 switch ((insn >> 24) & 0xf) {
2761 case 0xe:
2762 if (insn & (1 << 4)) {
2763 /* single register transfer */
b7bcbe95
FB
2764 rd = (insn >> 12) & 0xf;
2765 if (dp) {
9ee6e8bb
PB
2766 int size;
2767 int pass;
2768
2769 VFP_DREG_N(rn, insn);
2770 if (insn & 0xf)
b7bcbe95 2771 return 1;
9ee6e8bb
PB
2772 if (insn & 0x00c00060
2773 && !arm_feature(env, ARM_FEATURE_NEON))
2774 return 1;
2775
2776 pass = (insn >> 21) & 1;
2777 if (insn & (1 << 22)) {
2778 size = 0;
2779 offset = ((insn >> 5) & 3) * 8;
2780 } else if (insn & (1 << 5)) {
2781 size = 1;
2782 offset = (insn & (1 << 6)) ? 16 : 0;
2783 } else {
2784 size = 2;
2785 offset = 0;
2786 }
18c9b560 2787 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2788 /* vfp->arm */
ad69471c 2789 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2790 switch (size) {
2791 case 0:
9ee6e8bb 2792 if (offset)
ad69471c 2793 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2794 if (insn & (1 << 23))
ad69471c 2795 gen_uxtb(tmp);
9ee6e8bb 2796 else
ad69471c 2797 gen_sxtb(tmp);
9ee6e8bb
PB
2798 break;
2799 case 1:
9ee6e8bb
PB
2800 if (insn & (1 << 23)) {
2801 if (offset) {
ad69471c 2802 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2803 } else {
ad69471c 2804 gen_uxth(tmp);
9ee6e8bb
PB
2805 }
2806 } else {
2807 if (offset) {
ad69471c 2808 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2809 } else {
ad69471c 2810 gen_sxth(tmp);
9ee6e8bb
PB
2811 }
2812 }
2813 break;
2814 case 2:
9ee6e8bb
PB
2815 break;
2816 }
ad69471c 2817 store_reg(s, rd, tmp);
b7bcbe95
FB
2818 } else {
2819 /* arm->vfp */
ad69471c 2820 tmp = load_reg(s, rd);
9ee6e8bb
PB
2821 if (insn & (1 << 23)) {
2822 /* VDUP */
2823 if (size == 0) {
ad69471c 2824 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2825 } else if (size == 1) {
ad69471c 2826 gen_neon_dup_low16(tmp);
9ee6e8bb 2827 }
ad69471c
PB
2828 tmp2 = new_tmp();
2829 tcg_gen_mov_i32(tmp2, tmp);
2830 neon_store_reg(rn, 0, tmp2);
2831 neon_store_reg(rn, 0, tmp);
9ee6e8bb
PB
2832 } else {
2833 /* VMOV */
2834 switch (size) {
2835 case 0:
ad69471c
PB
2836 tmp2 = neon_load_reg(rn, pass);
2837 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2838 dead_tmp(tmp2);
9ee6e8bb
PB
2839 break;
2840 case 1:
ad69471c
PB
2841 tmp2 = neon_load_reg(rn, pass);
2842 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2843 dead_tmp(tmp2);
9ee6e8bb
PB
2844 break;
2845 case 2:
9ee6e8bb
PB
2846 break;
2847 }
ad69471c 2848 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2849 }
b7bcbe95 2850 }
9ee6e8bb
PB
2851 } else { /* !dp */
2852 if ((insn & 0x6f) != 0x00)
2853 return 1;
2854 rn = VFP_SREG_N(insn);
18c9b560 2855 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2856 /* vfp->arm */
2857 if (insn & (1 << 21)) {
2858 /* system register */
40f137e1 2859 rn >>= 1;
9ee6e8bb 2860
b7bcbe95 2861 switch (rn) {
40f137e1 2862 case ARM_VFP_FPSID:
4373f3ce 2863 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2864 VFP3 restricts all id registers to privileged
2865 accesses. */
2866 if (IS_USER(s)
2867 && arm_feature(env, ARM_FEATURE_VFP3))
2868 return 1;
4373f3ce 2869 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2870 break;
40f137e1 2871 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2872 if (IS_USER(s))
2873 return 1;
4373f3ce 2874 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2875 break;
40f137e1
PB
2876 case ARM_VFP_FPINST:
2877 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2878 /* Not present in VFP3. */
2879 if (IS_USER(s)
2880 || arm_feature(env, ARM_FEATURE_VFP3))
2881 return 1;
4373f3ce 2882 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2883 break;
40f137e1 2884 case ARM_VFP_FPSCR:
4373f3ce
PB
2885 if (rd == 15) {
2886 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2887 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2888 } else {
2889 tmp = new_tmp();
2890 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2891 }
b7bcbe95 2892 break;
9ee6e8bb
PB
2893 case ARM_VFP_MVFR0:
2894 case ARM_VFP_MVFR1:
2895 if (IS_USER(s)
2896 || !arm_feature(env, ARM_FEATURE_VFP3))
2897 return 1;
4373f3ce 2898 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2899 break;
b7bcbe95
FB
2900 default:
2901 return 1;
2902 }
2903 } else {
2904 gen_mov_F0_vreg(0, rn);
4373f3ce 2905 tmp = gen_vfp_mrs();
b7bcbe95
FB
2906 }
2907 if (rd == 15) {
b5ff1b31 2908 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2909 gen_set_nzcv(tmp);
2910 dead_tmp(tmp);
2911 } else {
2912 store_reg(s, rd, tmp);
2913 }
b7bcbe95
FB
2914 } else {
2915 /* arm->vfp */
4373f3ce 2916 tmp = load_reg(s, rd);
b7bcbe95 2917 if (insn & (1 << 21)) {
40f137e1 2918 rn >>= 1;
b7bcbe95
FB
2919 /* system register */
2920 switch (rn) {
40f137e1 2921 case ARM_VFP_FPSID:
9ee6e8bb
PB
2922 case ARM_VFP_MVFR0:
2923 case ARM_VFP_MVFR1:
b7bcbe95
FB
2924 /* Writes are ignored. */
2925 break;
40f137e1 2926 case ARM_VFP_FPSCR:
4373f3ce
PB
2927 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2928 dead_tmp(tmp);
b5ff1b31 2929 gen_lookup_tb(s);
b7bcbe95 2930 break;
40f137e1 2931 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2932 if (IS_USER(s))
2933 return 1;
4373f3ce 2934 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2935 gen_lookup_tb(s);
2936 break;
2937 case ARM_VFP_FPINST:
2938 case ARM_VFP_FPINST2:
4373f3ce 2939 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2940 break;
b7bcbe95
FB
2941 default:
2942 return 1;
2943 }
2944 } else {
4373f3ce 2945 gen_vfp_msr(tmp);
b7bcbe95
FB
2946 gen_mov_vreg_F0(0, rn);
2947 }
2948 }
2949 }
2950 } else {
2951 /* data processing */
2952 /* The opcode is in bits 23, 21, 20 and 6. */
2953 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2954 if (dp) {
2955 if (op == 15) {
2956 /* rn is opcode */
2957 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2958 } else {
2959 /* rn is register number */
9ee6e8bb 2960 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2961 }
2962
2963 if (op == 15 && (rn == 15 || rn > 17)) {
2964 /* Integer or single precision destination. */
9ee6e8bb 2965 rd = VFP_SREG_D(insn);
b7bcbe95 2966 } else {
9ee6e8bb 2967 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2968 }
2969
2970 if (op == 15 && (rn == 16 || rn == 17)) {
2971 /* Integer source. */
2972 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2973 } else {
9ee6e8bb 2974 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2975 }
2976 } else {
9ee6e8bb 2977 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2978 if (op == 15 && rn == 15) {
2979 /* Double precision destination. */
9ee6e8bb
PB
2980 VFP_DREG_D(rd, insn);
2981 } else {
2982 rd = VFP_SREG_D(insn);
2983 }
2984 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2985 }
2986
2987 veclen = env->vfp.vec_len;
2988 if (op == 15 && rn > 3)
2989 veclen = 0;
2990
2991 /* Shut up compiler warnings. */
2992 delta_m = 0;
2993 delta_d = 0;
2994 bank_mask = 0;
3b46e624 2995
b7bcbe95
FB
2996 if (veclen > 0) {
2997 if (dp)
2998 bank_mask = 0xc;
2999 else
3000 bank_mask = 0x18;
3001
3002 /* Figure out what type of vector operation this is. */
3003 if ((rd & bank_mask) == 0) {
3004 /* scalar */
3005 veclen = 0;
3006 } else {
3007 if (dp)
3008 delta_d = (env->vfp.vec_stride >> 1) + 1;
3009 else
3010 delta_d = env->vfp.vec_stride + 1;
3011
3012 if ((rm & bank_mask) == 0) {
3013 /* mixed scalar/vector */
3014 delta_m = 0;
3015 } else {
3016 /* vector */
3017 delta_m = delta_d;
3018 }
3019 }
3020 }
3021
3022 /* Load the initial operands. */
3023 if (op == 15) {
3024 switch (rn) {
3025 case 16:
3026 case 17:
3027 /* Integer source */
3028 gen_mov_F0_vreg(0, rm);
3029 break;
3030 case 8:
3031 case 9:
3032 /* Compare */
3033 gen_mov_F0_vreg(dp, rd);
3034 gen_mov_F1_vreg(dp, rm);
3035 break;
3036 case 10:
3037 case 11:
3038 /* Compare with zero */
3039 gen_mov_F0_vreg(dp, rd);
3040 gen_vfp_F1_ld0(dp);
3041 break;
9ee6e8bb
PB
3042 case 20:
3043 case 21:
3044 case 22:
3045 case 23:
3046 /* Source and destination the same. */
3047 gen_mov_F0_vreg(dp, rd);
3048 break;
b7bcbe95
FB
3049 default:
3050 /* One source operand. */
3051 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3052 break;
b7bcbe95
FB
3053 }
3054 } else {
3055 /* Two source operands. */
3056 gen_mov_F0_vreg(dp, rn);
3057 gen_mov_F1_vreg(dp, rm);
3058 }
3059
3060 for (;;) {
3061 /* Perform the calculation. */
3062 switch (op) {
3063 case 0: /* mac: fd + (fn * fm) */
3064 gen_vfp_mul(dp);
3065 gen_mov_F1_vreg(dp, rd);
3066 gen_vfp_add(dp);
3067 break;
3068 case 1: /* nmac: fd - (fn * fm) */
3069 gen_vfp_mul(dp);
3070 gen_vfp_neg(dp);
3071 gen_mov_F1_vreg(dp, rd);
3072 gen_vfp_add(dp);
3073 break;
3074 case 2: /* msc: -fd + (fn * fm) */
3075 gen_vfp_mul(dp);
3076 gen_mov_F1_vreg(dp, rd);
3077 gen_vfp_sub(dp);
3078 break;
3079 case 3: /* nmsc: -fd - (fn * fm) */
3080 gen_vfp_mul(dp);
3081 gen_mov_F1_vreg(dp, rd);
3082 gen_vfp_add(dp);
3083 gen_vfp_neg(dp);
3084 break;
3085 case 4: /* mul: fn * fm */
3086 gen_vfp_mul(dp);
3087 break;
3088 case 5: /* nmul: -(fn * fm) */
3089 gen_vfp_mul(dp);
3090 gen_vfp_neg(dp);
3091 break;
3092 case 6: /* add: fn + fm */
3093 gen_vfp_add(dp);
3094 break;
3095 case 7: /* sub: fn - fm */
3096 gen_vfp_sub(dp);
3097 break;
3098 case 8: /* div: fn / fm */
3099 gen_vfp_div(dp);
3100 break;
9ee6e8bb
PB
3101 case 14: /* fconst */
3102 if (!arm_feature(env, ARM_FEATURE_VFP3))
3103 return 1;
3104
3105 n = (insn << 12) & 0x80000000;
3106 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3107 if (dp) {
3108 if (i & 0x40)
3109 i |= 0x3f80;
3110 else
3111 i |= 0x4000;
3112 n |= i << 16;
4373f3ce 3113 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3114 } else {
3115 if (i & 0x40)
3116 i |= 0x780;
3117 else
3118 i |= 0x800;
3119 n |= i << 19;
4373f3ce 3120 tcg_gen_movi_i32(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb 3121 }
9ee6e8bb 3122 break;
b7bcbe95
FB
3123 case 15: /* extension space */
3124 switch (rn) {
3125 case 0: /* cpy */
3126 /* no-op */
3127 break;
3128 case 1: /* abs */
3129 gen_vfp_abs(dp);
3130 break;
3131 case 2: /* neg */
3132 gen_vfp_neg(dp);
3133 break;
3134 case 3: /* sqrt */
3135 gen_vfp_sqrt(dp);
3136 break;
3137 case 8: /* cmp */
3138 gen_vfp_cmp(dp);
3139 break;
3140 case 9: /* cmpe */
3141 gen_vfp_cmpe(dp);
3142 break;
3143 case 10: /* cmpz */
3144 gen_vfp_cmp(dp);
3145 break;
3146 case 11: /* cmpez */
3147 gen_vfp_F1_ld0(dp);
3148 gen_vfp_cmpe(dp);
3149 break;
3150 case 15: /* single<->double conversion */
3151 if (dp)
4373f3ce 3152 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3153 else
4373f3ce 3154 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3155 break;
3156 case 16: /* fuito */
3157 gen_vfp_uito(dp);
3158 break;
3159 case 17: /* fsito */
3160 gen_vfp_sito(dp);
3161 break;
9ee6e8bb
PB
3162 case 20: /* fshto */
3163 if (!arm_feature(env, ARM_FEATURE_VFP3))
3164 return 1;
3165 gen_vfp_shto(dp, rm);
3166 break;
3167 case 21: /* fslto */
3168 if (!arm_feature(env, ARM_FEATURE_VFP3))
3169 return 1;
3170 gen_vfp_slto(dp, rm);
3171 break;
3172 case 22: /* fuhto */
3173 if (!arm_feature(env, ARM_FEATURE_VFP3))
3174 return 1;
3175 gen_vfp_uhto(dp, rm);
3176 break;
3177 case 23: /* fulto */
3178 if (!arm_feature(env, ARM_FEATURE_VFP3))
3179 return 1;
3180 gen_vfp_ulto(dp, rm);
3181 break;
b7bcbe95
FB
3182 case 24: /* ftoui */
3183 gen_vfp_toui(dp);
3184 break;
3185 case 25: /* ftouiz */
3186 gen_vfp_touiz(dp);
3187 break;
3188 case 26: /* ftosi */
3189 gen_vfp_tosi(dp);
3190 break;
3191 case 27: /* ftosiz */
3192 gen_vfp_tosiz(dp);
3193 break;
9ee6e8bb
PB
3194 case 28: /* ftosh */
3195 if (!arm_feature(env, ARM_FEATURE_VFP3))
3196 return 1;
3197 gen_vfp_tosh(dp, rm);
3198 break;
3199 case 29: /* ftosl */
3200 if (!arm_feature(env, ARM_FEATURE_VFP3))
3201 return 1;
3202 gen_vfp_tosl(dp, rm);
3203 break;
3204 case 30: /* ftouh */
3205 if (!arm_feature(env, ARM_FEATURE_VFP3))
3206 return 1;
3207 gen_vfp_touh(dp, rm);
3208 break;
3209 case 31: /* ftoul */
3210 if (!arm_feature(env, ARM_FEATURE_VFP3))
3211 return 1;
3212 gen_vfp_toul(dp, rm);
3213 break;
b7bcbe95
FB
3214 default: /* undefined */
3215 printf ("rn:%d\n", rn);
3216 return 1;
3217 }
3218 break;
3219 default: /* undefined */
3220 printf ("op:%d\n", op);
3221 return 1;
3222 }
3223
3224 /* Write back the result. */
3225 if (op == 15 && (rn >= 8 && rn <= 11))
3226 ; /* Comparison, do nothing. */
3227 else if (op == 15 && rn > 17)
3228 /* Integer result. */
3229 gen_mov_vreg_F0(0, rd);
3230 else if (op == 15 && rn == 15)
3231 /* conversion */
3232 gen_mov_vreg_F0(!dp, rd);
3233 else
3234 gen_mov_vreg_F0(dp, rd);
3235
3236 /* break out of the loop if we have finished */
3237 if (veclen == 0)
3238 break;
3239
3240 if (op == 15 && delta_m == 0) {
3241 /* single source one-many */
3242 while (veclen--) {
3243 rd = ((rd + delta_d) & (bank_mask - 1))
3244 | (rd & bank_mask);
3245 gen_mov_vreg_F0(dp, rd);
3246 }
3247 break;
3248 }
3249 /* Setup the next operands. */
3250 veclen--;
3251 rd = ((rd + delta_d) & (bank_mask - 1))
3252 | (rd & bank_mask);
3253
3254 if (op == 15) {
3255 /* One source operand. */
3256 rm = ((rm + delta_m) & (bank_mask - 1))
3257 | (rm & bank_mask);
3258 gen_mov_F0_vreg(dp, rm);
3259 } else {
3260 /* Two source operands. */
3261 rn = ((rn + delta_d) & (bank_mask - 1))
3262 | (rn & bank_mask);
3263 gen_mov_F0_vreg(dp, rn);
3264 if (delta_m) {
3265 rm = ((rm + delta_m) & (bank_mask - 1))
3266 | (rm & bank_mask);
3267 gen_mov_F1_vreg(dp, rm);
3268 }
3269 }
3270 }
3271 }
3272 break;
3273 case 0xc:
3274 case 0xd:
9ee6e8bb 3275 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3276 /* two-register transfer */
3277 rn = (insn >> 16) & 0xf;
3278 rd = (insn >> 12) & 0xf;
3279 if (dp) {
9ee6e8bb
PB
3280 VFP_DREG_M(rm, insn);
3281 } else {
3282 rm = VFP_SREG_M(insn);
3283 }
b7bcbe95 3284
18c9b560 3285 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3286 /* vfp->arm */
3287 if (dp) {
4373f3ce
PB
3288 gen_mov_F0_vreg(0, rm * 2);
3289 tmp = gen_vfp_mrs();
3290 store_reg(s, rd, tmp);
3291 gen_mov_F0_vreg(0, rm * 2 + 1);
3292 tmp = gen_vfp_mrs();
3293 store_reg(s, rn, tmp);
b7bcbe95
FB
3294 } else {
3295 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3296 tmp = gen_vfp_mrs();
3297 store_reg(s, rn, tmp);
b7bcbe95 3298 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3299 tmp = gen_vfp_mrs();
3300 store_reg(s, rd, tmp);
b7bcbe95
FB
3301 }
3302 } else {
3303 /* arm->vfp */
3304 if (dp) {
4373f3ce
PB
3305 tmp = load_reg(s, rd);
3306 gen_vfp_msr(tmp);
3307 gen_mov_vreg_F0(0, rm * 2);
3308 tmp = load_reg(s, rn);
3309 gen_vfp_msr(tmp);
3310 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3311 } else {
4373f3ce
PB
3312 tmp = load_reg(s, rn);
3313 gen_vfp_msr(tmp);
b7bcbe95 3314 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3315 tmp = load_reg(s, rd);
3316 gen_vfp_msr(tmp);
b7bcbe95
FB
3317 gen_mov_vreg_F0(0, rm + 1);
3318 }
3319 }
3320 } else {
3321 /* Load/store */
3322 rn = (insn >> 16) & 0xf;
3323 if (dp)
9ee6e8bb 3324 VFP_DREG_D(rd, insn);
b7bcbe95 3325 else
9ee6e8bb
PB
3326 rd = VFP_SREG_D(insn);
3327 if (s->thumb && rn == 15) {
3328 gen_op_movl_T1_im(s->pc & ~2);
3329 } else {
3330 gen_movl_T1_reg(s, rn);
3331 }
b7bcbe95
FB
3332 if ((insn & 0x01200000) == 0x01000000) {
3333 /* Single load/store */
3334 offset = (insn & 0xff) << 2;
3335 if ((insn & (1 << 23)) == 0)
3336 offset = -offset;
3337 gen_op_addl_T1_im(offset);
3338 if (insn & (1 << 20)) {
b5ff1b31 3339 gen_vfp_ld(s, dp);
b7bcbe95
FB
3340 gen_mov_vreg_F0(dp, rd);
3341 } else {
3342 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3343 gen_vfp_st(s, dp);
b7bcbe95
FB
3344 }
3345 } else {
3346 /* load/store multiple */
3347 if (dp)
3348 n = (insn >> 1) & 0x7f;
3349 else
3350 n = insn & 0xff;
3351
3352 if (insn & (1 << 24)) /* pre-decrement */
3353 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3354
3355 if (dp)
3356 offset = 8;
3357 else
3358 offset = 4;
3359 for (i = 0; i < n; i++) {
18c9b560 3360 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3361 /* load */
b5ff1b31 3362 gen_vfp_ld(s, dp);
b7bcbe95
FB
3363 gen_mov_vreg_F0(dp, rd + i);
3364 } else {
3365 /* store */
3366 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3367 gen_vfp_st(s, dp);
b7bcbe95
FB
3368 }
3369 gen_op_addl_T1_im(offset);
3370 }
3371 if (insn & (1 << 21)) {
3372 /* writeback */
3373 if (insn & (1 << 24))
3374 offset = -offset * n;
3375 else if (dp && (insn & 1))
3376 offset = 4;
3377 else
3378 offset = 0;
3379
3380 if (offset != 0)
3381 gen_op_addl_T1_im(offset);
3382 gen_movl_reg_T1(s, rn);
3383 }
3384 }
3385 }
3386 break;
3387 default:
3388 /* Should never happen. */
3389 return 1;
3390 }
3391 return 0;
3392}
3393
6e256c93 3394static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3395{
6e256c93
FB
3396 TranslationBlock *tb;
3397
3398 tb = s->tb;
3399 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3400 tcg_gen_goto_tb(n);
8984bd2e 3401 gen_set_pc_im(dest);
57fec1fe 3402 tcg_gen_exit_tb((long)tb + n);
6e256c93 3403 } else {
8984bd2e 3404 gen_set_pc_im(dest);
57fec1fe 3405 tcg_gen_exit_tb(0);
6e256c93 3406 }
c53be334
FB
3407}
3408
8aaca4c0
FB
3409static inline void gen_jmp (DisasContext *s, uint32_t dest)
3410{
3411 if (__builtin_expect(s->singlestep_enabled, 0)) {
3412 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3413 if (s->thumb)
d9ba4830
PB
3414 dest |= 1;
3415 gen_bx_im(s, dest);
8aaca4c0 3416 } else {
6e256c93 3417 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3418 s->is_jmp = DISAS_TB_JUMP;
3419 }
3420}
3421
d9ba4830 3422static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3423{
ee097184 3424 if (x)
d9ba4830 3425 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3426 else
d9ba4830 3427 gen_sxth(t0);
ee097184 3428 if (y)
d9ba4830 3429 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3430 else
d9ba4830
PB
3431 gen_sxth(t1);
3432 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3433}
3434
3435/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3436static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3437 uint32_t mask;
3438
3439 mask = 0;
3440 if (flags & (1 << 0))
3441 mask |= 0xff;
3442 if (flags & (1 << 1))
3443 mask |= 0xff00;
3444 if (flags & (1 << 2))
3445 mask |= 0xff0000;
3446 if (flags & (1 << 3))
3447 mask |= 0xff000000;
9ee6e8bb 3448
2ae23e75 3449 /* Mask out undefined bits. */
9ee6e8bb
PB
3450 mask &= ~CPSR_RESERVED;
3451 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3452 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3453 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3454 mask &= ~CPSR_IT;
9ee6e8bb 3455 /* Mask out execution state bits. */
2ae23e75 3456 if (!spsr)
e160c51c 3457 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3458 /* Mask out privileged bits. */
3459 if (IS_USER(s))
9ee6e8bb 3460 mask &= CPSR_USER;
b5ff1b31
FB
3461 return mask;
3462}
3463
3464/* Returns nonzero if access to the PSR is not permitted. */
3465static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3466{
d9ba4830 3467 TCGv tmp;
b5ff1b31
FB
3468 if (spsr) {
3469 /* ??? This is also undefined in system mode. */
3470 if (IS_USER(s))
3471 return 1;
d9ba4830
PB
3472
3473 tmp = load_cpu_field(spsr);
3474 tcg_gen_andi_i32(tmp, tmp, ~mask);
3475 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3476 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3477 store_cpu_field(tmp, spsr);
b5ff1b31 3478 } else {
d9ba4830 3479 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3480 }
3481 gen_lookup_tb(s);
3482 return 0;
3483}
3484
9ee6e8bb 3485/* Generate an old-style exception return. */
b5ff1b31
FB
3486static void gen_exception_return(DisasContext *s)
3487{
d9ba4830 3488 TCGv tmp;
b26eefb6 3489 gen_set_pc_T0();
d9ba4830
PB
3490 tmp = load_cpu_field(spsr);
3491 gen_set_cpsr(tmp, 0xffffffff);
3492 dead_tmp(tmp);
b5ff1b31
FB
3493 s->is_jmp = DISAS_UPDATE;
3494}
3495
b0109805
PB
3496/* Generate a v6 exception return. Marks both values as dead. */
3497static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3498{
b0109805
PB
3499 gen_set_cpsr(cpsr, 0xffffffff);
3500 dead_tmp(cpsr);
3501 store_reg(s, 15, pc);
9ee6e8bb
PB
3502 s->is_jmp = DISAS_UPDATE;
3503}
3b46e624 3504
9ee6e8bb
PB
3505static inline void
3506gen_set_condexec (DisasContext *s)
3507{
3508 if (s->condexec_mask) {
8f01245e
PB
3509 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3510 TCGv tmp = new_tmp();
3511 tcg_gen_movi_i32(tmp, val);
d9ba4830 3512 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3513 }
3514}
3b46e624 3515
9ee6e8bb
PB
3516static void gen_nop_hint(DisasContext *s, int val)
3517{
3518 switch (val) {
3519 case 3: /* wfi */
8984bd2e 3520 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3521 s->is_jmp = DISAS_WFI;
3522 break;
3523 case 2: /* wfe */
3524 case 4: /* sev */
3525 /* TODO: Implement SEV and WFE. May help SMP performance. */
3526 default: /* nop */
3527 break;
3528 }
3529}
99c475ab 3530
ad69471c
PB
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3538
3539static inline int gen_neon_add(int size)
3540{
3541 switch (size) {
ad69471c
PB
3542 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3543 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3544 case 2: gen_op_addl_T0_T1(); break;
3545 default: return 1;
3546 }
3547 return 0;
3548}
3549
ad69471c
PB
3550static inline void gen_neon_rsb(int size)
3551{
3552 switch (size) {
3553 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3554 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3555 case 2: gen_op_rsbl_T0_T1(); break;
3556 default: return;
3557 }
3558}
3559
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32  gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32  gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32  gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32  gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3571
/* Dispatch to the signed/unsigned helper variant selected by the local
   'size' and 'u' variables, for helpers that also take cpu_env.
   Expands to 'return 1' on an unsupported size.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3594
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
3617
3618static inline void
3619gen_neon_movl_scratch_T0(int scratch)
3620{
3621 uint32_t offset;
3622
3623 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3624 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3625}
3626
3627static inline void
3628gen_neon_movl_scratch_T1(int scratch)
3629{
3630 uint32_t offset;
3631
3632 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3633 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3634}
3635
3636static inline void
3637gen_neon_movl_T0_scratch(int scratch)
3638{
3639 uint32_t offset;
3640
3641 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3642 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3643}
3644
3645static inline void
3646gen_neon_movl_T1_scratch(int scratch)
3647{
3648 uint32_t offset;
3649
3650 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3651 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3652}
3653
3654static inline void gen_neon_get_scalar(int size, int reg)
3655{
3656 if (size == 1) {
3657 NEON_GET_REG(T0, reg >> 1, reg & 1);
3658 } else {
3659 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3660 if (reg & 1)
ad69471c 3661 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb 3662 else
ad69471c 3663 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb
PB
3664 }
3665}
3666
3667static void gen_neon_unzip(int reg, int q, int tmp, int size)
3668{
3669 int n;
3670
3671 for (n = 0; n < q + 1; n += 2) {
3672 NEON_GET_REG(T0, reg, n);
3673 NEON_GET_REG(T0, reg, n + n);
3674 switch (size) {
ad69471c
PB
3675 case 0: gen_helper_neon_unzip_u8(); break;
3676 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3677 case 2: /* no-op */; break;
3678 default: abort();
3679 }
3680 gen_neon_movl_scratch_T0(tmp + n);
3681 gen_neon_movl_scratch_T1(tmp + n + 1);
3682 }
3683}
3684
/* Decode table for the VLDn/VSTn "load/store all elements" forms,
   indexed by the op field: number of registers transferred, interleave
   pattern, and spacing between destination registers.  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3702
3703/* Translate a NEON load/store element instruction. Return nonzero if the
3704 instruction is invalid. */
3705static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3706{
3707 int rd, rn, rm;
3708 int op;
3709 int nregs;
3710 int interleave;
3711 int stride;
3712 int size;
3713 int reg;
3714 int pass;
3715 int load;
3716 int shift;
9ee6e8bb 3717 int n;
b0109805 3718 TCGv tmp;
8f8e3aa4 3719 TCGv tmp2;
9ee6e8bb
PB
3720
3721 if (!vfp_enabled(env))
3722 return 1;
3723 VFP_DREG_D(rd, insn);
3724 rn = (insn >> 16) & 0xf;
3725 rm = insn & 0xf;
3726 load = (insn & (1 << 21)) != 0;
3727 if ((insn & (1 << 23)) == 0) {
3728 /* Load store all elements. */
3729 op = (insn >> 8) & 0xf;
3730 size = (insn >> 6) & 3;
3731 if (op > 10 || size == 3)
3732 return 1;
3733 nregs = neon_ls_element_type[op].nregs;
3734 interleave = neon_ls_element_type[op].interleave;
3735 gen_movl_T1_reg(s, rn);
3736 stride = (1 << size) * interleave;
3737 for (reg = 0; reg < nregs; reg++) {
3738 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3739 gen_movl_T1_reg(s, rn);
3740 gen_op_addl_T1_im((1 << size) * reg);
3741 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3742 gen_movl_T1_reg(s, rn);
3743 gen_op_addl_T1_im(1 << size);
3744 }
3745 for (pass = 0; pass < 2; pass++) {
3746 if (size == 2) {
3747 if (load) {
b0109805 3748 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3749 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3750 } else {
ad69471c 3751 tmp = neon_load_reg(rd, pass);
b0109805 3752 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3753 }
3754 gen_op_addl_T1_im(stride);
3755 } else if (size == 1) {
3756 if (load) {
b0109805 3757 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3758 gen_op_addl_T1_im(stride);
8f8e3aa4 3759 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3760 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3761 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3762 dead_tmp(tmp2);
3763 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3764 } else {
8f8e3aa4
PB
3765 tmp = neon_load_reg(rd, pass);
3766 tmp2 = new_tmp();
3767 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3768 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3769 gen_op_addl_T1_im(stride);
8f8e3aa4 3770 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3771 gen_op_addl_T1_im(stride);
3772 }
3773 } else /* size == 0 */ {
3774 if (load) {
9ee6e8bb 3775 for (n = 0; n < 4; n++) {
b0109805 3776 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3777 gen_op_addl_T1_im(stride);
3778 if (n == 0) {
8f8e3aa4 3779 tmp2 = tmp;
9ee6e8bb 3780 } else {
8f8e3aa4
PB
3781 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3782 dead_tmp(tmp);
9ee6e8bb 3783 }
9ee6e8bb 3784 }
8f8e3aa4 3785 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3786 } else {
8f8e3aa4 3787 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3788 for (n = 0; n < 4; n++) {
8f8e3aa4 3789 tmp = new_tmp();
9ee6e8bb 3790 if (n == 0) {
8f8e3aa4 3791 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3792 } else {
8f8e3aa4 3793 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3794 }
b0109805 3795 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3796 gen_op_addl_T1_im(stride);
9ee6e8bb 3797 }
8f8e3aa4 3798 dead_tmp(tmp2);
9ee6e8bb
PB
3799 }
3800 }
3801 }
3802 rd += neon_ls_element_type[op].spacing;
3803 }
3804 stride = nregs * 8;
3805 } else {
3806 size = (insn >> 10) & 3;
3807 if (size == 3) {
3808 /* Load single element to all lanes. */
3809 if (!load)
3810 return 1;
3811 size = (insn >> 6) & 3;
3812 nregs = ((insn >> 8) & 3) + 1;
3813 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3814 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3815 for (reg = 0; reg < nregs; reg++) {
3816 switch (size) {
3817 case 0:
b0109805 3818 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3819 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3820 break;
3821 case 1:
b0109805 3822 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3823 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3824 break;
3825 case 2:
b0109805 3826 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3827 break;
3828 case 3:
3829 return 1;
99c475ab 3830 }
9ee6e8bb 3831 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3832 tmp2 = new_tmp();
3833 tcg_gen_mov_i32(tmp2, tmp);
3834 neon_store_reg(rd, 0, tmp2);
3835 neon_store_reg(rd, 0, tmp);
9ee6e8bb
PB
3836 rd += stride;
3837 }
3838 stride = (1 << size) * nregs;
3839 } else {
3840 /* Single element. */
3841 pass = (insn >> 7) & 1;
3842 switch (size) {
3843 case 0:
3844 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3845 stride = 1;
3846 break;
3847 case 1:
3848 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3849 stride = (insn & (1 << 5)) ? 2 : 1;
3850 break;
3851 case 2:
3852 shift = 0;
9ee6e8bb
PB
3853 stride = (insn & (1 << 6)) ? 2 : 1;
3854 break;
3855 default:
3856 abort();
3857 }
3858 nregs = ((insn >> 8) & 3) + 1;
3859 gen_movl_T1_reg(s, rn);
3860 for (reg = 0; reg < nregs; reg++) {
3861 if (load) {
9ee6e8bb
PB
3862 switch (size) {
3863 case 0:
b0109805 3864 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3865 break;
3866 case 1:
b0109805 3867 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3868 break;
3869 case 2:
b0109805 3870 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3871 break;
3872 }
3873 if (size != 2) {
8f8e3aa4
PB
3874 tmp2 = neon_load_reg(rd, pass);
3875 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3876 dead_tmp(tmp2);
9ee6e8bb 3877 }
8f8e3aa4 3878 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3879 } else { /* Store */
8f8e3aa4
PB
3880 tmp = neon_load_reg(rd, pass);
3881 if (shift)
3882 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3883 switch (size) {
3884 case 0:
b0109805 3885 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3886 break;
3887 case 1:
b0109805 3888 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3889 break;
3890 case 2:
b0109805 3891 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3892 break;
99c475ab 3893 }
99c475ab 3894 }
9ee6e8bb
PB
3895 rd += stride;
3896 gen_op_addl_T1_im(1 << size);
99c475ab 3897 }
9ee6e8bb 3898 stride = nregs * (1 << size);
99c475ab 3899 }
9ee6e8bb
PB
3900 }
3901 if (rm != 15) {
b26eefb6
PB
3902 TCGv base;
3903
3904 base = load_reg(s, rn);
9ee6e8bb 3905 if (rm == 13) {
b26eefb6 3906 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3907 } else {
b26eefb6
PB
3908 TCGv index;
3909 index = load_reg(s, rm);
3910 tcg_gen_add_i32(base, base, index);
3911 dead_tmp(index);
9ee6e8bb 3912 }
b26eefb6 3913 store_reg(s, rn, base);
9ee6e8bb
PB
3914 }
3915 return 0;
3916}
3b46e624 3917
8f8e3aa4
PB
3918/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3919static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3920{
3921 tcg_gen_and_i32(t, t, c);
3922 tcg_gen_bic_i32(f, f, c);
3923 tcg_gen_or_i32(dest, t, f);
3924}
3925
ad69471c
PB
3926static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
3927{
3928 switch (size) {
3929 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3930 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3931 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3932 default: abort();
3933 }
3934}
3935
3936static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
3937{
3938 switch (size) {
3939 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3940 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3941 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3942 default: abort();
3943 }
3944}
3945
3946static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
3947{
3948 switch (size) {
3949 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3950 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3951 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3952 default: abort();
3953 }
3954}
3955
3956static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3957 int q, int u)
3958{
3959 if (q) {
3960 if (u) {
3961 switch (size) {
3962 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3963 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3964 default: abort();
3965 }
3966 } else {
3967 switch (size) {
3968 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3969 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3970 default: abort();
3971 }
3972 }
3973 } else {
3974 if (u) {
3975 switch (size) {
3976 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3977 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3978 default: abort();
3979 }
3980 } else {
3981 switch (size) {
3982 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3983 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3984 default: abort();
3985 }
3986 }
3987 }
3988}
3989
3990static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
3991{
3992 if (u) {
3993 switch (size) {
3994 case 0: gen_helper_neon_widen_u8(dest, src); break;
3995 case 1: gen_helper_neon_widen_u16(dest, src); break;
3996 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3997 default: abort();
3998 }
3999 } else {
4000 switch (size) {
4001 case 0: gen_helper_neon_widen_s8(dest, src); break;
4002 case 1: gen_helper_neon_widen_s16(dest, src); break;
4003 case 2: tcg_gen_ext_i32_i64(dest, src); break;
4004 default: abort();
4005 }
4006 }
4007 dead_tmp(src);
4008}
4009
4010static inline void gen_neon_addl(int size)
4011{
4012 switch (size) {
4013 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
4014 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
4015 case 2: tcg_gen_add_i64(CPU_V001); break;
4016 default: abort();
4017 }
4018}
4019
4020static inline void gen_neon_subl(int size)
4021{
4022 switch (size) {
4023 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4024 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4025 case 2: tcg_gen_sub_i64(CPU_V001); break;
4026 default: abort();
4027 }
4028}
4029
4030static inline void gen_neon_negl(TCGv var, int size)
4031{
4032 switch (size) {
4033 case 0: gen_helper_neon_negl_u16(var, var); break;
4034 case 1: gen_helper_neon_negl_u32(var, var); break;
4035 case 2: gen_helper_neon_negl_u64(var, var); break;
4036 default: abort();
4037 }
4038}
4039
4040static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
4041{
4042 switch (size) {
4043 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4044 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4045 default: abort();
4046 }
4047}
4048
4049static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
4050{
4051 TCGv tmp;
4052
4053 switch ((size << 1) | u) {
4054 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4055 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4056 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4057 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4058 case 4:
4059 tmp = gen_muls_i64_i32(a, b);
4060 tcg_gen_mov_i64(dest, tmp);
4061 break;
4062 case 5:
4063 tmp = gen_mulu_i64_i32(a, b);
4064 tcg_gen_mov_i64(dest, tmp);
4065 break;
4066 default: abort();
4067 }
4068 if (size < 2) {
4069 dead_tmp(b);
4070 dead_tmp(a);
4071 }
4072}
4073
9ee6e8bb
PB
4074/* Translate a NEON data processing instruction. Return nonzero if the
4075 instruction is invalid.
ad69471c
PB
4076 We process data in a mixture of 32-bit and 64-bit chunks.
4077 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4078
9ee6e8bb
PB
4079static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4080{
4081 int op;
4082 int q;
4083 int rd, rn, rm;
4084 int size;
4085 int shift;
4086 int pass;
4087 int count;
4088 int pairwise;
4089 int u;
4090 int n;
4091 uint32_t imm;
8f8e3aa4
PB
4092 TCGv tmp;
4093 TCGv tmp2;
4094 TCGv tmp3;
9ee6e8bb
PB
4095
4096 if (!vfp_enabled(env))
4097 return 1;
4098 q = (insn & (1 << 6)) != 0;
4099 u = (insn >> 24) & 1;
4100 VFP_DREG_D(rd, insn);
4101 VFP_DREG_N(rn, insn);
4102 VFP_DREG_M(rm, insn);
4103 size = (insn >> 20) & 3;
4104 if ((insn & (1 << 23)) == 0) {
4105 /* Three register same length. */
4106 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4107 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4108 || op == 10 || op == 11 || op == 16)) {
4109 /* 64-bit element instructions. */
9ee6e8bb 4110 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4111 neon_load_reg64(cpu_V0, rn + pass);
4112 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4113 switch (op) {
4114 case 1: /* VQADD */
4115 if (u) {
ad69471c 4116 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4117 } else {
ad69471c 4118 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4119 }
9ee6e8bb
PB
4120 break;
4121 case 5: /* VQSUB */
4122 if (u) {
ad69471c
PB
4123 gen_helper_neon_sub_saturate_u64(CPU_V001);
4124 } else {
4125 gen_helper_neon_sub_saturate_s64(CPU_V001);
4126 }
4127 break;
4128 case 8: /* VSHL */
4129 if (u) {
4130 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4131 } else {
4132 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4133 }
4134 break;
4135 case 9: /* VQSHL */
4136 if (u) {
4137 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4138 cpu_V0, cpu_V0);
4139 } else {
4140 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4141 cpu_V1, cpu_V0);
4142 }
4143 break;
4144 case 10: /* VRSHL */
4145 if (u) {
4146 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4147 } else {
ad69471c
PB
4148 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4149 }
4150 break;
4151 case 11: /* VQRSHL */
4152 if (u) {
4153 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4154 cpu_V1, cpu_V0);
4155 } else {
4156 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4157 cpu_V1, cpu_V0);
1e8d4eec 4158 }
9ee6e8bb
PB
4159 break;
4160 case 16:
4161 if (u) {
ad69471c 4162 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4163 } else {
ad69471c 4164 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4165 }
4166 break;
4167 default:
4168 abort();
2c0262af 4169 }
ad69471c 4170 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4171 }
9ee6e8bb 4172 return 0;
2c0262af 4173 }
9ee6e8bb
PB
4174 switch (op) {
4175 case 8: /* VSHL */
4176 case 9: /* VQSHL */
4177 case 10: /* VRSHL */
ad69471c 4178 case 11: /* VQRSHL */
9ee6e8bb 4179 {
ad69471c
PB
4180 int rtmp;
4181 /* Shift instruction operands are reversed. */
4182 rtmp = rn;
9ee6e8bb 4183 rn = rm;
ad69471c 4184 rm = rtmp;
9ee6e8bb
PB
4185 pairwise = 0;
4186 }
2c0262af 4187 break;
9ee6e8bb
PB
4188 case 20: /* VPMAX */
4189 case 21: /* VPMIN */
4190 case 23: /* VPADD */
4191 pairwise = 1;
2c0262af 4192 break;
9ee6e8bb
PB
4193 case 26: /* VPADD (float) */
4194 pairwise = (u && size < 2);
2c0262af 4195 break;
9ee6e8bb
PB
4196 case 30: /* VPMIN/VPMAX (float) */
4197 pairwise = u;
2c0262af 4198 break;
9ee6e8bb
PB
4199 default:
4200 pairwise = 0;
2c0262af 4201 break;
9ee6e8bb
PB
4202 }
4203 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4204
4205 if (pairwise) {
4206 /* Pairwise. */
4207 if (q)
4208 n = (pass & 1) * 2;
2c0262af 4209 else
9ee6e8bb
PB
4210 n = 0;
4211 if (pass < q + 1) {
4212 NEON_GET_REG(T0, rn, n);
4213 NEON_GET_REG(T1, rn, n + 1);
4214 } else {
4215 NEON_GET_REG(T0, rm, n);
4216 NEON_GET_REG(T1, rm, n + 1);
4217 }
4218 } else {
4219 /* Elementwise. */
4220 NEON_GET_REG(T0, rn, pass);
4221 NEON_GET_REG(T1, rm, pass);
4222 }
4223 switch (op) {
4224 case 0: /* VHADD */
4225 GEN_NEON_INTEGER_OP(hadd);
4226 break;
4227 case 1: /* VQADD */
ad69471c 4228 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4229 break;
9ee6e8bb
PB
4230 case 2: /* VRHADD */
4231 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4232 break;
9ee6e8bb
PB
4233 case 3: /* Logic ops. */
4234 switch ((u << 2) | size) {
4235 case 0: /* VAND */
2c0262af 4236 gen_op_andl_T0_T1();
9ee6e8bb
PB
4237 break;
4238 case 1: /* BIC */
4239 gen_op_bicl_T0_T1();
4240 break;
4241 case 2: /* VORR */
4242 gen_op_orl_T0_T1();
4243 break;
4244 case 3: /* VORN */
4245 gen_op_notl_T1();
4246 gen_op_orl_T0_T1();
4247 break;
4248 case 4: /* VEOR */
4249 gen_op_xorl_T0_T1();
4250 break;
4251 case 5: /* VBSL */
8f8e3aa4
PB
4252 tmp = neon_load_reg(rd, pass);
4253 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4254 dead_tmp(tmp);
9ee6e8bb
PB
4255 break;
4256 case 6: /* VBIT */
8f8e3aa4
PB
4257 tmp = neon_load_reg(rd, pass);
4258 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4259 dead_tmp(tmp);
9ee6e8bb
PB
4260 break;
4261 case 7: /* VBIF */
8f8e3aa4
PB
4262 tmp = neon_load_reg(rd, pass);
4263 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4264 dead_tmp(tmp);
9ee6e8bb 4265 break;
2c0262af
FB
4266 }
4267 break;
9ee6e8bb
PB
4268 case 4: /* VHSUB */
4269 GEN_NEON_INTEGER_OP(hsub);
4270 break;
4271 case 5: /* VQSUB */
ad69471c 4272 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4273 break;
9ee6e8bb
PB
4274 case 6: /* VCGT */
4275 GEN_NEON_INTEGER_OP(cgt);
4276 break;
4277 case 7: /* VCGE */
4278 GEN_NEON_INTEGER_OP(cge);
4279 break;
4280 case 8: /* VSHL */
ad69471c 4281 GEN_NEON_INTEGER_OP(shl);
2c0262af 4282 break;
9ee6e8bb 4283 case 9: /* VQSHL */
ad69471c 4284 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4285 break;
9ee6e8bb 4286 case 10: /* VRSHL */
ad69471c 4287 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4288 break;
9ee6e8bb 4289 case 11: /* VQRSHL */
ad69471c 4290 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4291 break;
4292 case 12: /* VMAX */
4293 GEN_NEON_INTEGER_OP(max);
4294 break;
4295 case 13: /* VMIN */
4296 GEN_NEON_INTEGER_OP(min);
4297 break;
4298 case 14: /* VABD */
4299 GEN_NEON_INTEGER_OP(abd);
4300 break;
4301 case 15: /* VABA */
4302 GEN_NEON_INTEGER_OP(abd);
4303 NEON_GET_REG(T1, rd, pass);
4304 gen_neon_add(size);
4305 break;
4306 case 16:
4307 if (!u) { /* VADD */
4308 if (gen_neon_add(size))
4309 return 1;
4310 } else { /* VSUB */
4311 switch (size) {
ad69471c
PB
4312 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4313 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4314 case 2: gen_op_subl_T0_T1(); break;
4315 default: return 1;
4316 }
4317 }
4318 break;
4319 case 17:
4320 if (!u) { /* VTST */
4321 switch (size) {
ad69471c
PB
4322 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4323 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4324 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4325 default: return 1;
4326 }
4327 } else { /* VCEQ */
4328 switch (size) {
ad69471c
PB
4329 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4330 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4331 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4332 default: return 1;
4333 }
4334 }
4335 break;
4336 case 18: /* Multiply. */
4337 switch (size) {
ad69471c
PB
4338 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4339 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4340 case 2: gen_op_mul_T0_T1(); break;
4341 default: return 1;
4342 }
4343 NEON_GET_REG(T1, rd, pass);
4344 if (u) { /* VMLS */
ad69471c 4345 gen_neon_rsb(size);
9ee6e8bb
PB
4346 } else { /* VMLA */
4347 gen_neon_add(size);
4348 }
4349 break;
4350 case 19: /* VMUL */
4351 if (u) { /* polynomial */
ad69471c 4352 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4353 } else { /* Integer */
4354 switch (size) {
ad69471c
PB
4355 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4356 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4357 case 2: gen_op_mul_T0_T1(); break;
4358 default: return 1;
4359 }
4360 }
4361 break;
4362 case 20: /* VPMAX */
4363 GEN_NEON_INTEGER_OP(pmax);
4364 break;
4365 case 21: /* VPMIN */
4366 GEN_NEON_INTEGER_OP(pmin);
4367 break;
4368 case 22: /* Hultiply high. */
4369 if (!u) { /* VQDMULH */
4370 switch (size) {
ad69471c
PB
4371 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4372 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4373 default: return 1;
4374 }
4375 } else { /* VQRDHMUL */
4376 switch (size) {
ad69471c
PB
4377 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4378 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4379 default: return 1;
4380 }
4381 }
4382 break;
4383 case 23: /* VPADD */
4384 if (u)
4385 return 1;
4386 switch (size) {
ad69471c
PB
4387 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4388 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4389 case 2: gen_op_addl_T0_T1(); break;
4390 default: return 1;
4391 }
4392 break;
4393 case 26: /* Floating point arithnetic. */
4394 switch ((u << 2) | size) {
4395 case 0: /* VADD */
ad69471c 4396 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4397 break;
4398 case 2: /* VSUB */
ad69471c 4399 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4400 break;
4401 case 4: /* VPADD */
ad69471c 4402 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4403 break;
4404 case 6: /* VABD */
ad69471c 4405 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4406 break;
4407 default:
4408 return 1;
4409 }
4410 break;
4411 case 27: /* Float multiply. */
ad69471c 4412 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4413 if (!u) {
4414 NEON_GET_REG(T1, rd, pass);
4415 if (size == 0) {
ad69471c 4416 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4417 } else {
ad69471c 4418 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4419 }
4420 }
4421 break;
4422 case 28: /* Float compare. */
4423 if (!u) {
ad69471c 4424 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4425 } else {
9ee6e8bb 4426 if (size == 0)
ad69471c 4427 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4428 else
ad69471c 4429 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4430 }
2c0262af 4431 break;
9ee6e8bb
PB
4432 case 29: /* Float compare absolute. */
4433 if (!u)
4434 return 1;
4435 if (size == 0)
ad69471c 4436 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4437 else
ad69471c 4438 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4439 break;
9ee6e8bb
PB
4440 case 30: /* Float min/max. */
4441 if (size == 0)
ad69471c 4442 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4443 else
ad69471c 4444 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4445 break;
4446 case 31:
4447 if (size == 0)
4373f3ce 4448 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4449 else
4373f3ce 4450 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4451 break;
9ee6e8bb
PB
4452 default:
4453 abort();
2c0262af 4454 }
9ee6e8bb
PB
4455 /* Save the result. For elementwise operations we can put it
4456 straight into the destination register. For pairwise operations
4457 we have to be careful to avoid clobbering the source operands. */
4458 if (pairwise && rd == rm) {
4459 gen_neon_movl_scratch_T0(pass);
4460 } else {
4461 NEON_SET_REG(T0, rd, pass);
4462 }
4463
4464 } /* for pass */
4465 if (pairwise && rd == rm) {
4466 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4467 gen_neon_movl_T0_scratch(pass);
4468 NEON_SET_REG(T0, rd, pass);
4469 }
4470 }
ad69471c 4471 /* End of 3 register same size operations. */
9ee6e8bb
PB
4472 } else if (insn & (1 << 4)) {
4473 if ((insn & 0x00380080) != 0) {
4474 /* Two registers and shift. */
4475 op = (insn >> 8) & 0xf;
4476 if (insn & (1 << 7)) {
4477 /* 64-bit shift. */
4478 size = 3;
4479 } else {
4480 size = 2;
4481 while ((insn & (1 << (size + 19))) == 0)
4482 size--;
4483 }
4484 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4485 /* To avoid excessive dumplication of ops we implement shift
4486 by immediate using the variable shift operations. */
4487 if (op < 8) {
4488 /* Shift by immediate:
4489 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4490 /* Right shifts are encoded as N - shift, where N is the
4491 element size in bits. */
4492 if (op <= 4)
4493 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4494 if (size == 3) {
4495 count = q + 1;
4496 } else {
4497 count = q ? 4: 2;
4498 }
4499 switch (size) {
4500 case 0:
4501 imm = (uint8_t) shift;
4502 imm |= imm << 8;
4503 imm |= imm << 16;
4504 break;
4505 case 1:
4506 imm = (uint16_t) shift;
4507 imm |= imm << 16;
4508 break;
4509 case 2:
4510 case 3:
4511 imm = shift;
4512 break;
4513 default:
4514 abort();
4515 }
4516
4517 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4518 if (size == 3) {
4519 neon_load_reg64(cpu_V0, rm + pass);
4520 tcg_gen_movi_i64(cpu_V1, imm);
4521 switch (op) {
4522 case 0: /* VSHR */
4523 case 1: /* VSRA */
4524 if (u)
4525 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4526 else
ad69471c 4527 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4528 break;
ad69471c
PB
4529 case 2: /* VRSHR */
4530 case 3: /* VRSRA */
4531 if (u)
4532 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4533 else
ad69471c 4534 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4535 break;
ad69471c
PB
4536 case 4: /* VSRI */
4537 if (!u)
4538 return 1;
4539 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4540 break;
4541 case 5: /* VSHL, VSLI */
4542 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4543 break;
4544 case 6: /* VQSHL */
4545 if (u)
4546 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4547 else
ad69471c
PB
4548 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4549 break;
4550 case 7: /* VQSHLU */
4551 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4552 break;
9ee6e8bb 4553 }
ad69471c
PB
4554 if (op == 1 || op == 3) {
4555 /* Accumulate. */
4556 neon_load_reg64(cpu_V0, rd + pass);
4557 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4558 } else if (op == 4 || (op == 5 && u)) {
4559 /* Insert */
4560 cpu_abort(env, "VS[LR]I.64 not implemented");
4561 }
4562 neon_store_reg64(cpu_V0, rd + pass);
4563 } else { /* size < 3 */
4564 /* Operands in T0 and T1. */
4565 gen_op_movl_T1_im(imm);
4566 NEON_GET_REG(T0, rm, pass);
4567 switch (op) {
4568 case 0: /* VSHR */
4569 case 1: /* VSRA */
4570 GEN_NEON_INTEGER_OP(shl);
4571 break;
4572 case 2: /* VRSHR */
4573 case 3: /* VRSRA */
4574 GEN_NEON_INTEGER_OP(rshl);
4575 break;
4576 case 4: /* VSRI */
4577 if (!u)
4578 return 1;
4579 GEN_NEON_INTEGER_OP(shl);
4580 break;
4581 case 5: /* VSHL, VSLI */
4582 switch (size) {
4583 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4584 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4585 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4586 default: return 1;
4587 }
4588 break;
4589 case 6: /* VQSHL */
4590 GEN_NEON_INTEGER_OP_ENV(qshl);
4591 break;
4592 case 7: /* VQSHLU */
4593 switch (size) {
4594 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4595 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4596 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4597 default: return 1;
4598 }
4599 break;
4600 }
4601
4602 if (op == 1 || op == 3) {
4603 /* Accumulate. */
4604 NEON_GET_REG(T1, rd, pass);
4605 gen_neon_add(size);
4606 } else if (op == 4 || (op == 5 && u)) {
4607 /* Insert */
4608 switch (size) {
4609 case 0:
4610 if (op == 4)
4611 imm = 0xff >> -shift;
4612 else
4613 imm = (uint8_t)(0xff << shift);
4614 imm |= imm << 8;
4615 imm |= imm << 16;
4616 break;
4617 case 1:
4618 if (op == 4)
4619 imm = 0xffff >> -shift;
4620 else
4621 imm = (uint16_t)(0xffff << shift);
4622 imm |= imm << 16;
4623 break;
4624 case 2:
4625 if (op == 4)
4626 imm = 0xffffffffu >> -shift;
4627 else
4628 imm = 0xffffffffu << shift;
4629 break;
4630 default:
4631 abort();
4632 }
4633 tmp = neon_load_reg(rd, pass);
4634 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4635 tcg_gen_andi_i32(tmp, tmp, ~imm);
4636 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4637 }
9ee6e8bb
PB
4638 NEON_SET_REG(T0, rd, pass);
4639 }
4640 } /* for pass */
4641 } else if (op < 10) {
ad69471c 4642 /* Shift by immediate and narrow:
9ee6e8bb
PB
4643 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4644 shift = shift - (1 << (size + 3));
4645 size++;
9ee6e8bb
PB
4646 switch (size) {
4647 case 1:
ad69471c 4648 imm = (uint16_t)shift;
9ee6e8bb 4649 imm |= imm << 16;
ad69471c 4650 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
4651 break;
4652 case 2:
ad69471c
PB
4653 imm = (uint32_t)shift;
4654 tmp2 = tcg_const_i32(imm);
9ee6e8bb 4655 case 3:
ad69471c 4656 tmp2 = tcg_const_i64(shift);
9ee6e8bb
PB
4657 break;
4658 default:
4659 abort();
4660 }
4661
ad69471c
PB
4662 for (pass = 0; pass < 2; pass++) {
4663 if (size == 3) {
4664 neon_load_reg64(cpu_V0, rm + pass);
4665 if (q) {
4666 if (u)
4667 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
4668 else
4669 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
4670 } else {
4671 if (u)
4672 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
4673 else
4674 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
4675 }
2c0262af 4676 } else {
ad69471c
PB
4677 tmp = neon_load_reg(rm + pass, 0);
4678 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4679 tcg_gen_extu_i32_i64(cpu_V0, tmp);
4680 dead_tmp(tmp);
4681 tmp = neon_load_reg(rm + pass, 1);
4682 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4683 tcg_gen_extu_i32_i64(cpu_V1, tmp);
4684 dead_tmp(tmp);
4685 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
4686 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4687 }
ad69471c
PB
4688 tmp = new_tmp();
4689 if (op == 8 && !u) {
4690 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4691 } else {
ad69471c
PB
4692 if (op == 8)
4693 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4694 else
ad69471c
PB
4695 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4696 }
4697 if (pass == 0) {
4698 tmp2 = tmp;
4699 } else {
4700 neon_store_reg(rd, 0, tmp2);
4701 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4702 }
4703 } /* for pass */
4704 } else if (op == 10) {
4705 /* VSHLL */
ad69471c 4706 if (q || size == 3)
9ee6e8bb 4707 return 1;
ad69471c
PB
4708 tmp = neon_load_reg(rm, 0);
4709 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4710 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4711 if (pass == 1)
4712 tmp = tmp2;
4713
4714 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4715
9ee6e8bb
PB
4716 if (shift != 0) {
4717 /* The shift is less than the width of the source
ad69471c
PB
4718 type, so we can just shift the whole register. */
4719 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4720 if (size < 2 || !u) {
4721 uint64_t imm64;
4722 if (size == 0) {
4723 imm = (0xffu >> (8 - shift));
4724 imm |= imm << 16;
4725 } else {
4726 imm = 0xffff >> (16 - shift);
9ee6e8bb 4727 }
ad69471c
PB
4728 imm64 = imm | (((uint64_t)imm) << 32);
4729 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4730 }
4731 }
ad69471c 4732 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4733 }
4734 } else if (op == 15 || op == 16) {
4735 /* VCVT fixed-point. */
4736 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4737 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4738 if (op & 1) {
4739 if (u)
4373f3ce 4740 gen_vfp_ulto(0, shift);
9ee6e8bb 4741 else
4373f3ce 4742 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4743 } else {
4744 if (u)
4373f3ce 4745 gen_vfp_toul(0, shift);
9ee6e8bb 4746 else
4373f3ce 4747 gen_vfp_tosl(0, shift);
2c0262af 4748 }
4373f3ce 4749 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4750 }
4751 } else {
9ee6e8bb
PB
4752 return 1;
4753 }
4754 } else { /* (insn & 0x00380080) == 0 */
4755 int invert;
4756
4757 op = (insn >> 8) & 0xf;
4758 /* One register and immediate. */
4759 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4760 invert = (insn & (1 << 5)) != 0;
4761 switch (op) {
4762 case 0: case 1:
4763 /* no-op */
4764 break;
4765 case 2: case 3:
4766 imm <<= 8;
4767 break;
4768 case 4: case 5:
4769 imm <<= 16;
4770 break;
4771 case 6: case 7:
4772 imm <<= 24;
4773 break;
4774 case 8: case 9:
4775 imm |= imm << 16;
4776 break;
4777 case 10: case 11:
4778 imm = (imm << 8) | (imm << 24);
4779 break;
4780 case 12:
4781 imm = (imm < 8) | 0xff;
4782 break;
4783 case 13:
4784 imm = (imm << 16) | 0xffff;
4785 break;
4786 case 14:
4787 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4788 if (invert)
4789 imm = ~imm;
4790 break;
4791 case 15:
4792 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4793 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4794 break;
4795 }
4796 if (invert)
4797 imm = ~imm;
4798
4799 if (op != 14 || !invert)
4800 gen_op_movl_T1_im(imm);
4801
4802 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4803 if (op & 1 && op < 12) {
ad69471c 4804 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4805 if (invert) {
4806 /* The immediate value has already been inverted, so
4807 BIC becomes AND. */
ad69471c 4808 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4809 } else {
ad69471c 4810 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4811 }
9ee6e8bb 4812 } else {
ad69471c
PB
4813 /* VMOV, VMVN. */
4814 tmp = new_tmp();
9ee6e8bb 4815 if (op == 14 && invert) {
ad69471c
PB
4816 uint32_t val;
4817 val = 0;
9ee6e8bb
PB
4818 for (n = 0; n < 4; n++) {
4819 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4820 val |= 0xff << (n * 8);
9ee6e8bb 4821 }
ad69471c
PB
4822 tcg_gen_movi_i32(tmp, val);
4823 } else {
4824 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4825 }
9ee6e8bb 4826 }
ad69471c 4827 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4828 }
4829 }
4830 } else { /* (insn & 0x00800010 == 0x00800010) */
4831 if (size != 3) {
4832 op = (insn >> 8) & 0xf;
4833 if ((insn & (1 << 6)) == 0) {
4834 /* Three registers of different lengths. */
4835 int src1_wide;
4836 int src2_wide;
4837 int prewiden;
4838 /* prewiden, src1_wide, src2_wide */
4839 static const int neon_3reg_wide[16][3] = {
4840 {1, 0, 0}, /* VADDL */
4841 {1, 1, 0}, /* VADDW */
4842 {1, 0, 0}, /* VSUBL */
4843 {1, 1, 0}, /* VSUBW */
4844 {0, 1, 1}, /* VADDHN */
4845 {0, 0, 0}, /* VABAL */
4846 {0, 1, 1}, /* VSUBHN */
4847 {0, 0, 0}, /* VABDL */
4848 {0, 0, 0}, /* VMLAL */
4849 {0, 0, 0}, /* VQDMLAL */
4850 {0, 0, 0}, /* VMLSL */
4851 {0, 0, 0}, /* VQDMLSL */
4852 {0, 0, 0}, /* Integer VMULL */
4853 {0, 0, 0}, /* VQDMULL */
4854 {0, 0, 0} /* Polynomial VMULL */
4855 };
4856
4857 prewiden = neon_3reg_wide[op][0];
4858 src1_wide = neon_3reg_wide[op][1];
4859 src2_wide = neon_3reg_wide[op][2];
4860
ad69471c
PB
4861 if (size == 0 && (op == 9 || op == 11 || op == 13))
4862 return 1;
4863
9ee6e8bb
PB
4864 /* Avoid overlapping operands. Wide source operands are
4865 always aligned so will never overlap with wide
4866 destinations in problematic ways. */
8f8e3aa4
PB
4867 if (rd == rm && !src2_wide) {
4868 NEON_GET_REG(T0, rm, 1);
4869 gen_neon_movl_scratch_T0(2);
4870 } else if (rd == rn && !src1_wide) {
4871 NEON_GET_REG(T0, rn, 1);
4872 gen_neon_movl_scratch_T0(2);
9ee6e8bb
PB
4873 }
4874 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4875 if (src1_wide) {
4876 neon_load_reg64(cpu_V0, rn + pass);
9ee6e8bb 4877 } else {
ad69471c
PB
4878 if (pass == 1 && rd == rn) {
4879 gen_neon_movl_T0_scratch(2);
4880 tmp = new_tmp();
4881 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4882 } else {
ad69471c
PB
4883 tmp = neon_load_reg(rn, pass);
4884 }
4885 if (prewiden) {
4886 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4887 }
4888 }
ad69471c
PB
4889 if (src2_wide) {
4890 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4891 } else {
ad69471c 4892 if (pass == 1 && rd == rm) {
8f8e3aa4 4893 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4894 tmp2 = new_tmp();
4895 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4896 } else {
ad69471c
PB
4897 tmp2 = neon_load_reg(rm, pass);
4898 }
4899 if (prewiden) {
4900 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4901 }
9ee6e8bb
PB
4902 }
4903 switch (op) {
4904 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4905 gen_neon_addl(size);
9ee6e8bb
PB
4906 break;
4907 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
ad69471c 4908 gen_neon_subl(size);
9ee6e8bb
PB
4909 break;
4910 case 5: case 7: /* VABAL, VABDL */
4911 switch ((size << 1) | u) {
ad69471c
PB
4912 case 0:
4913 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4914 break;
4915 case 1:
4916 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4917 break;
4918 case 2:
4919 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4920 break;
4921 case 3:
4922 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4923 break;
4924 case 4:
4925 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4926 break;
4927 case 5:
4928 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4929 break;
9ee6e8bb
PB
4930 default: abort();
4931 }
ad69471c
PB
4932 dead_tmp(tmp2);
4933 dead_tmp(tmp);
9ee6e8bb
PB
4934 break;
4935 case 8: case 9: case 10: case 11: case 12: case 13:
4936 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4937 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4938 break;
4939 case 14: /* Polynomial VMULL */
4940 cpu_abort(env, "Polynomial VMULL not implemented");
4941
4942 default: /* 15 is RESERVED. */
4943 return 1;
4944 }
4945 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4946 /* Accumulate. */
4947 if (op == 10 || op == 11) {
ad69471c 4948 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4949 }
4950
9ee6e8bb 4951 if (op != 13) {
ad69471c 4952 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4953 }
4954
4955 switch (op) {
4956 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4957 gen_neon_addl(size);
9ee6e8bb
PB
4958 break;
4959 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4960 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4961 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4962 break;
9ee6e8bb
PB
4963 /* Fall through. */
4964 case 13: /* VQDMULL */
ad69471c 4965 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4966 break;
4967 default:
4968 abort();
4969 }
ad69471c 4970 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4971 } else if (op == 4 || op == 6) {
4972 /* Narrowing operation. */
ad69471c 4973 tmp = new_tmp();
9ee6e8bb
PB
4974 if (u) {
4975 switch (size) {
ad69471c
PB
4976 case 0:
4977 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4978 break;
4979 case 1:
4980 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4981 break;
4982 case 2:
4983 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4984 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4985 break;
9ee6e8bb
PB
4986 default: abort();
4987 }
4988 } else {
4989 switch (size) {
ad69471c
PB
4990 case 0:
4991 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4992 break;
4993 case 1:
4994 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4995 break;
4996 case 2:
4997 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4998 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4999 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
5000 break;
9ee6e8bb
PB
5001 default: abort();
5002 }
5003 }
ad69471c
PB
5004 if (pass == 0) {
5005 tmp3 = tmp;
5006 } else {
5007 neon_store_reg(rd, 0, tmp3);
5008 neon_store_reg(rd, 1, tmp);
5009 }
9ee6e8bb
PB
5010 } else {
5011 /* Write back the result. */
ad69471c 5012 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5013 }
5014 }
5015 } else {
5016 /* Two registers and a scalar. */
5017 switch (op) {
5018 case 0: /* Integer VMLA scalar */
5019 case 1: /* Float VMLA scalar */
5020 case 4: /* Integer VMLS scalar */
5021 case 5: /* Floating point VMLS scalar */
5022 case 8: /* Integer VMUL scalar */
5023 case 9: /* Floating point VMUL scalar */
5024 case 12: /* VQDMULH scalar */
5025 case 13: /* VQRDMULH scalar */
5026 gen_neon_get_scalar(size, rm);
8f8e3aa4 5027 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5028 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5029 if (pass != 0)
8f8e3aa4 5030 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5031 NEON_GET_REG(T1, rn, pass);
5032 if (op == 12) {
5033 if (size == 1) {
ad69471c 5034 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5035 } else {
ad69471c 5036 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5037 }
5038 } else if (op == 13) {
5039 if (size == 1) {
ad69471c 5040 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5041 } else {
ad69471c 5042 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5043 }
5044 } else if (op & 1) {
ad69471c 5045 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5046 } else {
5047 switch (size) {
ad69471c
PB
5048 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5049 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5050 case 2: gen_op_mul_T0_T1(); break;
5051 default: return 1;
5052 }
5053 }
5054 if (op < 8) {
5055 /* Accumulate. */
5056 NEON_GET_REG(T1, rd, pass);
5057 switch (op) {
5058 case 0:
5059 gen_neon_add(size);
5060 break;
5061 case 1:
ad69471c 5062 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5063 break;
5064 case 4:
ad69471c 5065 gen_neon_rsb(size);
9ee6e8bb
PB
5066 break;
5067 case 5:
ad69471c 5068 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5069 break;
5070 default:
5071 abort();
5072 }
5073 }
5074 NEON_SET_REG(T0, rd, pass);
5075 }
5076 break;
5077 case 2: /* VMLAL sclar */
5078 case 3: /* VQDMLAL scalar */
5079 case 6: /* VMLSL scalar */
5080 case 7: /* VQDMLSL scalar */
5081 case 10: /* VMULL scalar */
5082 case 11: /* VQDMULL scalar */
ad69471c
PB
5083 if (size == 0 && (op == 3 || op == 7 || op == 11))
5084 return 1;
5085
9ee6e8bb 5086 gen_neon_get_scalar(size, rm);
ad69471c
PB
5087 NEON_GET_REG(T1, rn, 1);
5088
9ee6e8bb 5089 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5090 if (pass == 0) {
5091 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5092 } else {
ad69471c
PB
5093 tmp = new_tmp();
5094 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5095 }
ad69471c
PB
5096 tmp2 = new_tmp();
5097 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5098 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5099 if (op == 6 || op == 7) {
ad69471c
PB
5100 gen_neon_negl(cpu_V0, size);
5101 }
5102 if (op != 11) {
5103 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5104 }
9ee6e8bb
PB
5105 switch (op) {
5106 case 2: case 6:
ad69471c 5107 gen_neon_addl(size);
9ee6e8bb
PB
5108 break;
5109 case 3: case 7:
ad69471c
PB
5110 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5111 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5112 break;
5113 case 10:
5114 /* no-op */
5115 break;
5116 case 11:
ad69471c 5117 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5118 break;
5119 default:
5120 abort();
5121 }
ad69471c 5122 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5123 }
5124 break;
5125 default: /* 14 and 15 are RESERVED */
5126 return 1;
5127 }
5128 }
5129 } else { /* size == 3 */
5130 if (!u) {
5131 /* Extract. */
9ee6e8bb 5132 imm = (insn >> 8) & 0xf;
ad69471c
PB
5133 count = q + 1;
5134
5135 if (imm > 7 && !q)
5136 return 1;
5137
5138 if (imm == 0) {
5139 neon_load_reg64(cpu_V0, rn);
5140 if (q) {
5141 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5142 }
ad69471c
PB
5143 } else if (imm == 8) {
5144 neon_load_reg64(cpu_V0, rn + 1);
5145 if (q) {
5146 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5147 }
ad69471c
PB
5148 } else if (q) {
5149 tmp = tcg_temp_new(TCG_TYPE_I64);
5150 if (imm < 8) {
5151 neon_load_reg64(cpu_V0, rn);
5152 neon_load_reg64(tmp, rn + 1);
5153 } else {
5154 neon_load_reg64(cpu_V0, rn + 1);
5155 neon_load_reg64(tmp, rm);
5156 }
5157 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5158 tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
5159 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5160 if (imm < 8) {
5161 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5162 } else {
ad69471c
PB
5163 neon_load_reg64(cpu_V1, rm + 1);
5164 imm -= 8;
9ee6e8bb 5165 }
ad69471c
PB
5166 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5167 tcg_gen_shri_i64(tmp, tmp, imm * 8);
5168 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
5169 } else {
5170 neon_load_reg64(cpu_V0, rn);
5171 tcg_gen_shri_i32(cpu_V0, cpu_V0, imm * 8);
5172 neon_load_reg64(cpu_V1, rm);
5173 tcg_gen_shli_i32(cpu_V1, cpu_V1, 64 - (imm * 8));
5174 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5175 }
5176 neon_store_reg64(cpu_V0, rd);
5177 if (q) {
5178 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5179 }
5180 } else if ((insn & (1 << 11)) == 0) {
5181 /* Two register misc. */
5182 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5183 size = (insn >> 18) & 3;
5184 switch (op) {
5185 case 0: /* VREV64 */
5186 if (size == 3)
5187 return 1;
5188 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5189 NEON_GET_REG(T0, rm, pass * 2);
5190 NEON_GET_REG(T1, rm, pass * 2 + 1);
5191 switch (size) {
b0109805 5192 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5193 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5194 case 2: /* no-op */ break;
5195 default: abort();
5196 }
5197 NEON_SET_REG(T0, rd, pass * 2 + 1);
5198 if (size == 2) {
5199 NEON_SET_REG(T1, rd, pass * 2);
5200 } else {
5201 gen_op_movl_T0_T1();
5202 switch (size) {
b0109805 5203 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5204 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5205 default: abort();
5206 }
5207 NEON_SET_REG(T0, rd, pass * 2);
5208 }
5209 }
5210 break;
5211 case 4: case 5: /* VPADDL */
5212 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5213 if (size == 3)
5214 return 1;
ad69471c
PB
5215 for (pass = 0; pass < q + 1; pass++) {
5216 tmp = neon_load_reg(rm, pass * 2);
5217 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5218 tmp = neon_load_reg(rm, pass * 2 + 1);
5219 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5220 switch (size) {
5221 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5222 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5223 case 2: tcg_gen_add_i64(CPU_V001); break;
5224 default: abort();
5225 }
9ee6e8bb
PB
5226 if (op >= 12) {
5227 /* Accumulate. */
ad69471c
PB
5228 neon_load_reg64(cpu_V1, rd + pass);
5229 gen_neon_addl(size);
9ee6e8bb 5230 }
ad69471c 5231 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5232 }
5233 break;
5234 case 33: /* VTRN */
5235 if (size == 2) {
5236 for (n = 0; n < (q ? 4 : 2); n += 2) {
5237 NEON_GET_REG(T0, rm, n);
5238 NEON_GET_REG(T1, rd, n + 1);
5239 NEON_SET_REG(T1, rm, n);
5240 NEON_SET_REG(T0, rd, n + 1);
5241 }
5242 } else {
5243 goto elementwise;
5244 }
5245 break;
5246 case 34: /* VUZP */
5247 /* Reg Before After
5248 Rd A3 A2 A1 A0 B2 B0 A2 A0
5249 Rm B3 B2 B1 B0 B3 B1 A3 A1
5250 */
5251 if (size == 3)
5252 return 1;
5253 gen_neon_unzip(rd, q, 0, size);
5254 gen_neon_unzip(rm, q, 4, size);
5255 if (q) {
5256 static int unzip_order_q[8] =
5257 {0, 2, 4, 6, 1, 3, 5, 7};
5258 for (n = 0; n < 8; n++) {
5259 int reg = (n < 4) ? rd : rm;
5260 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5261 NEON_SET_REG(T0, reg, n % 4);
5262 }
5263 } else {
5264 static int unzip_order[4] =
5265 {0, 4, 1, 5};
5266 for (n = 0; n < 4; n++) {
5267 int reg = (n < 2) ? rd : rm;
5268 gen_neon_movl_T0_scratch(unzip_order[n]);
5269 NEON_SET_REG(T0, reg, n % 2);
5270 }
5271 }
5272 break;
5273 case 35: /* VZIP */
5274 /* Reg Before After
5275 Rd A3 A2 A1 A0 B1 A1 B0 A0
5276 Rm B3 B2 B1 B0 B3 A3 B2 A2
5277 */
5278 if (size == 3)
5279 return 1;
5280 count = (q ? 4 : 2);
5281 for (n = 0; n < count; n++) {
5282 NEON_GET_REG(T0, rd, n);
5283 NEON_GET_REG(T1, rd, n);
5284 switch (size) {
ad69471c
PB
5285 case 0: gen_helper_neon_zip_u8(); break;
5286 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5287 case 2: /* no-op */; break;
5288 default: abort();
5289 }
5290 gen_neon_movl_scratch_T0(n * 2);
5291 gen_neon_movl_scratch_T1(n * 2 + 1);
5292 }
5293 for (n = 0; n < count * 2; n++) {
5294 int reg = (n < count) ? rd : rm;
5295 gen_neon_movl_T0_scratch(n);
5296 NEON_SET_REG(T0, reg, n % count);
5297 }
5298 break;
5299 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5300 if (size == 3)
5301 return 1;
9ee6e8bb 5302 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5303 neon_load_reg64(cpu_V0, rm + pass);
5304 tmp = new_tmp();
9ee6e8bb 5305 if (op == 36 && q == 0) {
ad69471c 5306 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5307 } else if (q) {
ad69471c 5308 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5309 } else {
ad69471c
PB
5310 gen_neon_narrow_sats(size, tmp, cpu_V0);
5311 }
5312 if (pass == 0) {
5313 tmp2 = tmp;
5314 } else {
5315 neon_store_reg(rd, 0, tmp2);
5316 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5317 }
9ee6e8bb
PB
5318 }
5319 break;
5320 case 38: /* VSHLL */
ad69471c 5321 if (q || size == 3)
9ee6e8bb 5322 return 1;
ad69471c
PB
5323 tmp = neon_load_reg(rm, 0);
5324 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5325 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5326 if (pass == 1)
5327 tmp = tmp2;
5328 gen_neon_widen(cpu_V0, tmp, size, 1);
5329 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5330 }
5331 break;
5332 default:
5333 elementwise:
5334 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5335 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5336 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5337 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5338 } else {
5339 NEON_GET_REG(T0, rm, pass);
5340 }
5341 switch (op) {
5342 case 1: /* VREV32 */
5343 switch (size) {
b0109805 5344 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5345 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5346 default: return 1;
5347 }
5348 break;
5349 case 2: /* VREV16 */
5350 if (size != 0)
5351 return 1;
3670669c 5352 gen_rev16(cpu_T[0]);
9ee6e8bb 5353 break;
9ee6e8bb
PB
5354 case 8: /* CLS */
5355 switch (size) {
ad69471c
PB
5356 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5357 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5358 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5359 default: return 1;
5360 }
5361 break;
5362 case 9: /* CLZ */
5363 switch (size) {
ad69471c
PB
5364 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5365 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5366 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5367 default: return 1;
5368 }
5369 break;
5370 case 10: /* CNT */
5371 if (size != 0)
5372 return 1;
ad69471c 5373 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5374 break;
5375 case 11: /* VNOT */
5376 if (size != 0)
5377 return 1;
5378 gen_op_notl_T0();
5379 break;
5380 case 14: /* VQABS */
5381 switch (size) {
ad69471c
PB
5382 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5383 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5384 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5385 default: return 1;
5386 }
5387 break;
5388 case 15: /* VQNEG */
5389 switch (size) {
ad69471c
PB
5390 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5391 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5392 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5393 default: return 1;
5394 }
5395 break;
5396 case 16: case 19: /* VCGT #0, VCLE #0 */
5397 gen_op_movl_T1_im(0);
5398 switch(size) {
ad69471c
PB
5399 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5400 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5401 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5402 default: return 1;
5403 }
5404 if (op == 19)
5405 gen_op_notl_T0();
5406 break;
5407 case 17: case 20: /* VCGE #0, VCLT #0 */
5408 gen_op_movl_T1_im(0);
5409 switch(size) {
ad69471c
PB
5410 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5411 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5412 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5413 default: return 1;
5414 }
5415 if (op == 20)
5416 gen_op_notl_T0();
5417 break;
5418 case 18: /* VCEQ #0 */
5419 gen_op_movl_T1_im(0);
5420 switch(size) {
ad69471c
PB
5421 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5422 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5423 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5424 default: return 1;
5425 }
5426 break;
5427 case 22: /* VABS */
5428 switch(size) {
ad69471c
PB
5429 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5430 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5431 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5432 default: return 1;
5433 }
5434 break;
5435 case 23: /* VNEG */
5436 gen_op_movl_T1_im(0);
ad69471c
PB
5437 if (size == 3)
5438 return 1;
5439 gen_neon_rsb(size);
9ee6e8bb
PB
5440 break;
5441 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5442 gen_op_movl_T1_im(0);
ad69471c 5443 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5444 if (op == 27)
5445 gen_op_notl_T0();
5446 break;
5447 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5448 gen_op_movl_T1_im(0);
ad69471c 5449 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5450 if (op == 28)
5451 gen_op_notl_T0();
5452 break;
5453 case 26: /* Float VCEQ #0 */
5454 gen_op_movl_T1_im(0);
ad69471c 5455 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5456 break;
5457 case 30: /* Float VABS */
4373f3ce 5458 gen_vfp_abs(0);
9ee6e8bb
PB
5459 break;
5460 case 31: /* Float VNEG */
4373f3ce 5461 gen_vfp_neg(0);
9ee6e8bb
PB
5462 break;
5463 case 32: /* VSWP */
5464 NEON_GET_REG(T1, rd, pass);
5465 NEON_SET_REG(T1, rm, pass);
5466 break;
5467 case 33: /* VTRN */
5468 NEON_GET_REG(T1, rd, pass);
5469 switch (size) {
ad69471c
PB
5470 case 0: gen_helper_neon_trn_u8(); break;
5471 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5472 case 2: abort();
5473 default: return 1;
5474 }
5475 NEON_SET_REG(T1, rm, pass);
5476 break;
5477 case 56: /* Integer VRECPE */
4373f3ce 5478 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5479 break;
5480 case 57: /* Integer VRSQRTE */
4373f3ce 5481 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5482 break;
5483 case 58: /* Float VRECPE */
4373f3ce 5484 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5485 break;
5486 case 59: /* Float VRSQRTE */
4373f3ce 5487 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5488 break;
5489 case 60: /* VCVT.F32.S32 */
4373f3ce 5490 gen_vfp_tosiz(0);
9ee6e8bb
PB
5491 break;
5492 case 61: /* VCVT.F32.U32 */
4373f3ce 5493 gen_vfp_touiz(0);
9ee6e8bb
PB
5494 break;
5495 case 62: /* VCVT.S32.F32 */
4373f3ce 5496 gen_vfp_sito(0);
9ee6e8bb
PB
5497 break;
5498 case 63: /* VCVT.U32.F32 */
4373f3ce 5499 gen_vfp_uito(0);
9ee6e8bb
PB
5500 break;
5501 default:
5502 /* Reserved: 21, 29, 39-56 */
5503 return 1;
5504 }
5505 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5506 tcg_gen_st_f32(cpu_F0s, cpu_env,
5507 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5508 } else {
5509 NEON_SET_REG(T0, rd, pass);
5510 }
5511 }
5512 break;
5513 }
5514 } else if ((insn & (1 << 10)) == 0) {
5515 /* VTBL, VTBX. */
5516 n = (insn >> 5) & 0x18;
9ee6e8bb 5517 if (insn & (1 << 6)) {
8f8e3aa4 5518 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5519 } else {
8f8e3aa4
PB
5520 tmp = new_tmp();
5521 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5522 }
8f8e3aa4
PB
5523 tmp2 = neon_load_reg(rm, 0);
5524 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5525 tcg_const_i32(n));
9ee6e8bb 5526 if (insn & (1 << 6)) {
8f8e3aa4 5527 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5528 } else {
8f8e3aa4
PB
5529 tmp = new_tmp();
5530 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5531 }
8f8e3aa4
PB
5532 tmp3 = neon_load_reg(rm, 1);
5533 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5534 tcg_const_i32(n));
5535 neon_store_reg(rd, 0, tmp2);
5536 neon_store_reg(rd, 1, tmp2);
9ee6e8bb
PB
5537 } else if ((insn & 0x380) == 0) {
5538 /* VDUP */
5539 if (insn & (1 << 19)) {
5540 NEON_SET_REG(T0, rm, 1);
5541 } else {
5542 NEON_SET_REG(T0, rm, 0);
5543 }
5544 if (insn & (1 << 16)) {
ad69471c 5545 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5546 } else if (insn & (1 << 17)) {
5547 if ((insn >> 18) & 1)
ad69471c 5548 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5549 else
ad69471c 5550 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5551 }
5552 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5553 NEON_SET_REG(T0, rd, pass);
5554 }
5555 } else {
5556 return 1;
5557 }
5558 }
5559 }
5560 return 0;
5561}
5562
5563static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5564{
5565 int cpnum;
5566
5567 cpnum = (insn >> 8) & 0xf;
5568 if (arm_feature(env, ARM_FEATURE_XSCALE)
5569 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5570 return 1;
5571
5572 switch (cpnum) {
5573 case 0:
5574 case 1:
5575 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5576 return disas_iwmmxt_insn(env, s, insn);
5577 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5578 return disas_dsp_insn(env, s, insn);
5579 }
5580 return 1;
5581 case 10:
5582 case 11:
5583 return disas_vfp_insn (env, s, insn);
5584 case 15:
5585 return disas_cp15_insn (env, s, insn);
5586 default:
5587 /* Unknown coprocessor. See if the board has hooked it. */
5588 return disas_cp_insn (env, s, insn);
5589 }
5590}
5591
5e3f878a
PB
5592
5593/* Store a 64-bit value to a register pair. Clobbers val. */
5594static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
5595{
5596 TCGv tmp;
5597 tmp = new_tmp();
5598 tcg_gen_trunc_i64_i32(tmp, val);
5599 store_reg(s, rlow, tmp);
5600 tmp = new_tmp();
5601 tcg_gen_shri_i64(val, val, 32);
5602 tcg_gen_trunc_i64_i32(tmp, val);
5603 store_reg(s, rhigh, tmp);
5604}
5605
5606/* load a 32-bit value from a register and perform a 64-bit accumulate. */
5607static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
5608{
5609 TCGv tmp;
5610 TCGv tmp2;
5611
5612 /* Load 64-bit value rd:rn. */
5613 tmp = tcg_temp_new(TCG_TYPE_I64);
5614 tmp2 = load_reg(s, rlow);
5615 tcg_gen_extu_i32_i64(tmp, tmp2);
5616 dead_tmp(tmp2);
5617 tcg_gen_add_i64(val, val, tmp);
5618}
5619
5620/* load and add a 64-bit value from a register pair. */
5621static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
5622{
5623 TCGv tmp;
5624 TCGv tmp2;
5625
5626 /* Load 64-bit value rd:rn. */
5627 tmp = tcg_temp_new(TCG_TYPE_I64);
5628 tmp2 = load_reg(s, rhigh);
5629 tcg_gen_extu_i32_i64(tmp, tmp2);
5630 dead_tmp(tmp2);
5631 tcg_gen_shli_i64(tmp, tmp, 32);
5632 tcg_gen_add_i64(val, val, tmp);
5633
5634 tmp2 = load_reg(s, rlow);
5635 tcg_gen_extu_i32_i64(tmp, tmp2);
5636 dead_tmp(tmp2);
5637 tcg_gen_add_i64(val, val, tmp);
5638}
5639
5640/* Set N and Z flags from a 64-bit value. */
5641static void gen_logicq_cc(TCGv val)
5642{
5643 TCGv tmp = new_tmp();
5644 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5645 gen_logic_CC(tmp);
5646 dead_tmp(tmp);
5e3f878a
PB
5647}
5648
9ee6e8bb
PB
5649static void disas_arm_insn(CPUState * env, DisasContext *s)
5650{
5651 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5652 TCGv tmp;
3670669c 5653 TCGv tmp2;
6ddbc6e4 5654 TCGv tmp3;
b0109805 5655 TCGv addr;
9ee6e8bb
PB
5656
5657 insn = ldl_code(s->pc);
5658 s->pc += 4;
5659
5660 /* M variants do not implement ARM mode. */
5661 if (IS_M(env))
5662 goto illegal_op;
5663 cond = insn >> 28;
5664 if (cond == 0xf){
5665 /* Unconditional instructions. */
5666 if (((insn >> 25) & 7) == 1) {
5667 /* NEON Data processing. */
5668 if (!arm_feature(env, ARM_FEATURE_NEON))
5669 goto illegal_op;
5670
5671 if (disas_neon_data_insn(env, s, insn))
5672 goto illegal_op;
5673 return;
5674 }
5675 if ((insn & 0x0f100000) == 0x04000000) {
5676 /* NEON load/store. */
5677 if (!arm_feature(env, ARM_FEATURE_NEON))
5678 goto illegal_op;
5679
5680 if (disas_neon_ls_insn(env, s, insn))
5681 goto illegal_op;
5682 return;
5683 }
5684 if ((insn & 0x0d70f000) == 0x0550f000)
5685 return; /* PLD */
5686 else if ((insn & 0x0ffffdff) == 0x01010000) {
5687 ARCH(6);
5688 /* setend */
5689 if (insn & (1 << 9)) {
5690 /* BE8 mode not implemented. */
5691 goto illegal_op;
5692 }
5693 return;
5694 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5695 switch ((insn >> 4) & 0xf) {
5696 case 1: /* clrex */
5697 ARCH(6K);
8f8e3aa4 5698 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5699 return;
5700 case 4: /* dsb */
5701 case 5: /* dmb */
5702 case 6: /* isb */
5703 ARCH(7);
5704 /* We don't emulate caches so these are a no-op. */
5705 return;
5706 default:
5707 goto illegal_op;
5708 }
5709 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5710 /* srs */
5711 uint32_t offset;
5712 if (IS_USER(s))
5713 goto illegal_op;
5714 ARCH(6);
5715 op1 = (insn & 0x1f);
5716 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5717 addr = load_reg(s, 13);
9ee6e8bb 5718 } else {
b0109805
PB
5719 addr = new_tmp();
5720 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5721 }
5722 i = (insn >> 23) & 3;
5723 switch (i) {
5724 case 0: offset = -4; break; /* DA */
5725 case 1: offset = -8; break; /* DB */
5726 case 2: offset = 0; break; /* IA */
5727 case 3: offset = 4; break; /* IB */
5728 default: abort();
5729 }
5730 if (offset)
b0109805
PB
5731 tcg_gen_addi_i32(addr, addr, offset);
5732 tmp = load_reg(s, 14);
5733 gen_st32(tmp, addr, 0);
5734 tmp = new_tmp();
5735 gen_helper_cpsr_read(tmp);
5736 tcg_gen_addi_i32(addr, addr, 4);
5737 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5738 if (insn & (1 << 21)) {
5739 /* Base writeback. */
5740 switch (i) {
5741 case 0: offset = -8; break;
5742 case 1: offset = -4; break;
5743 case 2: offset = 4; break;
5744 case 3: offset = 0; break;
5745 default: abort();
5746 }
5747 if (offset)
b0109805 5748 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5749 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5750 gen_movl_reg_T1(s, 13);
5751 } else {
b0109805 5752 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5753 }
b0109805
PB
5754 } else {
5755 dead_tmp(addr);
9ee6e8bb
PB
5756 }
5757 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5758 /* rfe */
5759 uint32_t offset;
5760 if (IS_USER(s))
5761 goto illegal_op;
5762 ARCH(6);
5763 rn = (insn >> 16) & 0xf;
b0109805 5764 addr = load_reg(s, rn);
9ee6e8bb
PB
5765 i = (insn >> 23) & 3;
5766 switch (i) {
b0109805
PB
5767 case 0: offset = -4; break; /* DA */
5768 case 1: offset = -8; break; /* DB */
5769 case 2: offset = 0; break; /* IA */
5770 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5771 default: abort();
5772 }
5773 if (offset)
b0109805
PB
5774 tcg_gen_addi_i32(addr, addr, offset);
5775 /* Load PC into tmp and CPSR into tmp2. */
5776 tmp = gen_ld32(addr, 0);
5777 tcg_gen_addi_i32(addr, addr, 4);
5778 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5779 if (insn & (1 << 21)) {
5780 /* Base writeback. */
5781 switch (i) {
b0109805
PB
5782 case 0: offset = -8; break;
5783 case 1: offset = -4; break;
5784 case 2: offset = 4; break;
5785 case 3: offset = 0; break;
9ee6e8bb
PB
5786 default: abort();
5787 }
5788 if (offset)
b0109805
PB
5789 tcg_gen_addi_i32(addr, addr, offset);
5790 store_reg(s, rn, addr);
5791 } else {
5792 dead_tmp(addr);
9ee6e8bb 5793 }
b0109805 5794 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5795 } else if ((insn & 0x0e000000) == 0x0a000000) {
5796 /* branch link and change to thumb (blx <offset>) */
5797 int32_t offset;
5798
5799 val = (uint32_t)s->pc;
d9ba4830
PB
5800 tmp = new_tmp();
5801 tcg_gen_movi_i32(tmp, val);
5802 store_reg(s, 14, tmp);
9ee6e8bb
PB
5803 /* Sign-extend the 24-bit offset */
5804 offset = (((int32_t)insn) << 8) >> 8;
5805 /* offset * 4 + bit24 * 2 + (thumb bit) */
5806 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5807 /* pipeline offset */
5808 val += 4;
d9ba4830 5809 gen_bx_im(s, val);
9ee6e8bb
PB
5810 return;
5811 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5812 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5813 /* iWMMXt register transfer. */
5814 if (env->cp15.c15_cpar & (1 << 1))
5815 if (!disas_iwmmxt_insn(env, s, insn))
5816 return;
5817 }
5818 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5819 /* Coprocessor double register transfer. */
5820 } else if ((insn & 0x0f000010) == 0x0e000010) {
5821 /* Additional coprocessor register transfer. */
5822 } else if ((insn & 0x0ff10010) == 0x01000000) {
5823 uint32_t mask;
5824 uint32_t val;
5825 /* cps (privileged) */
5826 if (IS_USER(s))
5827 return;
5828 mask = val = 0;
5829 if (insn & (1 << 19)) {
5830 if (insn & (1 << 8))
5831 mask |= CPSR_A;
5832 if (insn & (1 << 7))
5833 mask |= CPSR_I;
5834 if (insn & (1 << 6))
5835 mask |= CPSR_F;
5836 if (insn & (1 << 18))
5837 val |= mask;
5838 }
5839 if (insn & (1 << 14)) {
5840 mask |= CPSR_M;
5841 val |= (insn & 0x1f);
5842 }
5843 if (mask) {
5844 gen_op_movl_T0_im(val);
5845 gen_set_psr_T0(s, mask, 0);
5846 }
5847 return;
5848 }
5849 goto illegal_op;
5850 }
5851 if (cond != 0xe) {
5852 /* if not always execute, we generate a conditional jump to
5853 next instruction */
5854 s->condlabel = gen_new_label();
d9ba4830 5855 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5856 s->condjmp = 1;
5857 }
5858 if ((insn & 0x0f900000) == 0x03000000) {
5859 if ((insn & (1 << 21)) == 0) {
5860 ARCH(6T2);
5861 rd = (insn >> 12) & 0xf;
5862 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5863 if ((insn & (1 << 22)) == 0) {
5864 /* MOVW */
5e3f878a
PB
5865 tmp = new_tmp();
5866 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5867 } else {
5868 /* MOVT */
5e3f878a
PB
5869 tmp = load_reg(s, rd);
5870 tcg_gen_andi_i32(tmp, tmp, 0xffff);
5871 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5872 }
5e3f878a 5873 store_reg(s, rd, tmp);
9ee6e8bb
PB
5874 } else {
5875 if (((insn >> 12) & 0xf) != 0xf)
5876 goto illegal_op;
5877 if (((insn >> 16) & 0xf) == 0) {
5878 gen_nop_hint(s, insn & 0xff);
5879 } else {
5880 /* CPSR = immediate */
5881 val = insn & 0xff;
5882 shift = ((insn >> 8) & 0xf) * 2;
5883 if (shift)
5884 val = (val >> shift) | (val << (32 - shift));
5885 gen_op_movl_T0_im(val);
5886 i = ((insn & (1 << 22)) != 0);
5887 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5888 goto illegal_op;
5889 }
5890 }
5891 } else if ((insn & 0x0f900000) == 0x01000000
5892 && (insn & 0x00000090) != 0x00000090) {
5893 /* miscellaneous instructions */
5894 op1 = (insn >> 21) & 3;
5895 sh = (insn >> 4) & 0xf;
5896 rm = insn & 0xf;
5897 switch (sh) {
5898 case 0x0: /* move program status register */
5899 if (op1 & 1) {
5900 /* PSR = reg */
5901 gen_movl_T0_reg(s, rm);
5902 i = ((op1 & 2) != 0);
5903 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5904 goto illegal_op;
5905 } else {
5906 /* reg = PSR */
5907 rd = (insn >> 12) & 0xf;
5908 if (op1 & 2) {
5909 if (IS_USER(s))
5910 goto illegal_op;
d9ba4830 5911 tmp = load_cpu_field(spsr);
9ee6e8bb 5912 } else {
d9ba4830
PB
5913 tmp = new_tmp();
5914 gen_helper_cpsr_read(tmp);
9ee6e8bb 5915 }
d9ba4830 5916 store_reg(s, rd, tmp);
9ee6e8bb
PB
5917 }
5918 break;
5919 case 0x1:
5920 if (op1 == 1) {
5921 /* branch/exchange thumb (bx). */
d9ba4830
PB
5922 tmp = load_reg(s, rm);
5923 gen_bx(s, tmp);
9ee6e8bb
PB
5924 } else if (op1 == 3) {
5925 /* clz */
5926 rd = (insn >> 12) & 0xf;
1497c961
PB
5927 tmp = load_reg(s, rm);
5928 gen_helper_clz(tmp, tmp);
5929 store_reg(s, rd, tmp);
9ee6e8bb
PB
5930 } else {
5931 goto illegal_op;
5932 }
5933 break;
5934 case 0x2:
5935 if (op1 == 1) {
5936 ARCH(5J); /* bxj */
5937 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5938 tmp = load_reg(s, rm);
5939 gen_bx(s, tmp);
9ee6e8bb
PB
5940 } else {
5941 goto illegal_op;
5942 }
5943 break;
5944 case 0x3:
5945 if (op1 != 1)
5946 goto illegal_op;
5947
5948 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5949 tmp = load_reg(s, rm);
5950 tmp2 = new_tmp();
5951 tcg_gen_movi_i32(tmp2, s->pc);
5952 store_reg(s, 14, tmp2);
5953 gen_bx(s, tmp);
9ee6e8bb
PB
5954 break;
5955 case 0x5: /* saturating add/subtract */
5956 rd = (insn >> 12) & 0xf;
5957 rn = (insn >> 16) & 0xf;
5e3f878a
PB
5958 tmp = load_reg(s, rn);
5959 tmp2 = load_reg(s, rn);
9ee6e8bb 5960 if (op1 & 2)
5e3f878a 5961 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 5962 if (op1 & 1)
5e3f878a 5963 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 5964 else
5e3f878a
PB
5965 gen_helper_add_saturate(tmp, tmp, tmp2);
5966 dead_tmp(tmp2);
5967 store_reg(s, rd, tmp);
9ee6e8bb
PB
5968 break;
5969 case 7: /* bkpt */
5970 gen_set_condexec(s);
5e3f878a 5971 gen_set_pc_im(s->pc - 4);
d9ba4830 5972 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5973 s->is_jmp = DISAS_JUMP;
5974 break;
5975 case 0x8: /* signed multiply */
5976 case 0xa:
5977 case 0xc:
5978 case 0xe:
5979 rs = (insn >> 8) & 0xf;
5980 rn = (insn >> 12) & 0xf;
5981 rd = (insn >> 16) & 0xf;
5982 if (op1 == 1) {
5983 /* (32 * 16) >> 16 */
5e3f878a
PB
5984 tmp = load_reg(s, rm);
5985 tmp2 = load_reg(s, rs);
9ee6e8bb 5986 if (sh & 4)
5e3f878a 5987 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 5988 else
5e3f878a
PB
5989 gen_sxth(tmp2);
5990 tmp2 = gen_muls_i64_i32(tmp, tmp2);
5991 tcg_gen_shri_i64(tmp2, tmp2, 16);
5992 tmp = new_tmp();
5993 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 5994 if ((sh & 2) == 0) {
5e3f878a
PB
5995 tmp2 = load_reg(s, rn);
5996 gen_helper_add_setq(tmp, tmp, tmp2);
5997 dead_tmp(tmp2);
9ee6e8bb 5998 }
5e3f878a 5999 store_reg(s, rd, tmp);
9ee6e8bb
PB
6000 } else {
6001 /* 16 * 16 */
5e3f878a
PB
6002 tmp = load_reg(s, rm);
6003 tmp2 = load_reg(s, rs);
6004 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
6005 dead_tmp(tmp2);
9ee6e8bb 6006 if (op1 == 2) {
5e3f878a
PB
6007 tmp = tcg_temp_new(TCG_TYPE_I64);
6008 tcg_gen_ext_i32_i64(tmp, cpu_T[0]);
6009 gen_addq(s, tmp, rn, rd);
6010 gen_storeq_reg(s, rn, rd, tmp);
9ee6e8bb
PB
6011 } else {
6012 if (op1 == 0) {
5e3f878a
PB
6013 tmp2 = load_reg(s, rn);
6014 gen_helper_add_setq(tmp, tmp, tmp2);
6015 dead_tmp(tmp2);
9ee6e8bb 6016 }
5e3f878a 6017 store_reg(s, rd, tmp);
9ee6e8bb
PB
6018 }
6019 }
6020 break;
6021 default:
6022 goto illegal_op;
6023 }
6024 } else if (((insn & 0x0e000000) == 0 &&
6025 (insn & 0x00000090) != 0x90) ||
6026 ((insn & 0x0e000000) == (1 << 25))) {
6027 int set_cc, logic_cc, shiftop;
6028
6029 op1 = (insn >> 21) & 0xf;
6030 set_cc = (insn >> 20) & 1;
6031 logic_cc = table_logic_cc[op1] & set_cc;
6032
6033 /* data processing instruction */
6034 if (insn & (1 << 25)) {
6035 /* immediate operand */
6036 val = insn & 0xff;
6037 shift = ((insn >> 8) & 0xf) * 2;
6038 if (shift)
6039 val = (val >> shift) | (val << (32 - shift));
6040 gen_op_movl_T1_im(val);
6041 if (logic_cc && shift)
b26eefb6 6042 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6043 } else {
6044 /* register */
6045 rm = (insn) & 0xf;
6046 gen_movl_T1_reg(s, rm);
6047 shiftop = (insn >> 5) & 3;
6048 if (!(insn & (1 << 4))) {
6049 shift = (insn >> 7) & 0x1f;
9a119ff6 6050 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6051 } else {
6052 rs = (insn >> 8) & 0xf;
8984bd2e
PB
6053 tmp = load_reg(s, rs);
6054 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
9ee6e8bb
PB
6055 }
6056 }
6057 if (op1 != 0x0f && op1 != 0x0d) {
6058 rn = (insn >> 16) & 0xf;
6059 gen_movl_T0_reg(s, rn);
6060 }
6061 rd = (insn >> 12) & 0xf;
6062 switch(op1) {
6063 case 0x00:
6064 gen_op_andl_T0_T1();
6065 gen_movl_reg_T0(s, rd);
6066 if (logic_cc)
6067 gen_op_logic_T0_cc();
6068 break;
6069 case 0x01:
6070 gen_op_xorl_T0_T1();
6071 gen_movl_reg_T0(s, rd);
6072 if (logic_cc)
6073 gen_op_logic_T0_cc();
6074 break;
6075 case 0x02:
6076 if (set_cc && rd == 15) {
6077 /* SUBS r15, ... is used for exception return. */
6078 if (IS_USER(s))
6079 goto illegal_op;
6080 gen_op_subl_T0_T1_cc();
6081 gen_exception_return(s);
6082 } else {
6083 if (set_cc)
6084 gen_op_subl_T0_T1_cc();
6085 else
6086 gen_op_subl_T0_T1();
6087 gen_movl_reg_T0(s, rd);
6088 }
6089 break;
6090 case 0x03:
6091 if (set_cc)
6092 gen_op_rsbl_T0_T1_cc();
6093 else
6094 gen_op_rsbl_T0_T1();
6095 gen_movl_reg_T0(s, rd);
6096 break;
6097 case 0x04:
6098 if (set_cc)
6099 gen_op_addl_T0_T1_cc();
6100 else
6101 gen_op_addl_T0_T1();
6102 gen_movl_reg_T0(s, rd);
6103 break;
6104 case 0x05:
6105 if (set_cc)
6106 gen_op_adcl_T0_T1_cc();
6107 else
b26eefb6 6108 gen_adc_T0_T1();
9ee6e8bb
PB
6109 gen_movl_reg_T0(s, rd);
6110 break;
6111 case 0x06:
6112 if (set_cc)
6113 gen_op_sbcl_T0_T1_cc();
6114 else
3670669c 6115 gen_sbc_T0_T1();
9ee6e8bb
PB
6116 gen_movl_reg_T0(s, rd);
6117 break;
6118 case 0x07:
6119 if (set_cc)
6120 gen_op_rscl_T0_T1_cc();
6121 else
3670669c 6122 gen_rsc_T0_T1();
9ee6e8bb
PB
6123 gen_movl_reg_T0(s, rd);
6124 break;
6125 case 0x08:
6126 if (set_cc) {
6127 gen_op_andl_T0_T1();
6128 gen_op_logic_T0_cc();
6129 }
6130 break;
6131 case 0x09:
6132 if (set_cc) {
6133 gen_op_xorl_T0_T1();
6134 gen_op_logic_T0_cc();
6135 }
6136 break;
6137 case 0x0a:
6138 if (set_cc) {
6139 gen_op_subl_T0_T1_cc();
6140 }
6141 break;
6142 case 0x0b:
6143 if (set_cc) {
6144 gen_op_addl_T0_T1_cc();
6145 }
6146 break;
6147 case 0x0c:
6148 gen_op_orl_T0_T1();
6149 gen_movl_reg_T0(s, rd);
6150 if (logic_cc)
6151 gen_op_logic_T0_cc();
6152 break;
6153 case 0x0d:
6154 if (logic_cc && rd == 15) {
6155 /* MOVS r15, ... is used for exception return. */
6156 if (IS_USER(s))
6157 goto illegal_op;
6158 gen_op_movl_T0_T1();
6159 gen_exception_return(s);
6160 } else {
6161 gen_movl_reg_T1(s, rd);
6162 if (logic_cc)
6163 gen_op_logic_T1_cc();
6164 }
6165 break;
6166 case 0x0e:
6167 gen_op_bicl_T0_T1();
6168 gen_movl_reg_T0(s, rd);
6169 if (logic_cc)
6170 gen_op_logic_T0_cc();
6171 break;
6172 default:
6173 case 0x0f:
6174 gen_op_notl_T1();
6175 gen_movl_reg_T1(s, rd);
6176 if (logic_cc)
6177 gen_op_logic_T1_cc();
6178 break;
6179 }
6180 } else {
6181 /* other instructions */
6182 op1 = (insn >> 24) & 0xf;
6183 switch(op1) {
6184 case 0x0:
6185 case 0x1:
6186 /* multiplies, extra load/stores */
6187 sh = (insn >> 5) & 3;
6188 if (sh == 0) {
6189 if (op1 == 0x0) {
6190 rd = (insn >> 16) & 0xf;
6191 rn = (insn >> 12) & 0xf;
6192 rs = (insn >> 8) & 0xf;
6193 rm = (insn) & 0xf;
6194 op1 = (insn >> 20) & 0xf;
6195 switch (op1) {
6196 case 0: case 1: case 2: case 3: case 6:
6197 /* 32 bit mul */
5e3f878a
PB
6198 tmp = load_reg(s, rs);
6199 tmp2 = load_reg(s, rm);
6200 tcg_gen_mul_i32(tmp, tmp, tmp2);
6201 dead_tmp(tmp2);
9ee6e8bb
PB
6202 if (insn & (1 << 22)) {
6203 /* Subtract (mls) */
6204 ARCH(6T2);
5e3f878a
PB
6205 tmp2 = load_reg(s, rn);
6206 tcg_gen_sub_i32(tmp, tmp2, tmp);
6207 dead_tmp(tmp2);
9ee6e8bb
PB
6208 } else if (insn & (1 << 21)) {
6209 /* Add */
5e3f878a
PB
6210 tmp2 = load_reg(s, rn);
6211 tcg_gen_add_i32(tmp, tmp, tmp2);
6212 dead_tmp(tmp2);
9ee6e8bb
PB
6213 }
6214 if (insn & (1 << 20))
5e3f878a
PB
6215 gen_logic_CC(tmp);
6216 store_reg(s, rd, tmp);
9ee6e8bb
PB
6217 break;
6218 default:
6219 /* 64 bit mul */
5e3f878a
PB
6220 tmp = load_reg(s, rs);
6221 tmp2 = load_reg(s, rm);
9ee6e8bb 6222 if (insn & (1 << 22))
5e3f878a 6223 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6224 else
5e3f878a 6225 tmp = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6226 if (insn & (1 << 21)) /* mult accumulate */
5e3f878a 6227 gen_addq(s, tmp, rn, rd);
9ee6e8bb
PB
6228 if (!(insn & (1 << 23))) { /* double accumulate */
6229 ARCH(6);
5e3f878a
PB
6230 gen_addq_lo(s, tmp, rn);
6231 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
6232 }
6233 if (insn & (1 << 20))
5e3f878a
PB
6234 gen_logicq_cc(tmp);
6235 gen_storeq_reg(s, rn, rd, tmp);
9ee6e8bb
PB
6236 break;
6237 }
6238 } else {
6239 rn = (insn >> 16) & 0xf;
6240 rd = (insn >> 12) & 0xf;
6241 if (insn & (1 << 23)) {
6242 /* load/store exclusive */
6243 gen_movl_T1_reg(s, rn);
72f1c62f 6244 addr = cpu_T[1];
9ee6e8bb 6245 if (insn & (1 << 20)) {
8f8e3aa4
PB
6246 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6247 tmp = gen_ld32(addr, IS_USER(s));
6248 store_reg(s, rd, tmp);
9ee6e8bb 6249 } else {
8f8e3aa4 6250 int label = gen_new_label();
9ee6e8bb 6251 rm = insn & 0xf;
8f8e3aa4
PB
6252 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6253 tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
6254 tcg_const_i32(0), label);
6255 tmp = load_reg(s,rm);
6256 gen_st32(tmp, cpu_T[1], IS_USER(s));
2637a3be 6257 gen_set_label(label);
8f8e3aa4 6258 gen_movl_reg_T0(s, rd);
9ee6e8bb 6259 }
9ee6e8bb
PB
6260 } else {
6261 /* SWP instruction */
6262 rm = (insn) & 0xf;
6263
8984bd2e
PB
6264 /* ??? This is not really atomic. However we know
6265 we never have multiple CPUs running in parallel,
6266 so it is good enough. */
6267 addr = load_reg(s, rn);
6268 tmp = load_reg(s, rm);
9ee6e8bb 6269 if (insn & (1 << 22)) {
8984bd2e
PB
6270 tmp2 = gen_ld8u(addr, IS_USER(s));
6271 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6272 } else {
8984bd2e
PB
6273 tmp2 = gen_ld32(addr, IS_USER(s));
6274 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6275 }
8984bd2e
PB
6276 dead_tmp(addr);
6277 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6278 }
6279 }
6280 } else {
6281 int address_offset;
6282 int load;
6283 /* Misc load/store */
6284 rn = (insn >> 16) & 0xf;
6285 rd = (insn >> 12) & 0xf;
b0109805 6286 addr = load_reg(s, rn);
9ee6e8bb 6287 if (insn & (1 << 24))
b0109805 6288 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6289 address_offset = 0;
6290 if (insn & (1 << 20)) {
6291 /* load */
6292 switch(sh) {
6293 case 1:
b0109805 6294 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6295 break;
6296 case 2:
b0109805 6297 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6298 break;
6299 default:
6300 case 3:
b0109805 6301 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6302 break;
6303 }
6304 load = 1;
6305 } else if (sh & 2) {
6306 /* doubleword */
6307 if (sh & 1) {
6308 /* store */
b0109805
PB
6309 tmp = load_reg(s, rd);
6310 gen_st32(tmp, addr, IS_USER(s));
6311 tcg_gen_addi_i32(addr, addr, 4);
6312 tmp = load_reg(s, rd + 1);
6313 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6314 load = 0;
6315 } else {
6316 /* load */
b0109805
PB
6317 tmp = gen_ld32(addr, IS_USER(s));
6318 store_reg(s, rd, tmp);
6319 tcg_gen_addi_i32(addr, addr, 4);
6320 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6321 rd++;
6322 load = 1;
6323 }
6324 address_offset = -4;
6325 } else {
6326 /* store */
b0109805
PB
6327 tmp = load_reg(s, rd);
6328 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6329 load = 0;
6330 }
6331 /* Perform base writeback before the loaded value to
6332 ensure correct behavior with overlapping index registers.
6333 ldrd with base writeback is is undefined if the
6334 destination and index registers overlap. */
6335 if (!(insn & (1 << 24))) {
b0109805
PB
6336 gen_add_datah_offset(s, insn, address_offset, addr);
6337 store_reg(s, rn, addr);
9ee6e8bb
PB
6338 } else if (insn & (1 << 21)) {
6339 if (address_offset)
b0109805
PB
6340 tcg_gen_addi_i32(addr, addr, address_offset);
6341 store_reg(s, rn, addr);
6342 } else {
6343 dead_tmp(addr);
9ee6e8bb
PB
6344 }
6345 if (load) {
6346 /* Complete the load. */
b0109805 6347 store_reg(s, rd, tmp);
9ee6e8bb
PB
6348 }
6349 }
6350 break;
6351 case 0x4:
6352 case 0x5:
6353 goto do_ldst;
6354 case 0x6:
6355 case 0x7:
6356 if (insn & (1 << 4)) {
6357 ARCH(6);
6358 /* Armv6 Media instructions. */
6359 rm = insn & 0xf;
6360 rn = (insn >> 16) & 0xf;
2c0262af 6361 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6362 rs = (insn >> 8) & 0xf;
6363 switch ((insn >> 23) & 3) {
6364 case 0: /* Parallel add/subtract. */
6365 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6366 tmp = load_reg(s, rn);
6367 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6368 sh = (insn >> 5) & 7;
6369 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6370 goto illegal_op;
6ddbc6e4
PB
6371 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6372 dead_tmp(tmp2);
6373 store_reg(s, rd, tmp);
9ee6e8bb
PB
6374 break;
6375 case 1:
6376 if ((insn & 0x00700020) == 0) {
6c95676b 6377 /* Halfword pack. */
3670669c
PB
6378 tmp = load_reg(s, rn);
6379 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6380 shift = (insn >> 7) & 0x1f;
6381 if (shift)
3670669c
PB
6382 tcg_gen_shli_i32(tmp2, tmp2, shift);
6383 if (insn & (1 << 6)) {
6384 /* pkhtb */
6385 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6386 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
6387 } else {
6388 /* pkhbt */
6389 tcg_gen_andi_i32(tmp, tmp, 0xffff);
6390 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6391 }
6392 tcg_gen_or_i32(tmp, tmp, tmp2);
6393 store_reg(s, rd, tmp);
9ee6e8bb
PB
6394 } else if ((insn & 0x00200020) == 0x00200000) {
6395 /* [us]sat */
6ddbc6e4 6396 tmp = load_reg(s, rm);
9ee6e8bb
PB
6397 shift = (insn >> 7) & 0x1f;
6398 if (insn & (1 << 6)) {
6399 if (shift == 0)
6400 shift = 31;
6ddbc6e4 6401 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6402 } else {
6ddbc6e4 6403 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6404 }
6405 sh = (insn >> 16) & 0x1f;
6406 if (sh != 0) {
6407 if (insn & (1 << 22))
6ddbc6e4 6408 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6409 else
6ddbc6e4 6410 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6411 }
6ddbc6e4 6412 store_reg(s, rd, tmp);
9ee6e8bb
PB
6413 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6414 /* [us]sat16 */
6ddbc6e4 6415 tmp = load_reg(s, rm);
9ee6e8bb
PB
6416 sh = (insn >> 16) & 0x1f;
6417 if (sh != 0) {
6418 if (insn & (1 << 22))
6ddbc6e4 6419 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6420 else
6ddbc6e4 6421 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6422 }
6ddbc6e4 6423 store_reg(s, rd, tmp);
9ee6e8bb
PB
6424 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6425 /* Select bytes. */
6ddbc6e4
PB
6426 tmp = load_reg(s, rn);
6427 tmp2 = load_reg(s, rm);
6428 tmp3 = new_tmp();
6429 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6430 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6431 dead_tmp(tmp3);
6432 dead_tmp(tmp2);
6433 store_reg(s, rd, tmp);
9ee6e8bb 6434 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6435 tmp = load_reg(s, rm);
9ee6e8bb
PB
6436 shift = (insn >> 10) & 3;
6437 /* ??? In many cases it's not neccessary to do a
6438 rotate, a shift is sufficient. */
6439 if (shift != 0)
5e3f878a 6440 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6441 op1 = (insn >> 20) & 7;
6442 switch (op1) {
5e3f878a
PB
6443 case 0: gen_sxtb16(tmp); break;
6444 case 2: gen_sxtb(tmp); break;
6445 case 3: gen_sxth(tmp); break;
6446 case 4: gen_uxtb16(tmp); break;
6447 case 6: gen_uxtb(tmp); break;
6448 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6449 default: goto illegal_op;
6450 }
6451 if (rn != 15) {
5e3f878a 6452 tmp2 = load_reg(s, rn);
9ee6e8bb 6453 if ((op1 & 3) == 0) {
5e3f878a 6454 gen_add16(tmp, tmp2);
9ee6e8bb 6455 } else {
5e3f878a
PB
6456 tcg_gen_add_i32(tmp, tmp, tmp2);
6457 dead_tmp(tmp2);
9ee6e8bb
PB
6458 }
6459 }
6c95676b 6460 store_reg(s, rd, tmp);
9ee6e8bb
PB
6461 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6462 /* rev */
b0109805 6463 tmp = load_reg(s, rm);
9ee6e8bb
PB
6464 if (insn & (1 << 22)) {
6465 if (insn & (1 << 7)) {
b0109805 6466 gen_revsh(tmp);
9ee6e8bb
PB
6467 } else {
6468 ARCH(6T2);
b0109805 6469 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6470 }
6471 } else {
6472 if (insn & (1 << 7))
b0109805 6473 gen_rev16(tmp);
9ee6e8bb 6474 else
b0109805 6475 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6476 }
b0109805 6477 store_reg(s, rd, tmp);
9ee6e8bb
PB
6478 } else {
6479 goto illegal_op;
6480 }
6481 break;
6482 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6483 tmp = load_reg(s, rm);
6484 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6485 if (insn & (1 << 20)) {
6486 /* Signed multiply most significant [accumulate]. */
5e3f878a 6487 tmp2 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6488 if (insn & (1 << 5))
5e3f878a
PB
6489 tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
6490 tcg_gen_shri_i64(tmp2, tmp2, 32);
6491 tmp = new_tmp();
6492 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 6493 if (rn != 15) {
5e3f878a 6494 tmp2 = load_reg(s, rn);
9ee6e8bb 6495 if (insn & (1 << 6)) {
5e3f878a 6496 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6497 } else {
5e3f878a 6498 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6499 }
5e3f878a 6500 dead_tmp(tmp2);
9ee6e8bb 6501 }
5e3f878a 6502 store_reg(s, rd, tmp);
9ee6e8bb
PB
6503 } else {
6504 if (insn & (1 << 5))
5e3f878a
PB
6505 gen_swap_half(tmp2);
6506 gen_smul_dual(tmp, tmp2);
6507 /* This addition cannot overflow. */
6508 if (insn & (1 << 6)) {
6509 tcg_gen_sub_i32(tmp, tmp, tmp2);
6510 } else {
6511 tcg_gen_add_i32(tmp, tmp, tmp2);
6512 }
6513 dead_tmp(tmp2);
9ee6e8bb 6514 if (insn & (1 << 22)) {
5e3f878a
PB
6515 /* smlald, smlsld */
6516 tmp2 = tcg_temp_new(TCG_TYPE_I64);
6517 tcg_gen_ext_i32_i64(tmp2, tmp);
6518 dead_tmp(tmp);
6519 gen_addq(s, tmp2, rn, rd);
6520 gen_storeq_reg(s, rn, rd, tmp2);
9ee6e8bb 6521 } else {
5e3f878a 6522 /* smuad, smusd, smlad, smlsd */
9ee6e8bb
PB
6523 if (rn != 15)
6524 {
5e3f878a
PB
6525 tmp2 = load_reg(s, rn);
6526 gen_helper_add_setq(tmp, tmp, tmp2);
6527 dead_tmp(tmp2);
9ee6e8bb 6528 }
5e3f878a 6529 store_reg(s, rd, tmp);
9ee6e8bb
PB
6530 }
6531 }
6532 break;
6533 case 3:
6534 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6535 switch (op1) {
6536 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6537 ARCH(6);
6538 tmp = load_reg(s, rm);
6539 tmp2 = load_reg(s, rs);
6540 gen_helper_usad8(tmp, tmp, tmp2);
6541 dead_tmp(tmp2);
9ee6e8bb 6542 if (rn != 15) {
6ddbc6e4
PB
6543 tmp2 = load_reg(s, rn);
6544 tcg_gen_add_i32(tmp, tmp, tmp2);
6545 dead_tmp(tmp2);
9ee6e8bb 6546 }
6ddbc6e4 6547 store_reg(s, rd, tmp);
9ee6e8bb
PB
6548 break;
6549 case 0x20: case 0x24: case 0x28: case 0x2c:
6550 /* Bitfield insert/clear. */
6551 ARCH(6T2);
6552 shift = (insn >> 7) & 0x1f;
6553 i = (insn >> 16) & 0x1f;
6554 i = i + 1 - shift;
6555 if (rm == 15) {
5e3f878a
PB
6556 tmp = new_tmp();
6557 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6558 } else {
5e3f878a 6559 tmp = load_reg(s, rm);
9ee6e8bb
PB
6560 }
6561 if (i != 32) {
5e3f878a 6562 tmp2 = load_reg(s, rd);
8f8e3aa4 6563 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6564 dead_tmp(tmp2);
9ee6e8bb 6565 }
5e3f878a 6566 store_reg(s, rd, tmp);
9ee6e8bb
PB
6567 break;
6568 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6569 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5e3f878a 6570 tmp = load_reg(s, rm);
9ee6e8bb
PB
6571 shift = (insn >> 7) & 0x1f;
6572 i = ((insn >> 16) & 0x1f) + 1;
6573 if (shift + i > 32)
6574 goto illegal_op;
6575 if (i < 32) {
6576 if (op1 & 0x20) {
5e3f878a 6577 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6578 } else {
5e3f878a 6579 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6580 }
6581 }
5e3f878a 6582 store_reg(s, rd, tmp);
9ee6e8bb
PB
6583 break;
6584 default:
6585 goto illegal_op;
6586 }
6587 break;
6588 }
6589 break;
6590 }
6591 do_ldst:
6592 /* Check for undefined extension instructions
6593 * per the ARM Bible IE:
6594 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6595 */
6596 sh = (0xf << 20) | (0xf << 4);
6597 if (op1 == 0x7 && ((insn & sh) == sh))
6598 {
6599 goto illegal_op;
6600 }
6601 /* load/store byte/word */
6602 rn = (insn >> 16) & 0xf;
6603 rd = (insn >> 12) & 0xf;
b0109805 6604 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6605 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6606 if (insn & (1 << 24))
b0109805 6607 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6608 if (insn & (1 << 20)) {
6609 /* load */
6610 s->is_mem = 1;
9ee6e8bb 6611 if (insn & (1 << 22)) {
b0109805 6612 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6613 } else {
b0109805 6614 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6615 }
9ee6e8bb
PB
6616 } else {
6617 /* store */
b0109805 6618 tmp = load_reg(s, rd);
9ee6e8bb 6619 if (insn & (1 << 22))
b0109805 6620 gen_st8(tmp, tmp2, i);
9ee6e8bb 6621 else
b0109805 6622 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6623 }
6624 if (!(insn & (1 << 24))) {
b0109805
PB
6625 gen_add_data_offset(s, insn, tmp2);
6626 store_reg(s, rn, tmp2);
6627 } else if (insn & (1 << 21)) {
6628 store_reg(s, rn, tmp2);
6629 } else {
6630 dead_tmp(tmp2);
9ee6e8bb
PB
6631 }
6632 if (insn & (1 << 20)) {
6633 /* Complete the load. */
6634 if (rd == 15)
b0109805 6635 gen_bx(s, tmp);
9ee6e8bb 6636 else
b0109805 6637 store_reg(s, rd, tmp);
9ee6e8bb
PB
6638 }
6639 break;
6640 case 0x08:
6641 case 0x09:
6642 {
6643 int j, n, user, loaded_base;
b0109805 6644 TCGv loaded_var;
9ee6e8bb
PB
6645 /* load/store multiple words */
6646 /* XXX: store correct base if write back */
6647 user = 0;
6648 if (insn & (1 << 22)) {
6649 if (IS_USER(s))
6650 goto illegal_op; /* only usable in supervisor mode */
6651
6652 if ((insn & (1 << 15)) == 0)
6653 user = 1;
6654 }
6655 rn = (insn >> 16) & 0xf;
b0109805 6656 addr = load_reg(s, rn);
9ee6e8bb
PB
6657
6658 /* compute total size */
6659 loaded_base = 0;
6660 n = 0;
6661 for(i=0;i<16;i++) {
6662 if (insn & (1 << i))
6663 n++;
6664 }
6665 /* XXX: test invalid n == 0 case ? */
6666 if (insn & (1 << 23)) {
6667 if (insn & (1 << 24)) {
6668 /* pre increment */
b0109805 6669 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6670 } else {
6671 /* post increment */
6672 }
6673 } else {
6674 if (insn & (1 << 24)) {
6675 /* pre decrement */
b0109805 6676 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6677 } else {
6678 /* post decrement */
6679 if (n != 1)
b0109805 6680 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6681 }
6682 }
6683 j = 0;
6684 for(i=0;i<16;i++) {
6685 if (insn & (1 << i)) {
6686 if (insn & (1 << 20)) {
6687 /* load */
b0109805 6688 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6689 if (i == 15) {
b0109805 6690 gen_bx(s, tmp);
9ee6e8bb 6691 } else if (user) {
b0109805
PB
6692 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6693 dead_tmp(tmp);
9ee6e8bb 6694 } else if (i == rn) {
b0109805 6695 loaded_var = tmp;
9ee6e8bb
PB
6696 loaded_base = 1;
6697 } else {
b0109805 6698 store_reg(s, i, tmp);
9ee6e8bb
PB
6699 }
6700 } else {
6701 /* store */
6702 if (i == 15) {
6703 /* special case: r15 = PC + 8 */
6704 val = (long)s->pc + 4;
b0109805
PB
6705 tmp = new_tmp();
6706 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6707 } else if (user) {
b0109805
PB
6708 tmp = new_tmp();
6709 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6710 } else {
b0109805 6711 tmp = load_reg(s, i);
9ee6e8bb 6712 }
b0109805 6713 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6714 }
6715 j++;
6716 /* no need to add after the last transfer */
6717 if (j != n)
b0109805 6718 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6719 }
6720 }
6721 if (insn & (1 << 21)) {
6722 /* write back */
6723 if (insn & (1 << 23)) {
6724 if (insn & (1 << 24)) {
6725 /* pre increment */
6726 } else {
6727 /* post increment */
b0109805 6728 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6729 }
6730 } else {
6731 if (insn & (1 << 24)) {
6732 /* pre decrement */
6733 if (n != 1)
b0109805 6734 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6735 } else {
6736 /* post decrement */
b0109805 6737 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6738 }
6739 }
b0109805
PB
6740 store_reg(s, rn, addr);
6741 } else {
6742 dead_tmp(addr);
9ee6e8bb
PB
6743 }
6744 if (loaded_base) {
b0109805 6745 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6746 }
6747 if ((insn & (1 << 22)) && !user) {
6748 /* Restore CPSR from SPSR. */
d9ba4830
PB
6749 tmp = load_cpu_field(spsr);
6750 gen_set_cpsr(tmp, 0xffffffff);
6751 dead_tmp(tmp);
9ee6e8bb
PB
6752 s->is_jmp = DISAS_UPDATE;
6753 }
6754 }
6755 break;
6756 case 0xa:
6757 case 0xb:
6758 {
6759 int32_t offset;
6760
6761 /* branch (and link) */
6762 val = (int32_t)s->pc;
6763 if (insn & (1 << 24)) {
5e3f878a
PB
6764 tmp = new_tmp();
6765 tcg_gen_movi_i32(tmp, val);
6766 store_reg(s, 14, tmp);
9ee6e8bb
PB
6767 }
6768 offset = (((int32_t)insn << 8) >> 8);
6769 val += (offset << 2) + 4;
6770 gen_jmp(s, val);
6771 }
6772 break;
6773 case 0xc:
6774 case 0xd:
6775 case 0xe:
6776 /* Coprocessor. */
6777 if (disas_coproc_insn(env, s, insn))
6778 goto illegal_op;
6779 break;
6780 case 0xf:
6781 /* swi */
5e3f878a 6782 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6783 s->is_jmp = DISAS_SWI;
6784 break;
6785 default:
6786 illegal_op:
6787 gen_set_condexec(s);
5e3f878a 6788 gen_set_pc_im(s->pc - 4);
d9ba4830 6789 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6790 s->is_jmp = DISAS_JUMP;
6791 break;
6792 }
6793 }
6794}
6795
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* Data-processing opcodes 0..7 (and, bic, orr, orn, eor, ...) are
       the logical group; arithmetic opcodes start at 8.  */
    return op < 8;
}
6802
6803/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6804 then set condition code flags based on the result of the operation.
6805 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6806 to the high bit of T1.
6807 Returns zero if the opcode is valid. */
6808
6809static int
6810gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6811{
6812 int logic_cc;
6813
6814 logic_cc = 0;
6815 switch (op) {
6816 case 0: /* and */
6817 gen_op_andl_T0_T1();
6818 logic_cc = conds;
6819 break;
6820 case 1: /* bic */
6821 gen_op_bicl_T0_T1();
6822 logic_cc = conds;
6823 break;
6824 case 2: /* orr */
6825 gen_op_orl_T0_T1();
6826 logic_cc = conds;
6827 break;
6828 case 3: /* orn */
6829 gen_op_notl_T1();
6830 gen_op_orl_T0_T1();
6831 logic_cc = conds;
6832 break;
6833 case 4: /* eor */
6834 gen_op_xorl_T0_T1();
6835 logic_cc = conds;
6836 break;
6837 case 8: /* add */
6838 if (conds)
6839 gen_op_addl_T0_T1_cc();
6840 else
6841 gen_op_addl_T0_T1();
6842 break;
6843 case 10: /* adc */
6844 if (conds)
6845 gen_op_adcl_T0_T1_cc();
6846 else
b26eefb6 6847 gen_adc_T0_T1();
9ee6e8bb
PB
6848 break;
6849 case 11: /* sbc */
6850 if (conds)
6851 gen_op_sbcl_T0_T1_cc();
6852 else
3670669c 6853 gen_sbc_T0_T1();
9ee6e8bb
PB
6854 break;
6855 case 13: /* sub */
6856 if (conds)
6857 gen_op_subl_T0_T1_cc();
6858 else
6859 gen_op_subl_T0_T1();
6860 break;
6861 case 14: /* rsb */
6862 if (conds)
6863 gen_op_rsbl_T0_T1_cc();
6864 else
6865 gen_op_rsbl_T0_T1();
6866 break;
6867 default: /* 5, 6, 7, 9, 12, 15. */
6868 return 1;
6869 }
6870 if (logic_cc) {
6871 gen_op_logic_T0_cc();
6872 if (shifter_out)
b26eefb6 6873 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6874 }
6875 return 0;
6876}
6877
6878/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6879 is not legal. */
6880static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6881{
b0109805 6882 uint32_t insn, imm, shift, offset;
9ee6e8bb 6883 uint32_t rd, rn, rm, rs;
b26eefb6 6884 TCGv tmp;
6ddbc6e4
PB
6885 TCGv tmp2;
6886 TCGv tmp3;
b0109805 6887 TCGv addr;
9ee6e8bb
PB
6888 int op;
6889 int shiftop;
6890 int conds;
6891 int logic_cc;
6892
6893 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6894 || arm_feature (env, ARM_FEATURE_M))) {
6895 /* Thumb-1 cores may need to tread bl and blx as a pair of
6896 16-bit instructions to get correct prefetch abort behavior. */
6897 insn = insn_hw1;
6898 if ((insn & (1 << 12)) == 0) {
6899 /* Second half of blx. */
6900 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6901 tmp = load_reg(s, 14);
6902 tcg_gen_addi_i32(tmp, tmp, offset);
6903 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 6904
d9ba4830 6905 tmp2 = new_tmp();
b0109805 6906 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6907 store_reg(s, 14, tmp2);
6908 gen_bx(s, tmp);
9ee6e8bb
PB
6909 return 0;
6910 }
6911 if (insn & (1 << 11)) {
6912 /* Second half of bl. */
6913 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 6914 tmp = load_reg(s, 14);
6a0d8a1d 6915 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 6916
d9ba4830 6917 tmp2 = new_tmp();
b0109805 6918 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6919 store_reg(s, 14, tmp2);
6920 gen_bx(s, tmp);
9ee6e8bb
PB
6921 return 0;
6922 }
6923 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6924 /* Instruction spans a page boundary. Implement it as two
6925 16-bit instructions in case the second half causes an
6926 prefetch abort. */
6927 offset = ((int32_t)insn << 21) >> 9;
b0109805 6928 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
6929 gen_movl_reg_T0(s, 14);
6930 return 0;
6931 }
6932 /* Fall through to 32-bit decode. */
6933 }
6934
6935 insn = lduw_code(s->pc);
6936 s->pc += 2;
6937 insn |= (uint32_t)insn_hw1 << 16;
6938
6939 if ((insn & 0xf800e800) != 0xf000e800) {
6940 ARCH(6T2);
6941 }
6942
6943 rn = (insn >> 16) & 0xf;
6944 rs = (insn >> 12) & 0xf;
6945 rd = (insn >> 8) & 0xf;
6946 rm = insn & 0xf;
6947 switch ((insn >> 25) & 0xf) {
6948 case 0: case 1: case 2: case 3:
6949 /* 16-bit instructions. Should never happen. */
6950 abort();
6951 case 4:
6952 if (insn & (1 << 22)) {
6953 /* Other load/store, table branch. */
6954 if (insn & 0x01200000) {
6955 /* Load/store doubleword. */
6956 if (rn == 15) {
b0109805
PB
6957 addr = new_tmp();
6958 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 6959 } else {
b0109805 6960 addr = load_reg(s, rn);
9ee6e8bb
PB
6961 }
6962 offset = (insn & 0xff) * 4;
6963 if ((insn & (1 << 23)) == 0)
6964 offset = -offset;
6965 if (insn & (1 << 24)) {
b0109805 6966 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
6967 offset = 0;
6968 }
6969 if (insn & (1 << 20)) {
6970 /* ldrd */
b0109805
PB
6971 tmp = gen_ld32(addr, IS_USER(s));
6972 store_reg(s, rs, tmp);
6973 tcg_gen_addi_i32(addr, addr, 4);
6974 tmp = gen_ld32(addr, IS_USER(s));
6975 store_reg(s, rd, tmp);
9ee6e8bb
PB
6976 } else {
6977 /* strd */
b0109805
PB
6978 tmp = load_reg(s, rs);
6979 gen_st32(tmp, addr, IS_USER(s));
6980 tcg_gen_addi_i32(addr, addr, 4);
6981 tmp = load_reg(s, rd);
6982 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6983 }
6984 if (insn & (1 << 21)) {
6985 /* Base writeback. */
6986 if (rn == 15)
6987 goto illegal_op;
b0109805
PB
6988 tcg_gen_addi_i32(addr, addr, offset - 4);
6989 store_reg(s, rn, addr);
6990 } else {
6991 dead_tmp(addr);
9ee6e8bb
PB
6992 }
6993 } else if ((insn & (1 << 23)) == 0) {
6994 /* Load/store exclusive word. */
2c0262af 6995 gen_movl_T1_reg(s, rn);
72f1c62f 6996 addr = cpu_T[1];
2c0262af 6997 if (insn & (1 << 20)) {
8f8e3aa4
PB
6998 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6999 tmp = gen_ld32(addr, IS_USER(s));
7000 store_reg(s, rd, tmp);
9ee6e8bb 7001 } else {
8f8e3aa4
PB
7002 int label = gen_new_label();
7003 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7004 tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
7005 tcg_const_i32(0), label);
7006 tmp = load_reg(s, rs);
7007 gen_st32(tmp, cpu_T[1], IS_USER(s));
7008 gen_set_label(label);
7009 gen_movl_reg_T0(s, rd);
9ee6e8bb 7010 }
9ee6e8bb
PB
7011 } else if ((insn & (1 << 6)) == 0) {
7012 /* Table Branch. */
7013 if (rn == 15) {
b0109805
PB
7014 addr = new_tmp();
7015 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 7016 } else {
b0109805 7017 addr = load_reg(s, rn);
9ee6e8bb 7018 }
b26eefb6 7019 tmp = load_reg(s, rm);
b0109805 7020 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7021 if (insn & (1 << 4)) {
7022 /* tbh */
b0109805 7023 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7024 dead_tmp(tmp);
b0109805 7025 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7026 } else { /* tbb */
b26eefb6 7027 dead_tmp(tmp);
b0109805 7028 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7029 }
b0109805
PB
7030 dead_tmp(addr);
7031 tcg_gen_shli_i32(tmp, tmp, 1);
7032 tcg_gen_addi_i32(tmp, tmp, s->pc);
7033 store_reg(s, 15, tmp);
9ee6e8bb
PB
7034 } else {
7035 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7036 /* ??? These are not really atomic. However we know
7037 we never have multiple CPUs running in parallel,
7038 so it is good enough. */
9ee6e8bb 7039 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7040 /* Must use a global reg for the address because we have
7041 a conditional branch in the store instruction. */
9ee6e8bb 7042 gen_movl_T1_reg(s, rn);
8f8e3aa4 7043 addr = cpu_T[1];
9ee6e8bb 7044 if (insn & (1 << 20)) {
8f8e3aa4 7045 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7046 switch (op) {
7047 case 0:
8f8e3aa4 7048 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7049 break;
2c0262af 7050 case 1:
8f8e3aa4 7051 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7052 break;
9ee6e8bb 7053 case 3:
8f8e3aa4
PB
7054 tmp = gen_ld32(addr, IS_USER(s));
7055 tcg_gen_addi_i32(addr, addr, 4);
7056 tmp2 = gen_ld32(addr, IS_USER(s));
7057 store_reg(s, rd, tmp2);
2c0262af
FB
7058 break;
7059 default:
9ee6e8bb
PB
7060 goto illegal_op;
7061 }
8f8e3aa4 7062 store_reg(s, rs, tmp);
9ee6e8bb 7063 } else {
8f8e3aa4
PB
7064 int label = gen_new_label();
7065 /* Must use a global that is not killed by the branch. */
7066 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7067 tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0], tcg_const_i32(0),
7068 label);
7069 tmp = load_reg(s, rs);
9ee6e8bb
PB
7070 switch (op) {
7071 case 0:
8f8e3aa4 7072 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7073 break;
7074 case 1:
8f8e3aa4 7075 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7076 break;
2c0262af 7077 case 3:
8f8e3aa4
PB
7078 gen_st32(tmp, addr, IS_USER(s));
7079 tcg_gen_addi_i32(addr, addr, 4);
7080 tmp = load_reg(s, rd);
7081 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7082 break;
9ee6e8bb
PB
7083 default:
7084 goto illegal_op;
2c0262af 7085 }
8f8e3aa4 7086 gen_set_label(label);
9ee6e8bb
PB
7087 gen_movl_reg_T0(s, rm);
7088 }
7089 }
7090 } else {
7091 /* Load/store multiple, RFE, SRS. */
7092 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7093 /* Not available in user mode. */
b0109805 7094 if (IS_USER(s))
9ee6e8bb
PB
7095 goto illegal_op;
7096 if (insn & (1 << 20)) {
7097 /* rfe */
b0109805
PB
7098 addr = load_reg(s, rn);
7099 if ((insn & (1 << 24)) == 0)
7100 tcg_gen_addi_i32(addr, addr, -8);
7101 /* Load PC into tmp and CPSR into tmp2. */
7102 tmp = gen_ld32(addr, 0);
7103 tcg_gen_addi_i32(addr, addr, 4);
7104 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7105 if (insn & (1 << 21)) {
7106 /* Base writeback. */
b0109805
PB
7107 if (insn & (1 << 24)) {
7108 tcg_gen_addi_i32(addr, addr, 4);
7109 } else {
7110 tcg_gen_addi_i32(addr, addr, -4);
7111 }
7112 store_reg(s, rn, addr);
7113 } else {
7114 dead_tmp(addr);
9ee6e8bb 7115 }
b0109805 7116 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7117 } else {
7118 /* srs */
7119 op = (insn & 0x1f);
7120 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7121 addr = load_reg(s, 13);
9ee6e8bb 7122 } else {
b0109805
PB
7123 addr = new_tmp();
7124 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7125 }
7126 if ((insn & (1 << 24)) == 0) {
b0109805 7127 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7128 }
b0109805
PB
7129 tmp = load_reg(s, 14);
7130 gen_st32(tmp, addr, 0);
7131 tcg_gen_addi_i32(addr, addr, 4);
7132 tmp = new_tmp();
7133 gen_helper_cpsr_read(tmp);
7134 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7135 if (insn & (1 << 21)) {
7136 if ((insn & (1 << 24)) == 0) {
b0109805 7137 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7138 } else {
b0109805 7139 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7140 }
7141 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7142 store_reg(s, 13, addr);
9ee6e8bb 7143 } else {
b0109805
PB
7144 gen_helper_set_r13_banked(cpu_env,
7145 tcg_const_i32(op), addr);
9ee6e8bb 7146 }
b0109805
PB
7147 } else {
7148 dead_tmp(addr);
9ee6e8bb
PB
7149 }
7150 }
7151 } else {
7152 int i;
7153 /* Load/store multiple. */
b0109805 7154 addr = load_reg(s, rn);
9ee6e8bb
PB
7155 offset = 0;
7156 for (i = 0; i < 16; i++) {
7157 if (insn & (1 << i))
7158 offset += 4;
7159 }
7160 if (insn & (1 << 24)) {
b0109805 7161 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7162 }
7163
7164 for (i = 0; i < 16; i++) {
7165 if ((insn & (1 << i)) == 0)
7166 continue;
7167 if (insn & (1 << 20)) {
7168 /* Load. */
b0109805 7169 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7170 if (i == 15) {
b0109805 7171 gen_bx(s, tmp);
9ee6e8bb 7172 } else {
b0109805 7173 store_reg(s, i, tmp);
9ee6e8bb
PB
7174 }
7175 } else {
7176 /* Store. */
b0109805
PB
7177 tmp = load_reg(s, i);
7178 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7179 }
b0109805 7180 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7181 }
7182 if (insn & (1 << 21)) {
7183 /* Base register writeback. */
7184 if (insn & (1 << 24)) {
b0109805 7185 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7186 }
7187 /* Fault if writeback register is in register list. */
7188 if (insn & (1 << rn))
7189 goto illegal_op;
b0109805
PB
7190 store_reg(s, rn, addr);
7191 } else {
7192 dead_tmp(addr);
9ee6e8bb
PB
7193 }
7194 }
7195 }
7196 break;
7197 case 5: /* Data processing register constant shift. */
7198 if (rn == 15)
7199 gen_op_movl_T0_im(0);
7200 else
7201 gen_movl_T0_reg(s, rn);
7202 gen_movl_T1_reg(s, rm);
7203 op = (insn >> 21) & 0xf;
7204 shiftop = (insn >> 4) & 3;
7205 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7206 conds = (insn & (1 << 20)) != 0;
7207 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7208 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7209 if (gen_thumb2_data_op(s, op, conds, 0))
7210 goto illegal_op;
7211 if (rd != 15)
7212 gen_movl_reg_T0(s, rd);
7213 break;
7214 case 13: /* Misc data processing. */
7215 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7216 if (op < 4 && (insn & 0xf000) != 0xf000)
7217 goto illegal_op;
7218 switch (op) {
7219 case 0: /* Register controlled shift. */
8984bd2e
PB
7220 tmp = load_reg(s, rn);
7221 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7222 if ((insn & 0x70) != 0)
7223 goto illegal_op;
7224 op = (insn >> 21) & 3;
8984bd2e
PB
7225 logic_cc = (insn & (1 << 20)) != 0;
7226 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7227 if (logic_cc)
7228 gen_logic_CC(tmp);
7229 store_reg(s, rd, tmp);
9ee6e8bb
PB
7230 break;
7231 case 1: /* Sign/zero extend. */
5e3f878a 7232 tmp = load_reg(s, rm);
9ee6e8bb
PB
7233 shift = (insn >> 4) & 3;
7234 /* ??? In many cases it's not neccessary to do a
7235 rotate, a shift is sufficient. */
7236 if (shift != 0)
5e3f878a 7237 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7238 op = (insn >> 20) & 7;
7239 switch (op) {
5e3f878a
PB
7240 case 0: gen_sxth(tmp); break;
7241 case 1: gen_uxth(tmp); break;
7242 case 2: gen_sxtb16(tmp); break;
7243 case 3: gen_uxtb16(tmp); break;
7244 case 4: gen_sxtb(tmp); break;
7245 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7246 default: goto illegal_op;
7247 }
7248 if (rn != 15) {
5e3f878a 7249 tmp2 = load_reg(s, rn);
9ee6e8bb 7250 if ((op >> 1) == 1) {
5e3f878a 7251 gen_add16(tmp, tmp2);
9ee6e8bb 7252 } else {
5e3f878a
PB
7253 tcg_gen_add_i32(tmp, tmp, tmp2);
7254 dead_tmp(tmp2);
9ee6e8bb
PB
7255 }
7256 }
5e3f878a 7257 store_reg(s, rd, tmp);
9ee6e8bb
PB
7258 break;
7259 case 2: /* SIMD add/subtract. */
7260 op = (insn >> 20) & 7;
7261 shift = (insn >> 4) & 7;
7262 if ((op & 3) == 3 || (shift & 3) == 3)
7263 goto illegal_op;
6ddbc6e4
PB
7264 tmp = load_reg(s, rn);
7265 tmp2 = load_reg(s, rm);
7266 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7267 dead_tmp(tmp2);
7268 store_reg(s, rd, tmp);
9ee6e8bb
PB
7269 break;
7270 case 3: /* Other data processing. */
7271 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7272 if (op < 4) {
7273 /* Saturating add/subtract. */
d9ba4830
PB
7274 tmp = load_reg(s, rn);
7275 tmp2 = load_reg(s, rm);
9ee6e8bb 7276 if (op & 2)
d9ba4830 7277 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7278 if (op & 1)
d9ba4830 7279 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7280 else
d9ba4830
PB
7281 gen_helper_add_saturate(tmp, tmp, tmp2);
7282 dead_tmp(tmp2);
9ee6e8bb 7283 } else {
d9ba4830 7284 tmp = load_reg(s, rn);
9ee6e8bb
PB
7285 switch (op) {
7286 case 0x0a: /* rbit */
d9ba4830 7287 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7288 break;
7289 case 0x08: /* rev */
d9ba4830 7290 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
7291 break;
7292 case 0x09: /* rev16 */
d9ba4830 7293 gen_rev16(tmp);
9ee6e8bb
PB
7294 break;
7295 case 0x0b: /* revsh */
d9ba4830 7296 gen_revsh(tmp);
9ee6e8bb
PB
7297 break;
7298 case 0x10: /* sel */
d9ba4830 7299 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7300 tmp3 = new_tmp();
7301 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7302 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7303 dead_tmp(tmp3);
d9ba4830 7304 dead_tmp(tmp2);
9ee6e8bb
PB
7305 break;
7306 case 0x18: /* clz */
d9ba4830 7307 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7308 break;
7309 default:
7310 goto illegal_op;
7311 }
7312 }
d9ba4830 7313 store_reg(s, rd, tmp);
9ee6e8bb
PB
7314 break;
7315 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7316 op = (insn >> 4) & 0xf;
d9ba4830
PB
7317 tmp = load_reg(s, rn);
7318 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7319 switch ((insn >> 20) & 7) {
7320 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7321 tcg_gen_mul_i32(tmp, tmp, tmp2);
7322 dead_tmp(tmp2);
9ee6e8bb 7323 if (rs != 15) {
d9ba4830 7324 tmp2 = load_reg(s, rs);
9ee6e8bb 7325 if (op)
d9ba4830 7326 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7327 else
d9ba4830
PB
7328 tcg_gen_add_i32(tmp, tmp, tmp2);
7329 dead_tmp(tmp2);
9ee6e8bb 7330 }
9ee6e8bb
PB
7331 break;
7332 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7333 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7334 dead_tmp(tmp2);
9ee6e8bb 7335 if (rs != 15) {
d9ba4830
PB
7336 tmp2 = load_reg(s, rs);
7337 gen_helper_add_setq(tmp, tmp, tmp2);
7338 dead_tmp(tmp2);
9ee6e8bb 7339 }
9ee6e8bb
PB
7340 break;
7341 case 2: /* Dual multiply add. */
7342 case 4: /* Dual multiply subtract. */
7343 if (op)
d9ba4830
PB
7344 gen_swap_half(tmp2);
7345 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7346 /* This addition cannot overflow. */
7347 if (insn & (1 << 22)) {
d9ba4830 7348 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7349 } else {
d9ba4830 7350 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7351 }
d9ba4830 7352 dead_tmp(tmp2);
9ee6e8bb
PB
7353 if (rs != 15)
7354 {
d9ba4830
PB
7355 tmp2 = load_reg(s, rs);
7356 gen_helper_add_setq(tmp, tmp, tmp2);
7357 dead_tmp(tmp2);
9ee6e8bb 7358 }
9ee6e8bb
PB
7359 break;
7360 case 3: /* 32 * 16 -> 32msb */
7361 if (op)
d9ba4830 7362 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7363 else
d9ba4830 7364 gen_sxth(tmp2);
5e3f878a
PB
7365 tmp2 = gen_muls_i64_i32(tmp, tmp2);
7366 tcg_gen_shri_i64(tmp2, tmp2, 16);
7367 tmp = new_tmp();
7368 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb
PB
7369 if (rs != 15)
7370 {
d9ba4830
PB
7371 tmp2 = load_reg(s, rs);
7372 gen_helper_add_setq(tmp, tmp, tmp2);
7373 dead_tmp(tmp2);
9ee6e8bb 7374 }
9ee6e8bb
PB
7375 break;
7376 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7377 gen_imull(tmp, tmp2);
7378 if (insn & (1 << 5)) {
7379 gen_roundqd(tmp, tmp2);
7380 dead_tmp(tmp2);
7381 } else {
7382 dead_tmp(tmp);
7383 tmp = tmp2;
7384 }
9ee6e8bb 7385 if (rs != 15) {
d9ba4830 7386 tmp2 = load_reg(s, rs);
9ee6e8bb 7387 if (insn & (1 << 21)) {
d9ba4830 7388 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7389 } else {
d9ba4830 7390 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7391 }
d9ba4830 7392 dead_tmp(tmp2);
2c0262af 7393 }
9ee6e8bb
PB
7394 break;
7395 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7396 gen_helper_usad8(tmp, tmp, tmp2);
7397 dead_tmp(tmp2);
9ee6e8bb 7398 if (rs != 15) {
d9ba4830
PB
7399 tmp2 = load_reg(s, rs);
7400 tcg_gen_add_i32(tmp, tmp, tmp2);
7401 dead_tmp(tmp2);
5fd46862 7402 }
9ee6e8bb 7403 break;
2c0262af 7404 }
d9ba4830 7405 store_reg(s, rd, tmp);
2c0262af 7406 break;
9ee6e8bb
PB
7407 case 6: case 7: /* 64-bit multiply, Divide. */
7408 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7409 tmp = load_reg(s, rn);
7410 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7411 if ((op & 0x50) == 0x10) {
7412 /* sdiv, udiv */
7413 if (!arm_feature(env, ARM_FEATURE_DIV))
7414 goto illegal_op;
7415 if (op & 0x20)
5e3f878a 7416 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7417 else
5e3f878a
PB
7418 gen_helper_sdiv(tmp, tmp, tmp2);
7419 dead_tmp(tmp2);
7420 store_reg(s, rd, tmp);
9ee6e8bb
PB
7421 } else if ((op & 0xe) == 0xc) {
7422 /* Dual multiply accumulate long. */
7423 if (op & 1)
5e3f878a
PB
7424 gen_swap_half(tmp2);
7425 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7426 if (op & 0x10) {
5e3f878a 7427 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7428 } else {
5e3f878a 7429 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7430 }
5e3f878a
PB
7431 dead_tmp(tmp2);
7432 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7433 gen_addq(s, tmp, rs, rd);
7434 gen_storeq_reg(s, rs, rd, tmp);
2c0262af 7435 } else {
9ee6e8bb
PB
7436 if (op & 0x20) {
7437 /* Unsigned 64-bit multiply */
5e3f878a 7438 tmp = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7439 } else {
9ee6e8bb
PB
7440 if (op & 8) {
7441 /* smlalxy */
5e3f878a
PB
7442 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7443 dead_tmp(tmp2);
7444 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7445 tcg_gen_ext_i32_i64(tmp2, tmp);
7446 dead_tmp(tmp);
7447 tmp = tmp2;
9ee6e8bb
PB
7448 } else {
7449 /* Signed 64-bit multiply */
5e3f878a 7450 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7451 }
b5ff1b31 7452 }
9ee6e8bb
PB
7453 if (op & 4) {
7454 /* umaal */
5e3f878a
PB
7455 gen_addq_lo(s, tmp, rs);
7456 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
7457 } else if (op & 0x40) {
7458 /* 64-bit accumulate. */
5e3f878a 7459 gen_addq(s, tmp, rs, rd);
9ee6e8bb 7460 }
5e3f878a 7461 gen_storeq_reg(s, rs, rd, tmp);
5fd46862 7462 }
2c0262af 7463 break;
9ee6e8bb
PB
7464 }
7465 break;
7466 case 6: case 7: case 14: case 15:
7467 /* Coprocessor. */
7468 if (((insn >> 24) & 3) == 3) {
7469 /* Translate into the equivalent ARM encoding. */
7470 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7471 if (disas_neon_data_insn(env, s, insn))
7472 goto illegal_op;
7473 } else {
7474 if (insn & (1 << 28))
7475 goto illegal_op;
7476 if (disas_coproc_insn (env, s, insn))
7477 goto illegal_op;
7478 }
7479 break;
7480 case 8: case 9: case 10: case 11:
7481 if (insn & (1 << 15)) {
7482 /* Branches, misc control. */
7483 if (insn & 0x5000) {
7484 /* Unconditional branch. */
7485 /* signextend(hw1[10:0]) -> offset[:12]. */
7486 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7487 /* hw1[10:0] -> offset[11:1]. */
7488 offset |= (insn & 0x7ff) << 1;
7489 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7490 offset[24:22] already have the same value because of the
7491 sign extension above. */
7492 offset ^= ((~insn) & (1 << 13)) << 10;
7493 offset ^= ((~insn) & (1 << 11)) << 11;
7494
9ee6e8bb
PB
7495 if (insn & (1 << 14)) {
7496 /* Branch and link. */
b0109805 7497 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7498 gen_movl_reg_T1(s, 14);
b5ff1b31 7499 }
3b46e624 7500
b0109805 7501 offset += s->pc;
9ee6e8bb
PB
7502 if (insn & (1 << 12)) {
7503 /* b/bl */
b0109805 7504 gen_jmp(s, offset);
9ee6e8bb
PB
7505 } else {
7506 /* blx */
b0109805
PB
7507 offset &= ~(uint32_t)2;
7508 gen_bx_im(s, offset);
2c0262af 7509 }
9ee6e8bb
PB
7510 } else if (((insn >> 23) & 7) == 7) {
7511 /* Misc control */
7512 if (insn & (1 << 13))
7513 goto illegal_op;
7514
7515 if (insn & (1 << 26)) {
7516 /* Secure monitor call (v6Z) */
7517 goto illegal_op; /* not implemented. */
2c0262af 7518 } else {
9ee6e8bb
PB
7519 op = (insn >> 20) & 7;
7520 switch (op) {
7521 case 0: /* msr cpsr. */
7522 if (IS_M(env)) {
8984bd2e
PB
7523 tmp = load_reg(s, rn);
7524 addr = tcg_const_i32(insn & 0xff);
7525 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7526 gen_lookup_tb(s);
7527 break;
7528 }
7529 /* fall through */
7530 case 1: /* msr spsr. */
7531 if (IS_M(env))
7532 goto illegal_op;
7533 gen_movl_T0_reg(s, rn);
7534 if (gen_set_psr_T0(s,
7535 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7536 op == 1))
7537 goto illegal_op;
7538 break;
7539 case 2: /* cps, nop-hint. */
7540 if (((insn >> 8) & 7) == 0) {
7541 gen_nop_hint(s, insn & 0xff);
7542 }
7543 /* Implemented as NOP in user mode. */
7544 if (IS_USER(s))
7545 break;
7546 offset = 0;
7547 imm = 0;
7548 if (insn & (1 << 10)) {
7549 if (insn & (1 << 7))
7550 offset |= CPSR_A;
7551 if (insn & (1 << 6))
7552 offset |= CPSR_I;
7553 if (insn & (1 << 5))
7554 offset |= CPSR_F;
7555 if (insn & (1 << 9))
7556 imm = CPSR_A | CPSR_I | CPSR_F;
7557 }
7558 if (insn & (1 << 8)) {
7559 offset |= 0x1f;
7560 imm |= (insn & 0x1f);
7561 }
7562 if (offset) {
7563 gen_op_movl_T0_im(imm);
7564 gen_set_psr_T0(s, offset, 0);
7565 }
7566 break;
7567 case 3: /* Special control operations. */
7568 op = (insn >> 4) & 0xf;
7569 switch (op) {
7570 case 2: /* clrex */
8f8e3aa4 7571 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7572 break;
7573 case 4: /* dsb */
7574 case 5: /* dmb */
7575 case 6: /* isb */
7576 /* These execute as NOPs. */
7577 ARCH(7);
7578 break;
7579 default:
7580 goto illegal_op;
7581 }
7582 break;
7583 case 4: /* bxj */
7584 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7585 tmp = load_reg(s, rn);
7586 gen_bx(s, tmp);
9ee6e8bb
PB
7587 break;
7588 case 5: /* Exception return. */
7589 /* Unpredictable in user mode. */
7590 goto illegal_op;
7591 case 6: /* mrs cpsr. */
8984bd2e 7592 tmp = new_tmp();
9ee6e8bb 7593 if (IS_M(env)) {
8984bd2e
PB
7594 addr = tcg_const_i32(insn & 0xff);
7595 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7596 } else {
8984bd2e 7597 gen_helper_cpsr_read(tmp);
9ee6e8bb 7598 }
8984bd2e 7599 store_reg(s, rd, tmp);
9ee6e8bb
PB
7600 break;
7601 case 7: /* mrs spsr. */
7602 /* Not accessible in user mode. */
7603 if (IS_USER(s) || IS_M(env))
7604 goto illegal_op;
d9ba4830
PB
7605 tmp = load_cpu_field(spsr);
7606 store_reg(s, rd, tmp);
9ee6e8bb 7607 break;
2c0262af
FB
7608 }
7609 }
9ee6e8bb
PB
7610 } else {
7611 /* Conditional branch. */
7612 op = (insn >> 22) & 0xf;
7613 /* Generate a conditional jump to next instruction. */
7614 s->condlabel = gen_new_label();
d9ba4830 7615 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7616 s->condjmp = 1;
7617
7618 /* offset[11:1] = insn[10:0] */
7619 offset = (insn & 0x7ff) << 1;
7620 /* offset[17:12] = insn[21:16]. */
7621 offset |= (insn & 0x003f0000) >> 4;
7622 /* offset[31:20] = insn[26]. */
7623 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7624 /* offset[18] = insn[13]. */
7625 offset |= (insn & (1 << 13)) << 5;
7626 /* offset[19] = insn[11]. */
7627 offset |= (insn & (1 << 11)) << 8;
7628
7629 /* jump to the offset */
b0109805 7630 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7631 }
7632 } else {
7633 /* Data processing immediate. */
7634 if (insn & (1 << 25)) {
7635 if (insn & (1 << 24)) {
7636 if (insn & (1 << 20))
7637 goto illegal_op;
7638 /* Bitfield/Saturate. */
7639 op = (insn >> 21) & 7;
7640 imm = insn & 0x1f;
7641 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7642 if (rn == 15) {
7643 tmp = new_tmp();
7644 tcg_gen_movi_i32(tmp, 0);
7645 } else {
7646 tmp = load_reg(s, rn);
7647 }
9ee6e8bb
PB
7648 switch (op) {
7649 case 2: /* Signed bitfield extract. */
7650 imm++;
7651 if (shift + imm > 32)
7652 goto illegal_op;
7653 if (imm < 32)
6ddbc6e4 7654 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7655 break;
7656 case 6: /* Unsigned bitfield extract. */
7657 imm++;
7658 if (shift + imm > 32)
7659 goto illegal_op;
7660 if (imm < 32)
6ddbc6e4 7661 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7662 break;
7663 case 3: /* Bitfield insert/clear. */
7664 if (imm < shift)
7665 goto illegal_op;
7666 imm = imm + 1 - shift;
7667 if (imm != 32) {
6ddbc6e4 7668 tmp2 = load_reg(s, rd);
8f8e3aa4 7669 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7670 dead_tmp(tmp2);
9ee6e8bb
PB
7671 }
7672 break;
7673 case 7:
7674 goto illegal_op;
7675 default: /* Saturate. */
9ee6e8bb
PB
7676 if (shift) {
7677 if (op & 1)
6ddbc6e4 7678 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7679 else
6ddbc6e4 7680 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7681 }
6ddbc6e4 7682 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7683 if (op & 4) {
7684 /* Unsigned. */
9ee6e8bb 7685 if ((op & 1) && shift == 0)
6ddbc6e4 7686 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7687 else
6ddbc6e4 7688 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7689 } else {
9ee6e8bb 7690 /* Signed. */
9ee6e8bb 7691 if ((op & 1) && shift == 0)
6ddbc6e4 7692 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7693 else
6ddbc6e4 7694 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7695 }
9ee6e8bb 7696 break;
2c0262af 7697 }
6ddbc6e4 7698 store_reg(s, rd, tmp);
9ee6e8bb
PB
7699 } else {
7700 imm = ((insn & 0x04000000) >> 15)
7701 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7702 if (insn & (1 << 22)) {
7703 /* 16-bit immediate. */
7704 imm |= (insn >> 4) & 0xf000;
7705 if (insn & (1 << 23)) {
7706 /* movt */
5e3f878a
PB
7707 tmp = load_reg(s, rd);
7708 tcg_gen_andi_i32(tmp, tmp, 0xffff);
7709 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7710 } else {
9ee6e8bb 7711 /* movw */
5e3f878a
PB
7712 tmp = new_tmp();
7713 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7714 }
7715 } else {
9ee6e8bb
PB
7716 /* Add/sub 12-bit immediate. */
7717 if (rn == 15) {
b0109805 7718 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7719 if (insn & (1 << 23))
b0109805 7720 offset -= imm;
9ee6e8bb 7721 else
b0109805 7722 offset += imm;
5e3f878a
PB
7723 tmp = new_tmp();
7724 tcg_gen_movi_i32(tmp, offset);
2c0262af 7725 } else {
5e3f878a 7726 tmp = load_reg(s, rn);
9ee6e8bb 7727 if (insn & (1 << 23))
5e3f878a 7728 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7729 else
5e3f878a 7730 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7731 }
9ee6e8bb 7732 }
5e3f878a 7733 store_reg(s, rd, tmp);
191abaa2 7734 }
9ee6e8bb
PB
7735 } else {
7736 int shifter_out = 0;
7737 /* modified 12-bit immediate. */
7738 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7739 imm = (insn & 0xff);
7740 switch (shift) {
7741 case 0: /* XY */
7742 /* Nothing to do. */
7743 break;
7744 case 1: /* 00XY00XY */
7745 imm |= imm << 16;
7746 break;
7747 case 2: /* XY00XY00 */
7748 imm |= imm << 16;
7749 imm <<= 8;
7750 break;
7751 case 3: /* XYXYXYXY */
7752 imm |= imm << 16;
7753 imm |= imm << 8;
7754 break;
7755 default: /* Rotated constant. */
7756 shift = (shift << 1) | (imm >> 7);
7757 imm |= 0x80;
7758 imm = imm << (32 - shift);
7759 shifter_out = 1;
7760 break;
b5ff1b31 7761 }
9ee6e8bb
PB
7762 gen_op_movl_T1_im(imm);
7763 rn = (insn >> 16) & 0xf;
7764 if (rn == 15)
7765 gen_op_movl_T0_im(0);
7766 else
7767 gen_movl_T0_reg(s, rn);
7768 op = (insn >> 21) & 0xf;
7769 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7770 shifter_out))
7771 goto illegal_op;
7772 rd = (insn >> 8) & 0xf;
7773 if (rd != 15) {
7774 gen_movl_reg_T0(s, rd);
2c0262af 7775 }
2c0262af 7776 }
9ee6e8bb
PB
7777 }
7778 break;
7779 case 12: /* Load/store single data item. */
7780 {
7781 int postinc = 0;
7782 int writeback = 0;
b0109805 7783 int user;
9ee6e8bb
PB
7784 if ((insn & 0x01100000) == 0x01000000) {
7785 if (disas_neon_ls_insn(env, s, insn))
c1713132 7786 goto illegal_op;
9ee6e8bb
PB
7787 break;
7788 }
b0109805 7789 user = IS_USER(s);
9ee6e8bb 7790 if (rn == 15) {
b0109805 7791 addr = new_tmp();
9ee6e8bb
PB
7792 /* PC relative. */
7793 /* s->pc has already been incremented by 4. */
7794 imm = s->pc & 0xfffffffc;
7795 if (insn & (1 << 23))
7796 imm += insn & 0xfff;
7797 else
7798 imm -= insn & 0xfff;
b0109805 7799 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7800 } else {
b0109805 7801 addr = load_reg(s, rn);
9ee6e8bb
PB
7802 if (insn & (1 << 23)) {
7803 /* Positive offset. */
7804 imm = insn & 0xfff;
b0109805 7805 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7806 } else {
7807 op = (insn >> 8) & 7;
7808 imm = insn & 0xff;
7809 switch (op) {
7810 case 0: case 8: /* Shifted Register. */
7811 shift = (insn >> 4) & 0xf;
7812 if (shift > 3)
18c9b560 7813 goto illegal_op;
b26eefb6 7814 tmp = load_reg(s, rm);
9ee6e8bb 7815 if (shift)
b26eefb6 7816 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7817 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7818 dead_tmp(tmp);
9ee6e8bb
PB
7819 break;
7820 case 4: /* Negative offset. */
b0109805 7821 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7822 break;
7823 case 6: /* User privilege. */
b0109805
PB
7824 tcg_gen_addi_i32(addr, addr, imm);
7825 user = 1;
9ee6e8bb
PB
7826 break;
7827 case 1: /* Post-decrement. */
7828 imm = -imm;
7829 /* Fall through. */
7830 case 3: /* Post-increment. */
9ee6e8bb
PB
7831 postinc = 1;
7832 writeback = 1;
7833 break;
7834 case 5: /* Pre-decrement. */
7835 imm = -imm;
7836 /* Fall through. */
7837 case 7: /* Pre-increment. */
b0109805 7838 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7839 writeback = 1;
7840 break;
7841 default:
b7bcbe95 7842 goto illegal_op;
9ee6e8bb
PB
7843 }
7844 }
7845 }
7846 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7847 if (insn & (1 << 20)) {
7848 /* Load. */
7849 if (rs == 15 && op != 2) {
7850 if (op & 2)
b5ff1b31 7851 goto illegal_op;
9ee6e8bb
PB
7852 /* Memory hint. Implemented as NOP. */
7853 } else {
7854 switch (op) {
b0109805
PB
7855 case 0: tmp = gen_ld8u(addr, user); break;
7856 case 4: tmp = gen_ld8s(addr, user); break;
7857 case 1: tmp = gen_ld16u(addr, user); break;
7858 case 5: tmp = gen_ld16s(addr, user); break;
7859 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7860 default: goto illegal_op;
7861 }
7862 if (rs == 15) {
b0109805 7863 gen_bx(s, tmp);
9ee6e8bb 7864 } else {
b0109805 7865 store_reg(s, rs, tmp);
9ee6e8bb
PB
7866 }
7867 }
7868 } else {
7869 /* Store. */
7870 if (rs == 15)
b7bcbe95 7871 goto illegal_op;
b0109805 7872 tmp = load_reg(s, rs);
9ee6e8bb 7873 switch (op) {
b0109805
PB
7874 case 0: gen_st8(tmp, addr, user); break;
7875 case 1: gen_st16(tmp, addr, user); break;
7876 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7877 default: goto illegal_op;
b7bcbe95 7878 }
2c0262af 7879 }
9ee6e8bb 7880 if (postinc)
b0109805
PB
7881 tcg_gen_addi_i32(addr, addr, imm);
7882 if (writeback) {
7883 store_reg(s, rn, addr);
7884 } else {
7885 dead_tmp(addr);
7886 }
9ee6e8bb
PB
7887 }
7888 break;
7889 default:
7890 goto illegal_op;
2c0262af 7891 }
9ee6e8bb
PB
7892 return 0;
7893illegal_op:
7894 return 1;
2c0262af
FB
7895}
7896
9ee6e8bb 7897static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
7898{
7899 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7900 int32_t offset;
7901 int i;
b26eefb6 7902 TCGv tmp;
d9ba4830 7903 TCGv tmp2;
b0109805 7904 TCGv addr;
99c475ab 7905
9ee6e8bb
PB
7906 if (s->condexec_mask) {
7907 cond = s->condexec_cond;
7908 s->condlabel = gen_new_label();
d9ba4830 7909 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7910 s->condjmp = 1;
7911 }
7912
b5ff1b31 7913 insn = lduw_code(s->pc);
99c475ab 7914 s->pc += 2;
b5ff1b31 7915
99c475ab
FB
7916 switch (insn >> 12) {
7917 case 0: case 1:
7918 rd = insn & 7;
7919 op = (insn >> 11) & 3;
7920 if (op == 3) {
7921 /* add/subtract */
7922 rn = (insn >> 3) & 7;
7923 gen_movl_T0_reg(s, rn);
7924 if (insn & (1 << 10)) {
7925 /* immediate */
7926 gen_op_movl_T1_im((insn >> 6) & 7);
7927 } else {
7928 /* reg */
7929 rm = (insn >> 6) & 7;
7930 gen_movl_T1_reg(s, rm);
7931 }
9ee6e8bb
PB
7932 if (insn & (1 << 9)) {
7933 if (s->condexec_mask)
7934 gen_op_subl_T0_T1();
7935 else
7936 gen_op_subl_T0_T1_cc();
7937 } else {
7938 if (s->condexec_mask)
7939 gen_op_addl_T0_T1();
7940 else
7941 gen_op_addl_T0_T1_cc();
7942 }
99c475ab
FB
7943 gen_movl_reg_T0(s, rd);
7944 } else {
7945 /* shift immediate */
7946 rm = (insn >> 3) & 7;
7947 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
7948 tmp = load_reg(s, rm);
7949 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7950 if (!s->condexec_mask)
7951 gen_logic_CC(tmp);
7952 store_reg(s, rd, tmp);
99c475ab
FB
7953 }
7954 break;
7955 case 2: case 3:
7956 /* arithmetic large immediate */
7957 op = (insn >> 11) & 3;
7958 rd = (insn >> 8) & 0x7;
7959 if (op == 0) {
7960 gen_op_movl_T0_im(insn & 0xff);
7961 } else {
7962 gen_movl_T0_reg(s, rd);
7963 gen_op_movl_T1_im(insn & 0xff);
7964 }
7965 switch (op) {
7966 case 0: /* mov */
9ee6e8bb
PB
7967 if (!s->condexec_mask)
7968 gen_op_logic_T0_cc();
99c475ab
FB
7969 break;
7970 case 1: /* cmp */
7971 gen_op_subl_T0_T1_cc();
7972 break;
7973 case 2: /* add */
9ee6e8bb
PB
7974 if (s->condexec_mask)
7975 gen_op_addl_T0_T1();
7976 else
7977 gen_op_addl_T0_T1_cc();
99c475ab
FB
7978 break;
7979 case 3: /* sub */
9ee6e8bb
PB
7980 if (s->condexec_mask)
7981 gen_op_subl_T0_T1();
7982 else
7983 gen_op_subl_T0_T1_cc();
99c475ab
FB
7984 break;
7985 }
7986 if (op != 1)
7987 gen_movl_reg_T0(s, rd);
7988 break;
7989 case 4:
7990 if (insn & (1 << 11)) {
7991 rd = (insn >> 8) & 7;
5899f386
FB
7992 /* load pc-relative. Bit 1 of PC is ignored. */
7993 val = s->pc + 2 + ((insn & 0xff) * 4);
7994 val &= ~(uint32_t)2;
b0109805
PB
7995 addr = new_tmp();
7996 tcg_gen_movi_i32(addr, val);
7997 tmp = gen_ld32(addr, IS_USER(s));
7998 dead_tmp(addr);
7999 store_reg(s, rd, tmp);
99c475ab
FB
8000 break;
8001 }
8002 if (insn & (1 << 10)) {
8003 /* data processing extended or blx */
8004 rd = (insn & 7) | ((insn >> 4) & 8);
8005 rm = (insn >> 3) & 0xf;
8006 op = (insn >> 8) & 3;
8007 switch (op) {
8008 case 0: /* add */
8009 gen_movl_T0_reg(s, rd);
8010 gen_movl_T1_reg(s, rm);
8011 gen_op_addl_T0_T1();
8012 gen_movl_reg_T0(s, rd);
8013 break;
8014 case 1: /* cmp */
8015 gen_movl_T0_reg(s, rd);
8016 gen_movl_T1_reg(s, rm);
8017 gen_op_subl_T0_T1_cc();
8018 break;
8019 case 2: /* mov/cpy */
8020 gen_movl_T0_reg(s, rm);
8021 gen_movl_reg_T0(s, rd);
8022 break;
8023 case 3:/* branch [and link] exchange thumb register */
b0109805 8024 tmp = load_reg(s, rm);
99c475ab
FB
8025 if (insn & (1 << 7)) {
8026 val = (uint32_t)s->pc | 1;
b0109805
PB
8027 tmp2 = new_tmp();
8028 tcg_gen_movi_i32(tmp2, val);
8029 store_reg(s, 14, tmp2);
99c475ab 8030 }
d9ba4830 8031 gen_bx(s, tmp);
99c475ab
FB
8032 break;
8033 }
8034 break;
8035 }
8036
8037 /* data processing register */
8038 rd = insn & 7;
8039 rm = (insn >> 3) & 7;
8040 op = (insn >> 6) & 0xf;
8041 if (op == 2 || op == 3 || op == 4 || op == 7) {
8042 /* the shift/rotate ops want the operands backwards */
8043 val = rm;
8044 rm = rd;
8045 rd = val;
8046 val = 1;
8047 } else {
8048 val = 0;
8049 }
8050
8051 if (op == 9) /* neg */
8052 gen_op_movl_T0_im(0);
8053 else if (op != 0xf) /* mvn doesn't read its first operand */
8054 gen_movl_T0_reg(s, rd);
8055
8056 gen_movl_T1_reg(s, rm);
5899f386 8057 switch (op) {
99c475ab
FB
8058 case 0x0: /* and */
8059 gen_op_andl_T0_T1();
9ee6e8bb
PB
8060 if (!s->condexec_mask)
8061 gen_op_logic_T0_cc();
99c475ab
FB
8062 break;
8063 case 0x1: /* eor */
8064 gen_op_xorl_T0_T1();
9ee6e8bb
PB
8065 if (!s->condexec_mask)
8066 gen_op_logic_T0_cc();
99c475ab
FB
8067 break;
8068 case 0x2: /* lsl */
9ee6e8bb 8069 if (s->condexec_mask) {
8984bd2e 8070 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8071 } else {
8984bd2e 8072 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8073 gen_op_logic_T1_cc();
8074 }
99c475ab
FB
8075 break;
8076 case 0x3: /* lsr */
9ee6e8bb 8077 if (s->condexec_mask) {
8984bd2e 8078 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8079 } else {
8984bd2e 8080 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8081 gen_op_logic_T1_cc();
8082 }
99c475ab
FB
8083 break;
8084 case 0x4: /* asr */
9ee6e8bb 8085 if (s->condexec_mask) {
8984bd2e 8086 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8087 } else {
8984bd2e 8088 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8089 gen_op_logic_T1_cc();
8090 }
99c475ab
FB
8091 break;
8092 case 0x5: /* adc */
9ee6e8bb 8093 if (s->condexec_mask)
b26eefb6 8094 gen_adc_T0_T1();
9ee6e8bb
PB
8095 else
8096 gen_op_adcl_T0_T1_cc();
99c475ab
FB
8097 break;
8098 case 0x6: /* sbc */
9ee6e8bb 8099 if (s->condexec_mask)
3670669c 8100 gen_sbc_T0_T1();
9ee6e8bb
PB
8101 else
8102 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
8103 break;
8104 case 0x7: /* ror */
9ee6e8bb 8105 if (s->condexec_mask) {
8984bd2e 8106 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8107 } else {
8984bd2e 8108 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8109 gen_op_logic_T1_cc();
8110 }
99c475ab
FB
8111 break;
8112 case 0x8: /* tst */
8113 gen_op_andl_T0_T1();
8114 gen_op_logic_T0_cc();
8115 rd = 16;
5899f386 8116 break;
99c475ab 8117 case 0x9: /* neg */
9ee6e8bb
PB
8118 if (s->condexec_mask)
8119 gen_op_subl_T0_T1();
8120 else
8121 gen_op_subl_T0_T1_cc();
99c475ab
FB
8122 break;
8123 case 0xa: /* cmp */
8124 gen_op_subl_T0_T1_cc();
8125 rd = 16;
8126 break;
8127 case 0xb: /* cmn */
8128 gen_op_addl_T0_T1_cc();
8129 rd = 16;
8130 break;
8131 case 0xc: /* orr */
8132 gen_op_orl_T0_T1();
9ee6e8bb
PB
8133 if (!s->condexec_mask)
8134 gen_op_logic_T0_cc();
99c475ab
FB
8135 break;
8136 case 0xd: /* mul */
8137 gen_op_mull_T0_T1();
9ee6e8bb
PB
8138 if (!s->condexec_mask)
8139 gen_op_logic_T0_cc();
99c475ab
FB
8140 break;
8141 case 0xe: /* bic */
8142 gen_op_bicl_T0_T1();
9ee6e8bb
PB
8143 if (!s->condexec_mask)
8144 gen_op_logic_T0_cc();
99c475ab
FB
8145 break;
8146 case 0xf: /* mvn */
8147 gen_op_notl_T1();
9ee6e8bb
PB
8148 if (!s->condexec_mask)
8149 gen_op_logic_T1_cc();
99c475ab 8150 val = 1;
5899f386 8151 rm = rd;
99c475ab
FB
8152 break;
8153 }
8154 if (rd != 16) {
8155 if (val)
5899f386 8156 gen_movl_reg_T1(s, rm);
99c475ab
FB
8157 else
8158 gen_movl_reg_T0(s, rd);
8159 }
8160 break;
8161
8162 case 5:
8163 /* load/store register offset. */
8164 rd = insn & 7;
8165 rn = (insn >> 3) & 7;
8166 rm = (insn >> 6) & 7;
8167 op = (insn >> 9) & 7;
b0109805 8168 addr = load_reg(s, rn);
b26eefb6 8169 tmp = load_reg(s, rm);
b0109805 8170 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8171 dead_tmp(tmp);
99c475ab
FB
8172
8173 if (op < 3) /* store */
b0109805 8174 tmp = load_reg(s, rd);
99c475ab
FB
8175
8176 switch (op) {
8177 case 0: /* str */
b0109805 8178 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8179 break;
8180 case 1: /* strh */
b0109805 8181 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8182 break;
8183 case 2: /* strb */
b0109805 8184 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8185 break;
8186 case 3: /* ldrsb */
b0109805 8187 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8188 break;
8189 case 4: /* ldr */
b0109805 8190 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8191 break;
8192 case 5: /* ldrh */
b0109805 8193 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8194 break;
8195 case 6: /* ldrb */
b0109805 8196 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8197 break;
8198 case 7: /* ldrsh */
b0109805 8199 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8200 break;
8201 }
8202 if (op >= 3) /* load */
b0109805
PB
8203 store_reg(s, rd, tmp);
8204 dead_tmp(addr);
99c475ab
FB
8205 break;
8206
8207 case 6:
8208 /* load/store word immediate offset */
8209 rd = insn & 7;
8210 rn = (insn >> 3) & 7;
b0109805 8211 addr = load_reg(s, rn);
99c475ab 8212 val = (insn >> 4) & 0x7c;
b0109805 8213 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8214
8215 if (insn & (1 << 11)) {
8216 /* load */
b0109805
PB
8217 tmp = gen_ld32(addr, IS_USER(s));
8218 store_reg(s, rd, tmp);
99c475ab
FB
8219 } else {
8220 /* store */
b0109805
PB
8221 tmp = load_reg(s, rd);
8222 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8223 }
b0109805 8224 dead_tmp(addr);
99c475ab
FB
8225 break;
8226
8227 case 7:
8228 /* load/store byte immediate offset */
8229 rd = insn & 7;
8230 rn = (insn >> 3) & 7;
b0109805 8231 addr = load_reg(s, rn);
99c475ab 8232 val = (insn >> 6) & 0x1f;
b0109805 8233 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8234
8235 if (insn & (1 << 11)) {
8236 /* load */
b0109805
PB
8237 tmp = gen_ld8u(addr, IS_USER(s));
8238 store_reg(s, rd, tmp);
99c475ab
FB
8239 } else {
8240 /* store */
b0109805
PB
8241 tmp = load_reg(s, rd);
8242 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8243 }
b0109805 8244 dead_tmp(addr);
99c475ab
FB
8245 break;
8246
8247 case 8:
8248 /* load/store halfword immediate offset */
8249 rd = insn & 7;
8250 rn = (insn >> 3) & 7;
b0109805 8251 addr = load_reg(s, rn);
99c475ab 8252 val = (insn >> 5) & 0x3e;
b0109805 8253 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8254
8255 if (insn & (1 << 11)) {
8256 /* load */
b0109805
PB
8257 tmp = gen_ld16u(addr, IS_USER(s));
8258 store_reg(s, rd, tmp);
99c475ab
FB
8259 } else {
8260 /* store */
b0109805
PB
8261 tmp = load_reg(s, rd);
8262 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8263 }
b0109805 8264 dead_tmp(addr);
99c475ab
FB
8265 break;
8266
8267 case 9:
8268 /* load/store from stack */
8269 rd = (insn >> 8) & 7;
b0109805 8270 addr = load_reg(s, 13);
99c475ab 8271 val = (insn & 0xff) * 4;
b0109805 8272 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8273
8274 if (insn & (1 << 11)) {
8275 /* load */
b0109805
PB
8276 tmp = gen_ld32(addr, IS_USER(s));
8277 store_reg(s, rd, tmp);
99c475ab
FB
8278 } else {
8279 /* store */
b0109805
PB
8280 tmp = load_reg(s, rd);
8281 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8282 }
b0109805 8283 dead_tmp(addr);
99c475ab
FB
8284 break;
8285
8286 case 10:
8287 /* add to high reg */
8288 rd = (insn >> 8) & 7;
5899f386
FB
8289 if (insn & (1 << 11)) {
8290 /* SP */
5e3f878a 8291 tmp = load_reg(s, 13);
5899f386
FB
8292 } else {
8293 /* PC. bit 1 is ignored. */
5e3f878a
PB
8294 tmp = new_tmp();
8295 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8296 }
99c475ab 8297 val = (insn & 0xff) * 4;
5e3f878a
PB
8298 tcg_gen_addi_i32(tmp, tmp, val);
8299 store_reg(s, rd, tmp);
99c475ab
FB
8300 break;
8301
8302 case 11:
8303 /* misc */
8304 op = (insn >> 8) & 0xf;
8305 switch (op) {
8306 case 0:
8307 /* adjust stack pointer */
b26eefb6 8308 tmp = load_reg(s, 13);
99c475ab
FB
8309 val = (insn & 0x7f) * 4;
8310 if (insn & (1 << 7))
6a0d8a1d 8311 val = -(int32_t)val;
b26eefb6
PB
8312 tcg_gen_addi_i32(tmp, tmp, val);
8313 store_reg(s, 13, tmp);
99c475ab
FB
8314 break;
8315
9ee6e8bb
PB
8316 case 2: /* sign/zero extend. */
8317 ARCH(6);
8318 rd = insn & 7;
8319 rm = (insn >> 3) & 7;
b0109805 8320 tmp = load_reg(s, rm);
9ee6e8bb 8321 switch ((insn >> 6) & 3) {
b0109805
PB
8322 case 0: gen_sxth(tmp); break;
8323 case 1: gen_sxtb(tmp); break;
8324 case 2: gen_uxth(tmp); break;
8325 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8326 }
b0109805 8327 store_reg(s, rd, tmp);
9ee6e8bb 8328 break;
99c475ab
FB
8329 case 4: case 5: case 0xc: case 0xd:
8330 /* push/pop */
b0109805 8331 addr = load_reg(s, 13);
5899f386
FB
8332 if (insn & (1 << 8))
8333 offset = 4;
99c475ab 8334 else
5899f386
FB
8335 offset = 0;
8336 for (i = 0; i < 8; i++) {
8337 if (insn & (1 << i))
8338 offset += 4;
8339 }
8340 if ((insn & (1 << 11)) == 0) {
b0109805 8341 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8342 }
99c475ab
FB
8343 for (i = 0; i < 8; i++) {
8344 if (insn & (1 << i)) {
8345 if (insn & (1 << 11)) {
8346 /* pop */
b0109805
PB
8347 tmp = gen_ld32(addr, IS_USER(s));
8348 store_reg(s, i, tmp);
99c475ab
FB
8349 } else {
8350 /* push */
b0109805
PB
8351 tmp = load_reg(s, i);
8352 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8353 }
5899f386 8354 /* advance to the next address. */
b0109805 8355 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8356 }
8357 }
8358 if (insn & (1 << 8)) {
8359 if (insn & (1 << 11)) {
8360 /* pop pc */
b0109805 8361 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8362 /* don't set the pc until the rest of the instruction
8363 has completed */
8364 } else {
8365 /* push lr */
b0109805
PB
8366 tmp = load_reg(s, 14);
8367 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8368 }
b0109805 8369 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8370 }
5899f386 8371 if ((insn & (1 << 11)) == 0) {
b0109805 8372 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8373 }
99c475ab 8374 /* write back the new stack pointer */
b0109805 8375 store_reg(s, 13, addr);
99c475ab
FB
8376 /* set the new PC value */
8377 if ((insn & 0x0900) == 0x0900)
b0109805 8378 gen_bx(s, tmp);
99c475ab
FB
8379 break;
8380
9ee6e8bb
PB
8381 case 1: case 3: case 9: case 11: /* czb */
8382 rm = insn & 7;
d9ba4830
PB
8383 tmp = load_reg(s, rm);
8384 tmp2 = tcg_const_i32(0);
9ee6e8bb
PB
8385 s->condlabel = gen_new_label();
8386 s->condjmp = 1;
8387 if (insn & (1 << 11))
d9ba4830 8388 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, s->condlabel);
9ee6e8bb 8389 else
d9ba4830
PB
8390 tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, s->condlabel);
8391 dead_tmp(tmp);
9ee6e8bb
PB
8392 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8393 val = (uint32_t)s->pc + 2;
8394 val += offset;
8395 gen_jmp(s, val);
8396 break;
8397
8398 case 15: /* IT, nop-hint. */
8399 if ((insn & 0xf) == 0) {
8400 gen_nop_hint(s, (insn >> 4) & 0xf);
8401 break;
8402 }
8403 /* If Then. */
8404 s->condexec_cond = (insn >> 4) & 0xe;
8405 s->condexec_mask = insn & 0x1f;
8406 /* No actual code generated for this insn, just setup state. */
8407 break;
8408
06c949e6 8409 case 0xe: /* bkpt */
9ee6e8bb 8410 gen_set_condexec(s);
5e3f878a 8411 gen_set_pc_im(s->pc - 2);
d9ba4830 8412 gen_exception(EXCP_BKPT);
06c949e6
PB
8413 s->is_jmp = DISAS_JUMP;
8414 break;
8415
9ee6e8bb
PB
8416 case 0xa: /* rev */
8417 ARCH(6);
8418 rn = (insn >> 3) & 0x7;
8419 rd = insn & 0x7;
b0109805 8420 tmp = load_reg(s, rn);
9ee6e8bb 8421 switch ((insn >> 6) & 3) {
b0109805
PB
8422 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8423 case 1: gen_rev16(tmp); break;
8424 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8425 default: goto illegal_op;
8426 }
b0109805 8427 store_reg(s, rd, tmp);
9ee6e8bb
PB
8428 break;
8429
8430 case 6: /* cps */
8431 ARCH(6);
8432 if (IS_USER(s))
8433 break;
8434 if (IS_M(env)) {
8984bd2e 8435 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8436 /* PRIMASK */
8984bd2e
PB
8437 if (insn & 1) {
8438 addr = tcg_const_i32(16);
8439 gen_helper_v7m_msr(cpu_env, addr, tmp);
8440 }
9ee6e8bb 8441 /* FAULTMASK */
8984bd2e
PB
8442 if (insn & 2) {
8443 addr = tcg_const_i32(17);
8444 gen_helper_v7m_msr(cpu_env, addr, tmp);
8445 }
9ee6e8bb
PB
8446 gen_lookup_tb(s);
8447 } else {
8448 if (insn & (1 << 4))
8449 shift = CPSR_A | CPSR_I | CPSR_F;
8450 else
8451 shift = 0;
8452
8453 val = ((insn & 7) << 6) & shift;
8454 gen_op_movl_T0_im(val);
8455 gen_set_psr_T0(s, shift, 0);
8456 }
8457 break;
8458
99c475ab
FB
8459 default:
8460 goto undef;
8461 }
8462 break;
8463
8464 case 12:
8465 /* load/store multiple */
8466 rn = (insn >> 8) & 0x7;
b0109805 8467 addr = load_reg(s, rn);
99c475ab
FB
8468 for (i = 0; i < 8; i++) {
8469 if (insn & (1 << i)) {
99c475ab
FB
8470 if (insn & (1 << 11)) {
8471 /* load */
b0109805
PB
8472 tmp = gen_ld32(addr, IS_USER(s));
8473 store_reg(s, i, tmp);
99c475ab
FB
8474 } else {
8475 /* store */
b0109805
PB
8476 tmp = load_reg(s, i);
8477 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8478 }
5899f386 8479 /* advance to the next address */
b0109805 8480 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8481 }
8482 }
5899f386 8483 /* Base register writeback. */
b0109805
PB
8484 if ((insn & (1 << rn)) == 0) {
8485 store_reg(s, rn, addr);
8486 } else {
8487 dead_tmp(addr);
8488 }
99c475ab
FB
8489 break;
8490
8491 case 13:
8492 /* conditional branch or swi */
8493 cond = (insn >> 8) & 0xf;
8494 if (cond == 0xe)
8495 goto undef;
8496
8497 if (cond == 0xf) {
8498 /* swi */
9ee6e8bb 8499 gen_set_condexec(s);
5e3f878a 8500 gen_set_pc_im(s->pc | 1);
9ee6e8bb 8501 s->is_jmp = DISAS_SWI;
99c475ab
FB
8502 break;
8503 }
8504 /* generate a conditional jump to next instruction */
e50e6a20 8505 s->condlabel = gen_new_label();
d9ba4830 8506 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8507 s->condjmp = 1;
99c475ab
FB
8508 gen_movl_T1_reg(s, 15);
8509
8510 /* jump to the offset */
5899f386 8511 val = (uint32_t)s->pc + 2;
99c475ab 8512 offset = ((int32_t)insn << 24) >> 24;
5899f386 8513 val += offset << 1;
8aaca4c0 8514 gen_jmp(s, val);
99c475ab
FB
8515 break;
8516
8517 case 14:
358bf29e 8518 if (insn & (1 << 11)) {
9ee6e8bb
PB
8519 if (disas_thumb2_insn(env, s, insn))
8520 goto undef32;
358bf29e
PB
8521 break;
8522 }
9ee6e8bb 8523 /* unconditional branch */
99c475ab
FB
8524 val = (uint32_t)s->pc;
8525 offset = ((int32_t)insn << 21) >> 21;
8526 val += (offset << 1) + 2;
8aaca4c0 8527 gen_jmp(s, val);
99c475ab
FB
8528 break;
8529
8530 case 15:
9ee6e8bb 8531 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8532 goto undef32;
9ee6e8bb 8533 break;
99c475ab
FB
8534 }
8535 return;
9ee6e8bb
PB
8536undef32:
8537 gen_set_condexec(s);
5e3f878a 8538 gen_set_pc_im(s->pc - 4);
d9ba4830 8539 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8540 s->is_jmp = DISAS_JUMP;
8541 return;
8542illegal_op:
99c475ab 8543undef:
9ee6e8bb 8544 gen_set_condexec(s);
5e3f878a 8545 gen_set_pc_im(s->pc - 2);
d9ba4830 8546 gen_exception(EXCP_UDEF);
99c475ab
FB
8547 s->is_jmp = DISAS_JUMP;
8548}
8549
2c0262af
FB
8550/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8551 basic block 'tb'. If search_pc is TRUE, also generate PC
8552 information for each intermediate instruction. */
5fafdf24
TS
8553static inline int gen_intermediate_code_internal(CPUState *env,
8554 TranslationBlock *tb,
2c0262af
FB
8555 int search_pc)
8556{
8557 DisasContext dc1, *dc = &dc1;
8558 uint16_t *gen_opc_end;
8559 int j, lj;
0fa85d43 8560 target_ulong pc_start;
b5ff1b31 8561 uint32_t next_page_start;
3b46e624 8562
2c0262af 8563 /* generate intermediate code */
b26eefb6
PB
8564 num_temps = 0;
8565 memset(temps, 0, sizeof(temps));
8566
0fa85d43 8567 pc_start = tb->pc;
3b46e624 8568
2c0262af
FB
8569 dc->tb = tb;
8570
2c0262af 8571 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8572
8573 dc->is_jmp = DISAS_NEXT;
8574 dc->pc = pc_start;
8aaca4c0 8575 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8576 dc->condjmp = 0;
5899f386 8577 dc->thumb = env->thumb;
9ee6e8bb
PB
8578 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8579 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 8580 dc->is_mem = 0;
b5ff1b31 8581#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8582 if (IS_M(env)) {
8583 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8584 } else {
8585 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8586 }
b5ff1b31 8587#endif
4373f3ce
PB
8588 cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8589 cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8590 cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8591 cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
ad69471c
PB
8592 cpu_V0 = cpu_F0d;
8593 cpu_V1 = cpu_F1d;
e677137d
PB
8594 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8595 cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
b5ff1b31 8596 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8597 lj = -1;
9ee6e8bb
PB
8598 /* Reset the conditional execution bits immediately. This avoids
8599 complications trying to do it at the end of the block. */
8600 if (env->condexec_bits)
8f01245e
PB
8601 {
8602 TCGv tmp = new_tmp();
8603 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8604 store_cpu_field(tmp, condexec_bits);
8f01245e 8605 }
2c0262af 8606 do {
9ee6e8bb
PB
8607#ifndef CONFIG_USER_ONLY
8608 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8609 /* We always get here via a jump, so know we are not in a
8610 conditional execution block. */
d9ba4830 8611 gen_exception(EXCP_EXCEPTION_EXIT);
9ee6e8bb
PB
8612 }
8613#endif
8614
1fddef4b
FB
8615 if (env->nb_breakpoints > 0) {
8616 for(j = 0; j < env->nb_breakpoints; j++) {
8617 if (env->breakpoints[j] == dc->pc) {
9ee6e8bb 8618 gen_set_condexec(dc);
5e3f878a 8619 gen_set_pc_im(dc->pc);
d9ba4830 8620 gen_exception(EXCP_DEBUG);
1fddef4b 8621 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8622 /* Advance PC so that clearing the breakpoint will
8623 invalidate this TB. */
8624 dc->pc += 2;
8625 goto done_generating;
1fddef4b
FB
8626 break;
8627 }
8628 }
8629 }
2c0262af
FB
8630 if (search_pc) {
8631 j = gen_opc_ptr - gen_opc_buf;
8632 if (lj < j) {
8633 lj++;
8634 while (lj < j)
8635 gen_opc_instr_start[lj++] = 0;
8636 }
0fa85d43 8637 gen_opc_pc[lj] = dc->pc;
2c0262af
FB
8638 gen_opc_instr_start[lj] = 1;
8639 }
e50e6a20 8640
9ee6e8bb
PB
8641 if (env->thumb) {
8642 disas_thumb_insn(env, dc);
8643 if (dc->condexec_mask) {
8644 dc->condexec_cond = (dc->condexec_cond & 0xe)
8645 | ((dc->condexec_mask >> 4) & 1);
8646 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8647 if (dc->condexec_mask == 0) {
8648 dc->condexec_cond = 0;
8649 }
8650 }
8651 } else {
8652 disas_arm_insn(env, dc);
8653 }
b26eefb6
PB
8654 if (num_temps) {
8655 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8656 num_temps = 0;
8657 }
e50e6a20
FB
8658
8659 if (dc->condjmp && !dc->is_jmp) {
8660 gen_set_label(dc->condlabel);
8661 dc->condjmp = 0;
8662 }
6658ffb8
PB
8663 /* Terminate the TB on memory ops if watchpoints are present. */
8664 /* FIXME: This should be replacd by the deterministic execution
8665 * IRQ raising bits. */
8666 if (dc->is_mem && env->nb_watchpoints)
8667 break;
8668
e50e6a20
FB
8669 /* Translation stops when a conditional branch is enoutered.
8670 * Otherwise the subsequent code could get translated several times.
b5ff1b31
FB
8671 * Also stop translation when a page boundary is reached. This
8672 * ensures prefech aborts occur at the right place. */
1fddef4b
FB
8673 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8674 !env->singlestep_enabled &&
b5ff1b31 8675 dc->pc < next_page_start);
9ee6e8bb 8676
b5ff1b31 8677 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8678 instruction was a conditional branch or trap, and the PC has
8679 already been written. */
8aaca4c0
FB
8680 if (__builtin_expect(env->singlestep_enabled, 0)) {
8681 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8682 if (dc->condjmp) {
9ee6e8bb
PB
8683 gen_set_condexec(dc);
8684 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8685 gen_exception(EXCP_SWI);
9ee6e8bb 8686 } else {
d9ba4830 8687 gen_exception(EXCP_DEBUG);
9ee6e8bb 8688 }
e50e6a20
FB
8689 gen_set_label(dc->condlabel);
8690 }
8691 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8692 gen_set_pc_im(dc->pc);
e50e6a20 8693 dc->condjmp = 0;
8aaca4c0 8694 }
9ee6e8bb
PB
8695 gen_set_condexec(dc);
8696 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8697 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8698 } else {
8699 /* FIXME: Single stepping a WFI insn will not halt
8700 the CPU. */
d9ba4830 8701 gen_exception(EXCP_DEBUG);
9ee6e8bb 8702 }
8aaca4c0 8703 } else {
9ee6e8bb
PB
8704 /* While branches must always occur at the end of an IT block,
8705 there are a few other things that can cause us to terminate
8706 the TB in the middel of an IT block:
8707 - Exception generating instructions (bkpt, swi, undefined).
8708 - Page boundaries.
8709 - Hardware watchpoints.
8710 Hardware breakpoints have already been handled and skip this code.
8711 */
8712 gen_set_condexec(dc);
8aaca4c0 8713 switch(dc->is_jmp) {
8aaca4c0 8714 case DISAS_NEXT:
6e256c93 8715 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8716 break;
8717 default:
8718 case DISAS_JUMP:
8719 case DISAS_UPDATE:
8720 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8721 tcg_gen_exit_tb(0);
8aaca4c0
FB
8722 break;
8723 case DISAS_TB_JUMP:
8724 /* nothing more to generate */
8725 break;
9ee6e8bb 8726 case DISAS_WFI:
d9ba4830 8727 gen_helper_wfi();
9ee6e8bb
PB
8728 break;
8729 case DISAS_SWI:
d9ba4830 8730 gen_exception(EXCP_SWI);
9ee6e8bb 8731 break;
8aaca4c0 8732 }
e50e6a20
FB
8733 if (dc->condjmp) {
8734 gen_set_label(dc->condlabel);
9ee6e8bb 8735 gen_set_condexec(dc);
6e256c93 8736 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8737 dc->condjmp = 0;
8738 }
2c0262af 8739 }
9ee6e8bb 8740done_generating:
2c0262af
FB
8741 *gen_opc_ptr = INDEX_op_end;
8742
8743#ifdef DEBUG_DISAS
e19e89a5 8744 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8745 fprintf(logfile, "----------------\n");
8746 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8747 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8748 fprintf(logfile, "\n");
8749 }
8750#endif
b5ff1b31
FB
8751 if (search_pc) {
8752 j = gen_opc_ptr - gen_opc_buf;
8753 lj++;
8754 while (lj <= j)
8755 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8756 } else {
2c0262af 8757 tb->size = dc->pc - pc_start;
b5ff1b31 8758 }
2c0262af
FB
8759 return 0;
8760}
8761
8762int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8763{
8764 return gen_intermediate_code_internal(env, tb, 0);
8765}
8766
8767int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8768{
8769 return gen_intermediate_code_internal(env, tb, 1);
8770}
8771
b5ff1b31
FB
8772static const char *cpu_mode_names[16] = {
8773 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8774 "???", "???", "???", "und", "???", "???", "???", "sys"
8775};
9ee6e8bb 8776
5fafdf24 8777void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8778 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8779 int flags)
2c0262af
FB
8780{
8781 int i;
bc380d17 8782 union {
b7bcbe95
FB
8783 uint32_t i;
8784 float s;
8785 } s0, s1;
8786 CPU_DoubleU d;
a94a6abf
PB
8787 /* ??? This assumes float64 and double have the same layout.
8788 Oh well, it's only debug dumps. */
8789 union {
8790 float64 f64;
8791 double d;
8792 } d0;
b5ff1b31 8793 uint32_t psr;
2c0262af
FB
8794
8795 for(i=0;i<16;i++) {
7fe48483 8796 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8797 if ((i % 4) == 3)
7fe48483 8798 cpu_fprintf(f, "\n");
2c0262af 8799 else
7fe48483 8800 cpu_fprintf(f, " ");
2c0262af 8801 }
b5ff1b31 8802 psr = cpsr_read(env);
687fa640
TS
8803 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8804 psr,
b5ff1b31
FB
8805 psr & (1 << 31) ? 'N' : '-',
8806 psr & (1 << 30) ? 'Z' : '-',
8807 psr & (1 << 29) ? 'C' : '-',
8808 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8809 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8810 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8811
5e3f878a 8812#if 0
b7bcbe95 8813 for (i = 0; i < 16; i++) {
8e96005d
FB
8814 d.d = env->vfp.regs[i];
8815 s0.i = d.l.lower;
8816 s1.i = d.l.upper;
a94a6abf
PB
8817 d0.f64 = d.d;
8818 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8819 i * 2, (int)s0.i, s0.s,
a94a6abf 8820 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8821 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8822 d0.d);
b7bcbe95 8823 }
40f137e1 8824 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8825#endif
2c0262af 8826}
a6b025d3 8827