]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
cmpxchg8b fix - added cmpxchg16b
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
1497c961
PB
32
33#define GEN_HELPER 1
b26eefb6 34#include "helpers.h"
2c0262af 35
9ee6e8bb
PB
36#define ENABLE_ARCH_5J 0
37#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
38#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
39#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
40#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31
FB
41
42#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
43
2c0262af
FB
44/* internal defines */
45typedef struct DisasContext {
0fa85d43 46 target_ulong pc;
2c0262af 47 int is_jmp;
e50e6a20
FB
48 /* Nonzero if this instruction has been conditionally skipped. */
49 int condjmp;
50 /* The label that will be jumped to when the instruction is skipped. */
51 int condlabel;
9ee6e8bb
PB
52 /* Thumb-2 condtional execution bits. */
53 int condexec_mask;
54 int condexec_cond;
2c0262af 55 struct TranslationBlock *tb;
8aaca4c0 56 int singlestep_enabled;
5899f386 57 int thumb;
6658ffb8 58 int is_mem;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af
FB
74
75/* XXX: move that elsewhere */
2c0262af
FB
76extern FILE *logfile;
77extern int loglevel;
78
b26eefb6 79static TCGv cpu_env;
ad69471c 80/* We reuse the same 64-bit temporaries for efficiency. */
e677137d 81static TCGv cpu_V0, cpu_V1, cpu_M0;
ad69471c 82
b26eefb6 83/* FIXME: These should be removed. */
8f8e3aa4 84static TCGv cpu_T[2];
4373f3ce 85static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
b26eefb6
PB
86
87/* initialize TCG globals. */
88void arm_translate_init(void)
89{
90 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
91
92 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
93 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
b26eefb6
PB
94}
95
96/* The code generator doesn't like lots of temporaries, so maintain our own
97 cache for reuse within a function. */
98#define MAX_TEMPS 8
99static int num_temps;
100static TCGv temps[MAX_TEMPS];
101
102/* Allocate a temporary variable. */
103static TCGv new_tmp(void)
104{
105 TCGv tmp;
106 if (num_temps == MAX_TEMPS)
107 abort();
108
109 if (GET_TCGV(temps[num_temps]))
110 return temps[num_temps++];
111
112 tmp = tcg_temp_new(TCG_TYPE_I32);
113 temps[num_temps++] = tmp;
114 return tmp;
115}
116
117/* Release a temporary variable. */
118static void dead_tmp(TCGv tmp)
119{
120 int i;
121 num_temps--;
122 i = num_temps;
123 if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
124 return;
125
126 /* Shuffle this temp to the last slot. */
127 while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
128 i--;
129 while (i < num_temps) {
130 temps[i] = temps[i + 1];
131 i++;
132 }
133 temps[i] = tmp;
134}
135
d9ba4830
PB
136static inline TCGv load_cpu_offset(int offset)
137{
138 TCGv tmp = new_tmp();
139 tcg_gen_ld_i32(tmp, cpu_env, offset);
140 return tmp;
141}
142
143#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
144
145static inline void store_cpu_offset(TCGv var, int offset)
146{
147 tcg_gen_st_i32(var, cpu_env, offset);
148 dead_tmp(var);
149}
150
151#define store_cpu_field(var, name) \
152 store_cpu_offset(var, offsetof(CPUState, name))
153
b26eefb6
PB
154/* Set a variable to the value of a CPU register. */
155static void load_reg_var(DisasContext *s, TCGv var, int reg)
156{
157 if (reg == 15) {
158 uint32_t addr;
159 /* normaly, since we updated PC, we need only to add one insn */
160 if (s->thumb)
161 addr = (long)s->pc + 2;
162 else
163 addr = (long)s->pc + 4;
164 tcg_gen_movi_i32(var, addr);
165 } else {
166 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
167 }
168}
169
170/* Create a new temporary and set it to the value of a CPU register. */
171static inline TCGv load_reg(DisasContext *s, int reg)
172{
173 TCGv tmp = new_tmp();
174 load_reg_var(s, tmp, reg);
175 return tmp;
176}
177
178/* Set a CPU register. The source must be a temporary and will be
179 marked as dead. */
180static void store_reg(DisasContext *s, int reg, TCGv var)
181{
182 if (reg == 15) {
183 tcg_gen_andi_i32(var, var, ~1);
184 s->is_jmp = DISAS_JUMP;
185 }
186 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
187 dead_tmp(var);
188}
189
190
191/* Basic operations. */
192#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6 193#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
b26eefb6
PB
194#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
195#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
196
197#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
198#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
199#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
201
8984bd2e
PB
202#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
203#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
204#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
205#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
207#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
208
b26eefb6
PB
209#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
210#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
211#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
212#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
213#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
214#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
215#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
216
217#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
218#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
219#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
220#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
221#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
222
223/* Value extensions. */
86831435
PB
224#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
225#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
226#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
227#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
228
1497c961
PB
229#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
230#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
231
232#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 233
d9ba4830
PB
234#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
235/* Set NZCV flags from the high 4 bits of var. */
236#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
237
238static void gen_exception(int excp)
239{
240 TCGv tmp = new_tmp();
241 tcg_gen_movi_i32(tmp, excp);
242 gen_helper_exception(tmp);
243 dead_tmp(tmp);
244}
245
3670669c
PB
246static void gen_smul_dual(TCGv a, TCGv b)
247{
248 TCGv tmp1 = new_tmp();
249 TCGv tmp2 = new_tmp();
3670669c
PB
250 tcg_gen_ext8s_i32(tmp1, a);
251 tcg_gen_ext8s_i32(tmp2, b);
252 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
253 dead_tmp(tmp2);
254 tcg_gen_sari_i32(a, a, 16);
255 tcg_gen_sari_i32(b, b, 16);
256 tcg_gen_mul_i32(b, b, a);
257 tcg_gen_mov_i32(a, tmp1);
258 dead_tmp(tmp1);
259}
260
261/* Byteswap each halfword. */
262static void gen_rev16(TCGv var)
263{
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_andi_i32(var, var, 0xff00ff00);
269 tcg_gen_or_i32(var, var, tmp);
270 dead_tmp(tmp);
271}
272
273/* Byteswap low halfword and sign extend. */
274static void gen_revsh(TCGv var)
275{
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_ext8s_i32(var, var);
281 tcg_gen_or_i32(var, var, tmp);
282 dead_tmp(tmp);
283}
284
285/* Unsigned bitfield extract. */
286static void gen_ubfx(TCGv var, int shift, uint32_t mask)
287{
288 if (shift)
289 tcg_gen_shri_i32(var, var, shift);
290 tcg_gen_andi_i32(var, var, mask);
291}
292
293/* Signed bitfield extract. */
294static void gen_sbfx(TCGv var, int shift, int width)
295{
296 uint32_t signbit;
297
298 if (shift)
299 tcg_gen_sari_i32(var, var, shift);
300 if (shift + width < 32) {
301 signbit = 1u << (width - 1);
302 tcg_gen_andi_i32(var, var, (1u << width) - 1);
303 tcg_gen_xori_i32(var, var, signbit);
304 tcg_gen_subi_i32(var, var, signbit);
305 }
306}
307
308/* Bitfield insertion. Insert val into base. Clobbers base and val. */
309static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
310{
3670669c 311 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
312 tcg_gen_shli_i32(val, val, shift);
313 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
314 tcg_gen_or_i32(dest, base, val);
315}
316
d9ba4830
PB
317/* Round the top 32 bits of a 64-bit value. */
318static void gen_roundqd(TCGv a, TCGv b)
3670669c 319{
d9ba4830
PB
320 tcg_gen_shri_i32(a, a, 31);
321 tcg_gen_add_i32(a, a, b);
3670669c
PB
322}
323
8f01245e
PB
324/* FIXME: Most targets have native widening multiplication.
325 It would be good to use that instead of a full wide multiply. */
5e3f878a
PB
326/* 32x32->64 multiply. Marks inputs as dead. */
327static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
328{
329 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
330 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
331
332 tcg_gen_extu_i32_i64(tmp1, a);
333 dead_tmp(a);
334 tcg_gen_extu_i32_i64(tmp2, b);
335 dead_tmp(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 return tmp1;
338}
339
340static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
341{
342 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
343 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 return tmp1;
351}
352
8f01245e
PB
353/* Unsigned 32x32->64 multiply. */
354static void gen_op_mull_T0_T1(void)
355{
356 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
357 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
358
359 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
361 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
362 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
363 tcg_gen_shri_i64(tmp1, tmp1, 32);
364 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
365}
366
367/* Signed 32x32->64 multiply. */
d9ba4830 368static void gen_imull(TCGv a, TCGv b)
8f01245e
PB
369{
370 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
371 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
372
d9ba4830
PB
373 tcg_gen_ext_i32_i64(tmp1, a);
374 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 375 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 376 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 377 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
378 tcg_gen_trunc_i64_i32(b, tmp1);
379}
380#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
381
8f01245e
PB
382/* Swap low and high halfwords. */
383static void gen_swap_half(TCGv var)
384{
385 TCGv tmp = new_tmp();
386 tcg_gen_shri_i32(tmp, var, 16);
387 tcg_gen_shli_i32(var, var, 16);
388 tcg_gen_or_i32(var, var, tmp);
3670669c 389 dead_tmp(tmp);
8f01245e
PB
390}
391
b26eefb6
PB
392/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
393 tmp = (t0 ^ t1) & 0x8000;
394 t0 &= ~0x8000;
395 t1 &= ~0x8000;
396 t0 = (t0 + t1) ^ tmp;
397 */
398
399static void gen_add16(TCGv t0, TCGv t1)
400{
401 TCGv tmp = new_tmp();
402 tcg_gen_xor_i32(tmp, t0, t1);
403 tcg_gen_andi_i32(tmp, tmp, 0x8000);
404 tcg_gen_andi_i32(t0, t0, ~0x8000);
405 tcg_gen_andi_i32(t1, t1, ~0x8000);
406 tcg_gen_add_i32(t0, t0, t1);
407 tcg_gen_xor_i32(t0, t0, tmp);
408 dead_tmp(tmp);
409 dead_tmp(t1);
410}
411
9a119ff6
PB
412#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
413
b26eefb6
PB
414/* Set CF to the top bit of var. */
415static void gen_set_CF_bit31(TCGv var)
416{
417 TCGv tmp = new_tmp();
418 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 419 gen_set_CF(var);
b26eefb6
PB
420 dead_tmp(tmp);
421}
422
423/* Set N and Z flags from var. */
424static inline void gen_logic_CC(TCGv var)
425{
6fbe23d5
PB
426 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
427 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
428}
429
430/* T0 += T1 + CF. */
431static void gen_adc_T0_T1(void)
432{
d9ba4830 433 TCGv tmp;
b26eefb6 434 gen_op_addl_T0_T1();
d9ba4830 435 tmp = load_cpu_field(CF);
b26eefb6
PB
436 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
437 dead_tmp(tmp);
438}
439
3670669c
PB
440/* dest = T0 - T1 + CF - 1. */
441static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
442{
d9ba4830 443 TCGv tmp;
3670669c 444 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 445 tmp = load_cpu_field(CF);
3670669c
PB
446 tcg_gen_add_i32(dest, dest, tmp);
447 tcg_gen_subi_i32(dest, dest, 1);
448 dead_tmp(tmp);
449}
450
451#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
452#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
453
b26eefb6
PB
454/* T0 &= ~T1. Clobbers T1. */
455/* FIXME: Implement bic natively. */
8f8e3aa4
PB
456static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
457{
458 TCGv tmp = new_tmp();
459 tcg_gen_not_i32(tmp, t1);
460 tcg_gen_and_i32(dest, t0, tmp);
461 dead_tmp(tmp);
462}
b26eefb6
PB
463static inline void gen_op_bicl_T0_T1(void)
464{
465 gen_op_notl_T1();
466 gen_op_andl_T0_T1();
467}
468
ad69471c
PB
469/* FIXME: Implement this natively. */
470#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
471
b26eefb6
PB
472/* FIXME: Implement this natively. */
473static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
474{
475 TCGv tmp;
476
477 if (i == 0)
478 return;
479
480 tmp = new_tmp();
481 tcg_gen_shri_i32(tmp, t1, i);
482 tcg_gen_shli_i32(t1, t1, 32 - i);
483 tcg_gen_or_i32(t0, t1, tmp);
484 dead_tmp(tmp);
485}
486
9a119ff6 487static void shifter_out_im(TCGv var, int shift)
b26eefb6 488{
9a119ff6
PB
489 TCGv tmp = new_tmp();
490 if (shift == 0) {
491 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 492 } else {
9a119ff6
PB
493 tcg_gen_shri_i32(tmp, var, shift);
494 if (shift != 31);
495 tcg_gen_andi_i32(tmp, tmp, 1);
496 }
497 gen_set_CF(tmp);
498 dead_tmp(tmp);
499}
b26eefb6 500
9a119ff6
PB
501/* Shift by immediate. Includes special handling for shift == 0. */
502static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
503{
504 switch (shiftop) {
505 case 0: /* LSL */
506 if (shift != 0) {
507 if (flags)
508 shifter_out_im(var, 32 - shift);
509 tcg_gen_shli_i32(var, var, shift);
510 }
511 break;
512 case 1: /* LSR */
513 if (shift == 0) {
514 if (flags) {
515 tcg_gen_shri_i32(var, var, 31);
516 gen_set_CF(var);
517 }
518 tcg_gen_movi_i32(var, 0);
519 } else {
520 if (flags)
521 shifter_out_im(var, shift - 1);
522 tcg_gen_shri_i32(var, var, shift);
523 }
524 break;
525 case 2: /* ASR */
526 if (shift == 0)
527 shift = 32;
528 if (flags)
529 shifter_out_im(var, shift - 1);
530 if (shift == 32)
531 shift = 31;
532 tcg_gen_sari_i32(var, var, shift);
533 break;
534 case 3: /* ROR/RRX */
535 if (shift != 0) {
536 if (flags)
537 shifter_out_im(var, shift - 1);
538 tcg_gen_rori_i32(var, var, shift); break;
539 } else {
d9ba4830 540 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
541 if (flags)
542 shifter_out_im(var, 0);
543 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
544 tcg_gen_shli_i32(tmp, tmp, 31);
545 tcg_gen_or_i32(var, var, tmp);
546 dead_tmp(tmp);
b26eefb6
PB
547 }
548 }
549};
550
8984bd2e
PB
551static inline void gen_arm_shift_reg(TCGv var, int shiftop,
552 TCGv shift, int flags)
553{
554 if (flags) {
555 switch (shiftop) {
556 case 0: gen_helper_shl_cc(var, var, shift); break;
557 case 1: gen_helper_shr_cc(var, var, shift); break;
558 case 2: gen_helper_sar_cc(var, var, shift); break;
559 case 3: gen_helper_ror_cc(var, var, shift); break;
560 }
561 } else {
562 switch (shiftop) {
563 case 0: gen_helper_shl(var, var, shift); break;
564 case 1: gen_helper_shr(var, var, shift); break;
565 case 2: gen_helper_sar(var, var, shift); break;
566 case 3: gen_helper_ror(var, var, shift); break;
567 }
568 }
569 dead_tmp(shift);
570}
571
6ddbc6e4
PB
572#define PAS_OP(pfx) \
573 switch (op2) { \
574 case 0: gen_pas_helper(glue(pfx,add16)); break; \
575 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
576 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
577 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
578 case 4: gen_pas_helper(glue(pfx,add8)); break; \
579 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
580 }
d9ba4830 581static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
582{
583 TCGv tmp;
584
585 switch (op1) {
586#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
587 case 1:
588 tmp = tcg_temp_new(TCG_TYPE_PTR);
589 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
590 PAS_OP(s)
591 break;
592 case 5:
593 tmp = tcg_temp_new(TCG_TYPE_PTR);
594 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
595 PAS_OP(u)
596 break;
597#undef gen_pas_helper
598#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
599 case 2:
600 PAS_OP(q);
601 break;
602 case 3:
603 PAS_OP(sh);
604 break;
605 case 6:
606 PAS_OP(uq);
607 break;
608 case 7:
609 PAS_OP(uh);
610 break;
611#undef gen_pas_helper
612 }
613}
9ee6e8bb
PB
614#undef PAS_OP
615
6ddbc6e4
PB
616/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
617#define PAS_OP(pfx) \
618 switch (op2) { \
619 case 0: gen_pas_helper(glue(pfx,add8)); break; \
620 case 1: gen_pas_helper(glue(pfx,add16)); break; \
621 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
622 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
623 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
624 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
625 }
d9ba4830 626static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
627{
628 TCGv tmp;
629
630 switch (op1) {
631#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
632 case 0:
633 tmp = tcg_temp_new(TCG_TYPE_PTR);
634 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
635 PAS_OP(s)
636 break;
637 case 4:
638 tmp = tcg_temp_new(TCG_TYPE_PTR);
639 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
640 PAS_OP(u)
641 break;
642#undef gen_pas_helper
643#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
644 case 1:
645 PAS_OP(q);
646 break;
647 case 2:
648 PAS_OP(sh);
649 break;
650 case 5:
651 PAS_OP(uq);
652 break;
653 case 6:
654 PAS_OP(uh);
655 break;
656#undef gen_pas_helper
657 }
658}
9ee6e8bb
PB
659#undef PAS_OP
660
d9ba4830
PB
661static void gen_test_cc(int cc, int label)
662{
663 TCGv tmp;
664 TCGv tmp2;
665 TCGv zero;
666 int inv;
667
668 zero = tcg_const_i32(0);
669 switch (cc) {
670 case 0: /* eq: Z */
6fbe23d5 671 tmp = load_cpu_field(ZF);
d9ba4830
PB
672 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
673 break;
674 case 1: /* ne: !Z */
6fbe23d5 675 tmp = load_cpu_field(ZF);
d9ba4830
PB
676 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
677 break;
678 case 2: /* cs: C */
679 tmp = load_cpu_field(CF);
680 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
681 break;
682 case 3: /* cc: !C */
683 tmp = load_cpu_field(CF);
684 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
685 break;
686 case 4: /* mi: N */
6fbe23d5 687 tmp = load_cpu_field(NF);
d9ba4830
PB
688 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
689 break;
690 case 5: /* pl: !N */
6fbe23d5 691 tmp = load_cpu_field(NF);
d9ba4830
PB
692 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
693 break;
694 case 6: /* vs: V */
695 tmp = load_cpu_field(VF);
696 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
697 break;
698 case 7: /* vc: !V */
699 tmp = load_cpu_field(VF);
700 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
701 break;
702 case 8: /* hi: C && !Z */
703 inv = gen_new_label();
704 tmp = load_cpu_field(CF);
705 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
706 dead_tmp(tmp);
6fbe23d5 707 tmp = load_cpu_field(ZF);
d9ba4830
PB
708 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
709 gen_set_label(inv);
710 break;
711 case 9: /* ls: !C || Z */
712 tmp = load_cpu_field(CF);
713 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
714 dead_tmp(tmp);
6fbe23d5 715 tmp = load_cpu_field(ZF);
d9ba4830
PB
716 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
717 break;
718 case 10: /* ge: N == V -> N ^ V == 0 */
719 tmp = load_cpu_field(VF);
6fbe23d5 720 tmp2 = load_cpu_field(NF);
d9ba4830
PB
721 tcg_gen_xor_i32(tmp, tmp, tmp2);
722 dead_tmp(tmp2);
723 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
724 break;
725 case 11: /* lt: N != V -> N ^ V != 0 */
726 tmp = load_cpu_field(VF);
6fbe23d5 727 tmp2 = load_cpu_field(NF);
d9ba4830
PB
728 tcg_gen_xor_i32(tmp, tmp, tmp2);
729 dead_tmp(tmp2);
730 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
731 break;
732 case 12: /* gt: !Z && N == V */
733 inv = gen_new_label();
6fbe23d5 734 tmp = load_cpu_field(ZF);
d9ba4830
PB
735 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
736 dead_tmp(tmp);
737 tmp = load_cpu_field(VF);
6fbe23d5 738 tmp2 = load_cpu_field(NF);
d9ba4830
PB
739 tcg_gen_xor_i32(tmp, tmp, tmp2);
740 dead_tmp(tmp2);
741 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
742 gen_set_label(inv);
743 break;
744 case 13: /* le: Z || N != V */
6fbe23d5 745 tmp = load_cpu_field(ZF);
d9ba4830
PB
746 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
747 dead_tmp(tmp);
748 tmp = load_cpu_field(VF);
6fbe23d5 749 tmp2 = load_cpu_field(NF);
d9ba4830
PB
750 tcg_gen_xor_i32(tmp, tmp, tmp2);
751 dead_tmp(tmp2);
752 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
753 break;
754 default:
755 fprintf(stderr, "Bad condition code 0x%x\n", cc);
756 abort();
757 }
758 dead_tmp(tmp);
759}
2c0262af
FB
760
761const uint8_t table_logic_cc[16] = {
762 1, /* and */
763 1, /* xor */
764 0, /* sub */
765 0, /* rsb */
766 0, /* add */
767 0, /* adc */
768 0, /* sbc */
769 0, /* rsc */
770 1, /* andl */
771 1, /* xorl */
772 0, /* cmp */
773 0, /* cmn */
774 1, /* orr */
775 1, /* mov */
776 1, /* bic */
777 1, /* mvn */
778};
3b46e624 779
d9ba4830
PB
780/* Set PC and Thumb state from an immediate address. */
781static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 782{
b26eefb6 783 TCGv tmp;
99c475ab 784
b26eefb6
PB
785 s->is_jmp = DISAS_UPDATE;
786 tmp = new_tmp();
d9ba4830
PB
787 if (s->thumb != (addr & 1)) {
788 tcg_gen_movi_i32(tmp, addr & 1);
789 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
790 }
791 tcg_gen_movi_i32(tmp, addr & ~1);
792 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 793 dead_tmp(tmp);
d9ba4830
PB
794}
795
796/* Set PC and Thumb state from var. var is marked as dead. */
797static inline void gen_bx(DisasContext *s, TCGv var)
798{
799 TCGv tmp;
800
801 s->is_jmp = DISAS_UPDATE;
802 tmp = new_tmp();
803 tcg_gen_andi_i32(tmp, var, 1);
804 store_cpu_field(tmp, thumb);
805 tcg_gen_andi_i32(var, var, ~1);
806 store_cpu_field(var, regs[15]);
807}
808
809/* TODO: This should be removed. Use gen_bx instead. */
810static inline void gen_bx_T0(DisasContext *s)
811{
812 TCGv tmp = new_tmp();
813 tcg_gen_mov_i32(tmp, cpu_T[0]);
814 gen_bx(s, tmp);
b26eefb6 815}
b5ff1b31
FB
816
817#if defined(CONFIG_USER_ONLY)
818#define gen_ldst(name, s) gen_op_##name##_raw()
819#else
820#define gen_ldst(name, s) do { \
6658ffb8 821 s->is_mem = 1; \
b5ff1b31
FB
822 if (IS_USER(s)) \
823 gen_op_##name##_user(); \
824 else \
825 gen_op_##name##_kernel(); \
826 } while (0)
827#endif
b0109805
PB
828static inline TCGv gen_ld8s(TCGv addr, int index)
829{
830 TCGv tmp = new_tmp();
831 tcg_gen_qemu_ld8s(tmp, addr, index);
832 return tmp;
833}
834static inline TCGv gen_ld8u(TCGv addr, int index)
835{
836 TCGv tmp = new_tmp();
837 tcg_gen_qemu_ld8u(tmp, addr, index);
838 return tmp;
839}
840static inline TCGv gen_ld16s(TCGv addr, int index)
841{
842 TCGv tmp = new_tmp();
843 tcg_gen_qemu_ld16s(tmp, addr, index);
844 return tmp;
845}
846static inline TCGv gen_ld16u(TCGv addr, int index)
847{
848 TCGv tmp = new_tmp();
849 tcg_gen_qemu_ld16u(tmp, addr, index);
850 return tmp;
851}
852static inline TCGv gen_ld32(TCGv addr, int index)
853{
854 TCGv tmp = new_tmp();
855 tcg_gen_qemu_ld32u(tmp, addr, index);
856 return tmp;
857}
858static inline void gen_st8(TCGv val, TCGv addr, int index)
859{
860 tcg_gen_qemu_st8(val, addr, index);
861 dead_tmp(val);
862}
863static inline void gen_st16(TCGv val, TCGv addr, int index)
864{
865 tcg_gen_qemu_st16(val, addr, index);
866 dead_tmp(val);
867}
868static inline void gen_st32(TCGv val, TCGv addr, int index)
869{
870 tcg_gen_qemu_st32(val, addr, index);
871 dead_tmp(val);
872}
b5ff1b31 873
2c0262af
FB
874static inline void gen_movl_T0_reg(DisasContext *s, int reg)
875{
b26eefb6 876 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
877}
878
879static inline void gen_movl_T1_reg(DisasContext *s, int reg)
880{
b26eefb6 881 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
882}
883
884static inline void gen_movl_T2_reg(DisasContext *s, int reg)
885{
b26eefb6
PB
886 load_reg_var(s, cpu_T[2], reg);
887}
888
5e3f878a
PB
889static inline void gen_set_pc_im(uint32_t val)
890{
891 TCGv tmp = new_tmp();
892 tcg_gen_movi_i32(tmp, val);
893 store_cpu_field(tmp, regs[15]);
894}
895
2c0262af
FB
896static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
897{
b26eefb6
PB
898 TCGv tmp;
899 if (reg == 15) {
900 tmp = new_tmp();
901 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
902 } else {
903 tmp = cpu_T[t];
904 }
905 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
2c0262af 906 if (reg == 15) {
b26eefb6 907 dead_tmp(tmp);
2c0262af
FB
908 s->is_jmp = DISAS_JUMP;
909 }
910}
911
912static inline void gen_movl_reg_T0(DisasContext *s, int reg)
913{
914 gen_movl_reg_TN(s, reg, 0);
915}
916
917static inline void gen_movl_reg_T1(DisasContext *s, int reg)
918{
919 gen_movl_reg_TN(s, reg, 1);
920}
921
b5ff1b31
FB
922/* Force a TB lookup after an instruction that changes the CPU state. */
923static inline void gen_lookup_tb(DisasContext *s)
924{
925 gen_op_movl_T0_im(s->pc);
926 gen_movl_reg_T0(s, 15);
927 s->is_jmp = DISAS_UPDATE;
928}
929
b0109805
PB
930static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
931 TCGv var)
2c0262af 932{
1e8d4eec 933 int val, rm, shift, shiftop;
b26eefb6 934 TCGv offset;
2c0262af
FB
935
936 if (!(insn & (1 << 25))) {
937 /* immediate */
938 val = insn & 0xfff;
939 if (!(insn & (1 << 23)))
940 val = -val;
537730b9 941 if (val != 0)
b0109805 942 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
943 } else {
944 /* shift/register */
945 rm = (insn) & 0xf;
946 shift = (insn >> 7) & 0x1f;
1e8d4eec 947 shiftop = (insn >> 5) & 3;
b26eefb6 948 offset = load_reg(s, rm);
9a119ff6 949 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 950 if (!(insn & (1 << 23)))
b0109805 951 tcg_gen_sub_i32(var, var, offset);
2c0262af 952 else
b0109805 953 tcg_gen_add_i32(var, var, offset);
b26eefb6 954 dead_tmp(offset);
2c0262af
FB
955 }
956}
957
191f9a93 958static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 959 int extra, TCGv var)
2c0262af
FB
960{
961 int val, rm;
b26eefb6 962 TCGv offset;
3b46e624 963
2c0262af
FB
964 if (insn & (1 << 22)) {
965 /* immediate */
966 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
967 if (!(insn & (1 << 23)))
968 val = -val;
18acad92 969 val += extra;
537730b9 970 if (val != 0)
b0109805 971 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
972 } else {
973 /* register */
191f9a93 974 if (extra)
b0109805 975 tcg_gen_addi_i32(var, var, extra);
2c0262af 976 rm = (insn) & 0xf;
b26eefb6 977 offset = load_reg(s, rm);
2c0262af 978 if (!(insn & (1 << 23)))
b0109805 979 tcg_gen_sub_i32(var, var, offset);
2c0262af 980 else
b0109805 981 tcg_gen_add_i32(var, var, offset);
b26eefb6 982 dead_tmp(offset);
2c0262af
FB
983 }
984}
985
4373f3ce
PB
986#define VFP_OP2(name) \
987static inline void gen_vfp_##name(int dp) \
988{ \
989 if (dp) \
990 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
991 else \
992 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
993}
994
5b340b51 995#define VFP_OP1(name) \
9ee6e8bb
PB
996static inline void gen_vfp_##name(int dp, int arg) \
997{ \
998 if (dp) \
999 gen_op_vfp_##name##d(arg); \
1000 else \
1001 gen_op_vfp_##name##s(arg); \
1002}
1003
4373f3ce
PB
1004VFP_OP2(add)
1005VFP_OP2(sub)
1006VFP_OP2(mul)
1007VFP_OP2(div)
1008
1009#undef VFP_OP2
1010
1011static inline void gen_vfp_abs(int dp)
1012{
1013 if (dp)
1014 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1015 else
1016 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1017}
1018
1019static inline void gen_vfp_neg(int dp)
1020{
1021 if (dp)
1022 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1023 else
1024 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1025}
1026
1027static inline void gen_vfp_sqrt(int dp)
1028{
1029 if (dp)
1030 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1031 else
1032 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1033}
1034
1035static inline void gen_vfp_cmp(int dp)
1036{
1037 if (dp)
1038 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1039 else
1040 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1041}
1042
1043static inline void gen_vfp_cmpe(int dp)
1044{
1045 if (dp)
1046 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1047 else
1048 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1049}
1050
1051static inline void gen_vfp_F1_ld0(int dp)
1052{
1053 if (dp)
5b340b51 1054 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1055 else
5b340b51 1056 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1057}
1058
1059static inline void gen_vfp_uito(int dp)
1060{
1061 if (dp)
1062 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1063 else
1064 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1065}
1066
1067static inline void gen_vfp_sito(int dp)
1068{
1069 if (dp)
66230e0d 1070 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1071 else
66230e0d 1072 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1073}
1074
1075static inline void gen_vfp_toui(int dp)
1076{
1077 if (dp)
1078 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1079 else
1080 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1081}
1082
1083static inline void gen_vfp_touiz(int dp)
1084{
1085 if (dp)
1086 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1087 else
1088 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1089}
1090
1091static inline void gen_vfp_tosi(int dp)
1092{
1093 if (dp)
1094 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1095 else
1096 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1097}
1098
1099static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1100{
1101 if (dp)
4373f3ce 1102 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1103 else
4373f3ce
PB
1104 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1105}
1106
/* Emit a VFP fixed-point conversion on F0 with the given fractional
   shift; the helpers take cpu_env.
   NOTE(review): the tcg_const_i32() temporary is not explicitly freed
   here -- confirm this matches the translator's temp lifetime rules. */
#define VFP_GEN_FIX(name) \
static inline void gen_vfp_##name(int dp, int shift) \
{ \
    if (dp) \
        gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
    else \
        gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
}
/* Naming (inferred from the helper names): to{s,u}{h,l} convert F0 to
   signed/unsigned 16/32-bit fixed point; {s,u}{h,l}to convert back. */
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
VFP_GEN_FIX(touh)
VFP_GEN_FIX(toul)
VFP_GEN_FIX(shto)
VFP_GEN_FIX(slto)
VFP_GEN_FIX(uhto)
VFP_GEN_FIX(ulto)
#undef VFP_GEN_FIX
9ee6e8bb 1124
b5ff1b31
FB
1125static inline void gen_vfp_ld(DisasContext *s, int dp)
1126{
1127 if (dp)
4373f3ce 1128 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1129 else
4373f3ce 1130 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1131}
1132
1133static inline void gen_vfp_st(DisasContext *s, int dp)
1134{
1135 if (dp)
4373f3ce 1136 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1137 else
4373f3ce 1138 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1139}
1140
8e96005d
FB
1141static inline long
1142vfp_reg_offset (int dp, int reg)
1143{
1144 if (dp)
1145 return offsetof(CPUARMState, vfp.regs[reg]);
1146 else if (reg & 1) {
1147 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1148 + offsetof(CPU_DoubleU, l.upper);
1149 } else {
1150 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1151 + offsetof(CPU_DoubleU, l.lower);
1152 }
1153}
9ee6e8bb
PB
1154
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  NEON register
   'reg' maps onto single-precision slots 2*reg and 2*reg+1.  */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
1164
ad69471c
PB
/* FIXME: Remove these. */
/* Legacy T0/T1-based NEON register access; superseded by
   neon_load_reg()/neon_store_reg() below. */
#define neon_T0 cpu_T[0]
#define neon_T1 cpu_T[1]
/* Load/store one 32-bit pass of NEON register 'reg' into/from T0 or T1. */
#define NEON_GET_REG(T, reg, n) \
  tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) \
  tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1172
8f8e3aa4
PB
1173static TCGv neon_load_reg(int reg, int pass)
1174{
1175 TCGv tmp = new_tmp();
1176 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1177 return tmp;
1178}
1179
/* Write 'var' to one 32-bit pass of a NEON register.  Consumes the
   temporary (calls dead_tmp on it). */
static void neon_store_reg(int reg, int pass, TCGv var)
{
    tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
    dead_tmp(var);
}
1185
ad69471c
PB
/* Load a full 64-bit NEON/VFP double register into 'var'. */
static inline void neon_load_reg64(TCGv var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1190
/* Store 'var' into a full 64-bit NEON/VFP double register. */
static inline void neon_store_reg64(TCGv var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
}
1195
4373f3ce
PB
/* Float loads/stores are bitwise-identical to the integer ops of the
   same width; alias them for readability in the VFP code below. */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1200
b7bcbe95
FB
1201static inline void gen_mov_F0_vreg(int dp, int reg)
1202{
1203 if (dp)
4373f3ce 1204 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1205 else
4373f3ce 1206 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1207}
1208
1209static inline void gen_mov_F1_vreg(int dp, int reg)
1210{
1211 if (dp)
4373f3ce 1212 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1213 else
4373f3ce 1214 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1215}
1216
1217static inline void gen_mov_vreg_F0(int dp, int reg)
1218{
1219 if (dp)
4373f3ce 1220 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1221 else
4373f3ce 1222 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1223}
1224
18c9b560
AZ
1225#define ARM_CP_RW_BIT (1 << 20)
1226
e677137d
PB
/* Load 64-bit iwMMXt data register wRn into 'var'. */
static inline void iwmmxt_load_reg(TCGv var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1231
/* Store 'var' into 64-bit iwMMXt data register wRn. */
static inline void iwmmxt_store_reg(TCGv var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
}
1236
/* Write T0 to iwMMXt control register wCx[reg]. */
static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
1241
/* Read iwMMXt control register wCx[reg] into T0. */
static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
1246
/* Read iwMMXt control register wCx[reg] into T1. */
static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
{
    tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
}
1251
/* Write the 64-bit M0 working register back to wRn. */
static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
{
    iwmmxt_store_reg(cpu_M0, rn);
}
1256
/* Load wRn into the 64-bit M0 working register. */
static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_M0, rn);
}
1261
/* M0 |= wRn (bitwise OR of the whole 64-bit register). */
static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
}
1267
/* M0 &= wRn (bitwise AND of the whole 64-bit register). */
static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
}
1273
/* M0 ^= wRn (bitwise XOR of the whole 64-bit register). */
static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
}
1279
/* Emit a two-operand iwMMXt op: M0 = helper_name(M0, wRn). */
#define IWMMXT_OP(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}

/* As IWMMXT_OP, but the helper also receives cpu_env (for helpers that
   access CPU state beyond the operand registers). */
#define IWMMXT_OP_ENV(name) \
static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
{ \
    iwmmxt_load_reg(cpu_V1, rn); \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
}

/* Instantiate an env-taking op for each element size:
   b(yte), w(ord, 16-bit), l(ong, 32-bit). */
#define IWMMXT_OP_ENV_SIZE(name) \
IWMMXT_OP_ENV(name##b) \
IWMMXT_OP_ENV(name##w) \
IWMMXT_OP_ENV(name##l)

/* One-operand variant: M0 = helper_name(env, M0). */
#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
    gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}

/* Multiply, multiply-accumulate and sum-of-absolute-differences ops. */
IWMMXT_OP(maddsq)
IWMMXT_OP(madduq)
IWMMXT_OP(sadb)
IWMMXT_OP(sadw)
IWMMXT_OP(mulslw)
IWMMXT_OP(mulshw)
IWMMXT_OP(mululw)
IWMMXT_OP(muluhw)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)

/* Two-register interleave (unpack) of low/high halves, per size. */
IWMMXT_OP_ENV_SIZE(unpackl)
IWMMXT_OP_ENV_SIZE(unpackh)

/* Single-register unpack-and-extend forms
   (u = unsigned, s = signed extension; inferred from helper names). */
IWMMXT_OP_ENV1(unpacklub)
IWMMXT_OP_ENV1(unpackluw)
IWMMXT_OP_ENV1(unpacklul)
IWMMXT_OP_ENV1(unpackhub)
IWMMXT_OP_ENV1(unpackhuw)
IWMMXT_OP_ENV1(unpackhul)
IWMMXT_OP_ENV1(unpacklsb)
IWMMXT_OP_ENV1(unpacklsw)
IWMMXT_OP_ENV1(unpacklsl)
IWMMXT_OP_ENV1(unpackhsb)
IWMMXT_OP_ENV1(unpackhsw)
IWMMXT_OP_ENV1(unpackhsl)

/* Element-wise comparisons. */
IWMMXT_OP_ENV_SIZE(cmpeq)
IWMMXT_OP_ENV_SIZE(cmpgtu)
IWMMXT_OP_ENV_SIZE(cmpgts)

/* Element-wise min/max, signed and unsigned. */
IWMMXT_OP_ENV_SIZE(mins)
IWMMXT_OP_ENV_SIZE(minu)
IWMMXT_OP_ENV_SIZE(maxs)
IWMMXT_OP_ENV_SIZE(maxu)

/* Element-wise add/subtract: n = no saturation, u = unsigned saturate,
   s = signed saturate (inferred from helper names). */
IWMMXT_OP_ENV_SIZE(subn)
IWMMXT_OP_ENV_SIZE(addn)
IWMMXT_OP_ENV_SIZE(subu)
IWMMXT_OP_ENV_SIZE(addu)
IWMMXT_OP_ENV_SIZE(subs)
IWMMXT_OP_ENV_SIZE(adds)

/* Averaging ops (WAVG2): byte/word variants with rounding bit 0/1. */
IWMMXT_OP_ENV(avgb0)
IWMMXT_OP_ENV(avgb1)
IWMMXT_OP_ENV(avgw0)
IWMMXT_OP_ENV(avgw1)

IWMMXT_OP(msadb)

/* Pack with saturation (u = unsigned, s = signed). */
IWMMXT_OP_ENV(packuw)
IWMMXT_OP_ENV(packul)
IWMMXT_OP_ENV(packuq)
IWMMXT_OP_ENV(packsw)
IWMMXT_OP_ENV(packsl)
IWMMXT_OP_ENV(packsq)
1361
/* M0 = iwmmxt_muladdsl helper applied to (M0, T0, T1). */
static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
1366
/* M0 = iwmmxt_muladdsw helper applied to (M0, T0, T1). */
static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
1371
/* M0 = iwmmxt_muladdswl helper applied to (M0, T0, T1). */
static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
{
    gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
}
1376
/* M0 = align helper over the (M0, wRn) pair with shift amount in T0
   (used for WALIGN*). */
static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
}
1382
/* Insert T0 into M0 under mask T1 at bit position 'shift' (TINSR).
   NOTE(review): the tcg_const_i32 temporary is not explicitly freed
   here -- confirm against the translator's temp lifetime rules. */
static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
{
    TCGv tmp = tcg_const_i32(shift);
    gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
}
1388
/* Extract a byte from M0 at bit offset 'shift', sign-extend into T0.
   Clobbers M0 (shifts it in place). */
static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
}
1395
/* Extract a halfword from M0 at bit offset 'shift', sign-extend into T0.
   Clobbers M0 (shifts it in place). */
static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
}
1402
/* Extract an unsigned field from M0 at bit offset 'shift' into T0,
   masked with 'mask' (mask of ~0u skips the AND).  Clobbers M0. */
static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
{
    tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
    tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
    if (mask != ~0u)
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
}
1410
1411static void gen_op_iwmmxt_set_mup(void)
1412{
1413 TCGv tmp;
1414 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1415 tcg_gen_ori_i32(tmp, tmp, 2);
1416 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1417}
1418
1419static void gen_op_iwmmxt_set_cup(void)
1420{
1421 TCGv tmp;
1422 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1423 tcg_gen_ori_i32(tmp, tmp, 1);
1424 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1425}
1426
1427static void gen_op_iwmmxt_setpsr_nz(void)
1428{
1429 TCGv tmp = new_tmp();
1430 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1431 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1432}
1433
/* M0 += low 32 bits of wRn (zero-extended to 64 bits before the add). */
static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
{
    iwmmxt_load_reg(cpu_V1, rn);
    tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
    tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
}
1440
1441
1442static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1443{
1444 iwmmxt_load_reg(cpu_V0, rn);
1445 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1446 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1447 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1448}
1449
1450static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1451{
1452 tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
1453 tcg_gen_extu_i32_i64(cpu_V1, cpu_T[0]);
1454 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
1455 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
1456 iwmmxt_store_reg(cpu_V0, rn);
1457}
1458
18c9b560
AZ
1459static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1460{
1461 int rd;
1462 uint32_t offset;
1463
1464 rd = (insn >> 16) & 0xf;
1465 gen_movl_T1_reg(s, rd);
1466
1467 offset = (insn & 0xff) << ((insn >> 7) & 2);
1468 if (insn & (1 << 24)) {
1469 /* Pre indexed */
1470 if (insn & (1 << 23))
1471 gen_op_addl_T1_im(offset);
1472 else
1473 gen_op_addl_T1_im(-offset);
1474
1475 if (insn & (1 << 21))
1476 gen_movl_reg_T1(s, rd);
1477 } else if (insn & (1 << 21)) {
1478 /* Post indexed */
1479 if (insn & (1 << 23))
1480 gen_op_movl_T0_im(offset);
1481 else
1482 gen_op_movl_T0_im(- offset);
1483 gen_op_addl_T0_T1();
1484 gen_movl_reg_T0(s, rd);
1485 } else if (!(insn & (1 << 23)))
1486 return 1;
1487 return 0;
1488}
1489
1490static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1491{
1492 int rd = (insn >> 0) & 0xf;
1493
1494 if (insn & (1 << 8))
1495 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1496 return 1;
1497 else
1498 gen_op_iwmmxt_movl_T0_wCx(rd);
1499 else
e677137d 1500 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1501
1502 gen_op_movl_T1_im(mask);
1503 gen_op_andl_T0_T1();
1504 return 0;
1505}
1506
1507/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1508 (ie. an undefined instruction). */
1509static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1510{
1511 int rd, wrd;
1512 int rdhi, rdlo, rd0, rd1, i;
b0109805 1513 TCGv tmp;
18c9b560
AZ
1514
1515 if ((insn & 0x0e000e00) == 0x0c000000) {
1516 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1517 wrd = insn & 0xf;
1518 rdlo = (insn >> 12) & 0xf;
1519 rdhi = (insn >> 16) & 0xf;
1520 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1521 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1522 gen_movl_reg_T0(s, rdlo);
1523 gen_movl_reg_T1(s, rdhi);
1524 } else { /* TMCRR */
1525 gen_movl_T0_reg(s, rdlo);
1526 gen_movl_T1_reg(s, rdhi);
e677137d 1527 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1528 gen_op_iwmmxt_set_mup();
1529 }
1530 return 0;
1531 }
1532
1533 wrd = (insn >> 12) & 0xf;
1534 if (gen_iwmmxt_address(s, insn))
1535 return 1;
1536 if (insn & ARM_CP_RW_BIT) {
1537 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1538 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1539 tcg_gen_mov_i32(cpu_T[0], tmp);
1540 dead_tmp(tmp);
18c9b560
AZ
1541 gen_op_iwmmxt_movl_wCx_T0(wrd);
1542 } else {
e677137d
PB
1543 i = 1;
1544 if (insn & (1 << 8)) {
1545 if (insn & (1 << 22)) { /* WLDRD */
1546 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1547 i = 0;
1548 } else { /* WLDRW wRd */
1549 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1550 }
1551 } else {
1552 if (insn & (1 << 22)) { /* WLDRH */
1553 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1554 } else { /* WLDRB */
1555 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1556 }
1557 }
1558 if (i) {
1559 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1560 dead_tmp(tmp);
1561 }
18c9b560
AZ
1562 gen_op_iwmmxt_movq_wRn_M0(wrd);
1563 }
1564 } else {
1565 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1566 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1567 tmp = new_tmp();
1568 tcg_gen_mov_i32(tmp, cpu_T[0]);
1569 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1570 } else {
1571 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1572 tmp = new_tmp();
1573 if (insn & (1 << 8)) {
1574 if (insn & (1 << 22)) { /* WSTRD */
1575 dead_tmp(tmp);
1576 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1577 } else { /* WSTRW wRd */
1578 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1579 gen_st32(tmp, cpu_T[1], IS_USER(s));
1580 }
1581 } else {
1582 if (insn & (1 << 22)) { /* WSTRH */
1583 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1584 gen_st16(tmp, cpu_T[1], IS_USER(s));
1585 } else { /* WSTRB */
1586 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1587 gen_st8(tmp, cpu_T[1], IS_USER(s));
1588 }
1589 }
18c9b560
AZ
1590 }
1591 }
1592 return 0;
1593 }
1594
1595 if ((insn & 0x0f000000) != 0x0e000000)
1596 return 1;
1597
1598 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1599 case 0x000: /* WOR */
1600 wrd = (insn >> 12) & 0xf;
1601 rd0 = (insn >> 0) & 0xf;
1602 rd1 = (insn >> 16) & 0xf;
1603 gen_op_iwmmxt_movq_M0_wRn(rd0);
1604 gen_op_iwmmxt_orq_M0_wRn(rd1);
1605 gen_op_iwmmxt_setpsr_nz();
1606 gen_op_iwmmxt_movq_wRn_M0(wrd);
1607 gen_op_iwmmxt_set_mup();
1608 gen_op_iwmmxt_set_cup();
1609 break;
1610 case 0x011: /* TMCR */
1611 if (insn & 0xf)
1612 return 1;
1613 rd = (insn >> 12) & 0xf;
1614 wrd = (insn >> 16) & 0xf;
1615 switch (wrd) {
1616 case ARM_IWMMXT_wCID:
1617 case ARM_IWMMXT_wCASF:
1618 break;
1619 case ARM_IWMMXT_wCon:
1620 gen_op_iwmmxt_set_cup();
1621 /* Fall through. */
1622 case ARM_IWMMXT_wCSSF:
1623 gen_op_iwmmxt_movl_T0_wCx(wrd);
1624 gen_movl_T1_reg(s, rd);
1625 gen_op_bicl_T0_T1();
1626 gen_op_iwmmxt_movl_wCx_T0(wrd);
1627 break;
1628 case ARM_IWMMXT_wCGR0:
1629 case ARM_IWMMXT_wCGR1:
1630 case ARM_IWMMXT_wCGR2:
1631 case ARM_IWMMXT_wCGR3:
1632 gen_op_iwmmxt_set_cup();
1633 gen_movl_reg_T0(s, rd);
1634 gen_op_iwmmxt_movl_wCx_T0(wrd);
1635 break;
1636 default:
1637 return 1;
1638 }
1639 break;
1640 case 0x100: /* WXOR */
1641 wrd = (insn >> 12) & 0xf;
1642 rd0 = (insn >> 0) & 0xf;
1643 rd1 = (insn >> 16) & 0xf;
1644 gen_op_iwmmxt_movq_M0_wRn(rd0);
1645 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1646 gen_op_iwmmxt_setpsr_nz();
1647 gen_op_iwmmxt_movq_wRn_M0(wrd);
1648 gen_op_iwmmxt_set_mup();
1649 gen_op_iwmmxt_set_cup();
1650 break;
1651 case 0x111: /* TMRC */
1652 if (insn & 0xf)
1653 return 1;
1654 rd = (insn >> 12) & 0xf;
1655 wrd = (insn >> 16) & 0xf;
1656 gen_op_iwmmxt_movl_T0_wCx(wrd);
1657 gen_movl_reg_T0(s, rd);
1658 break;
1659 case 0x300: /* WANDN */
1660 wrd = (insn >> 12) & 0xf;
1661 rd0 = (insn >> 0) & 0xf;
1662 rd1 = (insn >> 16) & 0xf;
1663 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1664 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1665 gen_op_iwmmxt_andq_M0_wRn(rd1);
1666 gen_op_iwmmxt_setpsr_nz();
1667 gen_op_iwmmxt_movq_wRn_M0(wrd);
1668 gen_op_iwmmxt_set_mup();
1669 gen_op_iwmmxt_set_cup();
1670 break;
1671 case 0x200: /* WAND */
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 0) & 0xf;
1674 rd1 = (insn >> 16) & 0xf;
1675 gen_op_iwmmxt_movq_M0_wRn(rd0);
1676 gen_op_iwmmxt_andq_M0_wRn(rd1);
1677 gen_op_iwmmxt_setpsr_nz();
1678 gen_op_iwmmxt_movq_wRn_M0(wrd);
1679 gen_op_iwmmxt_set_mup();
1680 gen_op_iwmmxt_set_cup();
1681 break;
1682 case 0x810: case 0xa10: /* WMADD */
1683 wrd = (insn >> 12) & 0xf;
1684 rd0 = (insn >> 0) & 0xf;
1685 rd1 = (insn >> 16) & 0xf;
1686 gen_op_iwmmxt_movq_M0_wRn(rd0);
1687 if (insn & (1 << 21))
1688 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1689 else
1690 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1691 gen_op_iwmmxt_movq_wRn_M0(wrd);
1692 gen_op_iwmmxt_set_mup();
1693 break;
1694 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1695 wrd = (insn >> 12) & 0xf;
1696 rd0 = (insn >> 16) & 0xf;
1697 rd1 = (insn >> 0) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0);
1699 switch ((insn >> 22) & 3) {
1700 case 0:
1701 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1702 break;
1703 case 1:
1704 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1705 break;
1706 case 2:
1707 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1708 break;
1709 case 3:
1710 return 1;
1711 }
1712 gen_op_iwmmxt_movq_wRn_M0(wrd);
1713 gen_op_iwmmxt_set_mup();
1714 gen_op_iwmmxt_set_cup();
1715 break;
1716 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1717 wrd = (insn >> 12) & 0xf;
1718 rd0 = (insn >> 16) & 0xf;
1719 rd1 = (insn >> 0) & 0xf;
1720 gen_op_iwmmxt_movq_M0_wRn(rd0);
1721 switch ((insn >> 22) & 3) {
1722 case 0:
1723 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1724 break;
1725 case 1:
1726 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1727 break;
1728 case 2:
1729 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1730 break;
1731 case 3:
1732 return 1;
1733 }
1734 gen_op_iwmmxt_movq_wRn_M0(wrd);
1735 gen_op_iwmmxt_set_mup();
1736 gen_op_iwmmxt_set_cup();
1737 break;
1738 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1739 wrd = (insn >> 12) & 0xf;
1740 rd0 = (insn >> 16) & 0xf;
1741 rd1 = (insn >> 0) & 0xf;
1742 gen_op_iwmmxt_movq_M0_wRn(rd0);
1743 if (insn & (1 << 22))
1744 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1745 else
1746 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1747 if (!(insn & (1 << 20)))
1748 gen_op_iwmmxt_addl_M0_wRn(wrd);
1749 gen_op_iwmmxt_movq_wRn_M0(wrd);
1750 gen_op_iwmmxt_set_mup();
1751 break;
1752 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1753 wrd = (insn >> 12) & 0xf;
1754 rd0 = (insn >> 16) & 0xf;
1755 rd1 = (insn >> 0) & 0xf;
1756 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1757 if (insn & (1 << 21)) {
1758 if (insn & (1 << 20))
1759 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1760 else
1761 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1762 } else {
1763 if (insn & (1 << 20))
1764 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1765 else
1766 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1767 }
18c9b560
AZ
1768 gen_op_iwmmxt_movq_wRn_M0(wrd);
1769 gen_op_iwmmxt_set_mup();
1770 break;
1771 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1772 wrd = (insn >> 12) & 0xf;
1773 rd0 = (insn >> 16) & 0xf;
1774 rd1 = (insn >> 0) & 0xf;
1775 gen_op_iwmmxt_movq_M0_wRn(rd0);
1776 if (insn & (1 << 21))
1777 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1778 else
1779 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1780 if (!(insn & (1 << 20))) {
e677137d
PB
1781 iwmmxt_load_reg(cpu_V1, wrd);
1782 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1783 }
1784 gen_op_iwmmxt_movq_wRn_M0(wrd);
1785 gen_op_iwmmxt_set_mup();
1786 break;
1787 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1788 wrd = (insn >> 12) & 0xf;
1789 rd0 = (insn >> 16) & 0xf;
1790 rd1 = (insn >> 0) & 0xf;
1791 gen_op_iwmmxt_movq_M0_wRn(rd0);
1792 switch ((insn >> 22) & 3) {
1793 case 0:
1794 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1795 break;
1796 case 1:
1797 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1798 break;
1799 case 2:
1800 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1801 break;
1802 case 3:
1803 return 1;
1804 }
1805 gen_op_iwmmxt_movq_wRn_M0(wrd);
1806 gen_op_iwmmxt_set_mup();
1807 gen_op_iwmmxt_set_cup();
1808 break;
1809 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1810 wrd = (insn >> 12) & 0xf;
1811 rd0 = (insn >> 16) & 0xf;
1812 rd1 = (insn >> 0) & 0xf;
1813 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1814 if (insn & (1 << 22)) {
1815 if (insn & (1 << 20))
1816 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1817 else
1818 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1819 } else {
1820 if (insn & (1 << 20))
1821 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1822 else
1823 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1824 }
18c9b560
AZ
1825 gen_op_iwmmxt_movq_wRn_M0(wrd);
1826 gen_op_iwmmxt_set_mup();
1827 gen_op_iwmmxt_set_cup();
1828 break;
1829 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1830 wrd = (insn >> 12) & 0xf;
1831 rd0 = (insn >> 16) & 0xf;
1832 rd1 = (insn >> 0) & 0xf;
1833 gen_op_iwmmxt_movq_M0_wRn(rd0);
1834 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1835 gen_op_movl_T1_im(7);
1836 gen_op_andl_T0_T1();
1837 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1838 gen_op_iwmmxt_movq_wRn_M0(wrd);
1839 gen_op_iwmmxt_set_mup();
1840 break;
1841 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1842 rd = (insn >> 12) & 0xf;
1843 wrd = (insn >> 16) & 0xf;
1844 gen_movl_T0_reg(s, rd);
1845 gen_op_iwmmxt_movq_M0_wRn(wrd);
1846 switch ((insn >> 6) & 3) {
1847 case 0:
1848 gen_op_movl_T1_im(0xff);
1849 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1850 break;
1851 case 1:
1852 gen_op_movl_T1_im(0xffff);
1853 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1854 break;
1855 case 2:
1856 gen_op_movl_T1_im(0xffffffff);
1857 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1858 break;
1859 case 3:
1860 return 1;
1861 }
1862 gen_op_iwmmxt_movq_wRn_M0(wrd);
1863 gen_op_iwmmxt_set_mup();
1864 break;
1865 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1866 rd = (insn >> 12) & 0xf;
1867 wrd = (insn >> 16) & 0xf;
1868 if (rd == 15)
1869 return 1;
1870 gen_op_iwmmxt_movq_M0_wRn(wrd);
1871 switch ((insn >> 22) & 3) {
1872 case 0:
1873 if (insn & 8)
1874 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1875 else {
e677137d 1876 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1877 }
1878 break;
1879 case 1:
1880 if (insn & 8)
1881 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1882 else {
e677137d 1883 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1884 }
1885 break;
1886 case 2:
e677137d 1887 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1888 break;
1889 case 3:
1890 return 1;
1891 }
b26eefb6 1892 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1893 break;
1894 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1895 if ((insn & 0x000ff008) != 0x0003f000)
1896 return 1;
1897 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1898 switch ((insn >> 22) & 3) {
1899 case 0:
1900 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1901 break;
1902 case 1:
1903 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1904 break;
1905 case 2:
1906 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1907 break;
1908 case 3:
1909 return 1;
1910 }
1911 gen_op_shll_T1_im(28);
d9ba4830 1912 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1913 break;
1914 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1915 rd = (insn >> 12) & 0xf;
1916 wrd = (insn >> 16) & 0xf;
1917 gen_movl_T0_reg(s, rd);
1918 switch ((insn >> 6) & 3) {
1919 case 0:
e677137d 1920 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1921 break;
1922 case 1:
e677137d 1923 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1924 break;
1925 case 2:
e677137d 1926 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1927 break;
1928 case 3:
1929 return 1;
1930 }
1931 gen_op_iwmmxt_movq_wRn_M0(wrd);
1932 gen_op_iwmmxt_set_mup();
1933 break;
1934 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1935 if ((insn & 0x000ff00f) != 0x0003f000)
1936 return 1;
1937 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1938 switch ((insn >> 22) & 3) {
1939 case 0:
1940 for (i = 0; i < 7; i ++) {
1941 gen_op_shll_T1_im(4);
1942 gen_op_andl_T0_T1();
1943 }
1944 break;
1945 case 1:
1946 for (i = 0; i < 3; i ++) {
1947 gen_op_shll_T1_im(8);
1948 gen_op_andl_T0_T1();
1949 }
1950 break;
1951 case 2:
1952 gen_op_shll_T1_im(16);
1953 gen_op_andl_T0_T1();
1954 break;
1955 case 3:
1956 return 1;
1957 }
d9ba4830 1958 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1959 break;
1960 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1961 wrd = (insn >> 12) & 0xf;
1962 rd0 = (insn >> 16) & 0xf;
1963 gen_op_iwmmxt_movq_M0_wRn(rd0);
1964 switch ((insn >> 22) & 3) {
1965 case 0:
e677137d 1966 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1967 break;
1968 case 1:
e677137d 1969 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1970 break;
1971 case 2:
e677137d 1972 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1973 break;
1974 case 3:
1975 return 1;
1976 }
1977 gen_op_iwmmxt_movq_wRn_M0(wrd);
1978 gen_op_iwmmxt_set_mup();
1979 break;
1980 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1981 if ((insn & 0x000ff00f) != 0x0003f000)
1982 return 1;
1983 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1984 switch ((insn >> 22) & 3) {
1985 case 0:
1986 for (i = 0; i < 7; i ++) {
1987 gen_op_shll_T1_im(4);
1988 gen_op_orl_T0_T1();
1989 }
1990 break;
1991 case 1:
1992 for (i = 0; i < 3; i ++) {
1993 gen_op_shll_T1_im(8);
1994 gen_op_orl_T0_T1();
1995 }
1996 break;
1997 case 2:
1998 gen_op_shll_T1_im(16);
1999 gen_op_orl_T0_T1();
2000 break;
2001 case 3:
2002 return 1;
2003 }
d9ba4830 2004 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
2005 break;
2006 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2007 rd = (insn >> 12) & 0xf;
2008 rd0 = (insn >> 16) & 0xf;
2009 if ((insn & 0xf) != 0)
2010 return 1;
2011 gen_op_iwmmxt_movq_M0_wRn(rd0);
2012 switch ((insn >> 22) & 3) {
2013 case 0:
e677137d 2014 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
2015 break;
2016 case 1:
e677137d 2017 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
2018 break;
2019 case 2:
e677137d 2020 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
2021 break;
2022 case 3:
2023 return 1;
2024 }
2025 gen_movl_reg_T0(s, rd);
2026 break;
2027 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2028 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2029 wrd = (insn >> 12) & 0xf;
2030 rd0 = (insn >> 16) & 0xf;
2031 rd1 = (insn >> 0) & 0xf;
2032 gen_op_iwmmxt_movq_M0_wRn(rd0);
2033 switch ((insn >> 22) & 3) {
2034 case 0:
2035 if (insn & (1 << 21))
2036 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2037 else
2038 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2039 break;
2040 case 1:
2041 if (insn & (1 << 21))
2042 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2043 else
2044 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2045 break;
2046 case 2:
2047 if (insn & (1 << 21))
2048 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2049 else
2050 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2051 break;
2052 case 3:
2053 return 1;
2054 }
2055 gen_op_iwmmxt_movq_wRn_M0(wrd);
2056 gen_op_iwmmxt_set_mup();
2057 gen_op_iwmmxt_set_cup();
2058 break;
2059 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2060 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2061 wrd = (insn >> 12) & 0xf;
2062 rd0 = (insn >> 16) & 0xf;
2063 gen_op_iwmmxt_movq_M0_wRn(rd0);
2064 switch ((insn >> 22) & 3) {
2065 case 0:
2066 if (insn & (1 << 21))
2067 gen_op_iwmmxt_unpacklsb_M0();
2068 else
2069 gen_op_iwmmxt_unpacklub_M0();
2070 break;
2071 case 1:
2072 if (insn & (1 << 21))
2073 gen_op_iwmmxt_unpacklsw_M0();
2074 else
2075 gen_op_iwmmxt_unpackluw_M0();
2076 break;
2077 case 2:
2078 if (insn & (1 << 21))
2079 gen_op_iwmmxt_unpacklsl_M0();
2080 else
2081 gen_op_iwmmxt_unpacklul_M0();
2082 break;
2083 case 3:
2084 return 1;
2085 }
2086 gen_op_iwmmxt_movq_wRn_M0(wrd);
2087 gen_op_iwmmxt_set_mup();
2088 gen_op_iwmmxt_set_cup();
2089 break;
2090 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2091 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2092 wrd = (insn >> 12) & 0xf;
2093 rd0 = (insn >> 16) & 0xf;
2094 gen_op_iwmmxt_movq_M0_wRn(rd0);
2095 switch ((insn >> 22) & 3) {
2096 case 0:
2097 if (insn & (1 << 21))
2098 gen_op_iwmmxt_unpackhsb_M0();
2099 else
2100 gen_op_iwmmxt_unpackhub_M0();
2101 break;
2102 case 1:
2103 if (insn & (1 << 21))
2104 gen_op_iwmmxt_unpackhsw_M0();
2105 else
2106 gen_op_iwmmxt_unpackhuw_M0();
2107 break;
2108 case 2:
2109 if (insn & (1 << 21))
2110 gen_op_iwmmxt_unpackhsl_M0();
2111 else
2112 gen_op_iwmmxt_unpackhul_M0();
2113 break;
2114 case 3:
2115 return 1;
2116 }
2117 gen_op_iwmmxt_movq_wRn_M0(wrd);
2118 gen_op_iwmmxt_set_mup();
2119 gen_op_iwmmxt_set_cup();
2120 break;
2121 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2122 case 0x214: case 0x614: case 0xa14: case 0xe14:
2123 wrd = (insn >> 12) & 0xf;
2124 rd0 = (insn >> 16) & 0xf;
2125 gen_op_iwmmxt_movq_M0_wRn(rd0);
2126 if (gen_iwmmxt_shift(insn, 0xff))
2127 return 1;
2128 switch ((insn >> 22) & 3) {
2129 case 0:
2130 return 1;
2131 case 1:
e677137d 2132 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2133 break;
2134 case 2:
e677137d 2135 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2136 break;
2137 case 3:
e677137d 2138 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2139 break;
2140 }
2141 gen_op_iwmmxt_movq_wRn_M0(wrd);
2142 gen_op_iwmmxt_set_mup();
2143 gen_op_iwmmxt_set_cup();
2144 break;
2145 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2146 case 0x014: case 0x414: case 0x814: case 0xc14:
2147 wrd = (insn >> 12) & 0xf;
2148 rd0 = (insn >> 16) & 0xf;
2149 gen_op_iwmmxt_movq_M0_wRn(rd0);
2150 if (gen_iwmmxt_shift(insn, 0xff))
2151 return 1;
2152 switch ((insn >> 22) & 3) {
2153 case 0:
2154 return 1;
2155 case 1:
e677137d 2156 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2157 break;
2158 case 2:
e677137d 2159 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2160 break;
2161 case 3:
e677137d 2162 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2163 break;
2164 }
2165 gen_op_iwmmxt_movq_wRn_M0(wrd);
2166 gen_op_iwmmxt_set_mup();
2167 gen_op_iwmmxt_set_cup();
2168 break;
2169 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2170 case 0x114: case 0x514: case 0x914: case 0xd14:
2171 wrd = (insn >> 12) & 0xf;
2172 rd0 = (insn >> 16) & 0xf;
2173 gen_op_iwmmxt_movq_M0_wRn(rd0);
2174 if (gen_iwmmxt_shift(insn, 0xff))
2175 return 1;
2176 switch ((insn >> 22) & 3) {
2177 case 0:
2178 return 1;
2179 case 1:
e677137d 2180 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2181 break;
2182 case 2:
e677137d 2183 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2184 break;
2185 case 3:
e677137d 2186 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2187 break;
2188 }
2189 gen_op_iwmmxt_movq_wRn_M0(wrd);
2190 gen_op_iwmmxt_set_mup();
2191 gen_op_iwmmxt_set_cup();
2192 break;
2193 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2194 case 0x314: case 0x714: case 0xb14: case 0xf14:
2195 wrd = (insn >> 12) & 0xf;
2196 rd0 = (insn >> 16) & 0xf;
2197 gen_op_iwmmxt_movq_M0_wRn(rd0);
2198 switch ((insn >> 22) & 3) {
2199 case 0:
2200 return 1;
2201 case 1:
2202 if (gen_iwmmxt_shift(insn, 0xf))
2203 return 1;
e677137d 2204 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2205 break;
2206 case 2:
2207 if (gen_iwmmxt_shift(insn, 0x1f))
2208 return 1;
e677137d 2209 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2210 break;
2211 case 3:
2212 if (gen_iwmmxt_shift(insn, 0x3f))
2213 return 1;
e677137d 2214 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2215 break;
2216 }
2217 gen_op_iwmmxt_movq_wRn_M0(wrd);
2218 gen_op_iwmmxt_set_mup();
2219 gen_op_iwmmxt_set_cup();
2220 break;
2221 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2222 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2223 wrd = (insn >> 12) & 0xf;
2224 rd0 = (insn >> 16) & 0xf;
2225 rd1 = (insn >> 0) & 0xf;
2226 gen_op_iwmmxt_movq_M0_wRn(rd0);
2227 switch ((insn >> 22) & 3) {
2228 case 0:
2229 if (insn & (1 << 21))
2230 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2231 else
2232 gen_op_iwmmxt_minub_M0_wRn(rd1);
2233 break;
2234 case 1:
2235 if (insn & (1 << 21))
2236 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2237 else
2238 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2239 break;
2240 case 2:
2241 if (insn & (1 << 21))
2242 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2243 else
2244 gen_op_iwmmxt_minul_M0_wRn(rd1);
2245 break;
2246 case 3:
2247 return 1;
2248 }
2249 gen_op_iwmmxt_movq_wRn_M0(wrd);
2250 gen_op_iwmmxt_set_mup();
2251 break;
2252 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2253 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2254 wrd = (insn >> 12) & 0xf;
2255 rd0 = (insn >> 16) & 0xf;
2256 rd1 = (insn >> 0) & 0xf;
2257 gen_op_iwmmxt_movq_M0_wRn(rd0);
2258 switch ((insn >> 22) & 3) {
2259 case 0:
2260 if (insn & (1 << 21))
2261 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2262 else
2263 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2264 break;
2265 case 1:
2266 if (insn & (1 << 21))
2267 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2268 else
2269 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2270 break;
2271 case 2:
2272 if (insn & (1 << 21))
2273 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2274 else
2275 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2276 break;
2277 case 3:
2278 return 1;
2279 }
2280 gen_op_iwmmxt_movq_wRn_M0(wrd);
2281 gen_op_iwmmxt_set_mup();
2282 break;
2283 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2284 case 0x402: case 0x502: case 0x602: case 0x702:
2285 wrd = (insn >> 12) & 0xf;
2286 rd0 = (insn >> 16) & 0xf;
2287 rd1 = (insn >> 0) & 0xf;
2288 gen_op_iwmmxt_movq_M0_wRn(rd0);
2289 gen_op_movl_T0_im((insn >> 20) & 3);
2290 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2291 gen_op_iwmmxt_movq_wRn_M0(wrd);
2292 gen_op_iwmmxt_set_mup();
2293 break;
2294 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2295 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2296 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2297 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2298 wrd = (insn >> 12) & 0xf;
2299 rd0 = (insn >> 16) & 0xf;
2300 rd1 = (insn >> 0) & 0xf;
2301 gen_op_iwmmxt_movq_M0_wRn(rd0);
2302 switch ((insn >> 20) & 0xf) {
2303 case 0x0:
2304 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2305 break;
2306 case 0x1:
2307 gen_op_iwmmxt_subub_M0_wRn(rd1);
2308 break;
2309 case 0x3:
2310 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2311 break;
2312 case 0x4:
2313 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2314 break;
2315 case 0x5:
2316 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2317 break;
2318 case 0x7:
2319 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2320 break;
2321 case 0x8:
2322 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2323 break;
2324 case 0x9:
2325 gen_op_iwmmxt_subul_M0_wRn(rd1);
2326 break;
2327 case 0xb:
2328 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2329 break;
2330 default:
2331 return 1;
2332 }
2333 gen_op_iwmmxt_movq_wRn_M0(wrd);
2334 gen_op_iwmmxt_set_mup();
2335 gen_op_iwmmxt_set_cup();
2336 break;
2337 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2338 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2339 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2340 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2341 wrd = (insn >> 12) & 0xf;
2342 rd0 = (insn >> 16) & 0xf;
2343 gen_op_iwmmxt_movq_M0_wRn(rd0);
2344 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2345 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2346 gen_op_iwmmxt_movq_wRn_M0(wrd);
2347 gen_op_iwmmxt_set_mup();
2348 gen_op_iwmmxt_set_cup();
2349 break;
2350 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2351 case 0x418: case 0x518: case 0x618: case 0x718:
2352 case 0x818: case 0x918: case 0xa18: case 0xb18:
2353 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2354 wrd = (insn >> 12) & 0xf;
2355 rd0 = (insn >> 16) & 0xf;
2356 rd1 = (insn >> 0) & 0xf;
2357 gen_op_iwmmxt_movq_M0_wRn(rd0);
2358 switch ((insn >> 20) & 0xf) {
2359 case 0x0:
2360 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2361 break;
2362 case 0x1:
2363 gen_op_iwmmxt_addub_M0_wRn(rd1);
2364 break;
2365 case 0x3:
2366 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2367 break;
2368 case 0x4:
2369 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2370 break;
2371 case 0x5:
2372 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2373 break;
2374 case 0x7:
2375 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2376 break;
2377 case 0x8:
2378 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2379 break;
2380 case 0x9:
2381 gen_op_iwmmxt_addul_M0_wRn(rd1);
2382 break;
2383 case 0xb:
2384 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2385 break;
2386 default:
2387 return 1;
2388 }
2389 gen_op_iwmmxt_movq_wRn_M0(wrd);
2390 gen_op_iwmmxt_set_mup();
2391 gen_op_iwmmxt_set_cup();
2392 break;
2393 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2394 case 0x408: case 0x508: case 0x608: case 0x708:
2395 case 0x808: case 0x908: case 0xa08: case 0xb08:
2396 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2397 wrd = (insn >> 12) & 0xf;
2398 rd0 = (insn >> 16) & 0xf;
2399 rd1 = (insn >> 0) & 0xf;
2400 gen_op_iwmmxt_movq_M0_wRn(rd0);
2401 if (!(insn & (1 << 20)))
2402 return 1;
2403 switch ((insn >> 22) & 3) {
2404 case 0:
2405 return 1;
2406 case 1:
2407 if (insn & (1 << 21))
2408 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2409 else
2410 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2411 break;
2412 case 2:
2413 if (insn & (1 << 21))
2414 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2415 else
2416 gen_op_iwmmxt_packul_M0_wRn(rd1);
2417 break;
2418 case 3:
2419 if (insn & (1 << 21))
2420 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2421 else
2422 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2423 break;
2424 }
2425 gen_op_iwmmxt_movq_wRn_M0(wrd);
2426 gen_op_iwmmxt_set_mup();
2427 gen_op_iwmmxt_set_cup();
2428 break;
2429 case 0x201: case 0x203: case 0x205: case 0x207:
2430 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2431 case 0x211: case 0x213: case 0x215: case 0x217:
2432 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2433 wrd = (insn >> 5) & 0xf;
2434 rd0 = (insn >> 12) & 0xf;
2435 rd1 = (insn >> 0) & 0xf;
2436 if (rd0 == 0xf || rd1 == 0xf)
2437 return 1;
2438 gen_op_iwmmxt_movq_M0_wRn(wrd);
2439 switch ((insn >> 16) & 0xf) {
2440 case 0x0: /* TMIA */
b26eefb6
PB
2441 gen_movl_T0_reg(s, rd0);
2442 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2443 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2444 break;
2445 case 0x8: /* TMIAPH */
b26eefb6
PB
2446 gen_movl_T0_reg(s, rd0);
2447 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2448 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2449 break;
2450 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2451 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2452 if (insn & (1 << 16))
2453 gen_op_shrl_T1_im(16);
2454 gen_op_movl_T0_T1();
b26eefb6 2455 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2456 if (insn & (1 << 17))
2457 gen_op_shrl_T1_im(16);
2458 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2459 break;
2460 default:
2461 return 1;
2462 }
2463 gen_op_iwmmxt_movq_wRn_M0(wrd);
2464 gen_op_iwmmxt_set_mup();
2465 break;
2466 default:
2467 return 1;
2468 }
2469
2470 return 0;
2471}
2472
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error occurred
   (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 exists on XScale; others are undefined.  */
        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0: /* MIA: acc0 += rd0 * rd1 (signed 32x32) */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8: /* MIAPH: dual signed 16x16 multiply-accumulate */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc: /* MIABB */
        case 0xd: /* MIABT */
        case 0xe: /* MIATB */
        case 0xf: /* MIATT */
            /* Bits 16/17 pick the top (shift by 16) or bottom halfword of
               each source operand before the 16x16 multiply-accumulate.  */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) { /* MRA: accumulator -> rdlo/rdhi */
            gen_iwmmxt_movl_T0_T1_wRn(acc);
            gen_movl_reg_T0(s, rdlo);
            /* The accumulator is 40 bits wide: keep only bits 39..32 of the
               high word when transferring to the core register.  */
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_movl_reg_T0(s, rdhi);
        } else { /* MAR: rdlo/rdhi -> accumulator */
            gen_movl_T0_reg(s, rdlo);
            gen_movl_T1_reg(s, rdhi);
            gen_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
2545
c1713132
AZ
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  Dispatches MRC/MCR to the board-supplied
   cp_read/cp_write hooks via runtime helpers.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp;
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    /* Generic coprocessor access is privileged-only.  */
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* MRC: coprocessor -> core register.  */
        if (!env->cp[cp].cp_read)
            return 1;
        /* Make the current PC visible to the helper in case it raises an
           exception; do this only after the undef checks above.  */
        gen_set_pc_im(s->pc);
        tmp = new_tmp();
        gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
        store_reg(s, rd, tmp);
    } else {
        /* MCR: core register -> coprocessor.  */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_set_pc_im(s->pc);
        tmp = load_reg(s, rd);
        gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
    }
    return 0;
}
2573
9ee6e8bb
PB
2574static int cp15_user_ok(uint32_t insn)
2575{
2576 int cpn = (insn >> 16) & 0xf;
2577 int cpm = insn & 0xf;
2578 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2579
2580 if (cpn == 13 && cpm == 0) {
2581 /* TLS register. */
2582 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2583 return 1;
2584 }
2585 if (cpn == 7) {
2586 /* ISB, DSB, DMB. */
2587 if ((cpm == 5 && op == 4)
2588 || (cpm == 10 && (op == 4 || op == 5)))
2589 return 1;
2590 }
2591 return 0;
2592}
2593
b5ff1b31
FB
/* Disassemble system coprocessor (cp15) instruction.  Return nonzero if
   instruction is not defined.  Handles user-mode permission checks, the
   WFI encodings, and MRC/MCR via the cp15 helpers.  */
static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd;
    TCGv tmp;

    /* M profile cores use memory mapped registers instead of cp15.  */
    if (arm_feature(env, ARM_FEATURE_M))
        return 1;

    if ((insn & (1 << 25)) == 0) {
        if (insn & (1 << 20)) {
            /* mrrc */
            return 1;
        }
        /* mcrr.  Used for block cache operations, so implement as no-op.  */
        return 0;
    }
    if ((insn & (1 << 4)) == 0) {
        /* cdp */
        return 1;
    }
    if (IS_USER(s) && !cp15_user_ok(insn)) {
        return 1;
    }
    if ((insn & 0x0fff0fff) == 0x0e070f90
        || (insn & 0x0fff0fff) == 0x0e070f58) {
        /* Wait for interrupt.  Record the resume PC and end the TB so the
           main loop can halt the CPU.  */
        gen_set_pc_im(s->pc);
        s->is_jmp = DISAS_WFI;
        return 0;
    }
    rd = (insn >> 12) & 0xf;
    if (insn & ARM_CP_RW_BIT) {
        /* MRC */
        tmp = new_tmp();
        gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
        /* If the destination register is r15 then sets condition codes.  */
        if (rd != 15)
            store_reg(s, rd, tmp);
        else
            dead_tmp(tmp);
    } else {
        /* MCR */
        tmp = load_reg(s, rd);
        gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
        dead_tmp(tmp);
        /* Normally we would always end the TB here, but Linux
         * arch/arm/mach-pxa/sleep.S expects two instructions following
         * an MMU enable to execute from cache.  Imitate this behaviour.  */
        if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
                (insn & 0x0fff0fff) != 0x0e010f10)
            gen_lookup_tb(s);
    }
    return 0;
}
2649
9ee6e8bb
PB
/* Helpers to extract VFP register numbers from an instruction.
   VFP_REG_SHR shifts x right by n, treating a negative n as a left shift.
   A single-precision register number is 5 bits: four from the "big" field
   plus one low bit from the "small" field.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number: VFP3 has 32 D registers, with the
   "small" bit supplying bit 4; pre-VFP3 has 16, and a set "small" bit
   makes the instruction undefined (hence the embedded `return 1`).  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Field positions for the D (destination), N and M operands.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2669
4373f3ce
PB
2670/* Move between integer and VFP cores. */
2671static TCGv gen_vfp_mrs(void)
2672{
2673 TCGv tmp = new_tmp();
2674 tcg_gen_mov_i32(tmp, cpu_F0s);
2675 return tmp;
2676}
2677
2678static void gen_vfp_msr(TCGv tmp)
2679{
2680 tcg_gen_mov_i32(cpu_F0s, tmp);
2681 dead_tmp(tmp);
2682}
2683
9ee6e8bb
PB
2684static inline int
2685vfp_enabled(CPUState * env)
2686{
2687 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2688}
2689
ad69471c
PB
2690static void gen_neon_dup_u8(TCGv var, int shift)
2691{
2692 TCGv tmp = new_tmp();
2693 if (shift)
2694 tcg_gen_shri_i32(var, var, shift);
86831435 2695 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2696 tcg_gen_shli_i32(tmp, var, 8);
2697 tcg_gen_or_i32(var, var, tmp);
2698 tcg_gen_shli_i32(tmp, var, 16);
2699 tcg_gen_or_i32(var, var, tmp);
2700 dead_tmp(tmp);
2701}
2702
2703static void gen_neon_dup_low16(TCGv var)
2704{
2705 TCGv tmp = new_tmp();
86831435 2706 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2707 tcg_gen_shli_i32(tmp, var, 16);
2708 tcg_gen_or_i32(var, var, tmp);
2709 dead_tmp(tmp);
2710}
2711
2712static void gen_neon_dup_high16(TCGv var)
2713{
2714 TCGv tmp = new_tmp();
2715 tcg_gen_andi_i32(var, var, 0xffff0000);
2716 tcg_gen_shri_i32(tmp, var, 16);
2717 tcg_gen_or_i32(var, var, tmp);
2718 dead_tmp(tmp);
2719}
2720
b7bcbe95
FB
2721/* Disassemble a VFP instruction. Returns nonzero if an error occured
2722 (ie. an undefined instruction). */
2723static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2724{
2725 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2726 int dp, veclen;
4373f3ce 2727 TCGv tmp;
ad69471c 2728 TCGv tmp2;
b7bcbe95 2729
40f137e1
PB
2730 if (!arm_feature(env, ARM_FEATURE_VFP))
2731 return 1;
2732
9ee6e8bb
PB
2733 if (!vfp_enabled(env)) {
2734 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2735 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2736 return 1;
2737 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2738 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2739 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2740 return 1;
2741 }
b7bcbe95
FB
2742 dp = ((insn & 0xf00) == 0xb00);
2743 switch ((insn >> 24) & 0xf) {
2744 case 0xe:
2745 if (insn & (1 << 4)) {
2746 /* single register transfer */
b7bcbe95
FB
2747 rd = (insn >> 12) & 0xf;
2748 if (dp) {
9ee6e8bb
PB
2749 int size;
2750 int pass;
2751
2752 VFP_DREG_N(rn, insn);
2753 if (insn & 0xf)
b7bcbe95 2754 return 1;
9ee6e8bb
PB
2755 if (insn & 0x00c00060
2756 && !arm_feature(env, ARM_FEATURE_NEON))
2757 return 1;
2758
2759 pass = (insn >> 21) & 1;
2760 if (insn & (1 << 22)) {
2761 size = 0;
2762 offset = ((insn >> 5) & 3) * 8;
2763 } else if (insn & (1 << 5)) {
2764 size = 1;
2765 offset = (insn & (1 << 6)) ? 16 : 0;
2766 } else {
2767 size = 2;
2768 offset = 0;
2769 }
18c9b560 2770 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2771 /* vfp->arm */
ad69471c 2772 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2773 switch (size) {
2774 case 0:
9ee6e8bb 2775 if (offset)
ad69471c 2776 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2777 if (insn & (1 << 23))
ad69471c 2778 gen_uxtb(tmp);
9ee6e8bb 2779 else
ad69471c 2780 gen_sxtb(tmp);
9ee6e8bb
PB
2781 break;
2782 case 1:
9ee6e8bb
PB
2783 if (insn & (1 << 23)) {
2784 if (offset) {
ad69471c 2785 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2786 } else {
ad69471c 2787 gen_uxth(tmp);
9ee6e8bb
PB
2788 }
2789 } else {
2790 if (offset) {
ad69471c 2791 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2792 } else {
ad69471c 2793 gen_sxth(tmp);
9ee6e8bb
PB
2794 }
2795 }
2796 break;
2797 case 2:
9ee6e8bb
PB
2798 break;
2799 }
ad69471c 2800 store_reg(s, rd, tmp);
b7bcbe95
FB
2801 } else {
2802 /* arm->vfp */
ad69471c 2803 tmp = load_reg(s, rd);
9ee6e8bb
PB
2804 if (insn & (1 << 23)) {
2805 /* VDUP */
2806 if (size == 0) {
ad69471c 2807 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2808 } else if (size == 1) {
ad69471c 2809 gen_neon_dup_low16(tmp);
9ee6e8bb 2810 }
ad69471c
PB
2811 tmp2 = new_tmp();
2812 tcg_gen_mov_i32(tmp2, tmp);
2813 neon_store_reg(rn, 0, tmp2);
2814 neon_store_reg(rn, 0, tmp);
9ee6e8bb
PB
2815 } else {
2816 /* VMOV */
2817 switch (size) {
2818 case 0:
ad69471c
PB
2819 tmp2 = neon_load_reg(rn, pass);
2820 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2821 dead_tmp(tmp2);
9ee6e8bb
PB
2822 break;
2823 case 1:
ad69471c
PB
2824 tmp2 = neon_load_reg(rn, pass);
2825 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2826 dead_tmp(tmp2);
9ee6e8bb
PB
2827 break;
2828 case 2:
9ee6e8bb
PB
2829 break;
2830 }
ad69471c 2831 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2832 }
b7bcbe95 2833 }
9ee6e8bb
PB
2834 } else { /* !dp */
2835 if ((insn & 0x6f) != 0x00)
2836 return 1;
2837 rn = VFP_SREG_N(insn);
18c9b560 2838 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2839 /* vfp->arm */
2840 if (insn & (1 << 21)) {
2841 /* system register */
40f137e1 2842 rn >>= 1;
9ee6e8bb 2843
b7bcbe95 2844 switch (rn) {
40f137e1 2845 case ARM_VFP_FPSID:
4373f3ce 2846 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2847 VFP3 restricts all id registers to privileged
2848 accesses. */
2849 if (IS_USER(s)
2850 && arm_feature(env, ARM_FEATURE_VFP3))
2851 return 1;
4373f3ce 2852 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2853 break;
40f137e1 2854 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2855 if (IS_USER(s))
2856 return 1;
4373f3ce 2857 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2858 break;
40f137e1
PB
2859 case ARM_VFP_FPINST:
2860 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2861 /* Not present in VFP3. */
2862 if (IS_USER(s)
2863 || arm_feature(env, ARM_FEATURE_VFP3))
2864 return 1;
4373f3ce 2865 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2866 break;
40f137e1 2867 case ARM_VFP_FPSCR:
601d70b9 2868 if (rd == 15) {
4373f3ce
PB
2869 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2870 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2871 } else {
2872 tmp = new_tmp();
2873 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2874 }
b7bcbe95 2875 break;
9ee6e8bb
PB
2876 case ARM_VFP_MVFR0:
2877 case ARM_VFP_MVFR1:
2878 if (IS_USER(s)
2879 || !arm_feature(env, ARM_FEATURE_VFP3))
2880 return 1;
4373f3ce 2881 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2882 break;
b7bcbe95
FB
2883 default:
2884 return 1;
2885 }
2886 } else {
2887 gen_mov_F0_vreg(0, rn);
4373f3ce 2888 tmp = gen_vfp_mrs();
b7bcbe95
FB
2889 }
2890 if (rd == 15) {
b5ff1b31 2891 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2892 gen_set_nzcv(tmp);
2893 dead_tmp(tmp);
2894 } else {
2895 store_reg(s, rd, tmp);
2896 }
b7bcbe95
FB
2897 } else {
2898 /* arm->vfp */
4373f3ce 2899 tmp = load_reg(s, rd);
b7bcbe95 2900 if (insn & (1 << 21)) {
40f137e1 2901 rn >>= 1;
b7bcbe95
FB
2902 /* system register */
2903 switch (rn) {
40f137e1 2904 case ARM_VFP_FPSID:
9ee6e8bb
PB
2905 case ARM_VFP_MVFR0:
2906 case ARM_VFP_MVFR1:
b7bcbe95
FB
2907 /* Writes are ignored. */
2908 break;
40f137e1 2909 case ARM_VFP_FPSCR:
4373f3ce
PB
2910 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2911 dead_tmp(tmp);
b5ff1b31 2912 gen_lookup_tb(s);
b7bcbe95 2913 break;
40f137e1 2914 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2915 if (IS_USER(s))
2916 return 1;
4373f3ce 2917 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2918 gen_lookup_tb(s);
2919 break;
2920 case ARM_VFP_FPINST:
2921 case ARM_VFP_FPINST2:
4373f3ce 2922 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2923 break;
b7bcbe95
FB
2924 default:
2925 return 1;
2926 }
2927 } else {
4373f3ce 2928 gen_vfp_msr(tmp);
b7bcbe95
FB
2929 gen_mov_vreg_F0(0, rn);
2930 }
2931 }
2932 }
2933 } else {
2934 /* data processing */
2935 /* The opcode is in bits 23, 21, 20 and 6. */
2936 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2937 if (dp) {
2938 if (op == 15) {
2939 /* rn is opcode */
2940 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2941 } else {
2942 /* rn is register number */
9ee6e8bb 2943 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2944 }
2945
2946 if (op == 15 && (rn == 15 || rn > 17)) {
2947 /* Integer or single precision destination. */
9ee6e8bb 2948 rd = VFP_SREG_D(insn);
b7bcbe95 2949 } else {
9ee6e8bb 2950 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2951 }
2952
2953 if (op == 15 && (rn == 16 || rn == 17)) {
2954 /* Integer source. */
2955 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2956 } else {
9ee6e8bb 2957 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2958 }
2959 } else {
9ee6e8bb 2960 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2961 if (op == 15 && rn == 15) {
2962 /* Double precision destination. */
9ee6e8bb
PB
2963 VFP_DREG_D(rd, insn);
2964 } else {
2965 rd = VFP_SREG_D(insn);
2966 }
2967 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2968 }
2969
2970 veclen = env->vfp.vec_len;
2971 if (op == 15 && rn > 3)
2972 veclen = 0;
2973
2974 /* Shut up compiler warnings. */
2975 delta_m = 0;
2976 delta_d = 0;
2977 bank_mask = 0;
3b46e624 2978
b7bcbe95
FB
2979 if (veclen > 0) {
2980 if (dp)
2981 bank_mask = 0xc;
2982 else
2983 bank_mask = 0x18;
2984
2985 /* Figure out what type of vector operation this is. */
2986 if ((rd & bank_mask) == 0) {
2987 /* scalar */
2988 veclen = 0;
2989 } else {
2990 if (dp)
2991 delta_d = (env->vfp.vec_stride >> 1) + 1;
2992 else
2993 delta_d = env->vfp.vec_stride + 1;
2994
2995 if ((rm & bank_mask) == 0) {
2996 /* mixed scalar/vector */
2997 delta_m = 0;
2998 } else {
2999 /* vector */
3000 delta_m = delta_d;
3001 }
3002 }
3003 }
3004
3005 /* Load the initial operands. */
3006 if (op == 15) {
3007 switch (rn) {
3008 case 16:
3009 case 17:
3010 /* Integer source */
3011 gen_mov_F0_vreg(0, rm);
3012 break;
3013 case 8:
3014 case 9:
3015 /* Compare */
3016 gen_mov_F0_vreg(dp, rd);
3017 gen_mov_F1_vreg(dp, rm);
3018 break;
3019 case 10:
3020 case 11:
3021 /* Compare with zero */
3022 gen_mov_F0_vreg(dp, rd);
3023 gen_vfp_F1_ld0(dp);
3024 break;
9ee6e8bb
PB
3025 case 20:
3026 case 21:
3027 case 22:
3028 case 23:
3029 /* Source and destination the same. */
3030 gen_mov_F0_vreg(dp, rd);
3031 break;
b7bcbe95
FB
3032 default:
3033 /* One source operand. */
3034 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3035 break;
b7bcbe95
FB
3036 }
3037 } else {
3038 /* Two source operands. */
3039 gen_mov_F0_vreg(dp, rn);
3040 gen_mov_F1_vreg(dp, rm);
3041 }
3042
3043 for (;;) {
3044 /* Perform the calculation. */
3045 switch (op) {
3046 case 0: /* mac: fd + (fn * fm) */
3047 gen_vfp_mul(dp);
3048 gen_mov_F1_vreg(dp, rd);
3049 gen_vfp_add(dp);
3050 break;
3051 case 1: /* nmac: fd - (fn * fm) */
3052 gen_vfp_mul(dp);
3053 gen_vfp_neg(dp);
3054 gen_mov_F1_vreg(dp, rd);
3055 gen_vfp_add(dp);
3056 break;
3057 case 2: /* msc: -fd + (fn * fm) */
3058 gen_vfp_mul(dp);
3059 gen_mov_F1_vreg(dp, rd);
3060 gen_vfp_sub(dp);
3061 break;
3062 case 3: /* nmsc: -fd - (fn * fm) */
3063 gen_vfp_mul(dp);
3064 gen_mov_F1_vreg(dp, rd);
3065 gen_vfp_add(dp);
3066 gen_vfp_neg(dp);
3067 break;
3068 case 4: /* mul: fn * fm */
3069 gen_vfp_mul(dp);
3070 break;
3071 case 5: /* nmul: -(fn * fm) */
3072 gen_vfp_mul(dp);
3073 gen_vfp_neg(dp);
3074 break;
3075 case 6: /* add: fn + fm */
3076 gen_vfp_add(dp);
3077 break;
3078 case 7: /* sub: fn - fm */
3079 gen_vfp_sub(dp);
3080 break;
3081 case 8: /* div: fn / fm */
3082 gen_vfp_div(dp);
3083 break;
9ee6e8bb
PB
3084 case 14: /* fconst */
3085 if (!arm_feature(env, ARM_FEATURE_VFP3))
3086 return 1;
3087
3088 n = (insn << 12) & 0x80000000;
3089 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3090 if (dp) {
3091 if (i & 0x40)
3092 i |= 0x3f80;
3093 else
3094 i |= 0x4000;
3095 n |= i << 16;
4373f3ce 3096 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3097 } else {
3098 if (i & 0x40)
3099 i |= 0x780;
3100 else
3101 i |= 0x800;
3102 n |= i << 19;
5b340b51 3103 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3104 }
9ee6e8bb 3105 break;
b7bcbe95
FB
3106 case 15: /* extension space */
3107 switch (rn) {
3108 case 0: /* cpy */
3109 /* no-op */
3110 break;
3111 case 1: /* abs */
3112 gen_vfp_abs(dp);
3113 break;
3114 case 2: /* neg */
3115 gen_vfp_neg(dp);
3116 break;
3117 case 3: /* sqrt */
3118 gen_vfp_sqrt(dp);
3119 break;
3120 case 8: /* cmp */
3121 gen_vfp_cmp(dp);
3122 break;
3123 case 9: /* cmpe */
3124 gen_vfp_cmpe(dp);
3125 break;
3126 case 10: /* cmpz */
3127 gen_vfp_cmp(dp);
3128 break;
3129 case 11: /* cmpez */
3130 gen_vfp_F1_ld0(dp);
3131 gen_vfp_cmpe(dp);
3132 break;
3133 case 15: /* single<->double conversion */
3134 if (dp)
4373f3ce 3135 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3136 else
4373f3ce 3137 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3138 break;
3139 case 16: /* fuito */
3140 gen_vfp_uito(dp);
3141 break;
3142 case 17: /* fsito */
3143 gen_vfp_sito(dp);
3144 break;
9ee6e8bb
PB
3145 case 20: /* fshto */
3146 if (!arm_feature(env, ARM_FEATURE_VFP3))
3147 return 1;
3148 gen_vfp_shto(dp, rm);
3149 break;
3150 case 21: /* fslto */
3151 if (!arm_feature(env, ARM_FEATURE_VFP3))
3152 return 1;
3153 gen_vfp_slto(dp, rm);
3154 break;
3155 case 22: /* fuhto */
3156 if (!arm_feature(env, ARM_FEATURE_VFP3))
3157 return 1;
3158 gen_vfp_uhto(dp, rm);
3159 break;
3160 case 23: /* fulto */
3161 if (!arm_feature(env, ARM_FEATURE_VFP3))
3162 return 1;
3163 gen_vfp_ulto(dp, rm);
3164 break;
b7bcbe95
FB
3165 case 24: /* ftoui */
3166 gen_vfp_toui(dp);
3167 break;
3168 case 25: /* ftouiz */
3169 gen_vfp_touiz(dp);
3170 break;
3171 case 26: /* ftosi */
3172 gen_vfp_tosi(dp);
3173 break;
3174 case 27: /* ftosiz */
3175 gen_vfp_tosiz(dp);
3176 break;
9ee6e8bb
PB
3177 case 28: /* ftosh */
3178 if (!arm_feature(env, ARM_FEATURE_VFP3))
3179 return 1;
3180 gen_vfp_tosh(dp, rm);
3181 break;
3182 case 29: /* ftosl */
3183 if (!arm_feature(env, ARM_FEATURE_VFP3))
3184 return 1;
3185 gen_vfp_tosl(dp, rm);
3186 break;
3187 case 30: /* ftouh */
3188 if (!arm_feature(env, ARM_FEATURE_VFP3))
3189 return 1;
3190 gen_vfp_touh(dp, rm);
3191 break;
3192 case 31: /* ftoul */
3193 if (!arm_feature(env, ARM_FEATURE_VFP3))
3194 return 1;
3195 gen_vfp_toul(dp, rm);
3196 break;
b7bcbe95
FB
3197 default: /* undefined */
3198 printf ("rn:%d\n", rn);
3199 return 1;
3200 }
3201 break;
3202 default: /* undefined */
3203 printf ("op:%d\n", op);
3204 return 1;
3205 }
3206
3207 /* Write back the result. */
3208 if (op == 15 && (rn >= 8 && rn <= 11))
3209 ; /* Comparison, do nothing. */
3210 else if (op == 15 && rn > 17)
3211 /* Integer result. */
3212 gen_mov_vreg_F0(0, rd);
3213 else if (op == 15 && rn == 15)
3214 /* conversion */
3215 gen_mov_vreg_F0(!dp, rd);
3216 else
3217 gen_mov_vreg_F0(dp, rd);
3218
3219 /* break out of the loop if we have finished */
3220 if (veclen == 0)
3221 break;
3222
3223 if (op == 15 && delta_m == 0) {
3224 /* single source one-many */
3225 while (veclen--) {
3226 rd = ((rd + delta_d) & (bank_mask - 1))
3227 | (rd & bank_mask);
3228 gen_mov_vreg_F0(dp, rd);
3229 }
3230 break;
3231 }
3232 /* Setup the next operands. */
3233 veclen--;
3234 rd = ((rd + delta_d) & (bank_mask - 1))
3235 | (rd & bank_mask);
3236
3237 if (op == 15) {
3238 /* One source operand. */
3239 rm = ((rm + delta_m) & (bank_mask - 1))
3240 | (rm & bank_mask);
3241 gen_mov_F0_vreg(dp, rm);
3242 } else {
3243 /* Two source operands. */
3244 rn = ((rn + delta_d) & (bank_mask - 1))
3245 | (rn & bank_mask);
3246 gen_mov_F0_vreg(dp, rn);
3247 if (delta_m) {
3248 rm = ((rm + delta_m) & (bank_mask - 1))
3249 | (rm & bank_mask);
3250 gen_mov_F1_vreg(dp, rm);
3251 }
3252 }
3253 }
3254 }
3255 break;
3256 case 0xc:
3257 case 0xd:
9ee6e8bb 3258 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3259 /* two-register transfer */
3260 rn = (insn >> 16) & 0xf;
3261 rd = (insn >> 12) & 0xf;
3262 if (dp) {
9ee6e8bb
PB
3263 VFP_DREG_M(rm, insn);
3264 } else {
3265 rm = VFP_SREG_M(insn);
3266 }
b7bcbe95 3267
18c9b560 3268 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3269 /* vfp->arm */
3270 if (dp) {
4373f3ce
PB
3271 gen_mov_F0_vreg(0, rm * 2);
3272 tmp = gen_vfp_mrs();
3273 store_reg(s, rd, tmp);
3274 gen_mov_F0_vreg(0, rm * 2 + 1);
3275 tmp = gen_vfp_mrs();
3276 store_reg(s, rn, tmp);
b7bcbe95
FB
3277 } else {
3278 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3279 tmp = gen_vfp_mrs();
3280 store_reg(s, rn, tmp);
b7bcbe95 3281 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3282 tmp = gen_vfp_mrs();
3283 store_reg(s, rd, tmp);
b7bcbe95
FB
3284 }
3285 } else {
3286 /* arm->vfp */
3287 if (dp) {
4373f3ce
PB
3288 tmp = load_reg(s, rd);
3289 gen_vfp_msr(tmp);
3290 gen_mov_vreg_F0(0, rm * 2);
3291 tmp = load_reg(s, rn);
3292 gen_vfp_msr(tmp);
3293 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3294 } else {
4373f3ce
PB
3295 tmp = load_reg(s, rn);
3296 gen_vfp_msr(tmp);
b7bcbe95 3297 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3298 tmp = load_reg(s, rd);
3299 gen_vfp_msr(tmp);
b7bcbe95
FB
3300 gen_mov_vreg_F0(0, rm + 1);
3301 }
3302 }
3303 } else {
3304 /* Load/store */
3305 rn = (insn >> 16) & 0xf;
3306 if (dp)
9ee6e8bb 3307 VFP_DREG_D(rd, insn);
b7bcbe95 3308 else
9ee6e8bb
PB
3309 rd = VFP_SREG_D(insn);
3310 if (s->thumb && rn == 15) {
3311 gen_op_movl_T1_im(s->pc & ~2);
3312 } else {
3313 gen_movl_T1_reg(s, rn);
3314 }
b7bcbe95
FB
3315 if ((insn & 0x01200000) == 0x01000000) {
3316 /* Single load/store */
3317 offset = (insn & 0xff) << 2;
3318 if ((insn & (1 << 23)) == 0)
3319 offset = -offset;
3320 gen_op_addl_T1_im(offset);
3321 if (insn & (1 << 20)) {
b5ff1b31 3322 gen_vfp_ld(s, dp);
b7bcbe95
FB
3323 gen_mov_vreg_F0(dp, rd);
3324 } else {
3325 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3326 gen_vfp_st(s, dp);
b7bcbe95
FB
3327 }
3328 } else {
3329 /* load/store multiple */
3330 if (dp)
3331 n = (insn >> 1) & 0x7f;
3332 else
3333 n = insn & 0xff;
3334
3335 if (insn & (1 << 24)) /* pre-decrement */
3336 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3337
3338 if (dp)
3339 offset = 8;
3340 else
3341 offset = 4;
3342 for (i = 0; i < n; i++) {
18c9b560 3343 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3344 /* load */
b5ff1b31 3345 gen_vfp_ld(s, dp);
b7bcbe95
FB
3346 gen_mov_vreg_F0(dp, rd + i);
3347 } else {
3348 /* store */
3349 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3350 gen_vfp_st(s, dp);
b7bcbe95
FB
3351 }
3352 gen_op_addl_T1_im(offset);
3353 }
3354 if (insn & (1 << 21)) {
3355 /* writeback */
3356 if (insn & (1 << 24))
3357 offset = -offset * n;
3358 else if (dp && (insn & 1))
3359 offset = 4;
3360 else
3361 offset = 0;
3362
3363 if (offset != 0)
3364 gen_op_addl_T1_im(offset);
3365 gen_movl_reg_T1(s, rn);
3366 }
3367 }
3368 }
3369 break;
3370 default:
3371 /* Should never happen. */
3372 return 1;
3373 }
3374 return 0;
3375}
3376
6e256c93 3377static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3378{
6e256c93
FB
3379 TranslationBlock *tb;
3380
3381 tb = s->tb;
3382 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3383 tcg_gen_goto_tb(n);
8984bd2e 3384 gen_set_pc_im(dest);
57fec1fe 3385 tcg_gen_exit_tb((long)tb + n);
6e256c93 3386 } else {
8984bd2e 3387 gen_set_pc_im(dest);
57fec1fe 3388 tcg_gen_exit_tb(0);
6e256c93 3389 }
c53be334
FB
3390}
3391
8aaca4c0
FB
3392static inline void gen_jmp (DisasContext *s, uint32_t dest)
3393{
3394 if (__builtin_expect(s->singlestep_enabled, 0)) {
3395 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3396 if (s->thumb)
d9ba4830
PB
3397 dest |= 1;
3398 gen_bx_im(s, dest);
8aaca4c0 3399 } else {
6e256c93 3400 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3401 s->is_jmp = DISAS_TB_JUMP;
3402 }
3403}
3404
d9ba4830 3405static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3406{
ee097184 3407 if (x)
d9ba4830 3408 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3409 else
d9ba4830 3410 gen_sxth(t0);
ee097184 3411 if (y)
d9ba4830 3412 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3413 else
d9ba4830
PB
3414 gen_sxth(t1);
3415 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3416}
3417
3418/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3419static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3420 uint32_t mask;
3421
3422 mask = 0;
3423 if (flags & (1 << 0))
3424 mask |= 0xff;
3425 if (flags & (1 << 1))
3426 mask |= 0xff00;
3427 if (flags & (1 << 2))
3428 mask |= 0xff0000;
3429 if (flags & (1 << 3))
3430 mask |= 0xff000000;
9ee6e8bb 3431
2ae23e75 3432 /* Mask out undefined bits. */
9ee6e8bb
PB
3433 mask &= ~CPSR_RESERVED;
3434 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3435 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3436 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3437 mask &= ~CPSR_IT;
9ee6e8bb 3438 /* Mask out execution state bits. */
2ae23e75 3439 if (!spsr)
e160c51c 3440 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3441 /* Mask out privileged bits. */
3442 if (IS_USER(s))
9ee6e8bb 3443 mask &= CPSR_USER;
b5ff1b31
FB
3444 return mask;
3445}
3446
3447/* Returns nonzero if access to the PSR is not permitted. */
3448static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3449{
d9ba4830 3450 TCGv tmp;
b5ff1b31
FB
3451 if (spsr) {
3452 /* ??? This is also undefined in system mode. */
3453 if (IS_USER(s))
3454 return 1;
d9ba4830
PB
3455
3456 tmp = load_cpu_field(spsr);
3457 tcg_gen_andi_i32(tmp, tmp, ~mask);
3458 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3459 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3460 store_cpu_field(tmp, spsr);
b5ff1b31 3461 } else {
d9ba4830 3462 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3463 }
3464 gen_lookup_tb(s);
3465 return 0;
3466}
3467
9ee6e8bb 3468/* Generate an old-style exception return. */
b5ff1b31
FB
3469static void gen_exception_return(DisasContext *s)
3470{
d9ba4830 3471 TCGv tmp;
e22f8f39 3472 gen_movl_reg_T0(s, 15);
d9ba4830
PB
3473 tmp = load_cpu_field(spsr);
3474 gen_set_cpsr(tmp, 0xffffffff);
3475 dead_tmp(tmp);
b5ff1b31
FB
3476 s->is_jmp = DISAS_UPDATE;
3477}
3478
b0109805
PB
/* Generate a v6 exception return.  Marks both values as dead.  */
/* `cpsr` is written to the CPSR (all bits) before the new PC is stored,
   so the register write happens in the restored mode.  Both TCG temps
   are consumed here.  */
static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
{
    gen_set_cpsr(cpsr, 0xffffffff);
    dead_tmp(cpsr);
    store_reg(s, 15, pc);
    /* CPU state changed; stop translation and resync.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 3487
9ee6e8bb
PB
3488static inline void
3489gen_set_condexec (DisasContext *s)
3490{
3491 if (s->condexec_mask) {
8f01245e
PB
3492 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3493 TCGv tmp = new_tmp();
3494 tcg_gen_movi_i32(tmp, val);
d9ba4830 3495 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3496 }
3497}
3b46e624 3498
9ee6e8bb
PB
3499static void gen_nop_hint(DisasContext *s, int val)
3500{
3501 switch (val) {
3502 case 3: /* wfi */
8984bd2e 3503 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3504 s->is_jmp = DISAS_WFI;
3505 break;
3506 case 2: /* wfe */
3507 case 4: /* sev */
3508 /* TODO: Implement SEV and WFE. May help SMP performance. */
3509 default: /* nop */
3510 break;
3511 }
3512}
99c475ab 3513
ad69471c
PB
/* These macros help make the code more readable when migrating from the
   old dyngen helpers.  They should probably be removed when
   T0/T1 are removed.  */
/* 32-bit (dest, src1, src2) triple on the T0/T1 working pair.  */
#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
/* As above, with cpu_env inserted for helpers that need CPU state.  */
#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]

/* 64-bit (dest, src1, src2) triple on the V0/V1 working pair.  */
#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3521
3522static inline int gen_neon_add(int size)
3523{
3524 switch (size) {
ad69471c
PB
3525 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3526 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3527 case 2: gen_op_addl_T0_T1(); break;
3528 default: return 1;
3529 }
3530 return 0;
3531}
3532
ad69471c
PB
3533static inline void gen_neon_rsb(int size)
3534{
3535 switch (size) {
3536 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3537 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3538 case 2: gen_op_rsbl_T0_T1(); break;
3539 default: return;
3540 }
3541}
3542
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32

/* FIXME: This is wrong.  They set the wrong overflow bit.  */
#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)

/* Invoke the s8/u8/s16/u16/s32/u32 variant of a NEON helper that takes
   cpu_env, selected by the enclosing function's `size` and `u`
   variables.  Operates on the T0/T1 pair with the result in T0, and
   expands to `return 1` for an invalid size/u combination.  */
#define GEN_NEON_INTEGER_OP_ENV(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
9ee6e8bb
PB
3577
/* As GEN_NEON_INTEGER_OP_ENV, but for helpers that do not take cpu_env:
   dispatch on (`size`, `u`) to the s8/u8/s16/u16/s32/u32 variant,
   operating on the T0/T1 pair with the result in T0.  Expands to
   `return 1` for an invalid combination.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: \
        gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 1: \
        gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 2: \
        gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 3: \
        gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 4: \
        gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    case 5: \
        gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
        break; \
    default: return 1; \
    }} while (0)
3600
3601static inline void
3602gen_neon_movl_scratch_T0(int scratch)
3603{
3604 uint32_t offset;
3605
3606 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3607 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3608}
3609
3610static inline void
3611gen_neon_movl_scratch_T1(int scratch)
3612{
3613 uint32_t offset;
3614
3615 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3616 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3617}
3618
3619static inline void
3620gen_neon_movl_T0_scratch(int scratch)
3621{
3622 uint32_t offset;
3623
3624 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3625 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3626}
3627
3628static inline void
3629gen_neon_movl_T1_scratch(int scratch)
3630{
3631 uint32_t offset;
3632
3633 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3634 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3635}
3636
/* Load a scalar operand into T0.  `reg` packs the source D register
   together with the element index; on the duplicating path the selected
   halfword is replicated into both halves of T0.
   NOTE(review): the halfword duplication is done on the size != 1 path
   here, while a 16-bit (size == 1) scalar is the one that would
   normally need it — confirm the meaning of `size` at the call sites
   before relying on this.  */
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1)
            gen_neon_dup_low16(cpu_T[0]);
        else
            gen_neon_dup_high16(cpu_T[0]);
    }
}
3649
3650static void gen_neon_unzip(int reg, int q, int tmp, int size)
3651{
3652 int n;
3653
3654 for (n = 0; n < q + 1; n += 2) {
3655 NEON_GET_REG(T0, reg, n);
3656 NEON_GET_REG(T0, reg, n + n);
3657 switch (size) {
ad69471c
PB
3658 case 0: gen_helper_neon_unzip_u8(); break;
3659 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3660 case 2: /* no-op */; break;
3661 default: abort();
3662 }
3663 gen_neon_movl_scratch_T0(tmp + n);
3664 gen_neon_movl_scratch_T1(tmp + n + 1);
3665 }
3666}
3667
/* Layout table for NEON "load/store multiple structures", indexed by
   the instruction's op field (0..10): number of D registers
   transferred, the element interleave factor, and the spacing between
   destination registers.  */
static struct {
    int nregs;      /* D registers touched per structure */
    int interleave; /* element interleave factor */
    int spacing;    /* register-number stride between members */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3685
3686/* Translate a NEON load/store element instruction. Return nonzero if the
3687 instruction is invalid. */
3688static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3689{
3690 int rd, rn, rm;
3691 int op;
3692 int nregs;
3693 int interleave;
3694 int stride;
3695 int size;
3696 int reg;
3697 int pass;
3698 int load;
3699 int shift;
9ee6e8bb 3700 int n;
b0109805 3701 TCGv tmp;
8f8e3aa4 3702 TCGv tmp2;
9ee6e8bb
PB
3703
3704 if (!vfp_enabled(env))
3705 return 1;
3706 VFP_DREG_D(rd, insn);
3707 rn = (insn >> 16) & 0xf;
3708 rm = insn & 0xf;
3709 load = (insn & (1 << 21)) != 0;
3710 if ((insn & (1 << 23)) == 0) {
3711 /* Load store all elements. */
3712 op = (insn >> 8) & 0xf;
3713 size = (insn >> 6) & 3;
3714 if (op > 10 || size == 3)
3715 return 1;
3716 nregs = neon_ls_element_type[op].nregs;
3717 interleave = neon_ls_element_type[op].interleave;
3718 gen_movl_T1_reg(s, rn);
3719 stride = (1 << size) * interleave;
3720 for (reg = 0; reg < nregs; reg++) {
3721 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3722 gen_movl_T1_reg(s, rn);
3723 gen_op_addl_T1_im((1 << size) * reg);
3724 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3725 gen_movl_T1_reg(s, rn);
3726 gen_op_addl_T1_im(1 << size);
3727 }
3728 for (pass = 0; pass < 2; pass++) {
3729 if (size == 2) {
3730 if (load) {
b0109805 3731 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3732 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3733 } else {
ad69471c 3734 tmp = neon_load_reg(rd, pass);
b0109805 3735 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3736 }
3737 gen_op_addl_T1_im(stride);
3738 } else if (size == 1) {
3739 if (load) {
b0109805 3740 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3741 gen_op_addl_T1_im(stride);
8f8e3aa4 3742 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3743 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3744 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3745 dead_tmp(tmp2);
3746 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3747 } else {
8f8e3aa4
PB
3748 tmp = neon_load_reg(rd, pass);
3749 tmp2 = new_tmp();
3750 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3751 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3752 gen_op_addl_T1_im(stride);
8f8e3aa4 3753 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3754 gen_op_addl_T1_im(stride);
3755 }
3756 } else /* size == 0 */ {
3757 if (load) {
9ee6e8bb 3758 for (n = 0; n < 4; n++) {
b0109805 3759 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3760 gen_op_addl_T1_im(stride);
3761 if (n == 0) {
8f8e3aa4 3762 tmp2 = tmp;
9ee6e8bb 3763 } else {
8f8e3aa4
PB
3764 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3765 dead_tmp(tmp);
9ee6e8bb 3766 }
9ee6e8bb 3767 }
8f8e3aa4 3768 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3769 } else {
8f8e3aa4 3770 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3771 for (n = 0; n < 4; n++) {
8f8e3aa4 3772 tmp = new_tmp();
9ee6e8bb 3773 if (n == 0) {
8f8e3aa4 3774 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3775 } else {
8f8e3aa4 3776 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3777 }
b0109805 3778 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3779 gen_op_addl_T1_im(stride);
9ee6e8bb 3780 }
8f8e3aa4 3781 dead_tmp(tmp2);
9ee6e8bb
PB
3782 }
3783 }
3784 }
3785 rd += neon_ls_element_type[op].spacing;
3786 }
3787 stride = nregs * 8;
3788 } else {
3789 size = (insn >> 10) & 3;
3790 if (size == 3) {
3791 /* Load single element to all lanes. */
3792 if (!load)
3793 return 1;
3794 size = (insn >> 6) & 3;
3795 nregs = ((insn >> 8) & 3) + 1;
3796 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3797 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3798 for (reg = 0; reg < nregs; reg++) {
3799 switch (size) {
3800 case 0:
b0109805 3801 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3802 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3803 break;
3804 case 1:
b0109805 3805 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3806 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3807 break;
3808 case 2:
b0109805 3809 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3810 break;
3811 case 3:
3812 return 1;
99c475ab 3813 }
9ee6e8bb 3814 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3815 tmp2 = new_tmp();
3816 tcg_gen_mov_i32(tmp2, tmp);
3817 neon_store_reg(rd, 0, tmp2);
3818 neon_store_reg(rd, 0, tmp);
9ee6e8bb
PB
3819 rd += stride;
3820 }
3821 stride = (1 << size) * nregs;
3822 } else {
3823 /* Single element. */
3824 pass = (insn >> 7) & 1;
3825 switch (size) {
3826 case 0:
3827 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3828 stride = 1;
3829 break;
3830 case 1:
3831 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3832 stride = (insn & (1 << 5)) ? 2 : 1;
3833 break;
3834 case 2:
3835 shift = 0;
9ee6e8bb
PB
3836 stride = (insn & (1 << 6)) ? 2 : 1;
3837 break;
3838 default:
3839 abort();
3840 }
3841 nregs = ((insn >> 8) & 3) + 1;
3842 gen_movl_T1_reg(s, rn);
3843 for (reg = 0; reg < nregs; reg++) {
3844 if (load) {
9ee6e8bb
PB
3845 switch (size) {
3846 case 0:
b0109805 3847 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3848 break;
3849 case 1:
b0109805 3850 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3851 break;
3852 case 2:
b0109805 3853 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3854 break;
3855 }
3856 if (size != 2) {
8f8e3aa4
PB
3857 tmp2 = neon_load_reg(rd, pass);
3858 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3859 dead_tmp(tmp2);
9ee6e8bb 3860 }
8f8e3aa4 3861 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3862 } else { /* Store */
8f8e3aa4
PB
3863 tmp = neon_load_reg(rd, pass);
3864 if (shift)
3865 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3866 switch (size) {
3867 case 0:
b0109805 3868 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3869 break;
3870 case 1:
b0109805 3871 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3872 break;
3873 case 2:
b0109805 3874 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3875 break;
99c475ab 3876 }
99c475ab 3877 }
9ee6e8bb
PB
3878 rd += stride;
3879 gen_op_addl_T1_im(1 << size);
99c475ab 3880 }
9ee6e8bb 3881 stride = nregs * (1 << size);
99c475ab 3882 }
9ee6e8bb
PB
3883 }
3884 if (rm != 15) {
b26eefb6
PB
3885 TCGv base;
3886
3887 base = load_reg(s, rn);
9ee6e8bb 3888 if (rm == 13) {
b26eefb6 3889 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3890 } else {
b26eefb6
PB
3891 TCGv index;
3892 index = load_reg(s, rm);
3893 tcg_gen_add_i32(base, base, index);
3894 dead_tmp(index);
9ee6e8bb 3895 }
b26eefb6 3896 store_reg(s, rn, base);
9ee6e8bb
PB
3897 }
3898 return 0;
3899}
3b46e624 3900
8f8e3aa4
PB
/* Bitwise select.  dest = c ? t : f.  Clobbers T and F.  */
static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
{
    /* t &= c: keep the "true" bits selected by the control mask.  */
    tcg_gen_and_i32(t, t, c);
    /* f &= ~c: keep the "false" bits where the mask is clear.  */
    tcg_gen_bic_i32(f, f, c);
    /* Merge the two halves into the destination.  */
    tcg_gen_or_i32(dest, t, f);
}
3908
ad69471c
PB
3909static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
3910{
3911 switch (size) {
3912 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3913 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3914 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3915 default: abort();
3916 }
3917}
3918
3919static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
3920{
3921 switch (size) {
3922 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3923 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3924 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3925 default: abort();
3926 }
3927}
3928
3929static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
3930{
3931 switch (size) {
3932 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3933 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3934 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3935 default: abort();
3936 }
3937}
3938
3939static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3940 int q, int u)
3941{
3942 if (q) {
3943 if (u) {
3944 switch (size) {
3945 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3946 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3947 default: abort();
3948 }
3949 } else {
3950 switch (size) {
3951 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3952 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3953 default: abort();
3954 }
3955 }
3956 } else {
3957 if (u) {
3958 switch (size) {
3959 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3960 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3961 default: abort();
3962 }
3963 } else {
3964 switch (size) {
3965 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3966 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3967 default: abort();
3968 }
3969 }
3970 }
3971}
3972
3973static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
3974{
3975 if (u) {
3976 switch (size) {
3977 case 0: gen_helper_neon_widen_u8(dest, src); break;
3978 case 1: gen_helper_neon_widen_u16(dest, src); break;
3979 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3980 default: abort();
3981 }
3982 } else {
3983 switch (size) {
3984 case 0: gen_helper_neon_widen_s8(dest, src); break;
3985 case 1: gen_helper_neon_widen_s16(dest, src); break;
3986 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3987 default: abort();
3988 }
3989 }
3990 dead_tmp(src);
3991}
3992
3993static inline void gen_neon_addl(int size)
3994{
3995 switch (size) {
3996 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3997 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3998 case 2: tcg_gen_add_i64(CPU_V001); break;
3999 default: abort();
4000 }
4001}
4002
4003static inline void gen_neon_subl(int size)
4004{
4005 switch (size) {
4006 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4007 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4008 case 2: tcg_gen_sub_i64(CPU_V001); break;
4009 default: abort();
4010 }
4011}
4012
4013static inline void gen_neon_negl(TCGv var, int size)
4014{
4015 switch (size) {
4016 case 0: gen_helper_neon_negl_u16(var, var); break;
4017 case 1: gen_helper_neon_negl_u32(var, var); break;
4018 case 2: gen_helper_neon_negl_u64(var, var); break;
4019 default: abort();
4020 }
4021}
4022
4023static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
4024{
4025 switch (size) {
4026 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4027 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4028 default: abort();
4029 }
4030}
4031
4032static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
4033{
4034 TCGv tmp;
4035
4036 switch ((size << 1) | u) {
4037 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4038 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4039 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4040 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4041 case 4:
4042 tmp = gen_muls_i64_i32(a, b);
4043 tcg_gen_mov_i64(dest, tmp);
4044 break;
4045 case 5:
4046 tmp = gen_mulu_i64_i32(a, b);
4047 tcg_gen_mov_i64(dest, tmp);
4048 break;
4049 default: abort();
4050 }
4051 if (size < 2) {
4052 dead_tmp(b);
4053 dead_tmp(a);
4054 }
4055}
4056
9ee6e8bb
PB
4057/* Translate a NEON data processing instruction. Return nonzero if the
4058 instruction is invalid.
ad69471c
PB
4059 We process data in a mixture of 32-bit and 64-bit chunks.
4060 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4061
9ee6e8bb
PB
4062static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4063{
4064 int op;
4065 int q;
4066 int rd, rn, rm;
4067 int size;
4068 int shift;
4069 int pass;
4070 int count;
4071 int pairwise;
4072 int u;
4073 int n;
4074 uint32_t imm;
8f8e3aa4
PB
4075 TCGv tmp;
4076 TCGv tmp2;
4077 TCGv tmp3;
9ee6e8bb
PB
4078
4079 if (!vfp_enabled(env))
4080 return 1;
4081 q = (insn & (1 << 6)) != 0;
4082 u = (insn >> 24) & 1;
4083 VFP_DREG_D(rd, insn);
4084 VFP_DREG_N(rn, insn);
4085 VFP_DREG_M(rm, insn);
4086 size = (insn >> 20) & 3;
4087 if ((insn & (1 << 23)) == 0) {
4088 /* Three register same length. */
4089 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4090 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4091 || op == 10 || op == 11 || op == 16)) {
4092 /* 64-bit element instructions. */
9ee6e8bb 4093 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4094 neon_load_reg64(cpu_V0, rn + pass);
4095 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4096 switch (op) {
4097 case 1: /* VQADD */
4098 if (u) {
ad69471c 4099 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4100 } else {
ad69471c 4101 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4102 }
9ee6e8bb
PB
4103 break;
4104 case 5: /* VQSUB */
4105 if (u) {
ad69471c
PB
4106 gen_helper_neon_sub_saturate_u64(CPU_V001);
4107 } else {
4108 gen_helper_neon_sub_saturate_s64(CPU_V001);
4109 }
4110 break;
4111 case 8: /* VSHL */
4112 if (u) {
4113 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4114 } else {
4115 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4116 }
4117 break;
4118 case 9: /* VQSHL */
4119 if (u) {
4120 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4121 cpu_V0, cpu_V0);
4122 } else {
4123 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4124 cpu_V1, cpu_V0);
4125 }
4126 break;
4127 case 10: /* VRSHL */
4128 if (u) {
4129 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4130 } else {
ad69471c
PB
4131 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4132 }
4133 break;
4134 case 11: /* VQRSHL */
4135 if (u) {
4136 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4137 cpu_V1, cpu_V0);
4138 } else {
4139 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4140 cpu_V1, cpu_V0);
1e8d4eec 4141 }
9ee6e8bb
PB
4142 break;
4143 case 16:
4144 if (u) {
ad69471c 4145 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4146 } else {
ad69471c 4147 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4148 }
4149 break;
4150 default:
4151 abort();
2c0262af 4152 }
ad69471c 4153 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4154 }
9ee6e8bb 4155 return 0;
2c0262af 4156 }
9ee6e8bb
PB
4157 switch (op) {
4158 case 8: /* VSHL */
4159 case 9: /* VQSHL */
4160 case 10: /* VRSHL */
ad69471c 4161 case 11: /* VQRSHL */
9ee6e8bb 4162 {
ad69471c
PB
4163 int rtmp;
4164 /* Shift instruction operands are reversed. */
4165 rtmp = rn;
9ee6e8bb 4166 rn = rm;
ad69471c 4167 rm = rtmp;
9ee6e8bb
PB
4168 pairwise = 0;
4169 }
2c0262af 4170 break;
9ee6e8bb
PB
4171 case 20: /* VPMAX */
4172 case 21: /* VPMIN */
4173 case 23: /* VPADD */
4174 pairwise = 1;
2c0262af 4175 break;
9ee6e8bb
PB
4176 case 26: /* VPADD (float) */
4177 pairwise = (u && size < 2);
2c0262af 4178 break;
9ee6e8bb
PB
4179 case 30: /* VPMIN/VPMAX (float) */
4180 pairwise = u;
2c0262af 4181 break;
9ee6e8bb
PB
4182 default:
4183 pairwise = 0;
2c0262af 4184 break;
9ee6e8bb
PB
4185 }
4186 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4187
4188 if (pairwise) {
4189 /* Pairwise. */
4190 if (q)
4191 n = (pass & 1) * 2;
2c0262af 4192 else
9ee6e8bb
PB
4193 n = 0;
4194 if (pass < q + 1) {
4195 NEON_GET_REG(T0, rn, n);
4196 NEON_GET_REG(T1, rn, n + 1);
4197 } else {
4198 NEON_GET_REG(T0, rm, n);
4199 NEON_GET_REG(T1, rm, n + 1);
4200 }
4201 } else {
4202 /* Elementwise. */
4203 NEON_GET_REG(T0, rn, pass);
4204 NEON_GET_REG(T1, rm, pass);
4205 }
4206 switch (op) {
4207 case 0: /* VHADD */
4208 GEN_NEON_INTEGER_OP(hadd);
4209 break;
4210 case 1: /* VQADD */
ad69471c 4211 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4212 break;
9ee6e8bb
PB
4213 case 2: /* VRHADD */
4214 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4215 break;
9ee6e8bb
PB
4216 case 3: /* Logic ops. */
4217 switch ((u << 2) | size) {
4218 case 0: /* VAND */
2c0262af 4219 gen_op_andl_T0_T1();
9ee6e8bb
PB
4220 break;
4221 case 1: /* BIC */
4222 gen_op_bicl_T0_T1();
4223 break;
4224 case 2: /* VORR */
4225 gen_op_orl_T0_T1();
4226 break;
4227 case 3: /* VORN */
4228 gen_op_notl_T1();
4229 gen_op_orl_T0_T1();
4230 break;
4231 case 4: /* VEOR */
4232 gen_op_xorl_T0_T1();
4233 break;
4234 case 5: /* VBSL */
8f8e3aa4
PB
4235 tmp = neon_load_reg(rd, pass);
4236 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4237 dead_tmp(tmp);
9ee6e8bb
PB
4238 break;
4239 case 6: /* VBIT */
8f8e3aa4
PB
4240 tmp = neon_load_reg(rd, pass);
4241 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4242 dead_tmp(tmp);
9ee6e8bb
PB
4243 break;
4244 case 7: /* VBIF */
8f8e3aa4
PB
4245 tmp = neon_load_reg(rd, pass);
4246 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4247 dead_tmp(tmp);
9ee6e8bb 4248 break;
2c0262af
FB
4249 }
4250 break;
9ee6e8bb
PB
4251 case 4: /* VHSUB */
4252 GEN_NEON_INTEGER_OP(hsub);
4253 break;
4254 case 5: /* VQSUB */
ad69471c 4255 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4256 break;
9ee6e8bb
PB
4257 case 6: /* VCGT */
4258 GEN_NEON_INTEGER_OP(cgt);
4259 break;
4260 case 7: /* VCGE */
4261 GEN_NEON_INTEGER_OP(cge);
4262 break;
4263 case 8: /* VSHL */
ad69471c 4264 GEN_NEON_INTEGER_OP(shl);
2c0262af 4265 break;
9ee6e8bb 4266 case 9: /* VQSHL */
ad69471c 4267 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4268 break;
9ee6e8bb 4269 case 10: /* VRSHL */
ad69471c 4270 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4271 break;
9ee6e8bb 4272 case 11: /* VQRSHL */
ad69471c 4273 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4274 break;
4275 case 12: /* VMAX */
4276 GEN_NEON_INTEGER_OP(max);
4277 break;
4278 case 13: /* VMIN */
4279 GEN_NEON_INTEGER_OP(min);
4280 break;
4281 case 14: /* VABD */
4282 GEN_NEON_INTEGER_OP(abd);
4283 break;
4284 case 15: /* VABA */
4285 GEN_NEON_INTEGER_OP(abd);
4286 NEON_GET_REG(T1, rd, pass);
4287 gen_neon_add(size);
4288 break;
4289 case 16:
4290 if (!u) { /* VADD */
4291 if (gen_neon_add(size))
4292 return 1;
4293 } else { /* VSUB */
4294 switch (size) {
ad69471c
PB
4295 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4296 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4297 case 2: gen_op_subl_T0_T1(); break;
4298 default: return 1;
4299 }
4300 }
4301 break;
4302 case 17:
4303 if (!u) { /* VTST */
4304 switch (size) {
ad69471c
PB
4305 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4306 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4307 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4308 default: return 1;
4309 }
4310 } else { /* VCEQ */
4311 switch (size) {
ad69471c
PB
4312 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4313 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4314 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4315 default: return 1;
4316 }
4317 }
4318 break;
4319 case 18: /* Multiply. */
4320 switch (size) {
ad69471c
PB
4321 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4322 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4323 case 2: gen_op_mul_T0_T1(); break;
4324 default: return 1;
4325 }
4326 NEON_GET_REG(T1, rd, pass);
4327 if (u) { /* VMLS */
ad69471c 4328 gen_neon_rsb(size);
9ee6e8bb
PB
4329 } else { /* VMLA */
4330 gen_neon_add(size);
4331 }
4332 break;
4333 case 19: /* VMUL */
4334 if (u) { /* polynomial */
ad69471c 4335 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4336 } else { /* Integer */
4337 switch (size) {
ad69471c
PB
4338 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4339 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4340 case 2: gen_op_mul_T0_T1(); break;
4341 default: return 1;
4342 }
4343 }
4344 break;
4345 case 20: /* VPMAX */
4346 GEN_NEON_INTEGER_OP(pmax);
4347 break;
4348 case 21: /* VPMIN */
4349 GEN_NEON_INTEGER_OP(pmin);
4350 break;
4351 case 22: /* Hultiply high. */
4352 if (!u) { /* VQDMULH */
4353 switch (size) {
ad69471c
PB
4354 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4355 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4356 default: return 1;
4357 }
4358 } else { /* VQRDHMUL */
4359 switch (size) {
ad69471c
PB
4360 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4361 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4362 default: return 1;
4363 }
4364 }
4365 break;
4366 case 23: /* VPADD */
4367 if (u)
4368 return 1;
4369 switch (size) {
ad69471c
PB
4370 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4371 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4372 case 2: gen_op_addl_T0_T1(); break;
4373 default: return 1;
4374 }
4375 break;
4376 case 26: /* Floating point arithnetic. */
4377 switch ((u << 2) | size) {
4378 case 0: /* VADD */
ad69471c 4379 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4380 break;
4381 case 2: /* VSUB */
ad69471c 4382 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4383 break;
4384 case 4: /* VPADD */
ad69471c 4385 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4386 break;
4387 case 6: /* VABD */
ad69471c 4388 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4389 break;
4390 default:
4391 return 1;
4392 }
4393 break;
4394 case 27: /* Float multiply. */
ad69471c 4395 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4396 if (!u) {
4397 NEON_GET_REG(T1, rd, pass);
4398 if (size == 0) {
ad69471c 4399 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4400 } else {
ad69471c 4401 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4402 }
4403 }
4404 break;
4405 case 28: /* Float compare. */
4406 if (!u) {
ad69471c 4407 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4408 } else {
9ee6e8bb 4409 if (size == 0)
ad69471c 4410 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4411 else
ad69471c 4412 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4413 }
2c0262af 4414 break;
9ee6e8bb
PB
4415 case 29: /* Float compare absolute. */
4416 if (!u)
4417 return 1;
4418 if (size == 0)
ad69471c 4419 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4420 else
ad69471c 4421 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4422 break;
9ee6e8bb
PB
4423 case 30: /* Float min/max. */
4424 if (size == 0)
ad69471c 4425 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4426 else
ad69471c 4427 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4428 break;
4429 case 31:
4430 if (size == 0)
4373f3ce 4431 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4432 else
4373f3ce 4433 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4434 break;
9ee6e8bb
PB
4435 default:
4436 abort();
2c0262af 4437 }
9ee6e8bb
PB
4438 /* Save the result. For elementwise operations we can put it
4439 straight into the destination register. For pairwise operations
4440 we have to be careful to avoid clobbering the source operands. */
4441 if (pairwise && rd == rm) {
4442 gen_neon_movl_scratch_T0(pass);
4443 } else {
4444 NEON_SET_REG(T0, rd, pass);
4445 }
4446
4447 } /* for pass */
4448 if (pairwise && rd == rm) {
4449 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4450 gen_neon_movl_T0_scratch(pass);
4451 NEON_SET_REG(T0, rd, pass);
4452 }
4453 }
ad69471c 4454 /* End of 3 register same size operations. */
9ee6e8bb
PB
4455 } else if (insn & (1 << 4)) {
4456 if ((insn & 0x00380080) != 0) {
4457 /* Two registers and shift. */
4458 op = (insn >> 8) & 0xf;
4459 if (insn & (1 << 7)) {
4460 /* 64-bit shift. */
4461 size = 3;
4462 } else {
4463 size = 2;
4464 while ((insn & (1 << (size + 19))) == 0)
4465 size--;
4466 }
4467 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4468 /* To avoid excessive dumplication of ops we implement shift
4469 by immediate using the variable shift operations. */
4470 if (op < 8) {
4471 /* Shift by immediate:
4472 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4473 /* Right shifts are encoded as N - shift, where N is the
4474 element size in bits. */
4475 if (op <= 4)
4476 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4477 if (size == 3) {
4478 count = q + 1;
4479 } else {
4480 count = q ? 4: 2;
4481 }
4482 switch (size) {
4483 case 0:
4484 imm = (uint8_t) shift;
4485 imm |= imm << 8;
4486 imm |= imm << 16;
4487 break;
4488 case 1:
4489 imm = (uint16_t) shift;
4490 imm |= imm << 16;
4491 break;
4492 case 2:
4493 case 3:
4494 imm = shift;
4495 break;
4496 default:
4497 abort();
4498 }
4499
4500 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4501 if (size == 3) {
4502 neon_load_reg64(cpu_V0, rm + pass);
4503 tcg_gen_movi_i64(cpu_V1, imm);
4504 switch (op) {
4505 case 0: /* VSHR */
4506 case 1: /* VSRA */
4507 if (u)
4508 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4509 else
ad69471c 4510 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4511 break;
ad69471c
PB
4512 case 2: /* VRSHR */
4513 case 3: /* VRSRA */
4514 if (u)
4515 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4516 else
ad69471c 4517 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4518 break;
ad69471c
PB
4519 case 4: /* VSRI */
4520 if (!u)
4521 return 1;
4522 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4523 break;
4524 case 5: /* VSHL, VSLI */
4525 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4526 break;
4527 case 6: /* VQSHL */
4528 if (u)
4529 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4530 else
ad69471c
PB
4531 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4532 break;
4533 case 7: /* VQSHLU */
4534 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4535 break;
9ee6e8bb 4536 }
ad69471c
PB
4537 if (op == 1 || op == 3) {
4538 /* Accumulate. */
4539 neon_load_reg64(cpu_V0, rd + pass);
4540 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4541 } else if (op == 4 || (op == 5 && u)) {
4542 /* Insert */
4543 cpu_abort(env, "VS[LR]I.64 not implemented");
4544 }
4545 neon_store_reg64(cpu_V0, rd + pass);
4546 } else { /* size < 3 */
4547 /* Operands in T0 and T1. */
4548 gen_op_movl_T1_im(imm);
4549 NEON_GET_REG(T0, rm, pass);
4550 switch (op) {
4551 case 0: /* VSHR */
4552 case 1: /* VSRA */
4553 GEN_NEON_INTEGER_OP(shl);
4554 break;
4555 case 2: /* VRSHR */
4556 case 3: /* VRSRA */
4557 GEN_NEON_INTEGER_OP(rshl);
4558 break;
4559 case 4: /* VSRI */
4560 if (!u)
4561 return 1;
4562 GEN_NEON_INTEGER_OP(shl);
4563 break;
4564 case 5: /* VSHL, VSLI */
4565 switch (size) {
4566 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4567 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4568 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4569 default: return 1;
4570 }
4571 break;
4572 case 6: /* VQSHL */
4573 GEN_NEON_INTEGER_OP_ENV(qshl);
4574 break;
4575 case 7: /* VQSHLU */
4576 switch (size) {
4577 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4578 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4579 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4580 default: return 1;
4581 }
4582 break;
4583 }
4584
4585 if (op == 1 || op == 3) {
4586 /* Accumulate. */
4587 NEON_GET_REG(T1, rd, pass);
4588 gen_neon_add(size);
4589 } else if (op == 4 || (op == 5 && u)) {
4590 /* Insert */
4591 switch (size) {
4592 case 0:
4593 if (op == 4)
4594 imm = 0xff >> -shift;
4595 else
4596 imm = (uint8_t)(0xff << shift);
4597 imm |= imm << 8;
4598 imm |= imm << 16;
4599 break;
4600 case 1:
4601 if (op == 4)
4602 imm = 0xffff >> -shift;
4603 else
4604 imm = (uint16_t)(0xffff << shift);
4605 imm |= imm << 16;
4606 break;
4607 case 2:
4608 if (op == 4)
4609 imm = 0xffffffffu >> -shift;
4610 else
4611 imm = 0xffffffffu << shift;
4612 break;
4613 default:
4614 abort();
4615 }
4616 tmp = neon_load_reg(rd, pass);
4617 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4618 tcg_gen_andi_i32(tmp, tmp, ~imm);
4619 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4620 }
9ee6e8bb
PB
4621 NEON_SET_REG(T0, rd, pass);
4622 }
4623 } /* for pass */
4624 } else if (op < 10) {
ad69471c 4625 /* Shift by immediate and narrow:
9ee6e8bb
PB
4626 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4627 shift = shift - (1 << (size + 3));
4628 size++;
9ee6e8bb
PB
4629 switch (size) {
4630 case 1:
ad69471c 4631 imm = (uint16_t)shift;
9ee6e8bb 4632 imm |= imm << 16;
ad69471c 4633 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
4634 break;
4635 case 2:
ad69471c
PB
4636 imm = (uint32_t)shift;
4637 tmp2 = tcg_const_i32(imm);
9ee6e8bb 4638 case 3:
ad69471c 4639 tmp2 = tcg_const_i64(shift);
9ee6e8bb
PB
4640 break;
4641 default:
4642 abort();
4643 }
4644
ad69471c
PB
4645 for (pass = 0; pass < 2; pass++) {
4646 if (size == 3) {
4647 neon_load_reg64(cpu_V0, rm + pass);
4648 if (q) {
4649 if (u)
4650 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
4651 else
4652 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
4653 } else {
4654 if (u)
4655 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
4656 else
4657 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
4658 }
2c0262af 4659 } else {
ad69471c
PB
4660 tmp = neon_load_reg(rm + pass, 0);
4661 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4662 tcg_gen_extu_i32_i64(cpu_V0, tmp);
4663 dead_tmp(tmp);
4664 tmp = neon_load_reg(rm + pass, 1);
4665 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4666 tcg_gen_extu_i32_i64(cpu_V1, tmp);
4667 dead_tmp(tmp);
4668 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
4669 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4670 }
ad69471c
PB
4671 tmp = new_tmp();
4672 if (op == 8 && !u) {
4673 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4674 } else {
ad69471c
PB
4675 if (op == 8)
4676 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4677 else
ad69471c
PB
4678 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4679 }
4680 if (pass == 0) {
4681 tmp2 = tmp;
4682 } else {
4683 neon_store_reg(rd, 0, tmp2);
4684 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4685 }
4686 } /* for pass */
4687 } else if (op == 10) {
4688 /* VSHLL */
ad69471c 4689 if (q || size == 3)
9ee6e8bb 4690 return 1;
ad69471c
PB
4691 tmp = neon_load_reg(rm, 0);
4692 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4693 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4694 if (pass == 1)
4695 tmp = tmp2;
4696
4697 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4698
9ee6e8bb
PB
4699 if (shift != 0) {
4700 /* The shift is less than the width of the source
ad69471c
PB
4701 type, so we can just shift the whole register. */
4702 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4703 if (size < 2 || !u) {
4704 uint64_t imm64;
4705 if (size == 0) {
4706 imm = (0xffu >> (8 - shift));
4707 imm |= imm << 16;
4708 } else {
4709 imm = 0xffff >> (16 - shift);
9ee6e8bb 4710 }
ad69471c
PB
4711 imm64 = imm | (((uint64_t)imm) << 32);
4712 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4713 }
4714 }
ad69471c 4715 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4716 }
4717 } else if (op == 15 || op == 16) {
4718 /* VCVT fixed-point. */
4719 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4720 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4721 if (op & 1) {
4722 if (u)
4373f3ce 4723 gen_vfp_ulto(0, shift);
9ee6e8bb 4724 else
4373f3ce 4725 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4726 } else {
4727 if (u)
4373f3ce 4728 gen_vfp_toul(0, shift);
9ee6e8bb 4729 else
4373f3ce 4730 gen_vfp_tosl(0, shift);
2c0262af 4731 }
4373f3ce 4732 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4733 }
4734 } else {
9ee6e8bb
PB
4735 return 1;
4736 }
4737 } else { /* (insn & 0x00380080) == 0 */
4738 int invert;
4739
4740 op = (insn >> 8) & 0xf;
4741 /* One register and immediate. */
4742 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4743 invert = (insn & (1 << 5)) != 0;
4744 switch (op) {
4745 case 0: case 1:
4746 /* no-op */
4747 break;
4748 case 2: case 3:
4749 imm <<= 8;
4750 break;
4751 case 4: case 5:
4752 imm <<= 16;
4753 break;
4754 case 6: case 7:
4755 imm <<= 24;
4756 break;
4757 case 8: case 9:
4758 imm |= imm << 16;
4759 break;
4760 case 10: case 11:
4761 imm = (imm << 8) | (imm << 24);
4762 break;
4763 case 12:
4764 imm = (imm < 8) | 0xff;
4765 break;
4766 case 13:
4767 imm = (imm << 16) | 0xffff;
4768 break;
4769 case 14:
4770 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4771 if (invert)
4772 imm = ~imm;
4773 break;
4774 case 15:
4775 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4776 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4777 break;
4778 }
4779 if (invert)
4780 imm = ~imm;
4781
4782 if (op != 14 || !invert)
4783 gen_op_movl_T1_im(imm);
4784
4785 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4786 if (op & 1 && op < 12) {
ad69471c 4787 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4788 if (invert) {
4789 /* The immediate value has already been inverted, so
4790 BIC becomes AND. */
ad69471c 4791 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4792 } else {
ad69471c 4793 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4794 }
9ee6e8bb 4795 } else {
ad69471c
PB
4796 /* VMOV, VMVN. */
4797 tmp = new_tmp();
9ee6e8bb 4798 if (op == 14 && invert) {
ad69471c
PB
4799 uint32_t val;
4800 val = 0;
9ee6e8bb
PB
4801 for (n = 0; n < 4; n++) {
4802 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4803 val |= 0xff << (n * 8);
9ee6e8bb 4804 }
ad69471c
PB
4805 tcg_gen_movi_i32(tmp, val);
4806 } else {
4807 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4808 }
9ee6e8bb 4809 }
ad69471c 4810 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4811 }
4812 }
4813 } else { /* (insn & 0x00800010 == 0x00800010) */
4814 if (size != 3) {
4815 op = (insn >> 8) & 0xf;
4816 if ((insn & (1 << 6)) == 0) {
4817 /* Three registers of different lengths. */
4818 int src1_wide;
4819 int src2_wide;
4820 int prewiden;
4821 /* prewiden, src1_wide, src2_wide */
4822 static const int neon_3reg_wide[16][3] = {
4823 {1, 0, 0}, /* VADDL */
4824 {1, 1, 0}, /* VADDW */
4825 {1, 0, 0}, /* VSUBL */
4826 {1, 1, 0}, /* VSUBW */
4827 {0, 1, 1}, /* VADDHN */
4828 {0, 0, 0}, /* VABAL */
4829 {0, 1, 1}, /* VSUBHN */
4830 {0, 0, 0}, /* VABDL */
4831 {0, 0, 0}, /* VMLAL */
4832 {0, 0, 0}, /* VQDMLAL */
4833 {0, 0, 0}, /* VMLSL */
4834 {0, 0, 0}, /* VQDMLSL */
4835 {0, 0, 0}, /* Integer VMULL */
4836 {0, 0, 0}, /* VQDMULL */
4837 {0, 0, 0} /* Polynomial VMULL */
4838 };
4839
4840 prewiden = neon_3reg_wide[op][0];
4841 src1_wide = neon_3reg_wide[op][1];
4842 src2_wide = neon_3reg_wide[op][2];
4843
ad69471c
PB
4844 if (size == 0 && (op == 9 || op == 11 || op == 13))
4845 return 1;
4846
9ee6e8bb
PB
4847 /* Avoid overlapping operands. Wide source operands are
4848 always aligned so will never overlap with wide
4849 destinations in problematic ways. */
8f8e3aa4
PB
4850 if (rd == rm && !src2_wide) {
4851 NEON_GET_REG(T0, rm, 1);
4852 gen_neon_movl_scratch_T0(2);
4853 } else if (rd == rn && !src1_wide) {
4854 NEON_GET_REG(T0, rn, 1);
4855 gen_neon_movl_scratch_T0(2);
9ee6e8bb
PB
4856 }
4857 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4858 if (src1_wide) {
4859 neon_load_reg64(cpu_V0, rn + pass);
9ee6e8bb 4860 } else {
ad69471c
PB
4861 if (pass == 1 && rd == rn) {
4862 gen_neon_movl_T0_scratch(2);
4863 tmp = new_tmp();
4864 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4865 } else {
ad69471c
PB
4866 tmp = neon_load_reg(rn, pass);
4867 }
4868 if (prewiden) {
4869 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4870 }
4871 }
ad69471c
PB
4872 if (src2_wide) {
4873 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4874 } else {
ad69471c 4875 if (pass == 1 && rd == rm) {
8f8e3aa4 4876 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4877 tmp2 = new_tmp();
4878 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4879 } else {
ad69471c
PB
4880 tmp2 = neon_load_reg(rm, pass);
4881 }
4882 if (prewiden) {
4883 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4884 }
9ee6e8bb
PB
4885 }
4886 switch (op) {
4887 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4888 gen_neon_addl(size);
9ee6e8bb
PB
4889 break;
4890 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
ad69471c 4891 gen_neon_subl(size);
9ee6e8bb
PB
4892 break;
4893 case 5: case 7: /* VABAL, VABDL */
4894 switch ((size << 1) | u) {
ad69471c
PB
4895 case 0:
4896 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4897 break;
4898 case 1:
4899 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4900 break;
4901 case 2:
4902 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4903 break;
4904 case 3:
4905 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4906 break;
4907 case 4:
4908 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4909 break;
4910 case 5:
4911 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4912 break;
9ee6e8bb
PB
4913 default: abort();
4914 }
ad69471c
PB
4915 dead_tmp(tmp2);
4916 dead_tmp(tmp);
9ee6e8bb
PB
4917 break;
4918 case 8: case 9: case 10: case 11: case 12: case 13:
4919 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4920 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4921 break;
4922 case 14: /* Polynomial VMULL */
4923 cpu_abort(env, "Polynomial VMULL not implemented");
4924
4925 default: /* 15 is RESERVED. */
4926 return 1;
4927 }
4928 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4929 /* Accumulate. */
4930 if (op == 10 || op == 11) {
ad69471c 4931 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4932 }
4933
9ee6e8bb 4934 if (op != 13) {
ad69471c 4935 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4936 }
4937
4938 switch (op) {
4939 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4940 gen_neon_addl(size);
9ee6e8bb
PB
4941 break;
4942 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4943 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4944 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4945 break;
9ee6e8bb
PB
4946 /* Fall through. */
4947 case 13: /* VQDMULL */
ad69471c 4948 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4949 break;
4950 default:
4951 abort();
4952 }
ad69471c 4953 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4954 } else if (op == 4 || op == 6) {
4955 /* Narrowing operation. */
ad69471c 4956 tmp = new_tmp();
9ee6e8bb
PB
4957 if (u) {
4958 switch (size) {
ad69471c
PB
4959 case 0:
4960 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4961 break;
4962 case 1:
4963 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4964 break;
4965 case 2:
4966 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4967 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4968 break;
9ee6e8bb
PB
4969 default: abort();
4970 }
4971 } else {
4972 switch (size) {
ad69471c
PB
4973 case 0:
4974 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4975 break;
4976 case 1:
4977 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4978 break;
4979 case 2:
4980 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4981 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4982 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4983 break;
9ee6e8bb
PB
4984 default: abort();
4985 }
4986 }
ad69471c
PB
4987 if (pass == 0) {
4988 tmp3 = tmp;
4989 } else {
4990 neon_store_reg(rd, 0, tmp3);
4991 neon_store_reg(rd, 1, tmp);
4992 }
9ee6e8bb
PB
4993 } else {
4994 /* Write back the result. */
ad69471c 4995 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4996 }
4997 }
4998 } else {
4999 /* Two registers and a scalar. */
5000 switch (op) {
5001 case 0: /* Integer VMLA scalar */
5002 case 1: /* Float VMLA scalar */
5003 case 4: /* Integer VMLS scalar */
5004 case 5: /* Floating point VMLS scalar */
5005 case 8: /* Integer VMUL scalar */
5006 case 9: /* Floating point VMUL scalar */
5007 case 12: /* VQDMULH scalar */
5008 case 13: /* VQRDMULH scalar */
5009 gen_neon_get_scalar(size, rm);
8f8e3aa4 5010 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5011 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5012 if (pass != 0)
8f8e3aa4 5013 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5014 NEON_GET_REG(T1, rn, pass);
5015 if (op == 12) {
5016 if (size == 1) {
ad69471c 5017 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5018 } else {
ad69471c 5019 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5020 }
5021 } else if (op == 13) {
5022 if (size == 1) {
ad69471c 5023 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5024 } else {
ad69471c 5025 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5026 }
5027 } else if (op & 1) {
ad69471c 5028 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5029 } else {
5030 switch (size) {
ad69471c
PB
5031 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5032 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5033 case 2: gen_op_mul_T0_T1(); break;
5034 default: return 1;
5035 }
5036 }
5037 if (op < 8) {
5038 /* Accumulate. */
5039 NEON_GET_REG(T1, rd, pass);
5040 switch (op) {
5041 case 0:
5042 gen_neon_add(size);
5043 break;
5044 case 1:
ad69471c 5045 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5046 break;
5047 case 4:
ad69471c 5048 gen_neon_rsb(size);
9ee6e8bb
PB
5049 break;
5050 case 5:
ad69471c 5051 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5052 break;
5053 default:
5054 abort();
5055 }
5056 }
5057 NEON_SET_REG(T0, rd, pass);
5058 }
5059 break;
5060 case 2: /* VMLAL sclar */
5061 case 3: /* VQDMLAL scalar */
5062 case 6: /* VMLSL scalar */
5063 case 7: /* VQDMLSL scalar */
5064 case 10: /* VMULL scalar */
5065 case 11: /* VQDMULL scalar */
ad69471c
PB
5066 if (size == 0 && (op == 3 || op == 7 || op == 11))
5067 return 1;
5068
9ee6e8bb 5069 gen_neon_get_scalar(size, rm);
ad69471c
PB
5070 NEON_GET_REG(T1, rn, 1);
5071
9ee6e8bb 5072 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5073 if (pass == 0) {
5074 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5075 } else {
ad69471c
PB
5076 tmp = new_tmp();
5077 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5078 }
ad69471c
PB
5079 tmp2 = new_tmp();
5080 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5081 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5082 if (op == 6 || op == 7) {
ad69471c
PB
5083 gen_neon_negl(cpu_V0, size);
5084 }
5085 if (op != 11) {
5086 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5087 }
9ee6e8bb
PB
5088 switch (op) {
5089 case 2: case 6:
ad69471c 5090 gen_neon_addl(size);
9ee6e8bb
PB
5091 break;
5092 case 3: case 7:
ad69471c
PB
5093 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5094 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5095 break;
5096 case 10:
5097 /* no-op */
5098 break;
5099 case 11:
ad69471c 5100 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5101 break;
5102 default:
5103 abort();
5104 }
ad69471c 5105 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5106 }
5107 break;
5108 default: /* 14 and 15 are RESERVED */
5109 return 1;
5110 }
5111 }
5112 } else { /* size == 3 */
5113 if (!u) {
5114 /* Extract. */
9ee6e8bb 5115 imm = (insn >> 8) & 0xf;
ad69471c
PB
5116 count = q + 1;
5117
5118 if (imm > 7 && !q)
5119 return 1;
5120
5121 if (imm == 0) {
5122 neon_load_reg64(cpu_V0, rn);
5123 if (q) {
5124 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5125 }
ad69471c
PB
5126 } else if (imm == 8) {
5127 neon_load_reg64(cpu_V0, rn + 1);
5128 if (q) {
5129 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5130 }
ad69471c
PB
5131 } else if (q) {
5132 tmp = tcg_temp_new(TCG_TYPE_I64);
5133 if (imm < 8) {
5134 neon_load_reg64(cpu_V0, rn);
5135 neon_load_reg64(tmp, rn + 1);
5136 } else {
5137 neon_load_reg64(cpu_V0, rn + 1);
5138 neon_load_reg64(tmp, rm);
5139 }
5140 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5141 tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
5142 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5143 if (imm < 8) {
5144 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5145 } else {
ad69471c
PB
5146 neon_load_reg64(cpu_V1, rm + 1);
5147 imm -= 8;
9ee6e8bb 5148 }
ad69471c
PB
5149 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5150 tcg_gen_shri_i64(tmp, tmp, imm * 8);
5151 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
5152 } else {
5153 neon_load_reg64(cpu_V0, rn);
5154 tcg_gen_shri_i32(cpu_V0, cpu_V0, imm * 8);
5155 neon_load_reg64(cpu_V1, rm);
5156 tcg_gen_shli_i32(cpu_V1, cpu_V1, 64 - (imm * 8));
5157 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5158 }
5159 neon_store_reg64(cpu_V0, rd);
5160 if (q) {
5161 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5162 }
5163 } else if ((insn & (1 << 11)) == 0) {
5164 /* Two register misc. */
5165 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5166 size = (insn >> 18) & 3;
5167 switch (op) {
5168 case 0: /* VREV64 */
5169 if (size == 3)
5170 return 1;
5171 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5172 NEON_GET_REG(T0, rm, pass * 2);
5173 NEON_GET_REG(T1, rm, pass * 2 + 1);
5174 switch (size) {
b0109805 5175 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5176 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5177 case 2: /* no-op */ break;
5178 default: abort();
5179 }
5180 NEON_SET_REG(T0, rd, pass * 2 + 1);
5181 if (size == 2) {
5182 NEON_SET_REG(T1, rd, pass * 2);
5183 } else {
5184 gen_op_movl_T0_T1();
5185 switch (size) {
b0109805 5186 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5187 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5188 default: abort();
5189 }
5190 NEON_SET_REG(T0, rd, pass * 2);
5191 }
5192 }
5193 break;
5194 case 4: case 5: /* VPADDL */
5195 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5196 if (size == 3)
5197 return 1;
ad69471c
PB
5198 for (pass = 0; pass < q + 1; pass++) {
5199 tmp = neon_load_reg(rm, pass * 2);
5200 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5201 tmp = neon_load_reg(rm, pass * 2 + 1);
5202 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5203 switch (size) {
5204 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5205 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5206 case 2: tcg_gen_add_i64(CPU_V001); break;
5207 default: abort();
5208 }
9ee6e8bb
PB
5209 if (op >= 12) {
5210 /* Accumulate. */
ad69471c
PB
5211 neon_load_reg64(cpu_V1, rd + pass);
5212 gen_neon_addl(size);
9ee6e8bb 5213 }
ad69471c 5214 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5215 }
5216 break;
5217 case 33: /* VTRN */
5218 if (size == 2) {
5219 for (n = 0; n < (q ? 4 : 2); n += 2) {
5220 NEON_GET_REG(T0, rm, n);
5221 NEON_GET_REG(T1, rd, n + 1);
5222 NEON_SET_REG(T1, rm, n);
5223 NEON_SET_REG(T0, rd, n + 1);
5224 }
5225 } else {
5226 goto elementwise;
5227 }
5228 break;
5229 case 34: /* VUZP */
5230 /* Reg Before After
5231 Rd A3 A2 A1 A0 B2 B0 A2 A0
5232 Rm B3 B2 B1 B0 B3 B1 A3 A1
5233 */
5234 if (size == 3)
5235 return 1;
5236 gen_neon_unzip(rd, q, 0, size);
5237 gen_neon_unzip(rm, q, 4, size);
5238 if (q) {
5239 static int unzip_order_q[8] =
5240 {0, 2, 4, 6, 1, 3, 5, 7};
5241 for (n = 0; n < 8; n++) {
5242 int reg = (n < 4) ? rd : rm;
5243 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5244 NEON_SET_REG(T0, reg, n % 4);
5245 }
5246 } else {
5247 static int unzip_order[4] =
5248 {0, 4, 1, 5};
5249 for (n = 0; n < 4; n++) {
5250 int reg = (n < 2) ? rd : rm;
5251 gen_neon_movl_T0_scratch(unzip_order[n]);
5252 NEON_SET_REG(T0, reg, n % 2);
5253 }
5254 }
5255 break;
5256 case 35: /* VZIP */
5257 /* Reg Before After
5258 Rd A3 A2 A1 A0 B1 A1 B0 A0
5259 Rm B3 B2 B1 B0 B3 A3 B2 A2
5260 */
5261 if (size == 3)
5262 return 1;
5263 count = (q ? 4 : 2);
5264 for (n = 0; n < count; n++) {
5265 NEON_GET_REG(T0, rd, n);
5266 NEON_GET_REG(T1, rd, n);
5267 switch (size) {
ad69471c
PB
5268 case 0: gen_helper_neon_zip_u8(); break;
5269 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5270 case 2: /* no-op */; break;
5271 default: abort();
5272 }
5273 gen_neon_movl_scratch_T0(n * 2);
5274 gen_neon_movl_scratch_T1(n * 2 + 1);
5275 }
5276 for (n = 0; n < count * 2; n++) {
5277 int reg = (n < count) ? rd : rm;
5278 gen_neon_movl_T0_scratch(n);
5279 NEON_SET_REG(T0, reg, n % count);
5280 }
5281 break;
5282 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5283 if (size == 3)
5284 return 1;
9ee6e8bb 5285 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5286 neon_load_reg64(cpu_V0, rm + pass);
5287 tmp = new_tmp();
9ee6e8bb 5288 if (op == 36 && q == 0) {
ad69471c 5289 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5290 } else if (q) {
ad69471c 5291 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5292 } else {
ad69471c
PB
5293 gen_neon_narrow_sats(size, tmp, cpu_V0);
5294 }
5295 if (pass == 0) {
5296 tmp2 = tmp;
5297 } else {
5298 neon_store_reg(rd, 0, tmp2);
5299 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5300 }
9ee6e8bb
PB
5301 }
5302 break;
5303 case 38: /* VSHLL */
ad69471c 5304 if (q || size == 3)
9ee6e8bb 5305 return 1;
ad69471c
PB
5306 tmp = neon_load_reg(rm, 0);
5307 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5308 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5309 if (pass == 1)
5310 tmp = tmp2;
5311 gen_neon_widen(cpu_V0, tmp, size, 1);
5312 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5313 }
5314 break;
5315 default:
5316 elementwise:
5317 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5318 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5319 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5320 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5321 } else {
5322 NEON_GET_REG(T0, rm, pass);
5323 }
5324 switch (op) {
5325 case 1: /* VREV32 */
5326 switch (size) {
b0109805 5327 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5328 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5329 default: return 1;
5330 }
5331 break;
5332 case 2: /* VREV16 */
5333 if (size != 0)
5334 return 1;
3670669c 5335 gen_rev16(cpu_T[0]);
9ee6e8bb 5336 break;
9ee6e8bb
PB
5337 case 8: /* CLS */
5338 switch (size) {
ad69471c
PB
5339 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5340 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5341 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5342 default: return 1;
5343 }
5344 break;
5345 case 9: /* CLZ */
5346 switch (size) {
ad69471c
PB
5347 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5348 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5349 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5350 default: return 1;
5351 }
5352 break;
5353 case 10: /* CNT */
5354 if (size != 0)
5355 return 1;
ad69471c 5356 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5357 break;
5358 case 11: /* VNOT */
5359 if (size != 0)
5360 return 1;
5361 gen_op_notl_T0();
5362 break;
5363 case 14: /* VQABS */
5364 switch (size) {
ad69471c
PB
5365 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5366 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5367 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5368 default: return 1;
5369 }
5370 break;
5371 case 15: /* VQNEG */
5372 switch (size) {
ad69471c
PB
5373 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5374 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5375 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5376 default: return 1;
5377 }
5378 break;
5379 case 16: case 19: /* VCGT #0, VCLE #0 */
5380 gen_op_movl_T1_im(0);
5381 switch(size) {
ad69471c
PB
5382 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5383 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5384 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5385 default: return 1;
5386 }
5387 if (op == 19)
5388 gen_op_notl_T0();
5389 break;
5390 case 17: case 20: /* VCGE #0, VCLT #0 */
5391 gen_op_movl_T1_im(0);
5392 switch(size) {
ad69471c
PB
5393 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5394 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5395 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5396 default: return 1;
5397 }
5398 if (op == 20)
5399 gen_op_notl_T0();
5400 break;
5401 case 18: /* VCEQ #0 */
5402 gen_op_movl_T1_im(0);
5403 switch(size) {
ad69471c
PB
5404 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5405 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5406 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5407 default: return 1;
5408 }
5409 break;
5410 case 22: /* VABS */
5411 switch(size) {
ad69471c
PB
5412 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5413 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5414 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5415 default: return 1;
5416 }
5417 break;
5418 case 23: /* VNEG */
5419 gen_op_movl_T1_im(0);
ad69471c
PB
5420 if (size == 3)
5421 return 1;
5422 gen_neon_rsb(size);
9ee6e8bb
PB
5423 break;
5424 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5425 gen_op_movl_T1_im(0);
ad69471c 5426 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5427 if (op == 27)
5428 gen_op_notl_T0();
5429 break;
5430 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5431 gen_op_movl_T1_im(0);
ad69471c 5432 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5433 if (op == 28)
5434 gen_op_notl_T0();
5435 break;
5436 case 26: /* Float VCEQ #0 */
5437 gen_op_movl_T1_im(0);
ad69471c 5438 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5439 break;
5440 case 30: /* Float VABS */
4373f3ce 5441 gen_vfp_abs(0);
9ee6e8bb
PB
5442 break;
5443 case 31: /* Float VNEG */
4373f3ce 5444 gen_vfp_neg(0);
9ee6e8bb
PB
5445 break;
5446 case 32: /* VSWP */
5447 NEON_GET_REG(T1, rd, pass);
5448 NEON_SET_REG(T1, rm, pass);
5449 break;
5450 case 33: /* VTRN */
5451 NEON_GET_REG(T1, rd, pass);
5452 switch (size) {
ad69471c
PB
5453 case 0: gen_helper_neon_trn_u8(); break;
5454 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5455 case 2: abort();
5456 default: return 1;
5457 }
5458 NEON_SET_REG(T1, rm, pass);
5459 break;
5460 case 56: /* Integer VRECPE */
4373f3ce 5461 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5462 break;
5463 case 57: /* Integer VRSQRTE */
4373f3ce 5464 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5465 break;
5466 case 58: /* Float VRECPE */
4373f3ce 5467 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5468 break;
5469 case 59: /* Float VRSQRTE */
4373f3ce 5470 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5471 break;
5472 case 60: /* VCVT.F32.S32 */
4373f3ce 5473 gen_vfp_tosiz(0);
9ee6e8bb
PB
5474 break;
5475 case 61: /* VCVT.F32.U32 */
4373f3ce 5476 gen_vfp_touiz(0);
9ee6e8bb
PB
5477 break;
5478 case 62: /* VCVT.S32.F32 */
4373f3ce 5479 gen_vfp_sito(0);
9ee6e8bb
PB
5480 break;
5481 case 63: /* VCVT.U32.F32 */
4373f3ce 5482 gen_vfp_uito(0);
9ee6e8bb
PB
5483 break;
5484 default:
5485 /* Reserved: 21, 29, 39-56 */
5486 return 1;
5487 }
5488 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5489 tcg_gen_st_f32(cpu_F0s, cpu_env,
5490 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5491 } else {
5492 NEON_SET_REG(T0, rd, pass);
5493 }
5494 }
5495 break;
5496 }
5497 } else if ((insn & (1 << 10)) == 0) {
5498 /* VTBL, VTBX. */
5499 n = (insn >> 5) & 0x18;
9ee6e8bb 5500 if (insn & (1 << 6)) {
8f8e3aa4 5501 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5502 } else {
8f8e3aa4
PB
5503 tmp = new_tmp();
5504 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5505 }
8f8e3aa4
PB
5506 tmp2 = neon_load_reg(rm, 0);
5507 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5508 tcg_const_i32(n));
9ee6e8bb 5509 if (insn & (1 << 6)) {
8f8e3aa4 5510 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5511 } else {
8f8e3aa4
PB
5512 tmp = new_tmp();
5513 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5514 }
8f8e3aa4
PB
5515 tmp3 = neon_load_reg(rm, 1);
5516 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5517 tcg_const_i32(n));
5518 neon_store_reg(rd, 0, tmp2);
5519 neon_store_reg(rd, 1, tmp2);
9ee6e8bb
PB
5520 } else if ((insn & 0x380) == 0) {
5521 /* VDUP */
5522 if (insn & (1 << 19)) {
5523 NEON_SET_REG(T0, rm, 1);
5524 } else {
5525 NEON_SET_REG(T0, rm, 0);
5526 }
5527 if (insn & (1 << 16)) {
ad69471c 5528 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5529 } else if (insn & (1 << 17)) {
5530 if ((insn >> 18) & 1)
ad69471c 5531 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5532 else
ad69471c 5533 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5534 }
5535 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5536 NEON_SET_REG(T0, rd, pass);
5537 }
5538 } else {
5539 return 1;
5540 }
5541 }
5542 }
5543 return 0;
5544}
5545
5546static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5547{
5548 int cpnum;
5549
5550 cpnum = (insn >> 8) & 0xf;
5551 if (arm_feature(env, ARM_FEATURE_XSCALE)
5552 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5553 return 1;
5554
5555 switch (cpnum) {
5556 case 0:
5557 case 1:
5558 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5559 return disas_iwmmxt_insn(env, s, insn);
5560 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5561 return disas_dsp_insn(env, s, insn);
5562 }
5563 return 1;
5564 case 10:
5565 case 11:
5566 return disas_vfp_insn (env, s, insn);
5567 case 15:
5568 return disas_cp15_insn (env, s, insn);
5569 default:
5570 /* Unknown coprocessor. See if the board has hooked it. */
5571 return disas_cp_insn (env, s, insn);
5572 }
5573}
5574
5e3f878a
PB
5575
5576/* Store a 64-bit value to a register pair. Clobbers val. */
5577static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
5578{
5579 TCGv tmp;
5580 tmp = new_tmp();
5581 tcg_gen_trunc_i64_i32(tmp, val);
5582 store_reg(s, rlow, tmp);
5583 tmp = new_tmp();
5584 tcg_gen_shri_i64(val, val, 32);
5585 tcg_gen_trunc_i64_i32(tmp, val);
5586 store_reg(s, rhigh, tmp);
5587}
5588
5589/* load a 32-bit value from a register and perform a 64-bit accumulate. */
5590static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
5591{
5592 TCGv tmp;
5593 TCGv tmp2;
5594
5595 /* Load 64-bit value rd:rn. */
5596 tmp = tcg_temp_new(TCG_TYPE_I64);
5597 tmp2 = load_reg(s, rlow);
5598 tcg_gen_extu_i32_i64(tmp, tmp2);
5599 dead_tmp(tmp2);
5600 tcg_gen_add_i64(val, val, tmp);
5601}
5602
5603/* load and add a 64-bit value from a register pair. */
5604static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
5605{
5606 TCGv tmp;
5607 TCGv tmp2;
5608
5609 /* Load 64-bit value rd:rn. */
5610 tmp = tcg_temp_new(TCG_TYPE_I64);
5611 tmp2 = load_reg(s, rhigh);
5612 tcg_gen_extu_i32_i64(tmp, tmp2);
5613 dead_tmp(tmp2);
5614 tcg_gen_shli_i64(tmp, tmp, 32);
5615 tcg_gen_add_i64(val, val, tmp);
5616
5617 tmp2 = load_reg(s, rlow);
5618 tcg_gen_extu_i32_i64(tmp, tmp2);
5619 dead_tmp(tmp2);
5620 tcg_gen_add_i64(val, val, tmp);
5621}
5622
5623/* Set N and Z flags from a 64-bit value. */
5624static void gen_logicq_cc(TCGv val)
5625{
5626 TCGv tmp = new_tmp();
5627 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5628 gen_logic_CC(tmp);
5629 dead_tmp(tmp);
5e3f878a
PB
5630}
5631
9ee6e8bb
PB
5632static void disas_arm_insn(CPUState * env, DisasContext *s)
5633{
5634 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5635 TCGv tmp;
3670669c 5636 TCGv tmp2;
6ddbc6e4 5637 TCGv tmp3;
b0109805 5638 TCGv addr;
9ee6e8bb
PB
5639
5640 insn = ldl_code(s->pc);
5641 s->pc += 4;
5642
5643 /* M variants do not implement ARM mode. */
5644 if (IS_M(env))
5645 goto illegal_op;
5646 cond = insn >> 28;
5647 if (cond == 0xf){
5648 /* Unconditional instructions. */
5649 if (((insn >> 25) & 7) == 1) {
5650 /* NEON Data processing. */
5651 if (!arm_feature(env, ARM_FEATURE_NEON))
5652 goto illegal_op;
5653
5654 if (disas_neon_data_insn(env, s, insn))
5655 goto illegal_op;
5656 return;
5657 }
5658 if ((insn & 0x0f100000) == 0x04000000) {
5659 /* NEON load/store. */
5660 if (!arm_feature(env, ARM_FEATURE_NEON))
5661 goto illegal_op;
5662
5663 if (disas_neon_ls_insn(env, s, insn))
5664 goto illegal_op;
5665 return;
5666 }
5667 if ((insn & 0x0d70f000) == 0x0550f000)
5668 return; /* PLD */
5669 else if ((insn & 0x0ffffdff) == 0x01010000) {
5670 ARCH(6);
5671 /* setend */
5672 if (insn & (1 << 9)) {
5673 /* BE8 mode not implemented. */
5674 goto illegal_op;
5675 }
5676 return;
5677 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5678 switch ((insn >> 4) & 0xf) {
5679 case 1: /* clrex */
5680 ARCH(6K);
8f8e3aa4 5681 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5682 return;
5683 case 4: /* dsb */
5684 case 5: /* dmb */
5685 case 6: /* isb */
5686 ARCH(7);
5687 /* We don't emulate caches so these are a no-op. */
5688 return;
5689 default:
5690 goto illegal_op;
5691 }
5692 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5693 /* srs */
5694 uint32_t offset;
5695 if (IS_USER(s))
5696 goto illegal_op;
5697 ARCH(6);
5698 op1 = (insn & 0x1f);
5699 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5700 addr = load_reg(s, 13);
9ee6e8bb 5701 } else {
b0109805
PB
5702 addr = new_tmp();
5703 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5704 }
5705 i = (insn >> 23) & 3;
5706 switch (i) {
5707 case 0: offset = -4; break; /* DA */
5708 case 1: offset = -8; break; /* DB */
5709 case 2: offset = 0; break; /* IA */
5710 case 3: offset = 4; break; /* IB */
5711 default: abort();
5712 }
5713 if (offset)
b0109805
PB
5714 tcg_gen_addi_i32(addr, addr, offset);
5715 tmp = load_reg(s, 14);
5716 gen_st32(tmp, addr, 0);
5717 tmp = new_tmp();
5718 gen_helper_cpsr_read(tmp);
5719 tcg_gen_addi_i32(addr, addr, 4);
5720 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5721 if (insn & (1 << 21)) {
5722 /* Base writeback. */
5723 switch (i) {
5724 case 0: offset = -8; break;
5725 case 1: offset = -4; break;
5726 case 2: offset = 4; break;
5727 case 3: offset = 0; break;
5728 default: abort();
5729 }
5730 if (offset)
b0109805 5731 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5732 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5733 gen_movl_reg_T1(s, 13);
5734 } else {
b0109805 5735 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5736 }
b0109805
PB
5737 } else {
5738 dead_tmp(addr);
9ee6e8bb
PB
5739 }
5740 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5741 /* rfe */
5742 uint32_t offset;
5743 if (IS_USER(s))
5744 goto illegal_op;
5745 ARCH(6);
5746 rn = (insn >> 16) & 0xf;
b0109805 5747 addr = load_reg(s, rn);
9ee6e8bb
PB
5748 i = (insn >> 23) & 3;
5749 switch (i) {
b0109805
PB
5750 case 0: offset = -4; break; /* DA */
5751 case 1: offset = -8; break; /* DB */
5752 case 2: offset = 0; break; /* IA */
5753 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5754 default: abort();
5755 }
5756 if (offset)
b0109805
PB
5757 tcg_gen_addi_i32(addr, addr, offset);
5758 /* Load PC into tmp and CPSR into tmp2. */
5759 tmp = gen_ld32(addr, 0);
5760 tcg_gen_addi_i32(addr, addr, 4);
5761 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5762 if (insn & (1 << 21)) {
5763 /* Base writeback. */
5764 switch (i) {
b0109805
PB
5765 case 0: offset = -8; break;
5766 case 1: offset = -4; break;
5767 case 2: offset = 4; break;
5768 case 3: offset = 0; break;
9ee6e8bb
PB
5769 default: abort();
5770 }
5771 if (offset)
b0109805
PB
5772 tcg_gen_addi_i32(addr, addr, offset);
5773 store_reg(s, rn, addr);
5774 } else {
5775 dead_tmp(addr);
9ee6e8bb 5776 }
b0109805 5777 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5778 } else if ((insn & 0x0e000000) == 0x0a000000) {
5779 /* branch link and change to thumb (blx <offset>) */
5780 int32_t offset;
5781
5782 val = (uint32_t)s->pc;
d9ba4830
PB
5783 tmp = new_tmp();
5784 tcg_gen_movi_i32(tmp, val);
5785 store_reg(s, 14, tmp);
9ee6e8bb
PB
5786 /* Sign-extend the 24-bit offset */
5787 offset = (((int32_t)insn) << 8) >> 8;
5788 /* offset * 4 + bit24 * 2 + (thumb bit) */
5789 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5790 /* pipeline offset */
5791 val += 4;
d9ba4830 5792 gen_bx_im(s, val);
9ee6e8bb
PB
5793 return;
5794 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5795 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5796 /* iWMMXt register transfer. */
5797 if (env->cp15.c15_cpar & (1 << 1))
5798 if (!disas_iwmmxt_insn(env, s, insn))
5799 return;
5800 }
5801 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5802 /* Coprocessor double register transfer. */
5803 } else if ((insn & 0x0f000010) == 0x0e000010) {
5804 /* Additional coprocessor register transfer. */
5805 } else if ((insn & 0x0ff10010) == 0x01000000) {
5806 uint32_t mask;
5807 uint32_t val;
5808 /* cps (privileged) */
5809 if (IS_USER(s))
5810 return;
5811 mask = val = 0;
5812 if (insn & (1 << 19)) {
5813 if (insn & (1 << 8))
5814 mask |= CPSR_A;
5815 if (insn & (1 << 7))
5816 mask |= CPSR_I;
5817 if (insn & (1 << 6))
5818 mask |= CPSR_F;
5819 if (insn & (1 << 18))
5820 val |= mask;
5821 }
5822 if (insn & (1 << 14)) {
5823 mask |= CPSR_M;
5824 val |= (insn & 0x1f);
5825 }
5826 if (mask) {
5827 gen_op_movl_T0_im(val);
5828 gen_set_psr_T0(s, mask, 0);
5829 }
5830 return;
5831 }
5832 goto illegal_op;
5833 }
5834 if (cond != 0xe) {
5835 /* if not always execute, we generate a conditional jump to
5836 next instruction */
5837 s->condlabel = gen_new_label();
d9ba4830 5838 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5839 s->condjmp = 1;
5840 }
5841 if ((insn & 0x0f900000) == 0x03000000) {
5842 if ((insn & (1 << 21)) == 0) {
5843 ARCH(6T2);
5844 rd = (insn >> 12) & 0xf;
5845 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5846 if ((insn & (1 << 22)) == 0) {
5847 /* MOVW */
5e3f878a
PB
5848 tmp = new_tmp();
5849 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5850 } else {
5851 /* MOVT */
5e3f878a 5852 tmp = load_reg(s, rd);
86831435 5853 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5854 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5855 }
5e3f878a 5856 store_reg(s, rd, tmp);
9ee6e8bb
PB
5857 } else {
5858 if (((insn >> 12) & 0xf) != 0xf)
5859 goto illegal_op;
5860 if (((insn >> 16) & 0xf) == 0) {
5861 gen_nop_hint(s, insn & 0xff);
5862 } else {
5863 /* CPSR = immediate */
5864 val = insn & 0xff;
5865 shift = ((insn >> 8) & 0xf) * 2;
5866 if (shift)
5867 val = (val >> shift) | (val << (32 - shift));
5868 gen_op_movl_T0_im(val);
5869 i = ((insn & (1 << 22)) != 0);
5870 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5871 goto illegal_op;
5872 }
5873 }
5874 } else if ((insn & 0x0f900000) == 0x01000000
5875 && (insn & 0x00000090) != 0x00000090) {
5876 /* miscellaneous instructions */
5877 op1 = (insn >> 21) & 3;
5878 sh = (insn >> 4) & 0xf;
5879 rm = insn & 0xf;
5880 switch (sh) {
5881 case 0x0: /* move program status register */
5882 if (op1 & 1) {
5883 /* PSR = reg */
5884 gen_movl_T0_reg(s, rm);
5885 i = ((op1 & 2) != 0);
5886 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5887 goto illegal_op;
5888 } else {
5889 /* reg = PSR */
5890 rd = (insn >> 12) & 0xf;
5891 if (op1 & 2) {
5892 if (IS_USER(s))
5893 goto illegal_op;
d9ba4830 5894 tmp = load_cpu_field(spsr);
9ee6e8bb 5895 } else {
d9ba4830
PB
5896 tmp = new_tmp();
5897 gen_helper_cpsr_read(tmp);
9ee6e8bb 5898 }
d9ba4830 5899 store_reg(s, rd, tmp);
9ee6e8bb
PB
5900 }
5901 break;
5902 case 0x1:
5903 if (op1 == 1) {
5904 /* branch/exchange thumb (bx). */
d9ba4830
PB
5905 tmp = load_reg(s, rm);
5906 gen_bx(s, tmp);
9ee6e8bb
PB
5907 } else if (op1 == 3) {
5908 /* clz */
5909 rd = (insn >> 12) & 0xf;
1497c961
PB
5910 tmp = load_reg(s, rm);
5911 gen_helper_clz(tmp, tmp);
5912 store_reg(s, rd, tmp);
9ee6e8bb
PB
5913 } else {
5914 goto illegal_op;
5915 }
5916 break;
5917 case 0x2:
5918 if (op1 == 1) {
5919 ARCH(5J); /* bxj */
5920 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5921 tmp = load_reg(s, rm);
5922 gen_bx(s, tmp);
9ee6e8bb
PB
5923 } else {
5924 goto illegal_op;
5925 }
5926 break;
5927 case 0x3:
5928 if (op1 != 1)
5929 goto illegal_op;
5930
5931 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5932 tmp = load_reg(s, rm);
5933 tmp2 = new_tmp();
5934 tcg_gen_movi_i32(tmp2, s->pc);
5935 store_reg(s, 14, tmp2);
5936 gen_bx(s, tmp);
9ee6e8bb
PB
5937 break;
5938 case 0x5: /* saturating add/subtract */
5939 rd = (insn >> 12) & 0xf;
5940 rn = (insn >> 16) & 0xf;
5e3f878a
PB
5941 tmp = load_reg(s, rn);
5942 tmp2 = load_reg(s, rn);
9ee6e8bb 5943 if (op1 & 2)
5e3f878a 5944 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 5945 if (op1 & 1)
5e3f878a 5946 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 5947 else
5e3f878a
PB
5948 gen_helper_add_saturate(tmp, tmp, tmp2);
5949 dead_tmp(tmp2);
5950 store_reg(s, rd, tmp);
9ee6e8bb
PB
5951 break;
5952 case 7: /* bkpt */
5953 gen_set_condexec(s);
5e3f878a 5954 gen_set_pc_im(s->pc - 4);
d9ba4830 5955 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5956 s->is_jmp = DISAS_JUMP;
5957 break;
5958 case 0x8: /* signed multiply */
5959 case 0xa:
5960 case 0xc:
5961 case 0xe:
5962 rs = (insn >> 8) & 0xf;
5963 rn = (insn >> 12) & 0xf;
5964 rd = (insn >> 16) & 0xf;
5965 if (op1 == 1) {
5966 /* (32 * 16) >> 16 */
5e3f878a
PB
5967 tmp = load_reg(s, rm);
5968 tmp2 = load_reg(s, rs);
9ee6e8bb 5969 if (sh & 4)
5e3f878a 5970 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 5971 else
5e3f878a
PB
5972 gen_sxth(tmp2);
5973 tmp2 = gen_muls_i64_i32(tmp, tmp2);
5974 tcg_gen_shri_i64(tmp2, tmp2, 16);
5975 tmp = new_tmp();
5976 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 5977 if ((sh & 2) == 0) {
5e3f878a
PB
5978 tmp2 = load_reg(s, rn);
5979 gen_helper_add_setq(tmp, tmp, tmp2);
5980 dead_tmp(tmp2);
9ee6e8bb 5981 }
5e3f878a 5982 store_reg(s, rd, tmp);
9ee6e8bb
PB
5983 } else {
5984 /* 16 * 16 */
5e3f878a
PB
5985 tmp = load_reg(s, rm);
5986 tmp2 = load_reg(s, rs);
5987 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
5988 dead_tmp(tmp2);
9ee6e8bb 5989 if (op1 == 2) {
5e3f878a
PB
5990 tmp = tcg_temp_new(TCG_TYPE_I64);
5991 tcg_gen_ext_i32_i64(tmp, cpu_T[0]);
5992 gen_addq(s, tmp, rn, rd);
5993 gen_storeq_reg(s, rn, rd, tmp);
9ee6e8bb
PB
5994 } else {
5995 if (op1 == 0) {
5e3f878a
PB
5996 tmp2 = load_reg(s, rn);
5997 gen_helper_add_setq(tmp, tmp, tmp2);
5998 dead_tmp(tmp2);
9ee6e8bb 5999 }
5e3f878a 6000 store_reg(s, rd, tmp);
9ee6e8bb
PB
6001 }
6002 }
6003 break;
6004 default:
6005 goto illegal_op;
6006 }
6007 } else if (((insn & 0x0e000000) == 0 &&
6008 (insn & 0x00000090) != 0x90) ||
6009 ((insn & 0x0e000000) == (1 << 25))) {
6010 int set_cc, logic_cc, shiftop;
6011
6012 op1 = (insn >> 21) & 0xf;
6013 set_cc = (insn >> 20) & 1;
6014 logic_cc = table_logic_cc[op1] & set_cc;
6015
6016 /* data processing instruction */
6017 if (insn & (1 << 25)) {
6018 /* immediate operand */
6019 val = insn & 0xff;
6020 shift = ((insn >> 8) & 0xf) * 2;
6021 if (shift)
6022 val = (val >> shift) | (val << (32 - shift));
6023 gen_op_movl_T1_im(val);
6024 if (logic_cc && shift)
b26eefb6 6025 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6026 } else {
6027 /* register */
6028 rm = (insn) & 0xf;
6029 gen_movl_T1_reg(s, rm);
6030 shiftop = (insn >> 5) & 3;
6031 if (!(insn & (1 << 4))) {
6032 shift = (insn >> 7) & 0x1f;
9a119ff6 6033 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6034 } else {
6035 rs = (insn >> 8) & 0xf;
8984bd2e
PB
6036 tmp = load_reg(s, rs);
6037 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
9ee6e8bb
PB
6038 }
6039 }
6040 if (op1 != 0x0f && op1 != 0x0d) {
6041 rn = (insn >> 16) & 0xf;
6042 gen_movl_T0_reg(s, rn);
6043 }
6044 rd = (insn >> 12) & 0xf;
6045 switch(op1) {
6046 case 0x00:
6047 gen_op_andl_T0_T1();
6048 gen_movl_reg_T0(s, rd);
6049 if (logic_cc)
6050 gen_op_logic_T0_cc();
6051 break;
6052 case 0x01:
6053 gen_op_xorl_T0_T1();
6054 gen_movl_reg_T0(s, rd);
6055 if (logic_cc)
6056 gen_op_logic_T0_cc();
6057 break;
6058 case 0x02:
6059 if (set_cc && rd == 15) {
6060 /* SUBS r15, ... is used for exception return. */
6061 if (IS_USER(s))
6062 goto illegal_op;
6063 gen_op_subl_T0_T1_cc();
6064 gen_exception_return(s);
6065 } else {
6066 if (set_cc)
6067 gen_op_subl_T0_T1_cc();
6068 else
6069 gen_op_subl_T0_T1();
6070 gen_movl_reg_T0(s, rd);
6071 }
6072 break;
6073 case 0x03:
6074 if (set_cc)
6075 gen_op_rsbl_T0_T1_cc();
6076 else
6077 gen_op_rsbl_T0_T1();
6078 gen_movl_reg_T0(s, rd);
6079 break;
6080 case 0x04:
6081 if (set_cc)
6082 gen_op_addl_T0_T1_cc();
6083 else
6084 gen_op_addl_T0_T1();
6085 gen_movl_reg_T0(s, rd);
6086 break;
6087 case 0x05:
6088 if (set_cc)
6089 gen_op_adcl_T0_T1_cc();
6090 else
b26eefb6 6091 gen_adc_T0_T1();
9ee6e8bb
PB
6092 gen_movl_reg_T0(s, rd);
6093 break;
6094 case 0x06:
6095 if (set_cc)
6096 gen_op_sbcl_T0_T1_cc();
6097 else
3670669c 6098 gen_sbc_T0_T1();
9ee6e8bb
PB
6099 gen_movl_reg_T0(s, rd);
6100 break;
6101 case 0x07:
6102 if (set_cc)
6103 gen_op_rscl_T0_T1_cc();
6104 else
3670669c 6105 gen_rsc_T0_T1();
9ee6e8bb
PB
6106 gen_movl_reg_T0(s, rd);
6107 break;
6108 case 0x08:
6109 if (set_cc) {
6110 gen_op_andl_T0_T1();
6111 gen_op_logic_T0_cc();
6112 }
6113 break;
6114 case 0x09:
6115 if (set_cc) {
6116 gen_op_xorl_T0_T1();
6117 gen_op_logic_T0_cc();
6118 }
6119 break;
6120 case 0x0a:
6121 if (set_cc) {
6122 gen_op_subl_T0_T1_cc();
6123 }
6124 break;
6125 case 0x0b:
6126 if (set_cc) {
6127 gen_op_addl_T0_T1_cc();
6128 }
6129 break;
6130 case 0x0c:
6131 gen_op_orl_T0_T1();
6132 gen_movl_reg_T0(s, rd);
6133 if (logic_cc)
6134 gen_op_logic_T0_cc();
6135 break;
6136 case 0x0d:
6137 if (logic_cc && rd == 15) {
6138 /* MOVS r15, ... is used for exception return. */
6139 if (IS_USER(s))
6140 goto illegal_op;
6141 gen_op_movl_T0_T1();
6142 gen_exception_return(s);
6143 } else {
6144 gen_movl_reg_T1(s, rd);
6145 if (logic_cc)
6146 gen_op_logic_T1_cc();
6147 }
6148 break;
6149 case 0x0e:
6150 gen_op_bicl_T0_T1();
6151 gen_movl_reg_T0(s, rd);
6152 if (logic_cc)
6153 gen_op_logic_T0_cc();
6154 break;
6155 default:
6156 case 0x0f:
6157 gen_op_notl_T1();
6158 gen_movl_reg_T1(s, rd);
6159 if (logic_cc)
6160 gen_op_logic_T1_cc();
6161 break;
6162 }
6163 } else {
6164 /* other instructions */
6165 op1 = (insn >> 24) & 0xf;
6166 switch(op1) {
6167 case 0x0:
6168 case 0x1:
6169 /* multiplies, extra load/stores */
6170 sh = (insn >> 5) & 3;
6171 if (sh == 0) {
6172 if (op1 == 0x0) {
6173 rd = (insn >> 16) & 0xf;
6174 rn = (insn >> 12) & 0xf;
6175 rs = (insn >> 8) & 0xf;
6176 rm = (insn) & 0xf;
6177 op1 = (insn >> 20) & 0xf;
6178 switch (op1) {
6179 case 0: case 1: case 2: case 3: case 6:
6180 /* 32 bit mul */
5e3f878a
PB
6181 tmp = load_reg(s, rs);
6182 tmp2 = load_reg(s, rm);
6183 tcg_gen_mul_i32(tmp, tmp, tmp2);
6184 dead_tmp(tmp2);
9ee6e8bb
PB
6185 if (insn & (1 << 22)) {
6186 /* Subtract (mls) */
6187 ARCH(6T2);
5e3f878a
PB
6188 tmp2 = load_reg(s, rn);
6189 tcg_gen_sub_i32(tmp, tmp2, tmp);
6190 dead_tmp(tmp2);
9ee6e8bb
PB
6191 } else if (insn & (1 << 21)) {
6192 /* Add */
5e3f878a
PB
6193 tmp2 = load_reg(s, rn);
6194 tcg_gen_add_i32(tmp, tmp, tmp2);
6195 dead_tmp(tmp2);
9ee6e8bb
PB
6196 }
6197 if (insn & (1 << 20))
5e3f878a
PB
6198 gen_logic_CC(tmp);
6199 store_reg(s, rd, tmp);
9ee6e8bb
PB
6200 break;
6201 default:
6202 /* 64 bit mul */
5e3f878a
PB
6203 tmp = load_reg(s, rs);
6204 tmp2 = load_reg(s, rm);
9ee6e8bb 6205 if (insn & (1 << 22))
5e3f878a 6206 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6207 else
5e3f878a 6208 tmp = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6209 if (insn & (1 << 21)) /* mult accumulate */
5e3f878a 6210 gen_addq(s, tmp, rn, rd);
9ee6e8bb
PB
6211 if (!(insn & (1 << 23))) { /* double accumulate */
6212 ARCH(6);
5e3f878a
PB
6213 gen_addq_lo(s, tmp, rn);
6214 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
6215 }
6216 if (insn & (1 << 20))
5e3f878a
PB
6217 gen_logicq_cc(tmp);
6218 gen_storeq_reg(s, rn, rd, tmp);
9ee6e8bb
PB
6219 break;
6220 }
6221 } else {
6222 rn = (insn >> 16) & 0xf;
6223 rd = (insn >> 12) & 0xf;
6224 if (insn & (1 << 23)) {
6225 /* load/store exclusive */
6226 gen_movl_T1_reg(s, rn);
72f1c62f 6227 addr = cpu_T[1];
9ee6e8bb 6228 if (insn & (1 << 20)) {
8f8e3aa4
PB
6229 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6230 tmp = gen_ld32(addr, IS_USER(s));
6231 store_reg(s, rd, tmp);
9ee6e8bb 6232 } else {
8f8e3aa4 6233 int label = gen_new_label();
9ee6e8bb 6234 rm = insn & 0xf;
8f8e3aa4
PB
6235 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6236 tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
6237 tcg_const_i32(0), label);
6238 tmp = load_reg(s,rm);
6239 gen_st32(tmp, cpu_T[1], IS_USER(s));
2637a3be 6240 gen_set_label(label);
8f8e3aa4 6241 gen_movl_reg_T0(s, rd);
9ee6e8bb 6242 }
9ee6e8bb
PB
6243 } else {
6244 /* SWP instruction */
6245 rm = (insn) & 0xf;
6246
8984bd2e
PB
6247 /* ??? This is not really atomic. However we know
6248 we never have multiple CPUs running in parallel,
6249 so it is good enough. */
6250 addr = load_reg(s, rn);
6251 tmp = load_reg(s, rm);
9ee6e8bb 6252 if (insn & (1 << 22)) {
8984bd2e
PB
6253 tmp2 = gen_ld8u(addr, IS_USER(s));
6254 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6255 } else {
8984bd2e
PB
6256 tmp2 = gen_ld32(addr, IS_USER(s));
6257 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6258 }
8984bd2e
PB
6259 dead_tmp(addr);
6260 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6261 }
6262 }
6263 } else {
6264 int address_offset;
6265 int load;
6266 /* Misc load/store */
6267 rn = (insn >> 16) & 0xf;
6268 rd = (insn >> 12) & 0xf;
b0109805 6269 addr = load_reg(s, rn);
9ee6e8bb 6270 if (insn & (1 << 24))
b0109805 6271 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6272 address_offset = 0;
6273 if (insn & (1 << 20)) {
6274 /* load */
6275 switch(sh) {
6276 case 1:
b0109805 6277 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6278 break;
6279 case 2:
b0109805 6280 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6281 break;
6282 default:
6283 case 3:
b0109805 6284 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6285 break;
6286 }
6287 load = 1;
6288 } else if (sh & 2) {
6289 /* doubleword */
6290 if (sh & 1) {
6291 /* store */
b0109805
PB
6292 tmp = load_reg(s, rd);
6293 gen_st32(tmp, addr, IS_USER(s));
6294 tcg_gen_addi_i32(addr, addr, 4);
6295 tmp = load_reg(s, rd + 1);
6296 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6297 load = 0;
6298 } else {
6299 /* load */
b0109805
PB
6300 tmp = gen_ld32(addr, IS_USER(s));
6301 store_reg(s, rd, tmp);
6302 tcg_gen_addi_i32(addr, addr, 4);
6303 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6304 rd++;
6305 load = 1;
6306 }
6307 address_offset = -4;
6308 } else {
6309 /* store */
b0109805
PB
6310 tmp = load_reg(s, rd);
6311 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6312 load = 0;
6313 }
6314 /* Perform base writeback before the loaded value to
6315 ensure correct behavior with overlapping index registers.
6316 ldrd with base writeback is is undefined if the
6317 destination and index registers overlap. */
6318 if (!(insn & (1 << 24))) {
b0109805
PB
6319 gen_add_datah_offset(s, insn, address_offset, addr);
6320 store_reg(s, rn, addr);
9ee6e8bb
PB
6321 } else if (insn & (1 << 21)) {
6322 if (address_offset)
b0109805
PB
6323 tcg_gen_addi_i32(addr, addr, address_offset);
6324 store_reg(s, rn, addr);
6325 } else {
6326 dead_tmp(addr);
9ee6e8bb
PB
6327 }
6328 if (load) {
6329 /* Complete the load. */
b0109805 6330 store_reg(s, rd, tmp);
9ee6e8bb
PB
6331 }
6332 }
6333 break;
6334 case 0x4:
6335 case 0x5:
6336 goto do_ldst;
6337 case 0x6:
6338 case 0x7:
6339 if (insn & (1 << 4)) {
6340 ARCH(6);
6341 /* Armv6 Media instructions. */
6342 rm = insn & 0xf;
6343 rn = (insn >> 16) & 0xf;
2c0262af 6344 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6345 rs = (insn >> 8) & 0xf;
6346 switch ((insn >> 23) & 3) {
6347 case 0: /* Parallel add/subtract. */
6348 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6349 tmp = load_reg(s, rn);
6350 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6351 sh = (insn >> 5) & 7;
6352 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6353 goto illegal_op;
6ddbc6e4
PB
6354 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6355 dead_tmp(tmp2);
6356 store_reg(s, rd, tmp);
9ee6e8bb
PB
6357 break;
6358 case 1:
6359 if ((insn & 0x00700020) == 0) {
6c95676b 6360 /* Halfword pack. */
3670669c
PB
6361 tmp = load_reg(s, rn);
6362 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6363 shift = (insn >> 7) & 0x1f;
6364 if (shift)
3670669c
PB
6365 tcg_gen_shli_i32(tmp2, tmp2, shift);
6366 if (insn & (1 << 6)) {
6367 /* pkhtb */
6368 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6369 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6370 } else {
6371 /* pkhbt */
86831435 6372 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6373 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6374 }
6375 tcg_gen_or_i32(tmp, tmp, tmp2);
6376 store_reg(s, rd, tmp);
9ee6e8bb
PB
6377 } else if ((insn & 0x00200020) == 0x00200000) {
6378 /* [us]sat */
6ddbc6e4 6379 tmp = load_reg(s, rm);
9ee6e8bb
PB
6380 shift = (insn >> 7) & 0x1f;
6381 if (insn & (1 << 6)) {
6382 if (shift == 0)
6383 shift = 31;
6ddbc6e4 6384 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6385 } else {
6ddbc6e4 6386 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6387 }
6388 sh = (insn >> 16) & 0x1f;
6389 if (sh != 0) {
6390 if (insn & (1 << 22))
6ddbc6e4 6391 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6392 else
6ddbc6e4 6393 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6394 }
6ddbc6e4 6395 store_reg(s, rd, tmp);
9ee6e8bb
PB
6396 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6397 /* [us]sat16 */
6ddbc6e4 6398 tmp = load_reg(s, rm);
9ee6e8bb
PB
6399 sh = (insn >> 16) & 0x1f;
6400 if (sh != 0) {
6401 if (insn & (1 << 22))
6ddbc6e4 6402 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6403 else
6ddbc6e4 6404 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6405 }
6ddbc6e4 6406 store_reg(s, rd, tmp);
9ee6e8bb
PB
6407 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6408 /* Select bytes. */
6ddbc6e4
PB
6409 tmp = load_reg(s, rn);
6410 tmp2 = load_reg(s, rm);
6411 tmp3 = new_tmp();
6412 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6413 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6414 dead_tmp(tmp3);
6415 dead_tmp(tmp2);
6416 store_reg(s, rd, tmp);
9ee6e8bb 6417 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6418 tmp = load_reg(s, rm);
9ee6e8bb
PB
6419 shift = (insn >> 10) & 3;
6420 /* ??? In many cases it's not neccessary to do a
6421 rotate, a shift is sufficient. */
6422 if (shift != 0)
5e3f878a 6423 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6424 op1 = (insn >> 20) & 7;
6425 switch (op1) {
5e3f878a
PB
6426 case 0: gen_sxtb16(tmp); break;
6427 case 2: gen_sxtb(tmp); break;
6428 case 3: gen_sxth(tmp); break;
6429 case 4: gen_uxtb16(tmp); break;
6430 case 6: gen_uxtb(tmp); break;
6431 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6432 default: goto illegal_op;
6433 }
6434 if (rn != 15) {
5e3f878a 6435 tmp2 = load_reg(s, rn);
9ee6e8bb 6436 if ((op1 & 3) == 0) {
5e3f878a 6437 gen_add16(tmp, tmp2);
9ee6e8bb 6438 } else {
5e3f878a
PB
6439 tcg_gen_add_i32(tmp, tmp, tmp2);
6440 dead_tmp(tmp2);
9ee6e8bb
PB
6441 }
6442 }
6c95676b 6443 store_reg(s, rd, tmp);
9ee6e8bb
PB
6444 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6445 /* rev */
b0109805 6446 tmp = load_reg(s, rm);
9ee6e8bb
PB
6447 if (insn & (1 << 22)) {
6448 if (insn & (1 << 7)) {
b0109805 6449 gen_revsh(tmp);
9ee6e8bb
PB
6450 } else {
6451 ARCH(6T2);
b0109805 6452 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6453 }
6454 } else {
6455 if (insn & (1 << 7))
b0109805 6456 gen_rev16(tmp);
9ee6e8bb 6457 else
b0109805 6458 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6459 }
b0109805 6460 store_reg(s, rd, tmp);
9ee6e8bb
PB
6461 } else {
6462 goto illegal_op;
6463 }
6464 break;
6465 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6466 tmp = load_reg(s, rm);
6467 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6468 if (insn & (1 << 20)) {
6469 /* Signed multiply most significant [accumulate]. */
5e3f878a 6470 tmp2 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6471 if (insn & (1 << 5))
5e3f878a
PB
6472 tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
6473 tcg_gen_shri_i64(tmp2, tmp2, 32);
6474 tmp = new_tmp();
6475 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 6476 if (rn != 15) {
5e3f878a 6477 tmp2 = load_reg(s, rn);
9ee6e8bb 6478 if (insn & (1 << 6)) {
5e3f878a 6479 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6480 } else {
5e3f878a 6481 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6482 }
5e3f878a 6483 dead_tmp(tmp2);
9ee6e8bb 6484 }
5e3f878a 6485 store_reg(s, rd, tmp);
9ee6e8bb
PB
6486 } else {
6487 if (insn & (1 << 5))
5e3f878a
PB
6488 gen_swap_half(tmp2);
6489 gen_smul_dual(tmp, tmp2);
6490 /* This addition cannot overflow. */
6491 if (insn & (1 << 6)) {
6492 tcg_gen_sub_i32(tmp, tmp, tmp2);
6493 } else {
6494 tcg_gen_add_i32(tmp, tmp, tmp2);
6495 }
6496 dead_tmp(tmp2);
9ee6e8bb 6497 if (insn & (1 << 22)) {
5e3f878a
PB
6498 /* smlald, smlsld */
6499 tmp2 = tcg_temp_new(TCG_TYPE_I64);
6500 tcg_gen_ext_i32_i64(tmp2, tmp);
6501 dead_tmp(tmp);
6502 gen_addq(s, tmp2, rn, rd);
6503 gen_storeq_reg(s, rn, rd, tmp2);
9ee6e8bb 6504 } else {
5e3f878a 6505 /* smuad, smusd, smlad, smlsd */
9ee6e8bb
PB
6506 if (rn != 15)
6507 {
5e3f878a
PB
6508 tmp2 = load_reg(s, rn);
6509 gen_helper_add_setq(tmp, tmp, tmp2);
6510 dead_tmp(tmp2);
9ee6e8bb 6511 }
5e3f878a 6512 store_reg(s, rd, tmp);
9ee6e8bb
PB
6513 }
6514 }
6515 break;
6516 case 3:
6517 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6518 switch (op1) {
6519 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6520 ARCH(6);
6521 tmp = load_reg(s, rm);
6522 tmp2 = load_reg(s, rs);
6523 gen_helper_usad8(tmp, tmp, tmp2);
6524 dead_tmp(tmp2);
9ee6e8bb 6525 if (rn != 15) {
6ddbc6e4
PB
6526 tmp2 = load_reg(s, rn);
6527 tcg_gen_add_i32(tmp, tmp, tmp2);
6528 dead_tmp(tmp2);
9ee6e8bb 6529 }
6ddbc6e4 6530 store_reg(s, rd, tmp);
9ee6e8bb
PB
6531 break;
6532 case 0x20: case 0x24: case 0x28: case 0x2c:
6533 /* Bitfield insert/clear. */
6534 ARCH(6T2);
6535 shift = (insn >> 7) & 0x1f;
6536 i = (insn >> 16) & 0x1f;
6537 i = i + 1 - shift;
6538 if (rm == 15) {
5e3f878a
PB
6539 tmp = new_tmp();
6540 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6541 } else {
5e3f878a 6542 tmp = load_reg(s, rm);
9ee6e8bb
PB
6543 }
6544 if (i != 32) {
5e3f878a 6545 tmp2 = load_reg(s, rd);
8f8e3aa4 6546 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6547 dead_tmp(tmp2);
9ee6e8bb 6548 }
5e3f878a 6549 store_reg(s, rd, tmp);
9ee6e8bb
PB
6550 break;
6551 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6552 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5e3f878a 6553 tmp = load_reg(s, rm);
9ee6e8bb
PB
6554 shift = (insn >> 7) & 0x1f;
6555 i = ((insn >> 16) & 0x1f) + 1;
6556 if (shift + i > 32)
6557 goto illegal_op;
6558 if (i < 32) {
6559 if (op1 & 0x20) {
5e3f878a 6560 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6561 } else {
5e3f878a 6562 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6563 }
6564 }
5e3f878a 6565 store_reg(s, rd, tmp);
9ee6e8bb
PB
6566 break;
6567 default:
6568 goto illegal_op;
6569 }
6570 break;
6571 }
6572 break;
6573 }
6574 do_ldst:
6575 /* Check for undefined extension instructions
6576 * per the ARM Bible IE:
6577 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6578 */
6579 sh = (0xf << 20) | (0xf << 4);
6580 if (op1 == 0x7 && ((insn & sh) == sh))
6581 {
6582 goto illegal_op;
6583 }
6584 /* load/store byte/word */
6585 rn = (insn >> 16) & 0xf;
6586 rd = (insn >> 12) & 0xf;
b0109805 6587 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6588 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6589 if (insn & (1 << 24))
b0109805 6590 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6591 if (insn & (1 << 20)) {
6592 /* load */
6593 s->is_mem = 1;
9ee6e8bb 6594 if (insn & (1 << 22)) {
b0109805 6595 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6596 } else {
b0109805 6597 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6598 }
9ee6e8bb
PB
6599 } else {
6600 /* store */
b0109805 6601 tmp = load_reg(s, rd);
9ee6e8bb 6602 if (insn & (1 << 22))
b0109805 6603 gen_st8(tmp, tmp2, i);
9ee6e8bb 6604 else
b0109805 6605 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6606 }
6607 if (!(insn & (1 << 24))) {
b0109805
PB
6608 gen_add_data_offset(s, insn, tmp2);
6609 store_reg(s, rn, tmp2);
6610 } else if (insn & (1 << 21)) {
6611 store_reg(s, rn, tmp2);
6612 } else {
6613 dead_tmp(tmp2);
9ee6e8bb
PB
6614 }
6615 if (insn & (1 << 20)) {
6616 /* Complete the load. */
6617 if (rd == 15)
b0109805 6618 gen_bx(s, tmp);
9ee6e8bb 6619 else
b0109805 6620 store_reg(s, rd, tmp);
9ee6e8bb
PB
6621 }
6622 break;
6623 case 0x08:
6624 case 0x09:
6625 {
6626 int j, n, user, loaded_base;
b0109805 6627 TCGv loaded_var;
9ee6e8bb
PB
6628 /* load/store multiple words */
6629 /* XXX: store correct base if write back */
6630 user = 0;
6631 if (insn & (1 << 22)) {
6632 if (IS_USER(s))
6633 goto illegal_op; /* only usable in supervisor mode */
6634
6635 if ((insn & (1 << 15)) == 0)
6636 user = 1;
6637 }
6638 rn = (insn >> 16) & 0xf;
b0109805 6639 addr = load_reg(s, rn);
9ee6e8bb
PB
6640
6641 /* compute total size */
6642 loaded_base = 0;
6643 n = 0;
6644 for(i=0;i<16;i++) {
6645 if (insn & (1 << i))
6646 n++;
6647 }
6648 /* XXX: test invalid n == 0 case ? */
6649 if (insn & (1 << 23)) {
6650 if (insn & (1 << 24)) {
6651 /* pre increment */
b0109805 6652 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6653 } else {
6654 /* post increment */
6655 }
6656 } else {
6657 if (insn & (1 << 24)) {
6658 /* pre decrement */
b0109805 6659 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6660 } else {
6661 /* post decrement */
6662 if (n != 1)
b0109805 6663 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6664 }
6665 }
6666 j = 0;
6667 for(i=0;i<16;i++) {
6668 if (insn & (1 << i)) {
6669 if (insn & (1 << 20)) {
6670 /* load */
b0109805 6671 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6672 if (i == 15) {
b0109805 6673 gen_bx(s, tmp);
9ee6e8bb 6674 } else if (user) {
b0109805
PB
6675 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6676 dead_tmp(tmp);
9ee6e8bb 6677 } else if (i == rn) {
b0109805 6678 loaded_var = tmp;
9ee6e8bb
PB
6679 loaded_base = 1;
6680 } else {
b0109805 6681 store_reg(s, i, tmp);
9ee6e8bb
PB
6682 }
6683 } else {
6684 /* store */
6685 if (i == 15) {
6686 /* special case: r15 = PC + 8 */
6687 val = (long)s->pc + 4;
b0109805
PB
6688 tmp = new_tmp();
6689 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6690 } else if (user) {
b0109805
PB
6691 tmp = new_tmp();
6692 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6693 } else {
b0109805 6694 tmp = load_reg(s, i);
9ee6e8bb 6695 }
b0109805 6696 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6697 }
6698 j++;
6699 /* no need to add after the last transfer */
6700 if (j != n)
b0109805 6701 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6702 }
6703 }
6704 if (insn & (1 << 21)) {
6705 /* write back */
6706 if (insn & (1 << 23)) {
6707 if (insn & (1 << 24)) {
6708 /* pre increment */
6709 } else {
6710 /* post increment */
b0109805 6711 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6712 }
6713 } else {
6714 if (insn & (1 << 24)) {
6715 /* pre decrement */
6716 if (n != 1)
b0109805 6717 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6718 } else {
6719 /* post decrement */
b0109805 6720 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6721 }
6722 }
b0109805
PB
6723 store_reg(s, rn, addr);
6724 } else {
6725 dead_tmp(addr);
9ee6e8bb
PB
6726 }
6727 if (loaded_base) {
b0109805 6728 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6729 }
6730 if ((insn & (1 << 22)) && !user) {
6731 /* Restore CPSR from SPSR. */
d9ba4830
PB
6732 tmp = load_cpu_field(spsr);
6733 gen_set_cpsr(tmp, 0xffffffff);
6734 dead_tmp(tmp);
9ee6e8bb
PB
6735 s->is_jmp = DISAS_UPDATE;
6736 }
6737 }
6738 break;
6739 case 0xa:
6740 case 0xb:
6741 {
6742 int32_t offset;
6743
6744 /* branch (and link) */
6745 val = (int32_t)s->pc;
6746 if (insn & (1 << 24)) {
5e3f878a
PB
6747 tmp = new_tmp();
6748 tcg_gen_movi_i32(tmp, val);
6749 store_reg(s, 14, tmp);
9ee6e8bb
PB
6750 }
6751 offset = (((int32_t)insn << 8) >> 8);
6752 val += (offset << 2) + 4;
6753 gen_jmp(s, val);
6754 }
6755 break;
6756 case 0xc:
6757 case 0xd:
6758 case 0xe:
6759 /* Coprocessor. */
6760 if (disas_coproc_insn(env, s, insn))
6761 goto illegal_op;
6762 break;
6763 case 0xf:
6764 /* swi */
5e3f878a 6765 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6766 s->is_jmp = DISAS_SWI;
6767 break;
6768 default:
6769 illegal_op:
6770 gen_set_condexec(s);
5e3f878a 6771 gen_set_pc_im(s->pc - 4);
d9ba4830 6772 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6773 s->is_jmp = DISAS_JUMP;
6774 break;
6775 }
6776 }
6777}
6778
/* Thumb-2 data-processing opcodes 0..7 are the logical operations
   (and, bic, orr, orn, eor, ...).  Return nonzero for those, since
   they set the flags differently from the arithmetic ops (8..15).  */
static int
thumb2_logic_op(int op)
{
    return op < 8;
}
6785
6786/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6787 then set condition code flags based on the result of the operation.
6788 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6789 to the high bit of T1.
6790 Returns zero if the opcode is valid. */
6791
6792static int
6793gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6794{
6795 int logic_cc;
6796
6797 logic_cc = 0;
6798 switch (op) {
6799 case 0: /* and */
6800 gen_op_andl_T0_T1();
6801 logic_cc = conds;
6802 break;
6803 case 1: /* bic */
6804 gen_op_bicl_T0_T1();
6805 logic_cc = conds;
6806 break;
6807 case 2: /* orr */
6808 gen_op_orl_T0_T1();
6809 logic_cc = conds;
6810 break;
6811 case 3: /* orn */
6812 gen_op_notl_T1();
6813 gen_op_orl_T0_T1();
6814 logic_cc = conds;
6815 break;
6816 case 4: /* eor */
6817 gen_op_xorl_T0_T1();
6818 logic_cc = conds;
6819 break;
6820 case 8: /* add */
6821 if (conds)
6822 gen_op_addl_T0_T1_cc();
6823 else
6824 gen_op_addl_T0_T1();
6825 break;
6826 case 10: /* adc */
6827 if (conds)
6828 gen_op_adcl_T0_T1_cc();
6829 else
b26eefb6 6830 gen_adc_T0_T1();
9ee6e8bb
PB
6831 break;
6832 case 11: /* sbc */
6833 if (conds)
6834 gen_op_sbcl_T0_T1_cc();
6835 else
3670669c 6836 gen_sbc_T0_T1();
9ee6e8bb
PB
6837 break;
6838 case 13: /* sub */
6839 if (conds)
6840 gen_op_subl_T0_T1_cc();
6841 else
6842 gen_op_subl_T0_T1();
6843 break;
6844 case 14: /* rsb */
6845 if (conds)
6846 gen_op_rsbl_T0_T1_cc();
6847 else
6848 gen_op_rsbl_T0_T1();
6849 break;
6850 default: /* 5, 6, 7, 9, 12, 15. */
6851 return 1;
6852 }
6853 if (logic_cc) {
6854 gen_op_logic_T0_cc();
6855 if (shifter_out)
b26eefb6 6856 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6857 }
6858 return 0;
6859}
6860
6861/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6862 is not legal. */
6863static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6864{
b0109805 6865 uint32_t insn, imm, shift, offset;
9ee6e8bb 6866 uint32_t rd, rn, rm, rs;
b26eefb6 6867 TCGv tmp;
6ddbc6e4
PB
6868 TCGv tmp2;
6869 TCGv tmp3;
b0109805 6870 TCGv addr;
9ee6e8bb
PB
6871 int op;
6872 int shiftop;
6873 int conds;
6874 int logic_cc;
6875
6876 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6877 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 6878 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
6879 16-bit instructions to get correct prefetch abort behavior. */
6880 insn = insn_hw1;
6881 if ((insn & (1 << 12)) == 0) {
6882 /* Second half of blx. */
6883 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6884 tmp = load_reg(s, 14);
6885 tcg_gen_addi_i32(tmp, tmp, offset);
6886 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 6887
d9ba4830 6888 tmp2 = new_tmp();
b0109805 6889 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6890 store_reg(s, 14, tmp2);
6891 gen_bx(s, tmp);
9ee6e8bb
PB
6892 return 0;
6893 }
6894 if (insn & (1 << 11)) {
6895 /* Second half of bl. */
6896 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 6897 tmp = load_reg(s, 14);
6a0d8a1d 6898 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 6899
d9ba4830 6900 tmp2 = new_tmp();
b0109805 6901 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6902 store_reg(s, 14, tmp2);
6903 gen_bx(s, tmp);
9ee6e8bb
PB
6904 return 0;
6905 }
6906 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6907 /* Instruction spans a page boundary. Implement it as two
6908 16-bit instructions in case the second half causes an
6909 prefetch abort. */
6910 offset = ((int32_t)insn << 21) >> 9;
b0109805 6911 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
6912 gen_movl_reg_T0(s, 14);
6913 return 0;
6914 }
6915 /* Fall through to 32-bit decode. */
6916 }
6917
6918 insn = lduw_code(s->pc);
6919 s->pc += 2;
6920 insn |= (uint32_t)insn_hw1 << 16;
6921
6922 if ((insn & 0xf800e800) != 0xf000e800) {
6923 ARCH(6T2);
6924 }
6925
6926 rn = (insn >> 16) & 0xf;
6927 rs = (insn >> 12) & 0xf;
6928 rd = (insn >> 8) & 0xf;
6929 rm = insn & 0xf;
6930 switch ((insn >> 25) & 0xf) {
6931 case 0: case 1: case 2: case 3:
6932 /* 16-bit instructions. Should never happen. */
6933 abort();
6934 case 4:
6935 if (insn & (1 << 22)) {
6936 /* Other load/store, table branch. */
6937 if (insn & 0x01200000) {
6938 /* Load/store doubleword. */
6939 if (rn == 15) {
b0109805
PB
6940 addr = new_tmp();
6941 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 6942 } else {
b0109805 6943 addr = load_reg(s, rn);
9ee6e8bb
PB
6944 }
6945 offset = (insn & 0xff) * 4;
6946 if ((insn & (1 << 23)) == 0)
6947 offset = -offset;
6948 if (insn & (1 << 24)) {
b0109805 6949 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
6950 offset = 0;
6951 }
6952 if (insn & (1 << 20)) {
6953 /* ldrd */
b0109805
PB
6954 tmp = gen_ld32(addr, IS_USER(s));
6955 store_reg(s, rs, tmp);
6956 tcg_gen_addi_i32(addr, addr, 4);
6957 tmp = gen_ld32(addr, IS_USER(s));
6958 store_reg(s, rd, tmp);
9ee6e8bb
PB
6959 } else {
6960 /* strd */
b0109805
PB
6961 tmp = load_reg(s, rs);
6962 gen_st32(tmp, addr, IS_USER(s));
6963 tcg_gen_addi_i32(addr, addr, 4);
6964 tmp = load_reg(s, rd);
6965 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6966 }
6967 if (insn & (1 << 21)) {
6968 /* Base writeback. */
6969 if (rn == 15)
6970 goto illegal_op;
b0109805
PB
6971 tcg_gen_addi_i32(addr, addr, offset - 4);
6972 store_reg(s, rn, addr);
6973 } else {
6974 dead_tmp(addr);
9ee6e8bb
PB
6975 }
6976 } else if ((insn & (1 << 23)) == 0) {
6977 /* Load/store exclusive word. */
2c0262af 6978 gen_movl_T1_reg(s, rn);
72f1c62f 6979 addr = cpu_T[1];
2c0262af 6980 if (insn & (1 << 20)) {
8f8e3aa4
PB
6981 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6982 tmp = gen_ld32(addr, IS_USER(s));
6983 store_reg(s, rd, tmp);
9ee6e8bb 6984 } else {
8f8e3aa4
PB
6985 int label = gen_new_label();
6986 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
6987 tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0],
6988 tcg_const_i32(0), label);
6989 tmp = load_reg(s, rs);
6990 gen_st32(tmp, cpu_T[1], IS_USER(s));
6991 gen_set_label(label);
6992 gen_movl_reg_T0(s, rd);
9ee6e8bb 6993 }
9ee6e8bb
PB
6994 } else if ((insn & (1 << 6)) == 0) {
6995 /* Table Branch. */
6996 if (rn == 15) {
b0109805
PB
6997 addr = new_tmp();
6998 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 6999 } else {
b0109805 7000 addr = load_reg(s, rn);
9ee6e8bb 7001 }
b26eefb6 7002 tmp = load_reg(s, rm);
b0109805 7003 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7004 if (insn & (1 << 4)) {
7005 /* tbh */
b0109805 7006 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7007 dead_tmp(tmp);
b0109805 7008 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7009 } else { /* tbb */
b26eefb6 7010 dead_tmp(tmp);
b0109805 7011 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7012 }
b0109805
PB
7013 dead_tmp(addr);
7014 tcg_gen_shli_i32(tmp, tmp, 1);
7015 tcg_gen_addi_i32(tmp, tmp, s->pc);
7016 store_reg(s, 15, tmp);
9ee6e8bb
PB
7017 } else {
7018 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7019 /* ??? These are not really atomic. However we know
7020 we never have multiple CPUs running in parallel,
7021 so it is good enough. */
9ee6e8bb 7022 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7023 /* Must use a global reg for the address because we have
7024 a conditional branch in the store instruction. */
9ee6e8bb 7025 gen_movl_T1_reg(s, rn);
8f8e3aa4 7026 addr = cpu_T[1];
9ee6e8bb 7027 if (insn & (1 << 20)) {
8f8e3aa4 7028 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7029 switch (op) {
7030 case 0:
8f8e3aa4 7031 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7032 break;
2c0262af 7033 case 1:
8f8e3aa4 7034 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7035 break;
9ee6e8bb 7036 case 3:
8f8e3aa4
PB
7037 tmp = gen_ld32(addr, IS_USER(s));
7038 tcg_gen_addi_i32(addr, addr, 4);
7039 tmp2 = gen_ld32(addr, IS_USER(s));
7040 store_reg(s, rd, tmp2);
2c0262af
FB
7041 break;
7042 default:
9ee6e8bb
PB
7043 goto illegal_op;
7044 }
8f8e3aa4 7045 store_reg(s, rs, tmp);
9ee6e8bb 7046 } else {
8f8e3aa4
PB
7047 int label = gen_new_label();
7048 /* Must use a global that is not killed by the branch. */
7049 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
7050 tcg_gen_brcond_i32(TCG_COND_NE, cpu_T[0], tcg_const_i32(0),
7051 label);
7052 tmp = load_reg(s, rs);
9ee6e8bb
PB
7053 switch (op) {
7054 case 0:
8f8e3aa4 7055 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7056 break;
7057 case 1:
8f8e3aa4 7058 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7059 break;
2c0262af 7060 case 3:
8f8e3aa4
PB
7061 gen_st32(tmp, addr, IS_USER(s));
7062 tcg_gen_addi_i32(addr, addr, 4);
7063 tmp = load_reg(s, rd);
7064 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7065 break;
9ee6e8bb
PB
7066 default:
7067 goto illegal_op;
2c0262af 7068 }
8f8e3aa4 7069 gen_set_label(label);
9ee6e8bb
PB
7070 gen_movl_reg_T0(s, rm);
7071 }
7072 }
7073 } else {
7074 /* Load/store multiple, RFE, SRS. */
7075 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7076 /* Not available in user mode. */
b0109805 7077 if (IS_USER(s))
9ee6e8bb
PB
7078 goto illegal_op;
7079 if (insn & (1 << 20)) {
7080 /* rfe */
b0109805
PB
7081 addr = load_reg(s, rn);
7082 if ((insn & (1 << 24)) == 0)
7083 tcg_gen_addi_i32(addr, addr, -8);
7084 /* Load PC into tmp and CPSR into tmp2. */
7085 tmp = gen_ld32(addr, 0);
7086 tcg_gen_addi_i32(addr, addr, 4);
7087 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7088 if (insn & (1 << 21)) {
7089 /* Base writeback. */
b0109805
PB
7090 if (insn & (1 << 24)) {
7091 tcg_gen_addi_i32(addr, addr, 4);
7092 } else {
7093 tcg_gen_addi_i32(addr, addr, -4);
7094 }
7095 store_reg(s, rn, addr);
7096 } else {
7097 dead_tmp(addr);
9ee6e8bb 7098 }
b0109805 7099 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7100 } else {
7101 /* srs */
7102 op = (insn & 0x1f);
7103 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7104 addr = load_reg(s, 13);
9ee6e8bb 7105 } else {
b0109805
PB
7106 addr = new_tmp();
7107 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7108 }
7109 if ((insn & (1 << 24)) == 0) {
b0109805 7110 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7111 }
b0109805
PB
7112 tmp = load_reg(s, 14);
7113 gen_st32(tmp, addr, 0);
7114 tcg_gen_addi_i32(addr, addr, 4);
7115 tmp = new_tmp();
7116 gen_helper_cpsr_read(tmp);
7117 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7118 if (insn & (1 << 21)) {
7119 if ((insn & (1 << 24)) == 0) {
b0109805 7120 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7121 } else {
b0109805 7122 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7123 }
7124 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7125 store_reg(s, 13, addr);
9ee6e8bb 7126 } else {
b0109805
PB
7127 gen_helper_set_r13_banked(cpu_env,
7128 tcg_const_i32(op), addr);
9ee6e8bb 7129 }
b0109805
PB
7130 } else {
7131 dead_tmp(addr);
9ee6e8bb
PB
7132 }
7133 }
7134 } else {
7135 int i;
7136 /* Load/store multiple. */
b0109805 7137 addr = load_reg(s, rn);
9ee6e8bb
PB
7138 offset = 0;
7139 for (i = 0; i < 16; i++) {
7140 if (insn & (1 << i))
7141 offset += 4;
7142 }
7143 if (insn & (1 << 24)) {
b0109805 7144 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7145 }
7146
7147 for (i = 0; i < 16; i++) {
7148 if ((insn & (1 << i)) == 0)
7149 continue;
7150 if (insn & (1 << 20)) {
7151 /* Load. */
b0109805 7152 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7153 if (i == 15) {
b0109805 7154 gen_bx(s, tmp);
9ee6e8bb 7155 } else {
b0109805 7156 store_reg(s, i, tmp);
9ee6e8bb
PB
7157 }
7158 } else {
7159 /* Store. */
b0109805
PB
7160 tmp = load_reg(s, i);
7161 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7162 }
b0109805 7163 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7164 }
7165 if (insn & (1 << 21)) {
7166 /* Base register writeback. */
7167 if (insn & (1 << 24)) {
b0109805 7168 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7169 }
7170 /* Fault if writeback register is in register list. */
7171 if (insn & (1 << rn))
7172 goto illegal_op;
b0109805
PB
7173 store_reg(s, rn, addr);
7174 } else {
7175 dead_tmp(addr);
9ee6e8bb
PB
7176 }
7177 }
7178 }
7179 break;
7180 case 5: /* Data processing register constant shift. */
7181 if (rn == 15)
7182 gen_op_movl_T0_im(0);
7183 else
7184 gen_movl_T0_reg(s, rn);
7185 gen_movl_T1_reg(s, rm);
7186 op = (insn >> 21) & 0xf;
7187 shiftop = (insn >> 4) & 3;
7188 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7189 conds = (insn & (1 << 20)) != 0;
7190 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7191 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7192 if (gen_thumb2_data_op(s, op, conds, 0))
7193 goto illegal_op;
7194 if (rd != 15)
7195 gen_movl_reg_T0(s, rd);
7196 break;
7197 case 13: /* Misc data processing. */
7198 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7199 if (op < 4 && (insn & 0xf000) != 0xf000)
7200 goto illegal_op;
7201 switch (op) {
7202 case 0: /* Register controlled shift. */
8984bd2e
PB
7203 tmp = load_reg(s, rn);
7204 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7205 if ((insn & 0x70) != 0)
7206 goto illegal_op;
7207 op = (insn >> 21) & 3;
8984bd2e
PB
7208 logic_cc = (insn & (1 << 20)) != 0;
7209 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7210 if (logic_cc)
7211 gen_logic_CC(tmp);
7212 store_reg(s, rd, tmp);
9ee6e8bb
PB
7213 break;
7214 case 1: /* Sign/zero extend. */
5e3f878a 7215 tmp = load_reg(s, rm);
9ee6e8bb
PB
7216 shift = (insn >> 4) & 3;
7217 /* ??? In many cases it's not neccessary to do a
7218 rotate, a shift is sufficient. */
7219 if (shift != 0)
5e3f878a 7220 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7221 op = (insn >> 20) & 7;
7222 switch (op) {
5e3f878a
PB
7223 case 0: gen_sxth(tmp); break;
7224 case 1: gen_uxth(tmp); break;
7225 case 2: gen_sxtb16(tmp); break;
7226 case 3: gen_uxtb16(tmp); break;
7227 case 4: gen_sxtb(tmp); break;
7228 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7229 default: goto illegal_op;
7230 }
7231 if (rn != 15) {
5e3f878a 7232 tmp2 = load_reg(s, rn);
9ee6e8bb 7233 if ((op >> 1) == 1) {
5e3f878a 7234 gen_add16(tmp, tmp2);
9ee6e8bb 7235 } else {
5e3f878a
PB
7236 tcg_gen_add_i32(tmp, tmp, tmp2);
7237 dead_tmp(tmp2);
9ee6e8bb
PB
7238 }
7239 }
5e3f878a 7240 store_reg(s, rd, tmp);
9ee6e8bb
PB
7241 break;
7242 case 2: /* SIMD add/subtract. */
7243 op = (insn >> 20) & 7;
7244 shift = (insn >> 4) & 7;
7245 if ((op & 3) == 3 || (shift & 3) == 3)
7246 goto illegal_op;
6ddbc6e4
PB
7247 tmp = load_reg(s, rn);
7248 tmp2 = load_reg(s, rm);
7249 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7250 dead_tmp(tmp2);
7251 store_reg(s, rd, tmp);
9ee6e8bb
PB
7252 break;
7253 case 3: /* Other data processing. */
7254 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7255 if (op < 4) {
7256 /* Saturating add/subtract. */
d9ba4830
PB
7257 tmp = load_reg(s, rn);
7258 tmp2 = load_reg(s, rm);
9ee6e8bb 7259 if (op & 2)
d9ba4830 7260 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7261 if (op & 1)
d9ba4830 7262 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7263 else
d9ba4830
PB
7264 gen_helper_add_saturate(tmp, tmp, tmp2);
7265 dead_tmp(tmp2);
9ee6e8bb 7266 } else {
d9ba4830 7267 tmp = load_reg(s, rn);
9ee6e8bb
PB
7268 switch (op) {
7269 case 0x0a: /* rbit */
d9ba4830 7270 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7271 break;
7272 case 0x08: /* rev */
d9ba4830 7273 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
7274 break;
7275 case 0x09: /* rev16 */
d9ba4830 7276 gen_rev16(tmp);
9ee6e8bb
PB
7277 break;
7278 case 0x0b: /* revsh */
d9ba4830 7279 gen_revsh(tmp);
9ee6e8bb
PB
7280 break;
7281 case 0x10: /* sel */
d9ba4830 7282 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7283 tmp3 = new_tmp();
7284 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7285 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7286 dead_tmp(tmp3);
d9ba4830 7287 dead_tmp(tmp2);
9ee6e8bb
PB
7288 break;
7289 case 0x18: /* clz */
d9ba4830 7290 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7291 break;
7292 default:
7293 goto illegal_op;
7294 }
7295 }
d9ba4830 7296 store_reg(s, rd, tmp);
9ee6e8bb
PB
7297 break;
7298 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7299 op = (insn >> 4) & 0xf;
d9ba4830
PB
7300 tmp = load_reg(s, rn);
7301 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7302 switch ((insn >> 20) & 7) {
7303 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7304 tcg_gen_mul_i32(tmp, tmp, tmp2);
7305 dead_tmp(tmp2);
9ee6e8bb 7306 if (rs != 15) {
d9ba4830 7307 tmp2 = load_reg(s, rs);
9ee6e8bb 7308 if (op)
d9ba4830 7309 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7310 else
d9ba4830
PB
7311 tcg_gen_add_i32(tmp, tmp, tmp2);
7312 dead_tmp(tmp2);
9ee6e8bb 7313 }
9ee6e8bb
PB
7314 break;
7315 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7316 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7317 dead_tmp(tmp2);
9ee6e8bb 7318 if (rs != 15) {
d9ba4830
PB
7319 tmp2 = load_reg(s, rs);
7320 gen_helper_add_setq(tmp, tmp, tmp2);
7321 dead_tmp(tmp2);
9ee6e8bb 7322 }
9ee6e8bb
PB
7323 break;
7324 case 2: /* Dual multiply add. */
7325 case 4: /* Dual multiply subtract. */
7326 if (op)
d9ba4830
PB
7327 gen_swap_half(tmp2);
7328 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7329 /* This addition cannot overflow. */
7330 if (insn & (1 << 22)) {
d9ba4830 7331 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7332 } else {
d9ba4830 7333 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7334 }
d9ba4830 7335 dead_tmp(tmp2);
9ee6e8bb
PB
7336 if (rs != 15)
7337 {
d9ba4830
PB
7338 tmp2 = load_reg(s, rs);
7339 gen_helper_add_setq(tmp, tmp, tmp2);
7340 dead_tmp(tmp2);
9ee6e8bb 7341 }
9ee6e8bb
PB
7342 break;
7343 case 3: /* 32 * 16 -> 32msb */
7344 if (op)
d9ba4830 7345 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7346 else
d9ba4830 7347 gen_sxth(tmp2);
5e3f878a
PB
7348 tmp2 = gen_muls_i64_i32(tmp, tmp2);
7349 tcg_gen_shri_i64(tmp2, tmp2, 16);
7350 tmp = new_tmp();
7351 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb
PB
7352 if (rs != 15)
7353 {
d9ba4830
PB
7354 tmp2 = load_reg(s, rs);
7355 gen_helper_add_setq(tmp, tmp, tmp2);
7356 dead_tmp(tmp2);
9ee6e8bb 7357 }
9ee6e8bb
PB
7358 break;
7359 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7360 gen_imull(tmp, tmp2);
7361 if (insn & (1 << 5)) {
7362 gen_roundqd(tmp, tmp2);
7363 dead_tmp(tmp2);
7364 } else {
7365 dead_tmp(tmp);
7366 tmp = tmp2;
7367 }
9ee6e8bb 7368 if (rs != 15) {
d9ba4830 7369 tmp2 = load_reg(s, rs);
9ee6e8bb 7370 if (insn & (1 << 21)) {
d9ba4830 7371 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7372 } else {
d9ba4830 7373 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7374 }
d9ba4830 7375 dead_tmp(tmp2);
2c0262af 7376 }
9ee6e8bb
PB
7377 break;
7378 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7379 gen_helper_usad8(tmp, tmp, tmp2);
7380 dead_tmp(tmp2);
9ee6e8bb 7381 if (rs != 15) {
d9ba4830
PB
7382 tmp2 = load_reg(s, rs);
7383 tcg_gen_add_i32(tmp, tmp, tmp2);
7384 dead_tmp(tmp2);
5fd46862 7385 }
9ee6e8bb 7386 break;
2c0262af 7387 }
d9ba4830 7388 store_reg(s, rd, tmp);
2c0262af 7389 break;
9ee6e8bb
PB
7390 case 6: case 7: /* 64-bit multiply, Divide. */
7391 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7392 tmp = load_reg(s, rn);
7393 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7394 if ((op & 0x50) == 0x10) {
7395 /* sdiv, udiv */
7396 if (!arm_feature(env, ARM_FEATURE_DIV))
7397 goto illegal_op;
7398 if (op & 0x20)
5e3f878a 7399 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7400 else
5e3f878a
PB
7401 gen_helper_sdiv(tmp, tmp, tmp2);
7402 dead_tmp(tmp2);
7403 store_reg(s, rd, tmp);
9ee6e8bb
PB
7404 } else if ((op & 0xe) == 0xc) {
7405 /* Dual multiply accumulate long. */
7406 if (op & 1)
5e3f878a
PB
7407 gen_swap_half(tmp2);
7408 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7409 if (op & 0x10) {
5e3f878a 7410 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7411 } else {
5e3f878a 7412 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7413 }
5e3f878a
PB
7414 dead_tmp(tmp2);
7415 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7416 gen_addq(s, tmp, rs, rd);
7417 gen_storeq_reg(s, rs, rd, tmp);
2c0262af 7418 } else {
9ee6e8bb
PB
7419 if (op & 0x20) {
7420 /* Unsigned 64-bit multiply */
5e3f878a 7421 tmp = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7422 } else {
9ee6e8bb
PB
7423 if (op & 8) {
7424 /* smlalxy */
5e3f878a
PB
7425 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7426 dead_tmp(tmp2);
7427 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7428 tcg_gen_ext_i32_i64(tmp2, tmp);
7429 dead_tmp(tmp);
7430 tmp = tmp2;
9ee6e8bb
PB
7431 } else {
7432 /* Signed 64-bit multiply */
5e3f878a 7433 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7434 }
b5ff1b31 7435 }
9ee6e8bb
PB
7436 if (op & 4) {
7437 /* umaal */
5e3f878a
PB
7438 gen_addq_lo(s, tmp, rs);
7439 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
7440 } else if (op & 0x40) {
7441 /* 64-bit accumulate. */
5e3f878a 7442 gen_addq(s, tmp, rs, rd);
9ee6e8bb 7443 }
5e3f878a 7444 gen_storeq_reg(s, rs, rd, tmp);
5fd46862 7445 }
2c0262af 7446 break;
9ee6e8bb
PB
7447 }
7448 break;
7449 case 6: case 7: case 14: case 15:
7450 /* Coprocessor. */
7451 if (((insn >> 24) & 3) == 3) {
7452 /* Translate into the equivalent ARM encoding. */
7453 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7454 if (disas_neon_data_insn(env, s, insn))
7455 goto illegal_op;
7456 } else {
7457 if (insn & (1 << 28))
7458 goto illegal_op;
7459 if (disas_coproc_insn (env, s, insn))
7460 goto illegal_op;
7461 }
7462 break;
7463 case 8: case 9: case 10: case 11:
7464 if (insn & (1 << 15)) {
7465 /* Branches, misc control. */
7466 if (insn & 0x5000) {
7467 /* Unconditional branch. */
7468 /* signextend(hw1[10:0]) -> offset[:12]. */
7469 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7470 /* hw1[10:0] -> offset[11:1]. */
7471 offset |= (insn & 0x7ff) << 1;
7472 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7473 offset[24:22] already have the same value because of the
7474 sign extension above. */
7475 offset ^= ((~insn) & (1 << 13)) << 10;
7476 offset ^= ((~insn) & (1 << 11)) << 11;
7477
9ee6e8bb
PB
7478 if (insn & (1 << 14)) {
7479 /* Branch and link. */
b0109805 7480 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7481 gen_movl_reg_T1(s, 14);
b5ff1b31 7482 }
3b46e624 7483
b0109805 7484 offset += s->pc;
9ee6e8bb
PB
7485 if (insn & (1 << 12)) {
7486 /* b/bl */
b0109805 7487 gen_jmp(s, offset);
9ee6e8bb
PB
7488 } else {
7489 /* blx */
b0109805
PB
7490 offset &= ~(uint32_t)2;
7491 gen_bx_im(s, offset);
2c0262af 7492 }
9ee6e8bb
PB
7493 } else if (((insn >> 23) & 7) == 7) {
7494 /* Misc control */
7495 if (insn & (1 << 13))
7496 goto illegal_op;
7497
7498 if (insn & (1 << 26)) {
7499 /* Secure monitor call (v6Z) */
7500 goto illegal_op; /* not implemented. */
2c0262af 7501 } else {
9ee6e8bb
PB
7502 op = (insn >> 20) & 7;
7503 switch (op) {
7504 case 0: /* msr cpsr. */
7505 if (IS_M(env)) {
8984bd2e
PB
7506 tmp = load_reg(s, rn);
7507 addr = tcg_const_i32(insn & 0xff);
7508 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7509 gen_lookup_tb(s);
7510 break;
7511 }
7512 /* fall through */
7513 case 1: /* msr spsr. */
7514 if (IS_M(env))
7515 goto illegal_op;
7516 gen_movl_T0_reg(s, rn);
7517 if (gen_set_psr_T0(s,
7518 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7519 op == 1))
7520 goto illegal_op;
7521 break;
7522 case 2: /* cps, nop-hint. */
7523 if (((insn >> 8) & 7) == 0) {
7524 gen_nop_hint(s, insn & 0xff);
7525 }
7526 /* Implemented as NOP in user mode. */
7527 if (IS_USER(s))
7528 break;
7529 offset = 0;
7530 imm = 0;
7531 if (insn & (1 << 10)) {
7532 if (insn & (1 << 7))
7533 offset |= CPSR_A;
7534 if (insn & (1 << 6))
7535 offset |= CPSR_I;
7536 if (insn & (1 << 5))
7537 offset |= CPSR_F;
7538 if (insn & (1 << 9))
7539 imm = CPSR_A | CPSR_I | CPSR_F;
7540 }
7541 if (insn & (1 << 8)) {
7542 offset |= 0x1f;
7543 imm |= (insn & 0x1f);
7544 }
7545 if (offset) {
7546 gen_op_movl_T0_im(imm);
7547 gen_set_psr_T0(s, offset, 0);
7548 }
7549 break;
7550 case 3: /* Special control operations. */
7551 op = (insn >> 4) & 0xf;
7552 switch (op) {
7553 case 2: /* clrex */
8f8e3aa4 7554 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7555 break;
7556 case 4: /* dsb */
7557 case 5: /* dmb */
7558 case 6: /* isb */
7559 /* These execute as NOPs. */
7560 ARCH(7);
7561 break;
7562 default:
7563 goto illegal_op;
7564 }
7565 break;
7566 case 4: /* bxj */
7567 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7568 tmp = load_reg(s, rn);
7569 gen_bx(s, tmp);
9ee6e8bb
PB
7570 break;
7571 case 5: /* Exception return. */
7572 /* Unpredictable in user mode. */
7573 goto illegal_op;
7574 case 6: /* mrs cpsr. */
8984bd2e 7575 tmp = new_tmp();
9ee6e8bb 7576 if (IS_M(env)) {
8984bd2e
PB
7577 addr = tcg_const_i32(insn & 0xff);
7578 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7579 } else {
8984bd2e 7580 gen_helper_cpsr_read(tmp);
9ee6e8bb 7581 }
8984bd2e 7582 store_reg(s, rd, tmp);
9ee6e8bb
PB
7583 break;
7584 case 7: /* mrs spsr. */
7585 /* Not accessible in user mode. */
7586 if (IS_USER(s) || IS_M(env))
7587 goto illegal_op;
d9ba4830
PB
7588 tmp = load_cpu_field(spsr);
7589 store_reg(s, rd, tmp);
9ee6e8bb 7590 break;
2c0262af
FB
7591 }
7592 }
9ee6e8bb
PB
7593 } else {
7594 /* Conditional branch. */
7595 op = (insn >> 22) & 0xf;
7596 /* Generate a conditional jump to next instruction. */
7597 s->condlabel = gen_new_label();
d9ba4830 7598 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7599 s->condjmp = 1;
7600
7601 /* offset[11:1] = insn[10:0] */
7602 offset = (insn & 0x7ff) << 1;
7603 /* offset[17:12] = insn[21:16]. */
7604 offset |= (insn & 0x003f0000) >> 4;
7605 /* offset[31:20] = insn[26]. */
7606 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7607 /* offset[18] = insn[13]. */
7608 offset |= (insn & (1 << 13)) << 5;
7609 /* offset[19] = insn[11]. */
7610 offset |= (insn & (1 << 11)) << 8;
7611
7612 /* jump to the offset */
b0109805 7613 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7614 }
7615 } else {
7616 /* Data processing immediate. */
7617 if (insn & (1 << 25)) {
7618 if (insn & (1 << 24)) {
7619 if (insn & (1 << 20))
7620 goto illegal_op;
7621 /* Bitfield/Saturate. */
7622 op = (insn >> 21) & 7;
7623 imm = insn & 0x1f;
7624 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7625 if (rn == 15) {
7626 tmp = new_tmp();
7627 tcg_gen_movi_i32(tmp, 0);
7628 } else {
7629 tmp = load_reg(s, rn);
7630 }
9ee6e8bb
PB
7631 switch (op) {
7632 case 2: /* Signed bitfield extract. */
7633 imm++;
7634 if (shift + imm > 32)
7635 goto illegal_op;
7636 if (imm < 32)
6ddbc6e4 7637 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7638 break;
7639 case 6: /* Unsigned bitfield extract. */
7640 imm++;
7641 if (shift + imm > 32)
7642 goto illegal_op;
7643 if (imm < 32)
6ddbc6e4 7644 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7645 break;
7646 case 3: /* Bitfield insert/clear. */
7647 if (imm < shift)
7648 goto illegal_op;
7649 imm = imm + 1 - shift;
7650 if (imm != 32) {
6ddbc6e4 7651 tmp2 = load_reg(s, rd);
8f8e3aa4 7652 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7653 dead_tmp(tmp2);
9ee6e8bb
PB
7654 }
7655 break;
7656 case 7:
7657 goto illegal_op;
7658 default: /* Saturate. */
9ee6e8bb
PB
7659 if (shift) {
7660 if (op & 1)
6ddbc6e4 7661 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7662 else
6ddbc6e4 7663 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7664 }
6ddbc6e4 7665 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7666 if (op & 4) {
7667 /* Unsigned. */
9ee6e8bb 7668 if ((op & 1) && shift == 0)
6ddbc6e4 7669 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7670 else
6ddbc6e4 7671 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7672 } else {
9ee6e8bb 7673 /* Signed. */
9ee6e8bb 7674 if ((op & 1) && shift == 0)
6ddbc6e4 7675 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7676 else
6ddbc6e4 7677 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7678 }
9ee6e8bb 7679 break;
2c0262af 7680 }
6ddbc6e4 7681 store_reg(s, rd, tmp);
9ee6e8bb
PB
7682 } else {
7683 imm = ((insn & 0x04000000) >> 15)
7684 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7685 if (insn & (1 << 22)) {
7686 /* 16-bit immediate. */
7687 imm |= (insn >> 4) & 0xf000;
7688 if (insn & (1 << 23)) {
7689 /* movt */
5e3f878a 7690 tmp = load_reg(s, rd);
86831435 7691 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7692 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7693 } else {
9ee6e8bb 7694 /* movw */
5e3f878a
PB
7695 tmp = new_tmp();
7696 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7697 }
7698 } else {
9ee6e8bb
PB
7699 /* Add/sub 12-bit immediate. */
7700 if (rn == 15) {
b0109805 7701 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7702 if (insn & (1 << 23))
b0109805 7703 offset -= imm;
9ee6e8bb 7704 else
b0109805 7705 offset += imm;
5e3f878a
PB
7706 tmp = new_tmp();
7707 tcg_gen_movi_i32(tmp, offset);
2c0262af 7708 } else {
5e3f878a 7709 tmp = load_reg(s, rn);
9ee6e8bb 7710 if (insn & (1 << 23))
5e3f878a 7711 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7712 else
5e3f878a 7713 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7714 }
9ee6e8bb 7715 }
5e3f878a 7716 store_reg(s, rd, tmp);
191abaa2 7717 }
9ee6e8bb
PB
7718 } else {
7719 int shifter_out = 0;
7720 /* modified 12-bit immediate. */
7721 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7722 imm = (insn & 0xff);
7723 switch (shift) {
7724 case 0: /* XY */
7725 /* Nothing to do. */
7726 break;
7727 case 1: /* 00XY00XY */
7728 imm |= imm << 16;
7729 break;
7730 case 2: /* XY00XY00 */
7731 imm |= imm << 16;
7732 imm <<= 8;
7733 break;
7734 case 3: /* XYXYXYXY */
7735 imm |= imm << 16;
7736 imm |= imm << 8;
7737 break;
7738 default: /* Rotated constant. */
7739 shift = (shift << 1) | (imm >> 7);
7740 imm |= 0x80;
7741 imm = imm << (32 - shift);
7742 shifter_out = 1;
7743 break;
b5ff1b31 7744 }
9ee6e8bb
PB
7745 gen_op_movl_T1_im(imm);
7746 rn = (insn >> 16) & 0xf;
7747 if (rn == 15)
7748 gen_op_movl_T0_im(0);
7749 else
7750 gen_movl_T0_reg(s, rn);
7751 op = (insn >> 21) & 0xf;
7752 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7753 shifter_out))
7754 goto illegal_op;
7755 rd = (insn >> 8) & 0xf;
7756 if (rd != 15) {
7757 gen_movl_reg_T0(s, rd);
2c0262af 7758 }
2c0262af 7759 }
9ee6e8bb
PB
7760 }
7761 break;
7762 case 12: /* Load/store single data item. */
7763 {
7764 int postinc = 0;
7765 int writeback = 0;
b0109805 7766 int user;
9ee6e8bb
PB
7767 if ((insn & 0x01100000) == 0x01000000) {
7768 if (disas_neon_ls_insn(env, s, insn))
c1713132 7769 goto illegal_op;
9ee6e8bb
PB
7770 break;
7771 }
b0109805 7772 user = IS_USER(s);
9ee6e8bb 7773 if (rn == 15) {
b0109805 7774 addr = new_tmp();
9ee6e8bb
PB
7775 /* PC relative. */
7776 /* s->pc has already been incremented by 4. */
7777 imm = s->pc & 0xfffffffc;
7778 if (insn & (1 << 23))
7779 imm += insn & 0xfff;
7780 else
7781 imm -= insn & 0xfff;
b0109805 7782 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7783 } else {
b0109805 7784 addr = load_reg(s, rn);
9ee6e8bb
PB
7785 if (insn & (1 << 23)) {
7786 /* Positive offset. */
7787 imm = insn & 0xfff;
b0109805 7788 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7789 } else {
7790 op = (insn >> 8) & 7;
7791 imm = insn & 0xff;
7792 switch (op) {
7793 case 0: case 8: /* Shifted Register. */
7794 shift = (insn >> 4) & 0xf;
7795 if (shift > 3)
18c9b560 7796 goto illegal_op;
b26eefb6 7797 tmp = load_reg(s, rm);
9ee6e8bb 7798 if (shift)
b26eefb6 7799 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7800 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7801 dead_tmp(tmp);
9ee6e8bb
PB
7802 break;
7803 case 4: /* Negative offset. */
b0109805 7804 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7805 break;
7806 case 6: /* User privilege. */
b0109805
PB
7807 tcg_gen_addi_i32(addr, addr, imm);
7808 user = 1;
9ee6e8bb
PB
7809 break;
7810 case 1: /* Post-decrement. */
7811 imm = -imm;
7812 /* Fall through. */
7813 case 3: /* Post-increment. */
9ee6e8bb
PB
7814 postinc = 1;
7815 writeback = 1;
7816 break;
7817 case 5: /* Pre-decrement. */
7818 imm = -imm;
7819 /* Fall through. */
7820 case 7: /* Pre-increment. */
b0109805 7821 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7822 writeback = 1;
7823 break;
7824 default:
b7bcbe95 7825 goto illegal_op;
9ee6e8bb
PB
7826 }
7827 }
7828 }
7829 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7830 if (insn & (1 << 20)) {
7831 /* Load. */
7832 if (rs == 15 && op != 2) {
7833 if (op & 2)
b5ff1b31 7834 goto illegal_op;
9ee6e8bb
PB
7835 /* Memory hint. Implemented as NOP. */
7836 } else {
7837 switch (op) {
b0109805
PB
7838 case 0: tmp = gen_ld8u(addr, user); break;
7839 case 4: tmp = gen_ld8s(addr, user); break;
7840 case 1: tmp = gen_ld16u(addr, user); break;
7841 case 5: tmp = gen_ld16s(addr, user); break;
7842 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7843 default: goto illegal_op;
7844 }
7845 if (rs == 15) {
b0109805 7846 gen_bx(s, tmp);
9ee6e8bb 7847 } else {
b0109805 7848 store_reg(s, rs, tmp);
9ee6e8bb
PB
7849 }
7850 }
7851 } else {
7852 /* Store. */
7853 if (rs == 15)
b7bcbe95 7854 goto illegal_op;
b0109805 7855 tmp = load_reg(s, rs);
9ee6e8bb 7856 switch (op) {
b0109805
PB
7857 case 0: gen_st8(tmp, addr, user); break;
7858 case 1: gen_st16(tmp, addr, user); break;
7859 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7860 default: goto illegal_op;
b7bcbe95 7861 }
2c0262af 7862 }
9ee6e8bb 7863 if (postinc)
b0109805
PB
7864 tcg_gen_addi_i32(addr, addr, imm);
7865 if (writeback) {
7866 store_reg(s, rn, addr);
7867 } else {
7868 dead_tmp(addr);
7869 }
9ee6e8bb
PB
7870 }
7871 break;
7872 default:
7873 goto illegal_op;
2c0262af 7874 }
9ee6e8bb
PB
7875 return 0;
7876illegal_op:
7877 return 1;
2c0262af
FB
7878}
7879
9ee6e8bb 7880static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
7881{
7882 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7883 int32_t offset;
7884 int i;
b26eefb6 7885 TCGv tmp;
d9ba4830 7886 TCGv tmp2;
b0109805 7887 TCGv addr;
99c475ab 7888
9ee6e8bb
PB
7889 if (s->condexec_mask) {
7890 cond = s->condexec_cond;
7891 s->condlabel = gen_new_label();
d9ba4830 7892 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7893 s->condjmp = 1;
7894 }
7895
b5ff1b31 7896 insn = lduw_code(s->pc);
99c475ab 7897 s->pc += 2;
b5ff1b31 7898
99c475ab
FB
7899 switch (insn >> 12) {
7900 case 0: case 1:
7901 rd = insn & 7;
7902 op = (insn >> 11) & 3;
7903 if (op == 3) {
7904 /* add/subtract */
7905 rn = (insn >> 3) & 7;
7906 gen_movl_T0_reg(s, rn);
7907 if (insn & (1 << 10)) {
7908 /* immediate */
7909 gen_op_movl_T1_im((insn >> 6) & 7);
7910 } else {
7911 /* reg */
7912 rm = (insn >> 6) & 7;
7913 gen_movl_T1_reg(s, rm);
7914 }
9ee6e8bb
PB
7915 if (insn & (1 << 9)) {
7916 if (s->condexec_mask)
7917 gen_op_subl_T0_T1();
7918 else
7919 gen_op_subl_T0_T1_cc();
7920 } else {
7921 if (s->condexec_mask)
7922 gen_op_addl_T0_T1();
7923 else
7924 gen_op_addl_T0_T1_cc();
7925 }
99c475ab
FB
7926 gen_movl_reg_T0(s, rd);
7927 } else {
7928 /* shift immediate */
7929 rm = (insn >> 3) & 7;
7930 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
7931 tmp = load_reg(s, rm);
7932 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7933 if (!s->condexec_mask)
7934 gen_logic_CC(tmp);
7935 store_reg(s, rd, tmp);
99c475ab
FB
7936 }
7937 break;
7938 case 2: case 3:
7939 /* arithmetic large immediate */
7940 op = (insn >> 11) & 3;
7941 rd = (insn >> 8) & 0x7;
7942 if (op == 0) {
7943 gen_op_movl_T0_im(insn & 0xff);
7944 } else {
7945 gen_movl_T0_reg(s, rd);
7946 gen_op_movl_T1_im(insn & 0xff);
7947 }
7948 switch (op) {
7949 case 0: /* mov */
9ee6e8bb
PB
7950 if (!s->condexec_mask)
7951 gen_op_logic_T0_cc();
99c475ab
FB
7952 break;
7953 case 1: /* cmp */
7954 gen_op_subl_T0_T1_cc();
7955 break;
7956 case 2: /* add */
9ee6e8bb
PB
7957 if (s->condexec_mask)
7958 gen_op_addl_T0_T1();
7959 else
7960 gen_op_addl_T0_T1_cc();
99c475ab
FB
7961 break;
7962 case 3: /* sub */
9ee6e8bb
PB
7963 if (s->condexec_mask)
7964 gen_op_subl_T0_T1();
7965 else
7966 gen_op_subl_T0_T1_cc();
99c475ab
FB
7967 break;
7968 }
7969 if (op != 1)
7970 gen_movl_reg_T0(s, rd);
7971 break;
7972 case 4:
7973 if (insn & (1 << 11)) {
7974 rd = (insn >> 8) & 7;
5899f386
FB
7975 /* load pc-relative. Bit 1 of PC is ignored. */
7976 val = s->pc + 2 + ((insn & 0xff) * 4);
7977 val &= ~(uint32_t)2;
b0109805
PB
7978 addr = new_tmp();
7979 tcg_gen_movi_i32(addr, val);
7980 tmp = gen_ld32(addr, IS_USER(s));
7981 dead_tmp(addr);
7982 store_reg(s, rd, tmp);
99c475ab
FB
7983 break;
7984 }
7985 if (insn & (1 << 10)) {
7986 /* data processing extended or blx */
7987 rd = (insn & 7) | ((insn >> 4) & 8);
7988 rm = (insn >> 3) & 0xf;
7989 op = (insn >> 8) & 3;
7990 switch (op) {
7991 case 0: /* add */
7992 gen_movl_T0_reg(s, rd);
7993 gen_movl_T1_reg(s, rm);
7994 gen_op_addl_T0_T1();
7995 gen_movl_reg_T0(s, rd);
7996 break;
7997 case 1: /* cmp */
7998 gen_movl_T0_reg(s, rd);
7999 gen_movl_T1_reg(s, rm);
8000 gen_op_subl_T0_T1_cc();
8001 break;
8002 case 2: /* mov/cpy */
8003 gen_movl_T0_reg(s, rm);
8004 gen_movl_reg_T0(s, rd);
8005 break;
8006 case 3:/* branch [and link] exchange thumb register */
b0109805 8007 tmp = load_reg(s, rm);
99c475ab
FB
8008 if (insn & (1 << 7)) {
8009 val = (uint32_t)s->pc | 1;
b0109805
PB
8010 tmp2 = new_tmp();
8011 tcg_gen_movi_i32(tmp2, val);
8012 store_reg(s, 14, tmp2);
99c475ab 8013 }
d9ba4830 8014 gen_bx(s, tmp);
99c475ab
FB
8015 break;
8016 }
8017 break;
8018 }
8019
8020 /* data processing register */
8021 rd = insn & 7;
8022 rm = (insn >> 3) & 7;
8023 op = (insn >> 6) & 0xf;
8024 if (op == 2 || op == 3 || op == 4 || op == 7) {
8025 /* the shift/rotate ops want the operands backwards */
8026 val = rm;
8027 rm = rd;
8028 rd = val;
8029 val = 1;
8030 } else {
8031 val = 0;
8032 }
8033
8034 if (op == 9) /* neg */
8035 gen_op_movl_T0_im(0);
8036 else if (op != 0xf) /* mvn doesn't read its first operand */
8037 gen_movl_T0_reg(s, rd);
8038
8039 gen_movl_T1_reg(s, rm);
5899f386 8040 switch (op) {
99c475ab
FB
8041 case 0x0: /* and */
8042 gen_op_andl_T0_T1();
9ee6e8bb
PB
8043 if (!s->condexec_mask)
8044 gen_op_logic_T0_cc();
99c475ab
FB
8045 break;
8046 case 0x1: /* eor */
8047 gen_op_xorl_T0_T1();
9ee6e8bb
PB
8048 if (!s->condexec_mask)
8049 gen_op_logic_T0_cc();
99c475ab
FB
8050 break;
8051 case 0x2: /* lsl */
9ee6e8bb 8052 if (s->condexec_mask) {
8984bd2e 8053 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8054 } else {
8984bd2e 8055 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8056 gen_op_logic_T1_cc();
8057 }
99c475ab
FB
8058 break;
8059 case 0x3: /* lsr */
9ee6e8bb 8060 if (s->condexec_mask) {
8984bd2e 8061 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8062 } else {
8984bd2e 8063 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8064 gen_op_logic_T1_cc();
8065 }
99c475ab
FB
8066 break;
8067 case 0x4: /* asr */
9ee6e8bb 8068 if (s->condexec_mask) {
8984bd2e 8069 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8070 } else {
8984bd2e 8071 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8072 gen_op_logic_T1_cc();
8073 }
99c475ab
FB
8074 break;
8075 case 0x5: /* adc */
9ee6e8bb 8076 if (s->condexec_mask)
b26eefb6 8077 gen_adc_T0_T1();
9ee6e8bb
PB
8078 else
8079 gen_op_adcl_T0_T1_cc();
99c475ab
FB
8080 break;
8081 case 0x6: /* sbc */
9ee6e8bb 8082 if (s->condexec_mask)
3670669c 8083 gen_sbc_T0_T1();
9ee6e8bb
PB
8084 else
8085 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
8086 break;
8087 case 0x7: /* ror */
9ee6e8bb 8088 if (s->condexec_mask) {
8984bd2e 8089 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8090 } else {
8984bd2e 8091 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8092 gen_op_logic_T1_cc();
8093 }
99c475ab
FB
8094 break;
8095 case 0x8: /* tst */
8096 gen_op_andl_T0_T1();
8097 gen_op_logic_T0_cc();
8098 rd = 16;
5899f386 8099 break;
99c475ab 8100 case 0x9: /* neg */
9ee6e8bb 8101 if (s->condexec_mask)
390efc54 8102 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
8103 else
8104 gen_op_subl_T0_T1_cc();
99c475ab
FB
8105 break;
8106 case 0xa: /* cmp */
8107 gen_op_subl_T0_T1_cc();
8108 rd = 16;
8109 break;
8110 case 0xb: /* cmn */
8111 gen_op_addl_T0_T1_cc();
8112 rd = 16;
8113 break;
8114 case 0xc: /* orr */
8115 gen_op_orl_T0_T1();
9ee6e8bb
PB
8116 if (!s->condexec_mask)
8117 gen_op_logic_T0_cc();
99c475ab
FB
8118 break;
8119 case 0xd: /* mul */
8120 gen_op_mull_T0_T1();
9ee6e8bb
PB
8121 if (!s->condexec_mask)
8122 gen_op_logic_T0_cc();
99c475ab
FB
8123 break;
8124 case 0xe: /* bic */
8125 gen_op_bicl_T0_T1();
9ee6e8bb
PB
8126 if (!s->condexec_mask)
8127 gen_op_logic_T0_cc();
99c475ab
FB
8128 break;
8129 case 0xf: /* mvn */
8130 gen_op_notl_T1();
9ee6e8bb
PB
8131 if (!s->condexec_mask)
8132 gen_op_logic_T1_cc();
99c475ab 8133 val = 1;
5899f386 8134 rm = rd;
99c475ab
FB
8135 break;
8136 }
8137 if (rd != 16) {
8138 if (val)
5899f386 8139 gen_movl_reg_T1(s, rm);
99c475ab
FB
8140 else
8141 gen_movl_reg_T0(s, rd);
8142 }
8143 break;
8144
8145 case 5:
8146 /* load/store register offset. */
8147 rd = insn & 7;
8148 rn = (insn >> 3) & 7;
8149 rm = (insn >> 6) & 7;
8150 op = (insn >> 9) & 7;
b0109805 8151 addr = load_reg(s, rn);
b26eefb6 8152 tmp = load_reg(s, rm);
b0109805 8153 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8154 dead_tmp(tmp);
99c475ab
FB
8155
8156 if (op < 3) /* store */
b0109805 8157 tmp = load_reg(s, rd);
99c475ab
FB
8158
8159 switch (op) {
8160 case 0: /* str */
b0109805 8161 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8162 break;
8163 case 1: /* strh */
b0109805 8164 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8165 break;
8166 case 2: /* strb */
b0109805 8167 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8168 break;
8169 case 3: /* ldrsb */
b0109805 8170 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8171 break;
8172 case 4: /* ldr */
b0109805 8173 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8174 break;
8175 case 5: /* ldrh */
b0109805 8176 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8177 break;
8178 case 6: /* ldrb */
b0109805 8179 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8180 break;
8181 case 7: /* ldrsh */
b0109805 8182 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8183 break;
8184 }
8185 if (op >= 3) /* load */
b0109805
PB
8186 store_reg(s, rd, tmp);
8187 dead_tmp(addr);
99c475ab
FB
8188 break;
8189
8190 case 6:
8191 /* load/store word immediate offset */
8192 rd = insn & 7;
8193 rn = (insn >> 3) & 7;
b0109805 8194 addr = load_reg(s, rn);
99c475ab 8195 val = (insn >> 4) & 0x7c;
b0109805 8196 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8197
8198 if (insn & (1 << 11)) {
8199 /* load */
b0109805
PB
8200 tmp = gen_ld32(addr, IS_USER(s));
8201 store_reg(s, rd, tmp);
99c475ab
FB
8202 } else {
8203 /* store */
b0109805
PB
8204 tmp = load_reg(s, rd);
8205 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8206 }
b0109805 8207 dead_tmp(addr);
99c475ab
FB
8208 break;
8209
8210 case 7:
8211 /* load/store byte immediate offset */
8212 rd = insn & 7;
8213 rn = (insn >> 3) & 7;
b0109805 8214 addr = load_reg(s, rn);
99c475ab 8215 val = (insn >> 6) & 0x1f;
b0109805 8216 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8217
8218 if (insn & (1 << 11)) {
8219 /* load */
b0109805
PB
8220 tmp = gen_ld8u(addr, IS_USER(s));
8221 store_reg(s, rd, tmp);
99c475ab
FB
8222 } else {
8223 /* store */
b0109805
PB
8224 tmp = load_reg(s, rd);
8225 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8226 }
b0109805 8227 dead_tmp(addr);
99c475ab
FB
8228 break;
8229
8230 case 8:
8231 /* load/store halfword immediate offset */
8232 rd = insn & 7;
8233 rn = (insn >> 3) & 7;
b0109805 8234 addr = load_reg(s, rn);
99c475ab 8235 val = (insn >> 5) & 0x3e;
b0109805 8236 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8237
8238 if (insn & (1 << 11)) {
8239 /* load */
b0109805
PB
8240 tmp = gen_ld16u(addr, IS_USER(s));
8241 store_reg(s, rd, tmp);
99c475ab
FB
8242 } else {
8243 /* store */
b0109805
PB
8244 tmp = load_reg(s, rd);
8245 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8246 }
b0109805 8247 dead_tmp(addr);
99c475ab
FB
8248 break;
8249
8250 case 9:
8251 /* load/store from stack */
8252 rd = (insn >> 8) & 7;
b0109805 8253 addr = load_reg(s, 13);
99c475ab 8254 val = (insn & 0xff) * 4;
b0109805 8255 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8256
8257 if (insn & (1 << 11)) {
8258 /* load */
b0109805
PB
8259 tmp = gen_ld32(addr, IS_USER(s));
8260 store_reg(s, rd, tmp);
99c475ab
FB
8261 } else {
8262 /* store */
b0109805
PB
8263 tmp = load_reg(s, rd);
8264 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8265 }
b0109805 8266 dead_tmp(addr);
99c475ab
FB
8267 break;
8268
8269 case 10:
8270 /* add to high reg */
8271 rd = (insn >> 8) & 7;
5899f386
FB
8272 if (insn & (1 << 11)) {
8273 /* SP */
5e3f878a 8274 tmp = load_reg(s, 13);
5899f386
FB
8275 } else {
8276 /* PC. bit 1 is ignored. */
5e3f878a
PB
8277 tmp = new_tmp();
8278 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8279 }
99c475ab 8280 val = (insn & 0xff) * 4;
5e3f878a
PB
8281 tcg_gen_addi_i32(tmp, tmp, val);
8282 store_reg(s, rd, tmp);
99c475ab
FB
8283 break;
8284
8285 case 11:
8286 /* misc */
8287 op = (insn >> 8) & 0xf;
8288 switch (op) {
8289 case 0:
8290 /* adjust stack pointer */
b26eefb6 8291 tmp = load_reg(s, 13);
99c475ab
FB
8292 val = (insn & 0x7f) * 4;
8293 if (insn & (1 << 7))
6a0d8a1d 8294 val = -(int32_t)val;
b26eefb6
PB
8295 tcg_gen_addi_i32(tmp, tmp, val);
8296 store_reg(s, 13, tmp);
99c475ab
FB
8297 break;
8298
9ee6e8bb
PB
8299 case 2: /* sign/zero extend. */
8300 ARCH(6);
8301 rd = insn & 7;
8302 rm = (insn >> 3) & 7;
b0109805 8303 tmp = load_reg(s, rm);
9ee6e8bb 8304 switch ((insn >> 6) & 3) {
b0109805
PB
8305 case 0: gen_sxth(tmp); break;
8306 case 1: gen_sxtb(tmp); break;
8307 case 2: gen_uxth(tmp); break;
8308 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8309 }
b0109805 8310 store_reg(s, rd, tmp);
9ee6e8bb 8311 break;
99c475ab
FB
8312 case 4: case 5: case 0xc: case 0xd:
8313 /* push/pop */
b0109805 8314 addr = load_reg(s, 13);
5899f386
FB
8315 if (insn & (1 << 8))
8316 offset = 4;
99c475ab 8317 else
5899f386
FB
8318 offset = 0;
8319 for (i = 0; i < 8; i++) {
8320 if (insn & (1 << i))
8321 offset += 4;
8322 }
8323 if ((insn & (1 << 11)) == 0) {
b0109805 8324 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8325 }
99c475ab
FB
8326 for (i = 0; i < 8; i++) {
8327 if (insn & (1 << i)) {
8328 if (insn & (1 << 11)) {
8329 /* pop */
b0109805
PB
8330 tmp = gen_ld32(addr, IS_USER(s));
8331 store_reg(s, i, tmp);
99c475ab
FB
8332 } else {
8333 /* push */
b0109805
PB
8334 tmp = load_reg(s, i);
8335 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8336 }
5899f386 8337 /* advance to the next address. */
b0109805 8338 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8339 }
8340 }
8341 if (insn & (1 << 8)) {
8342 if (insn & (1 << 11)) {
8343 /* pop pc */
b0109805 8344 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8345 /* don't set the pc until the rest of the instruction
8346 has completed */
8347 } else {
8348 /* push lr */
b0109805
PB
8349 tmp = load_reg(s, 14);
8350 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8351 }
b0109805 8352 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8353 }
5899f386 8354 if ((insn & (1 << 11)) == 0) {
b0109805 8355 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8356 }
99c475ab 8357 /* write back the new stack pointer */
b0109805 8358 store_reg(s, 13, addr);
99c475ab
FB
8359 /* set the new PC value */
8360 if ((insn & 0x0900) == 0x0900)
b0109805 8361 gen_bx(s, tmp);
99c475ab
FB
8362 break;
8363
9ee6e8bb
PB
8364 case 1: case 3: case 9: case 11: /* czb */
8365 rm = insn & 7;
d9ba4830
PB
8366 tmp = load_reg(s, rm);
8367 tmp2 = tcg_const_i32(0);
9ee6e8bb
PB
8368 s->condlabel = gen_new_label();
8369 s->condjmp = 1;
8370 if (insn & (1 << 11))
d9ba4830 8371 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, s->condlabel);
9ee6e8bb 8372 else
d9ba4830
PB
8373 tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, s->condlabel);
8374 dead_tmp(tmp);
9ee6e8bb
PB
8375 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8376 val = (uint32_t)s->pc + 2;
8377 val += offset;
8378 gen_jmp(s, val);
8379 break;
8380
8381 case 15: /* IT, nop-hint. */
8382 if ((insn & 0xf) == 0) {
8383 gen_nop_hint(s, (insn >> 4) & 0xf);
8384 break;
8385 }
8386 /* If Then. */
8387 s->condexec_cond = (insn >> 4) & 0xe;
8388 s->condexec_mask = insn & 0x1f;
8389 /* No actual code generated for this insn, just setup state. */
8390 break;
8391
06c949e6 8392 case 0xe: /* bkpt */
9ee6e8bb 8393 gen_set_condexec(s);
5e3f878a 8394 gen_set_pc_im(s->pc - 2);
d9ba4830 8395 gen_exception(EXCP_BKPT);
06c949e6
PB
8396 s->is_jmp = DISAS_JUMP;
8397 break;
8398
9ee6e8bb
PB
8399 case 0xa: /* rev */
8400 ARCH(6);
8401 rn = (insn >> 3) & 0x7;
8402 rd = insn & 0x7;
b0109805 8403 tmp = load_reg(s, rn);
9ee6e8bb 8404 switch ((insn >> 6) & 3) {
b0109805
PB
8405 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8406 case 1: gen_rev16(tmp); break;
8407 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8408 default: goto illegal_op;
8409 }
b0109805 8410 store_reg(s, rd, tmp);
9ee6e8bb
PB
8411 break;
8412
8413 case 6: /* cps */
8414 ARCH(6);
8415 if (IS_USER(s))
8416 break;
8417 if (IS_M(env)) {
8984bd2e 8418 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8419 /* PRIMASK */
8984bd2e
PB
8420 if (insn & 1) {
8421 addr = tcg_const_i32(16);
8422 gen_helper_v7m_msr(cpu_env, addr, tmp);
8423 }
9ee6e8bb 8424 /* FAULTMASK */
8984bd2e
PB
8425 if (insn & 2) {
8426 addr = tcg_const_i32(17);
8427 gen_helper_v7m_msr(cpu_env, addr, tmp);
8428 }
9ee6e8bb
PB
8429 gen_lookup_tb(s);
8430 } else {
8431 if (insn & (1 << 4))
8432 shift = CPSR_A | CPSR_I | CPSR_F;
8433 else
8434 shift = 0;
8435
8436 val = ((insn & 7) << 6) & shift;
8437 gen_op_movl_T0_im(val);
8438 gen_set_psr_T0(s, shift, 0);
8439 }
8440 break;
8441
99c475ab
FB
8442 default:
8443 goto undef;
8444 }
8445 break;
8446
8447 case 12:
8448 /* load/store multiple */
8449 rn = (insn >> 8) & 0x7;
b0109805 8450 addr = load_reg(s, rn);
99c475ab
FB
8451 for (i = 0; i < 8; i++) {
8452 if (insn & (1 << i)) {
99c475ab
FB
8453 if (insn & (1 << 11)) {
8454 /* load */
b0109805
PB
8455 tmp = gen_ld32(addr, IS_USER(s));
8456 store_reg(s, i, tmp);
99c475ab
FB
8457 } else {
8458 /* store */
b0109805
PB
8459 tmp = load_reg(s, i);
8460 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8461 }
5899f386 8462 /* advance to the next address */
b0109805 8463 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8464 }
8465 }
5899f386 8466 /* Base register writeback. */
b0109805
PB
8467 if ((insn & (1 << rn)) == 0) {
8468 store_reg(s, rn, addr);
8469 } else {
8470 dead_tmp(addr);
8471 }
99c475ab
FB
8472 break;
8473
8474 case 13:
8475 /* conditional branch or swi */
8476 cond = (insn >> 8) & 0xf;
8477 if (cond == 0xe)
8478 goto undef;
8479
8480 if (cond == 0xf) {
8481 /* swi */
9ee6e8bb 8482 gen_set_condexec(s);
422ebf69 8483 gen_set_pc_im(s->pc);
9ee6e8bb 8484 s->is_jmp = DISAS_SWI;
99c475ab
FB
8485 break;
8486 }
8487 /* generate a conditional jump to next instruction */
e50e6a20 8488 s->condlabel = gen_new_label();
d9ba4830 8489 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8490 s->condjmp = 1;
99c475ab
FB
8491 gen_movl_T1_reg(s, 15);
8492
8493 /* jump to the offset */
5899f386 8494 val = (uint32_t)s->pc + 2;
99c475ab 8495 offset = ((int32_t)insn << 24) >> 24;
5899f386 8496 val += offset << 1;
8aaca4c0 8497 gen_jmp(s, val);
99c475ab
FB
8498 break;
8499
8500 case 14:
358bf29e 8501 if (insn & (1 << 11)) {
9ee6e8bb
PB
8502 if (disas_thumb2_insn(env, s, insn))
8503 goto undef32;
358bf29e
PB
8504 break;
8505 }
9ee6e8bb 8506 /* unconditional branch */
99c475ab
FB
8507 val = (uint32_t)s->pc;
8508 offset = ((int32_t)insn << 21) >> 21;
8509 val += (offset << 1) + 2;
8aaca4c0 8510 gen_jmp(s, val);
99c475ab
FB
8511 break;
8512
8513 case 15:
9ee6e8bb 8514 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8515 goto undef32;
9ee6e8bb 8516 break;
99c475ab
FB
8517 }
8518 return;
9ee6e8bb
PB
8519undef32:
8520 gen_set_condexec(s);
5e3f878a 8521 gen_set_pc_im(s->pc - 4);
d9ba4830 8522 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8523 s->is_jmp = DISAS_JUMP;
8524 return;
8525illegal_op:
99c475ab 8526undef:
9ee6e8bb 8527 gen_set_condexec(s);
5e3f878a 8528 gen_set_pc_im(s->pc - 2);
d9ba4830 8529 gen_exception(EXCP_UDEF);
99c475ab
FB
8530 s->is_jmp = DISAS_JUMP;
8531}
8532
2c0262af
FB
8533/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8534 basic block 'tb'. If search_pc is TRUE, also generate PC
8535 information for each intermediate instruction. */
5fafdf24
TS
8536static inline int gen_intermediate_code_internal(CPUState *env,
8537 TranslationBlock *tb,
2c0262af
FB
8538 int search_pc)
8539{
8540 DisasContext dc1, *dc = &dc1;
8541 uint16_t *gen_opc_end;
8542 int j, lj;
0fa85d43 8543 target_ulong pc_start;
b5ff1b31 8544 uint32_t next_page_start;
3b46e624 8545
2c0262af 8546 /* generate intermediate code */
b26eefb6
PB
8547 num_temps = 0;
8548 memset(temps, 0, sizeof(temps));
8549
0fa85d43 8550 pc_start = tb->pc;
3b46e624 8551
2c0262af
FB
8552 dc->tb = tb;
8553
2c0262af 8554 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8555
8556 dc->is_jmp = DISAS_NEXT;
8557 dc->pc = pc_start;
8aaca4c0 8558 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8559 dc->condjmp = 0;
5899f386 8560 dc->thumb = env->thumb;
9ee6e8bb
PB
8561 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8562 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 8563 dc->is_mem = 0;
b5ff1b31 8564#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8565 if (IS_M(env)) {
8566 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8567 } else {
8568 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8569 }
b5ff1b31 8570#endif
4373f3ce
PB
8571 cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8572 cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8573 cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8574 cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
ad69471c
PB
8575 cpu_V0 = cpu_F0d;
8576 cpu_V1 = cpu_F1d;
e677137d
PB
8577 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8578 cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
b5ff1b31 8579 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8580 lj = -1;
9ee6e8bb
PB
8581 /* Reset the conditional execution bits immediately. This avoids
8582 complications trying to do it at the end of the block. */
8583 if (env->condexec_bits)
8f01245e
PB
8584 {
8585 TCGv tmp = new_tmp();
8586 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8587 store_cpu_field(tmp, condexec_bits);
8f01245e 8588 }
2c0262af 8589 do {
9ee6e8bb
PB
8590#ifndef CONFIG_USER_ONLY
8591 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8592 /* We always get here via a jump, so know we are not in a
8593 conditional execution block. */
d9ba4830 8594 gen_exception(EXCP_EXCEPTION_EXIT);
9ee6e8bb
PB
8595 }
8596#endif
8597
1fddef4b
FB
8598 if (env->nb_breakpoints > 0) {
8599 for(j = 0; j < env->nb_breakpoints; j++) {
8600 if (env->breakpoints[j] == dc->pc) {
9ee6e8bb 8601 gen_set_condexec(dc);
5e3f878a 8602 gen_set_pc_im(dc->pc);
d9ba4830 8603 gen_exception(EXCP_DEBUG);
1fddef4b 8604 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8605 /* Advance PC so that clearing the breakpoint will
8606 invalidate this TB. */
8607 dc->pc += 2;
8608 goto done_generating;
1fddef4b
FB
8609 break;
8610 }
8611 }
8612 }
2c0262af
FB
8613 if (search_pc) {
8614 j = gen_opc_ptr - gen_opc_buf;
8615 if (lj < j) {
8616 lj++;
8617 while (lj < j)
8618 gen_opc_instr_start[lj++] = 0;
8619 }
0fa85d43 8620 gen_opc_pc[lj] = dc->pc;
2c0262af
FB
8621 gen_opc_instr_start[lj] = 1;
8622 }
e50e6a20 8623
9ee6e8bb
PB
8624 if (env->thumb) {
8625 disas_thumb_insn(env, dc);
8626 if (dc->condexec_mask) {
8627 dc->condexec_cond = (dc->condexec_cond & 0xe)
8628 | ((dc->condexec_mask >> 4) & 1);
8629 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8630 if (dc->condexec_mask == 0) {
8631 dc->condexec_cond = 0;
8632 }
8633 }
8634 } else {
8635 disas_arm_insn(env, dc);
8636 }
b26eefb6
PB
8637 if (num_temps) {
8638 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8639 num_temps = 0;
8640 }
e50e6a20
FB
8641
8642 if (dc->condjmp && !dc->is_jmp) {
8643 gen_set_label(dc->condlabel);
8644 dc->condjmp = 0;
8645 }
6658ffb8
PB
8646 /* Terminate the TB on memory ops if watchpoints are present. */
8647 /* FIXME: This should be replacd by the deterministic execution
8648 * IRQ raising bits. */
8649 if (dc->is_mem && env->nb_watchpoints)
8650 break;
8651
e50e6a20
FB
8652 /* Translation stops when a conditional branch is enoutered.
8653 * Otherwise the subsequent code could get translated several times.
b5ff1b31
FB
8654 * Also stop translation when a page boundary is reached. This
8655 * ensures prefech aborts occur at the right place. */
1fddef4b
FB
8656 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8657 !env->singlestep_enabled &&
b5ff1b31 8658 dc->pc < next_page_start);
9ee6e8bb 8659
b5ff1b31 8660 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8661 instruction was a conditional branch or trap, and the PC has
8662 already been written. */
8aaca4c0
FB
8663 if (__builtin_expect(env->singlestep_enabled, 0)) {
8664 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8665 if (dc->condjmp) {
9ee6e8bb
PB
8666 gen_set_condexec(dc);
8667 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8668 gen_exception(EXCP_SWI);
9ee6e8bb 8669 } else {
d9ba4830 8670 gen_exception(EXCP_DEBUG);
9ee6e8bb 8671 }
e50e6a20
FB
8672 gen_set_label(dc->condlabel);
8673 }
8674 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8675 gen_set_pc_im(dc->pc);
e50e6a20 8676 dc->condjmp = 0;
8aaca4c0 8677 }
9ee6e8bb
PB
8678 gen_set_condexec(dc);
8679 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8680 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8681 } else {
8682 /* FIXME: Single stepping a WFI insn will not halt
8683 the CPU. */
d9ba4830 8684 gen_exception(EXCP_DEBUG);
9ee6e8bb 8685 }
8aaca4c0 8686 } else {
9ee6e8bb
PB
8687 /* While branches must always occur at the end of an IT block,
8688 there are a few other things that can cause us to terminate
8689 the TB in the middel of an IT block:
8690 - Exception generating instructions (bkpt, swi, undefined).
8691 - Page boundaries.
8692 - Hardware watchpoints.
8693 Hardware breakpoints have already been handled and skip this code.
8694 */
8695 gen_set_condexec(dc);
8aaca4c0 8696 switch(dc->is_jmp) {
8aaca4c0 8697 case DISAS_NEXT:
6e256c93 8698 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8699 break;
8700 default:
8701 case DISAS_JUMP:
8702 case DISAS_UPDATE:
8703 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8704 tcg_gen_exit_tb(0);
8aaca4c0
FB
8705 break;
8706 case DISAS_TB_JUMP:
8707 /* nothing more to generate */
8708 break;
9ee6e8bb 8709 case DISAS_WFI:
d9ba4830 8710 gen_helper_wfi();
9ee6e8bb
PB
8711 break;
8712 case DISAS_SWI:
d9ba4830 8713 gen_exception(EXCP_SWI);
9ee6e8bb 8714 break;
8aaca4c0 8715 }
e50e6a20
FB
8716 if (dc->condjmp) {
8717 gen_set_label(dc->condlabel);
9ee6e8bb 8718 gen_set_condexec(dc);
6e256c93 8719 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8720 dc->condjmp = 0;
8721 }
2c0262af 8722 }
9ee6e8bb 8723done_generating:
2c0262af
FB
8724 *gen_opc_ptr = INDEX_op_end;
8725
8726#ifdef DEBUG_DISAS
e19e89a5 8727 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8728 fprintf(logfile, "----------------\n");
8729 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8730 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8731 fprintf(logfile, "\n");
8732 }
8733#endif
b5ff1b31
FB
8734 if (search_pc) {
8735 j = gen_opc_ptr - gen_opc_buf;
8736 lj++;
8737 while (lj <= j)
8738 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8739 } else {
2c0262af 8740 tb->size = dc->pc - pc_start;
b5ff1b31 8741 }
2c0262af
FB
8742 return 0;
8743}
8744
8745int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8746{
8747 return gen_intermediate_code_internal(env, tb, 0);
8748}
8749
8750int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8751{
8752 return gen_intermediate_code_internal(env, tb, 1);
8753}
8754
b5ff1b31
FB
8755static const char *cpu_mode_names[16] = {
8756 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8757 "???", "???", "???", "und", "???", "???", "???", "sys"
8758};
9ee6e8bb 8759
5fafdf24 8760void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8761 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8762 int flags)
2c0262af
FB
8763{
8764 int i;
bc380d17 8765 union {
b7bcbe95
FB
8766 uint32_t i;
8767 float s;
8768 } s0, s1;
8769 CPU_DoubleU d;
a94a6abf
PB
8770 /* ??? This assumes float64 and double have the same layout.
8771 Oh well, it's only debug dumps. */
8772 union {
8773 float64 f64;
8774 double d;
8775 } d0;
b5ff1b31 8776 uint32_t psr;
2c0262af
FB
8777
8778 for(i=0;i<16;i++) {
7fe48483 8779 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8780 if ((i % 4) == 3)
7fe48483 8781 cpu_fprintf(f, "\n");
2c0262af 8782 else
7fe48483 8783 cpu_fprintf(f, " ");
2c0262af 8784 }
b5ff1b31 8785 psr = cpsr_read(env);
687fa640
TS
8786 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8787 psr,
b5ff1b31
FB
8788 psr & (1 << 31) ? 'N' : '-',
8789 psr & (1 << 30) ? 'Z' : '-',
8790 psr & (1 << 29) ? 'C' : '-',
8791 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8792 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8793 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8794
5e3f878a 8795#if 0
b7bcbe95 8796 for (i = 0; i < 16; i++) {
8e96005d
FB
8797 d.d = env->vfp.regs[i];
8798 s0.i = d.l.lower;
8799 s1.i = d.l.upper;
a94a6abf
PB
8800 d0.f64 = d.d;
8801 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8802 i * 2, (int)s0.i, s0.s,
a94a6abf 8803 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8804 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8805 d0.d);
b7bcbe95 8806 }
40f137e1 8807 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8808#endif
2c0262af 8809}
a6b025d3 8810
d2856f1a
AJ
8811void gen_pc_load(CPUState *env, TranslationBlock *tb,
8812 unsigned long searched_pc, int pc_pos, void *puc)
8813{
8814 env->regs[15] = gen_opc_pc[pc_pos];
8815}