]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
Fix shell quoting.
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
1497c961
PB
32
33#define GEN_HELPER 1
b26eefb6 34#include "helpers.h"
2c0262af 35
9ee6e8bb
PB
36#define ENABLE_ARCH_5J 0
37#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
38#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
39#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
40#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31
FB
41
42#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
43
2c0262af
FB
44/* internal defines */
45typedef struct DisasContext {
0fa85d43 46 target_ulong pc;
2c0262af 47 int is_jmp;
e50e6a20
FB
48 /* Nonzero if this instruction has been conditionally skipped. */
49 int condjmp;
50 /* The label that will be jumped to when the instruction is skipped. */
51 int condlabel;
9ee6e8bb
PB
52 /* Thumb-2 condtional execution bits. */
53 int condexec_mask;
54 int condexec_cond;
2c0262af 55 struct TranslationBlock *tb;
8aaca4c0 56 int singlestep_enabled;
5899f386 57 int thumb;
6658ffb8 58 int is_mem;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af
FB
74
75/* XXX: move that elsewhere */
2c0262af
FB
76extern FILE *logfile;
77extern int loglevel;
78
b26eefb6 79static TCGv cpu_env;
ad69471c 80/* We reuse the same 64-bit temporaries for efficiency. */
e677137d 81static TCGv cpu_V0, cpu_V1, cpu_M0;
ad69471c 82
b26eefb6 83/* FIXME: These should be removed. */
8f8e3aa4 84static TCGv cpu_T[2];
4373f3ce 85static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
b26eefb6
PB
86
87/* initialize TCG globals. */
88void arm_translate_init(void)
89{
90 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
91
92 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
93 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
b26eefb6
PB
94}
95
96/* The code generator doesn't like lots of temporaries, so maintain our own
97 cache for reuse within a function. */
98#define MAX_TEMPS 8
99static int num_temps;
100static TCGv temps[MAX_TEMPS];
101
102/* Allocate a temporary variable. */
103static TCGv new_tmp(void)
104{
105 TCGv tmp;
106 if (num_temps == MAX_TEMPS)
107 abort();
108
109 if (GET_TCGV(temps[num_temps]))
110 return temps[num_temps++];
111
112 tmp = tcg_temp_new(TCG_TYPE_I32);
113 temps[num_temps++] = tmp;
114 return tmp;
115}
116
117/* Release a temporary variable. */
118static void dead_tmp(TCGv tmp)
119{
120 int i;
121 num_temps--;
122 i = num_temps;
123 if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
124 return;
125
126 /* Shuffle this temp to the last slot. */
127 while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
128 i--;
129 while (i < num_temps) {
130 temps[i] = temps[i + 1];
131 i++;
132 }
133 temps[i] = tmp;
134}
135
d9ba4830
PB
136static inline TCGv load_cpu_offset(int offset)
137{
138 TCGv tmp = new_tmp();
139 tcg_gen_ld_i32(tmp, cpu_env, offset);
140 return tmp;
141}
142
143#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
144
145static inline void store_cpu_offset(TCGv var, int offset)
146{
147 tcg_gen_st_i32(var, cpu_env, offset);
148 dead_tmp(var);
149}
150
151#define store_cpu_field(var, name) \
152 store_cpu_offset(var, offsetof(CPUState, name))
153
b26eefb6
PB
154/* Set a variable to the value of a CPU register. */
155static void load_reg_var(DisasContext *s, TCGv var, int reg)
156{
157 if (reg == 15) {
158 uint32_t addr;
159 /* normaly, since we updated PC, we need only to add one insn */
160 if (s->thumb)
161 addr = (long)s->pc + 2;
162 else
163 addr = (long)s->pc + 4;
164 tcg_gen_movi_i32(var, addr);
165 } else {
166 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
167 }
168}
169
170/* Create a new temporary and set it to the value of a CPU register. */
171static inline TCGv load_reg(DisasContext *s, int reg)
172{
173 TCGv tmp = new_tmp();
174 load_reg_var(s, tmp, reg);
175 return tmp;
176}
177
178/* Set a CPU register. The source must be a temporary and will be
179 marked as dead. */
180static void store_reg(DisasContext *s, int reg, TCGv var)
181{
182 if (reg == 15) {
183 tcg_gen_andi_i32(var, var, ~1);
184 s->is_jmp = DISAS_JUMP;
185 }
186 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
187 dead_tmp(var);
188}
189
190
191/* Basic operations. */
192#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
b26eefb6 193#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
b26eefb6
PB
194#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
195#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
b26eefb6
PB
196
197#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
198#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
199#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
201
8984bd2e
PB
202#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
203#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
204#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
205#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
207#define gen_op_rscl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[1], cpu_T[0])
208
b26eefb6
PB
209#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
210#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
211#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
212#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
213#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
214#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
215#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
216
217#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
218#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
219#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
220#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
221#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
222
223/* Value extensions. */
86831435
PB
224#define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
225#define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
b26eefb6
PB
226#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
227#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
228
1497c961
PB
229#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
230#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
231
232#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 233
d9ba4830
PB
234#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
235/* Set NZCV flags from the high 4 bits of var. */
236#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
237
238static void gen_exception(int excp)
239{
240 TCGv tmp = new_tmp();
241 tcg_gen_movi_i32(tmp, excp);
242 gen_helper_exception(tmp);
243 dead_tmp(tmp);
244}
245
3670669c
PB
246static void gen_smul_dual(TCGv a, TCGv b)
247{
248 TCGv tmp1 = new_tmp();
249 TCGv tmp2 = new_tmp();
3670669c
PB
250 tcg_gen_ext8s_i32(tmp1, a);
251 tcg_gen_ext8s_i32(tmp2, b);
252 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
253 dead_tmp(tmp2);
254 tcg_gen_sari_i32(a, a, 16);
255 tcg_gen_sari_i32(b, b, 16);
256 tcg_gen_mul_i32(b, b, a);
257 tcg_gen_mov_i32(a, tmp1);
258 dead_tmp(tmp1);
259}
260
261/* Byteswap each halfword. */
262static void gen_rev16(TCGv var)
263{
264 TCGv tmp = new_tmp();
265 tcg_gen_shri_i32(tmp, var, 8);
266 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
267 tcg_gen_shli_i32(var, var, 8);
268 tcg_gen_andi_i32(var, var, 0xff00ff00);
269 tcg_gen_or_i32(var, var, tmp);
270 dead_tmp(tmp);
271}
272
273/* Byteswap low halfword and sign extend. */
274static void gen_revsh(TCGv var)
275{
276 TCGv tmp = new_tmp();
277 tcg_gen_shri_i32(tmp, var, 8);
278 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
279 tcg_gen_shli_i32(var, var, 8);
280 tcg_gen_ext8s_i32(var, var);
281 tcg_gen_or_i32(var, var, tmp);
282 dead_tmp(tmp);
283}
284
285/* Unsigned bitfield extract. */
286static void gen_ubfx(TCGv var, int shift, uint32_t mask)
287{
288 if (shift)
289 tcg_gen_shri_i32(var, var, shift);
290 tcg_gen_andi_i32(var, var, mask);
291}
292
293/* Signed bitfield extract. */
294static void gen_sbfx(TCGv var, int shift, int width)
295{
296 uint32_t signbit;
297
298 if (shift)
299 tcg_gen_sari_i32(var, var, shift);
300 if (shift + width < 32) {
301 signbit = 1u << (width - 1);
302 tcg_gen_andi_i32(var, var, (1u << width) - 1);
303 tcg_gen_xori_i32(var, var, signbit);
304 tcg_gen_subi_i32(var, var, signbit);
305 }
306}
307
308/* Bitfield insertion. Insert val into base. Clobbers base and val. */
309static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
310{
3670669c 311 tcg_gen_andi_i32(val, val, mask);
8f8e3aa4
PB
312 tcg_gen_shli_i32(val, val, shift);
313 tcg_gen_andi_i32(base, base, ~(mask << shift));
3670669c
PB
314 tcg_gen_or_i32(dest, base, val);
315}
316
d9ba4830
PB
317/* Round the top 32 bits of a 64-bit value. */
318static void gen_roundqd(TCGv a, TCGv b)
3670669c 319{
d9ba4830
PB
320 tcg_gen_shri_i32(a, a, 31);
321 tcg_gen_add_i32(a, a, b);
3670669c
PB
322}
323
8f01245e
PB
324/* FIXME: Most targets have native widening multiplication.
325 It would be good to use that instead of a full wide multiply. */
5e3f878a
PB
326/* 32x32->64 multiply. Marks inputs as dead. */
327static TCGv gen_mulu_i64_i32(TCGv a, TCGv b)
328{
329 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
330 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
331
332 tcg_gen_extu_i32_i64(tmp1, a);
333 dead_tmp(a);
334 tcg_gen_extu_i32_i64(tmp2, b);
335 dead_tmp(b);
336 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
337 return tmp1;
338}
339
340static TCGv gen_muls_i64_i32(TCGv a, TCGv b)
341{
342 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
343 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
344
345 tcg_gen_ext_i32_i64(tmp1, a);
346 dead_tmp(a);
347 tcg_gen_ext_i32_i64(tmp2, b);
348 dead_tmp(b);
349 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
350 return tmp1;
351}
352
8f01245e
PB
353/* Unsigned 32x32->64 multiply. */
354static void gen_op_mull_T0_T1(void)
355{
356 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
357 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
358
359 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
360 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
361 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
362 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
363 tcg_gen_shri_i64(tmp1, tmp1, 32);
364 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
365}
366
367/* Signed 32x32->64 multiply. */
d9ba4830 368static void gen_imull(TCGv a, TCGv b)
8f01245e
PB
369{
370 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
371 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
372
d9ba4830
PB
373 tcg_gen_ext_i32_i64(tmp1, a);
374 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 375 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 376 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 377 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
378 tcg_gen_trunc_i64_i32(b, tmp1);
379}
380#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
381
8f01245e
PB
382/* Swap low and high halfwords. */
383static void gen_swap_half(TCGv var)
384{
385 TCGv tmp = new_tmp();
386 tcg_gen_shri_i32(tmp, var, 16);
387 tcg_gen_shli_i32(var, var, 16);
388 tcg_gen_or_i32(var, var, tmp);
3670669c 389 dead_tmp(tmp);
8f01245e
PB
390}
391
b26eefb6
PB
392/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
393 tmp = (t0 ^ t1) & 0x8000;
394 t0 &= ~0x8000;
395 t1 &= ~0x8000;
396 t0 = (t0 + t1) ^ tmp;
397 */
398
399static void gen_add16(TCGv t0, TCGv t1)
400{
401 TCGv tmp = new_tmp();
402 tcg_gen_xor_i32(tmp, t0, t1);
403 tcg_gen_andi_i32(tmp, tmp, 0x8000);
404 tcg_gen_andi_i32(t0, t0, ~0x8000);
405 tcg_gen_andi_i32(t1, t1, ~0x8000);
406 tcg_gen_add_i32(t0, t0, t1);
407 tcg_gen_xor_i32(t0, t0, tmp);
408 dead_tmp(tmp);
409 dead_tmp(t1);
410}
411
9a119ff6
PB
412#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
413
b26eefb6
PB
414/* Set CF to the top bit of var. */
415static void gen_set_CF_bit31(TCGv var)
416{
417 TCGv tmp = new_tmp();
418 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 419 gen_set_CF(var);
b26eefb6
PB
420 dead_tmp(tmp);
421}
422
423/* Set N and Z flags from var. */
424static inline void gen_logic_CC(TCGv var)
425{
6fbe23d5
PB
426 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
427 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
b26eefb6
PB
428}
429
430/* T0 += T1 + CF. */
431static void gen_adc_T0_T1(void)
432{
d9ba4830 433 TCGv tmp;
b26eefb6 434 gen_op_addl_T0_T1();
d9ba4830 435 tmp = load_cpu_field(CF);
b26eefb6
PB
436 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
437 dead_tmp(tmp);
438}
439
3670669c
PB
440/* dest = T0 - T1 + CF - 1. */
441static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
442{
d9ba4830 443 TCGv tmp;
3670669c 444 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 445 tmp = load_cpu_field(CF);
3670669c
PB
446 tcg_gen_add_i32(dest, dest, tmp);
447 tcg_gen_subi_i32(dest, dest, 1);
448 dead_tmp(tmp);
449}
450
451#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
452#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
453
b26eefb6
PB
454/* T0 &= ~T1. Clobbers T1. */
455/* FIXME: Implement bic natively. */
8f8e3aa4
PB
456static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
457{
458 TCGv tmp = new_tmp();
459 tcg_gen_not_i32(tmp, t1);
460 tcg_gen_and_i32(dest, t0, tmp);
461 dead_tmp(tmp);
462}
b26eefb6
PB
463static inline void gen_op_bicl_T0_T1(void)
464{
465 gen_op_notl_T1();
466 gen_op_andl_T0_T1();
467}
468
ad69471c
PB
469/* FIXME: Implement this natively. */
470#define tcg_gen_abs_i32(t0, t1) gen_helper_abs(t0, t1)
471
b26eefb6
PB
472/* FIXME: Implement this natively. */
473static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
474{
475 TCGv tmp;
476
477 if (i == 0)
478 return;
479
480 tmp = new_tmp();
481 tcg_gen_shri_i32(tmp, t1, i);
482 tcg_gen_shli_i32(t1, t1, 32 - i);
483 tcg_gen_or_i32(t0, t1, tmp);
484 dead_tmp(tmp);
485}
486
9a119ff6 487static void shifter_out_im(TCGv var, int shift)
b26eefb6 488{
9a119ff6
PB
489 TCGv tmp = new_tmp();
490 if (shift == 0) {
491 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 492 } else {
9a119ff6
PB
493 tcg_gen_shri_i32(tmp, var, shift);
494 if (shift != 31);
495 tcg_gen_andi_i32(tmp, tmp, 1);
496 }
497 gen_set_CF(tmp);
498 dead_tmp(tmp);
499}
b26eefb6 500
9a119ff6
PB
501/* Shift by immediate. Includes special handling for shift == 0. */
502static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
503{
504 switch (shiftop) {
505 case 0: /* LSL */
506 if (shift != 0) {
507 if (flags)
508 shifter_out_im(var, 32 - shift);
509 tcg_gen_shli_i32(var, var, shift);
510 }
511 break;
512 case 1: /* LSR */
513 if (shift == 0) {
514 if (flags) {
515 tcg_gen_shri_i32(var, var, 31);
516 gen_set_CF(var);
517 }
518 tcg_gen_movi_i32(var, 0);
519 } else {
520 if (flags)
521 shifter_out_im(var, shift - 1);
522 tcg_gen_shri_i32(var, var, shift);
523 }
524 break;
525 case 2: /* ASR */
526 if (shift == 0)
527 shift = 32;
528 if (flags)
529 shifter_out_im(var, shift - 1);
530 if (shift == 32)
531 shift = 31;
532 tcg_gen_sari_i32(var, var, shift);
533 break;
534 case 3: /* ROR/RRX */
535 if (shift != 0) {
536 if (flags)
537 shifter_out_im(var, shift - 1);
538 tcg_gen_rori_i32(var, var, shift); break;
539 } else {
d9ba4830 540 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
541 if (flags)
542 shifter_out_im(var, 0);
543 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
544 tcg_gen_shli_i32(tmp, tmp, 31);
545 tcg_gen_or_i32(var, var, tmp);
546 dead_tmp(tmp);
b26eefb6
PB
547 }
548 }
549};
550
8984bd2e
PB
551static inline void gen_arm_shift_reg(TCGv var, int shiftop,
552 TCGv shift, int flags)
553{
554 if (flags) {
555 switch (shiftop) {
556 case 0: gen_helper_shl_cc(var, var, shift); break;
557 case 1: gen_helper_shr_cc(var, var, shift); break;
558 case 2: gen_helper_sar_cc(var, var, shift); break;
559 case 3: gen_helper_ror_cc(var, var, shift); break;
560 }
561 } else {
562 switch (shiftop) {
563 case 0: gen_helper_shl(var, var, shift); break;
564 case 1: gen_helper_shr(var, var, shift); break;
565 case 2: gen_helper_sar(var, var, shift); break;
566 case 3: gen_helper_ror(var, var, shift); break;
567 }
568 }
569 dead_tmp(shift);
570}
571
6ddbc6e4
PB
572#define PAS_OP(pfx) \
573 switch (op2) { \
574 case 0: gen_pas_helper(glue(pfx,add16)); break; \
575 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
576 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
577 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
578 case 4: gen_pas_helper(glue(pfx,add8)); break; \
579 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
580 }
d9ba4830 581static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
582{
583 TCGv tmp;
584
585 switch (op1) {
586#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
587 case 1:
588 tmp = tcg_temp_new(TCG_TYPE_PTR);
589 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
590 PAS_OP(s)
591 break;
592 case 5:
593 tmp = tcg_temp_new(TCG_TYPE_PTR);
594 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
595 PAS_OP(u)
596 break;
597#undef gen_pas_helper
598#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
599 case 2:
600 PAS_OP(q);
601 break;
602 case 3:
603 PAS_OP(sh);
604 break;
605 case 6:
606 PAS_OP(uq);
607 break;
608 case 7:
609 PAS_OP(uh);
610 break;
611#undef gen_pas_helper
612 }
613}
9ee6e8bb
PB
614#undef PAS_OP
615
6ddbc6e4
PB
616/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
617#define PAS_OP(pfx) \
618 switch (op2) { \
619 case 0: gen_pas_helper(glue(pfx,add8)); break; \
620 case 1: gen_pas_helper(glue(pfx,add16)); break; \
621 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
622 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
623 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
624 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
625 }
d9ba4830 626static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
627{
628 TCGv tmp;
629
630 switch (op1) {
631#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
632 case 0:
633 tmp = tcg_temp_new(TCG_TYPE_PTR);
634 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
635 PAS_OP(s)
636 break;
637 case 4:
638 tmp = tcg_temp_new(TCG_TYPE_PTR);
639 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
640 PAS_OP(u)
641 break;
642#undef gen_pas_helper
643#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
644 case 1:
645 PAS_OP(q);
646 break;
647 case 2:
648 PAS_OP(sh);
649 break;
650 case 5:
651 PAS_OP(uq);
652 break;
653 case 6:
654 PAS_OP(uh);
655 break;
656#undef gen_pas_helper
657 }
658}
9ee6e8bb
PB
659#undef PAS_OP
660
d9ba4830
PB
661static void gen_test_cc(int cc, int label)
662{
663 TCGv tmp;
664 TCGv tmp2;
d9ba4830
PB
665 int inv;
666
d9ba4830
PB
667 switch (cc) {
668 case 0: /* eq: Z */
6fbe23d5 669 tmp = load_cpu_field(ZF);
cb63669a 670 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
671 break;
672 case 1: /* ne: !Z */
6fbe23d5 673 tmp = load_cpu_field(ZF);
cb63669a 674 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
675 break;
676 case 2: /* cs: C */
677 tmp = load_cpu_field(CF);
cb63669a 678 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
679 break;
680 case 3: /* cc: !C */
681 tmp = load_cpu_field(CF);
cb63669a 682 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
683 break;
684 case 4: /* mi: N */
6fbe23d5 685 tmp = load_cpu_field(NF);
cb63669a 686 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
687 break;
688 case 5: /* pl: !N */
6fbe23d5 689 tmp = load_cpu_field(NF);
cb63669a 690 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
691 break;
692 case 6: /* vs: V */
693 tmp = load_cpu_field(VF);
cb63669a 694 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
695 break;
696 case 7: /* vc: !V */
697 tmp = load_cpu_field(VF);
cb63669a 698 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
699 break;
700 case 8: /* hi: C && !Z */
701 inv = gen_new_label();
702 tmp = load_cpu_field(CF);
cb63669a 703 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830 704 dead_tmp(tmp);
6fbe23d5 705 tmp = load_cpu_field(ZF);
cb63669a 706 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
d9ba4830
PB
707 gen_set_label(inv);
708 break;
709 case 9: /* ls: !C || Z */
710 tmp = load_cpu_field(CF);
cb63669a 711 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830 712 dead_tmp(tmp);
6fbe23d5 713 tmp = load_cpu_field(ZF);
cb63669a 714 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
715 break;
716 case 10: /* ge: N == V -> N ^ V == 0 */
717 tmp = load_cpu_field(VF);
6fbe23d5 718 tmp2 = load_cpu_field(NF);
d9ba4830
PB
719 tcg_gen_xor_i32(tmp, tmp, tmp2);
720 dead_tmp(tmp2);
cb63669a 721 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
722 break;
723 case 11: /* lt: N != V -> N ^ V != 0 */
724 tmp = load_cpu_field(VF);
6fbe23d5 725 tmp2 = load_cpu_field(NF);
d9ba4830
PB
726 tcg_gen_xor_i32(tmp, tmp, tmp2);
727 dead_tmp(tmp2);
cb63669a 728 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
729 break;
730 case 12: /* gt: !Z && N == V */
731 inv = gen_new_label();
6fbe23d5 732 tmp = load_cpu_field(ZF);
cb63669a 733 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
d9ba4830
PB
734 dead_tmp(tmp);
735 tmp = load_cpu_field(VF);
6fbe23d5 736 tmp2 = load_cpu_field(NF);
d9ba4830
PB
737 tcg_gen_xor_i32(tmp, tmp, tmp2);
738 dead_tmp(tmp2);
cb63669a 739 tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
d9ba4830
PB
740 gen_set_label(inv);
741 break;
742 case 13: /* le: Z || N != V */
6fbe23d5 743 tmp = load_cpu_field(ZF);
cb63669a 744 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
d9ba4830
PB
745 dead_tmp(tmp);
746 tmp = load_cpu_field(VF);
6fbe23d5 747 tmp2 = load_cpu_field(NF);
d9ba4830
PB
748 tcg_gen_xor_i32(tmp, tmp, tmp2);
749 dead_tmp(tmp2);
cb63669a 750 tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
d9ba4830
PB
751 break;
752 default:
753 fprintf(stderr, "Bad condition code 0x%x\n", cc);
754 abort();
755 }
756 dead_tmp(tmp);
757}
2c0262af
FB
758
759const uint8_t table_logic_cc[16] = {
760 1, /* and */
761 1, /* xor */
762 0, /* sub */
763 0, /* rsb */
764 0, /* add */
765 0, /* adc */
766 0, /* sbc */
767 0, /* rsc */
768 1, /* andl */
769 1, /* xorl */
770 0, /* cmp */
771 0, /* cmn */
772 1, /* orr */
773 1, /* mov */
774 1, /* bic */
775 1, /* mvn */
776};
3b46e624 777
d9ba4830
PB
778/* Set PC and Thumb state from an immediate address. */
779static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 780{
b26eefb6 781 TCGv tmp;
99c475ab 782
b26eefb6
PB
783 s->is_jmp = DISAS_UPDATE;
784 tmp = new_tmp();
d9ba4830
PB
785 if (s->thumb != (addr & 1)) {
786 tcg_gen_movi_i32(tmp, addr & 1);
787 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
788 }
789 tcg_gen_movi_i32(tmp, addr & ~1);
790 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 791 dead_tmp(tmp);
d9ba4830
PB
792}
793
794/* Set PC and Thumb state from var. var is marked as dead. */
795static inline void gen_bx(DisasContext *s, TCGv var)
796{
797 TCGv tmp;
798
799 s->is_jmp = DISAS_UPDATE;
800 tmp = new_tmp();
801 tcg_gen_andi_i32(tmp, var, 1);
802 store_cpu_field(tmp, thumb);
803 tcg_gen_andi_i32(var, var, ~1);
804 store_cpu_field(var, regs[15]);
805}
806
807/* TODO: This should be removed. Use gen_bx instead. */
808static inline void gen_bx_T0(DisasContext *s)
809{
810 TCGv tmp = new_tmp();
811 tcg_gen_mov_i32(tmp, cpu_T[0]);
812 gen_bx(s, tmp);
b26eefb6 813}
b5ff1b31
FB
814
815#if defined(CONFIG_USER_ONLY)
816#define gen_ldst(name, s) gen_op_##name##_raw()
817#else
818#define gen_ldst(name, s) do { \
6658ffb8 819 s->is_mem = 1; \
b5ff1b31
FB
820 if (IS_USER(s)) \
821 gen_op_##name##_user(); \
822 else \
823 gen_op_##name##_kernel(); \
824 } while (0)
825#endif
b0109805
PB
826static inline TCGv gen_ld8s(TCGv addr, int index)
827{
828 TCGv tmp = new_tmp();
829 tcg_gen_qemu_ld8s(tmp, addr, index);
830 return tmp;
831}
832static inline TCGv gen_ld8u(TCGv addr, int index)
833{
834 TCGv tmp = new_tmp();
835 tcg_gen_qemu_ld8u(tmp, addr, index);
836 return tmp;
837}
838static inline TCGv gen_ld16s(TCGv addr, int index)
839{
840 TCGv tmp = new_tmp();
841 tcg_gen_qemu_ld16s(tmp, addr, index);
842 return tmp;
843}
844static inline TCGv gen_ld16u(TCGv addr, int index)
845{
846 TCGv tmp = new_tmp();
847 tcg_gen_qemu_ld16u(tmp, addr, index);
848 return tmp;
849}
850static inline TCGv gen_ld32(TCGv addr, int index)
851{
852 TCGv tmp = new_tmp();
853 tcg_gen_qemu_ld32u(tmp, addr, index);
854 return tmp;
855}
856static inline void gen_st8(TCGv val, TCGv addr, int index)
857{
858 tcg_gen_qemu_st8(val, addr, index);
859 dead_tmp(val);
860}
861static inline void gen_st16(TCGv val, TCGv addr, int index)
862{
863 tcg_gen_qemu_st16(val, addr, index);
864 dead_tmp(val);
865}
866static inline void gen_st32(TCGv val, TCGv addr, int index)
867{
868 tcg_gen_qemu_st32(val, addr, index);
869 dead_tmp(val);
870}
b5ff1b31 871
2c0262af
FB
872static inline void gen_movl_T0_reg(DisasContext *s, int reg)
873{
b26eefb6 874 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
875}
876
877static inline void gen_movl_T1_reg(DisasContext *s, int reg)
878{
b26eefb6 879 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
880}
881
882static inline void gen_movl_T2_reg(DisasContext *s, int reg)
883{
b26eefb6
PB
884 load_reg_var(s, cpu_T[2], reg);
885}
886
5e3f878a
PB
887static inline void gen_set_pc_im(uint32_t val)
888{
889 TCGv tmp = new_tmp();
890 tcg_gen_movi_i32(tmp, val);
891 store_cpu_field(tmp, regs[15]);
892}
893
2c0262af
FB
894static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
895{
b26eefb6
PB
896 TCGv tmp;
897 if (reg == 15) {
898 tmp = new_tmp();
899 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
900 } else {
901 tmp = cpu_T[t];
902 }
903 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
2c0262af 904 if (reg == 15) {
b26eefb6 905 dead_tmp(tmp);
2c0262af
FB
906 s->is_jmp = DISAS_JUMP;
907 }
908}
909
910static inline void gen_movl_reg_T0(DisasContext *s, int reg)
911{
912 gen_movl_reg_TN(s, reg, 0);
913}
914
915static inline void gen_movl_reg_T1(DisasContext *s, int reg)
916{
917 gen_movl_reg_TN(s, reg, 1);
918}
919
b5ff1b31
FB
920/* Force a TB lookup after an instruction that changes the CPU state. */
921static inline void gen_lookup_tb(DisasContext *s)
922{
923 gen_op_movl_T0_im(s->pc);
924 gen_movl_reg_T0(s, 15);
925 s->is_jmp = DISAS_UPDATE;
926}
927
b0109805
PB
928static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
929 TCGv var)
2c0262af 930{
1e8d4eec 931 int val, rm, shift, shiftop;
b26eefb6 932 TCGv offset;
2c0262af
FB
933
934 if (!(insn & (1 << 25))) {
935 /* immediate */
936 val = insn & 0xfff;
937 if (!(insn & (1 << 23)))
938 val = -val;
537730b9 939 if (val != 0)
b0109805 940 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
941 } else {
942 /* shift/register */
943 rm = (insn) & 0xf;
944 shift = (insn >> 7) & 0x1f;
1e8d4eec 945 shiftop = (insn >> 5) & 3;
b26eefb6 946 offset = load_reg(s, rm);
9a119ff6 947 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 948 if (!(insn & (1 << 23)))
b0109805 949 tcg_gen_sub_i32(var, var, offset);
2c0262af 950 else
b0109805 951 tcg_gen_add_i32(var, var, offset);
b26eefb6 952 dead_tmp(offset);
2c0262af
FB
953 }
954}
955
191f9a93 956static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 957 int extra, TCGv var)
2c0262af
FB
958{
959 int val, rm;
b26eefb6 960 TCGv offset;
3b46e624 961
2c0262af
FB
962 if (insn & (1 << 22)) {
963 /* immediate */
964 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
965 if (!(insn & (1 << 23)))
966 val = -val;
18acad92 967 val += extra;
537730b9 968 if (val != 0)
b0109805 969 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
970 } else {
971 /* register */
191f9a93 972 if (extra)
b0109805 973 tcg_gen_addi_i32(var, var, extra);
2c0262af 974 rm = (insn) & 0xf;
b26eefb6 975 offset = load_reg(s, rm);
2c0262af 976 if (!(insn & (1 << 23)))
b0109805 977 tcg_gen_sub_i32(var, var, offset);
2c0262af 978 else
b0109805 979 tcg_gen_add_i32(var, var, offset);
b26eefb6 980 dead_tmp(offset);
2c0262af
FB
981 }
982}
983
4373f3ce
PB
984#define VFP_OP2(name) \
985static inline void gen_vfp_##name(int dp) \
986{ \
987 if (dp) \
988 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
989 else \
990 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
991}
992
5b340b51 993#define VFP_OP1(name) \
9ee6e8bb
PB
994static inline void gen_vfp_##name(int dp, int arg) \
995{ \
996 if (dp) \
997 gen_op_vfp_##name##d(arg); \
998 else \
999 gen_op_vfp_##name##s(arg); \
1000}
1001
4373f3ce
PB
1002VFP_OP2(add)
1003VFP_OP2(sub)
1004VFP_OP2(mul)
1005VFP_OP2(div)
1006
1007#undef VFP_OP2
1008
1009static inline void gen_vfp_abs(int dp)
1010{
1011 if (dp)
1012 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
1013 else
1014 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
1015}
1016
1017static inline void gen_vfp_neg(int dp)
1018{
1019 if (dp)
1020 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1021 else
1022 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1023}
1024
1025static inline void gen_vfp_sqrt(int dp)
1026{
1027 if (dp)
1028 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1029 else
1030 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1031}
1032
1033static inline void gen_vfp_cmp(int dp)
1034{
1035 if (dp)
1036 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1037 else
1038 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1039}
1040
1041static inline void gen_vfp_cmpe(int dp)
1042{
1043 if (dp)
1044 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1045 else
1046 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1047}
1048
1049static inline void gen_vfp_F1_ld0(int dp)
1050{
1051 if (dp)
5b340b51 1052 tcg_gen_movi_i64(cpu_F1d, 0);
4373f3ce 1053 else
5b340b51 1054 tcg_gen_movi_i32(cpu_F1s, 0);
4373f3ce
PB
1055}
1056
1057static inline void gen_vfp_uito(int dp)
1058{
1059 if (dp)
1060 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1061 else
1062 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1063}
1064
1065static inline void gen_vfp_sito(int dp)
1066{
1067 if (dp)
66230e0d 1068 gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
4373f3ce 1069 else
66230e0d 1070 gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
4373f3ce
PB
1071}
1072
1073static inline void gen_vfp_toui(int dp)
1074{
1075 if (dp)
1076 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1077 else
1078 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1079}
1080
1081static inline void gen_vfp_touiz(int dp)
1082{
1083 if (dp)
1084 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1085 else
1086 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1087}
1088
1089static inline void gen_vfp_tosi(int dp)
1090{
1091 if (dp)
1092 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1093 else
1094 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1095}
1096
1097static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1098{
1099 if (dp)
4373f3ce 1100 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1101 else
4373f3ce
PB
1102 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1103}
1104
1105#define VFP_GEN_FIX(name) \
1106static inline void gen_vfp_##name(int dp, int shift) \
1107{ \
1108 if (dp) \
1109 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1110 else \
1111 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1112}
4373f3ce
PB
1113VFP_GEN_FIX(tosh)
1114VFP_GEN_FIX(tosl)
1115VFP_GEN_FIX(touh)
1116VFP_GEN_FIX(toul)
1117VFP_GEN_FIX(shto)
1118VFP_GEN_FIX(slto)
1119VFP_GEN_FIX(uhto)
1120VFP_GEN_FIX(ulto)
1121#undef VFP_GEN_FIX
9ee6e8bb 1122
b5ff1b31
FB
1123static inline void gen_vfp_ld(DisasContext *s, int dp)
1124{
1125 if (dp)
4373f3ce 1126 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1127 else
4373f3ce 1128 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1129}
1130
1131static inline void gen_vfp_st(DisasContext *s, int dp)
1132{
1133 if (dp)
4373f3ce 1134 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1135 else
4373f3ce 1136 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1137}
1138
8e96005d
FB
1139static inline long
1140vfp_reg_offset (int dp, int reg)
1141{
1142 if (dp)
1143 return offsetof(CPUARMState, vfp.regs[reg]);
1144 else if (reg & 1) {
1145 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1146 + offsetof(CPU_DoubleU, l.upper);
1147 } else {
1148 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1149 + offsetof(CPU_DoubleU, l.lower);
1150 }
1151}
9ee6e8bb
PB
1152
1153/* Return the offset of a 32-bit piece of a NEON register.
1154 zero is the least significant end of the register. */
1155static inline long
1156neon_reg_offset (int reg, int n)
1157{
1158 int sreg;
1159 sreg = reg * 2 + n;
1160 return vfp_reg_offset(0, sreg);
1161}
1162
ad69471c
PB
1163/* FIXME: Remove these. */
1164#define neon_T0 cpu_T[0]
1165#define neon_T1 cpu_T[1]
1166#define NEON_GET_REG(T, reg, n) \
1167 tcg_gen_ld_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
1168#define NEON_SET_REG(T, reg, n) \
1169 tcg_gen_st_i32(neon_##T, cpu_env, neon_reg_offset(reg, n))
9ee6e8bb 1170
8f8e3aa4
PB
1171static TCGv neon_load_reg(int reg, int pass)
1172{
1173 TCGv tmp = new_tmp();
1174 tcg_gen_ld_i32(tmp, cpu_env, neon_reg_offset(reg, pass));
1175 return tmp;
1176}
1177
1178static void neon_store_reg(int reg, int pass, TCGv var)
1179{
1180 tcg_gen_st_i32(var, cpu_env, neon_reg_offset(reg, pass));
1181 dead_tmp(var);
1182}
1183
ad69471c
PB
1184static inline void neon_load_reg64(TCGv var, int reg)
1185{
1186 tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(1, reg));
1187}
1188
1189static inline void neon_store_reg64(TCGv var, int reg)
1190{
1191 tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(1, reg));
1192}
1193
4373f3ce
PB
1194#define tcg_gen_ld_f32 tcg_gen_ld_i32
1195#define tcg_gen_ld_f64 tcg_gen_ld_i64
1196#define tcg_gen_st_f32 tcg_gen_st_i32
1197#define tcg_gen_st_f64 tcg_gen_st_i64
1198
b7bcbe95
FB
1199static inline void gen_mov_F0_vreg(int dp, int reg)
1200{
1201 if (dp)
4373f3ce 1202 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1203 else
4373f3ce 1204 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1205}
1206
1207static inline void gen_mov_F1_vreg(int dp, int reg)
1208{
1209 if (dp)
4373f3ce 1210 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1211 else
4373f3ce 1212 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1213}
1214
1215static inline void gen_mov_vreg_F0(int dp, int reg)
1216{
1217 if (dp)
4373f3ce 1218 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1219 else
4373f3ce 1220 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1221}
1222
18c9b560
AZ
1223#define ARM_CP_RW_BIT (1 << 20)
1224
e677137d
PB
1225static inline void iwmmxt_load_reg(TCGv var, int reg)
1226{
1227 tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1228}
1229
1230static inline void iwmmxt_store_reg(TCGv var, int reg)
1231{
1232 tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
1233}
1234
1235static inline void gen_op_iwmmxt_movl_wCx_T0(int reg)
1236{
1237 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1238}
1239
1240static inline void gen_op_iwmmxt_movl_T0_wCx(int reg)
1241{
1242 tcg_gen_ld_i32(cpu_T[0], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1243}
1244
1245static inline void gen_op_iwmmxt_movl_T1_wCx(int reg)
1246{
1247 tcg_gen_ld_i32(cpu_T[1], cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
1248}
1249
1250static inline void gen_op_iwmmxt_movq_wRn_M0(int rn)
1251{
1252 iwmmxt_store_reg(cpu_M0, rn);
1253}
1254
1255static inline void gen_op_iwmmxt_movq_M0_wRn(int rn)
1256{
1257 iwmmxt_load_reg(cpu_M0, rn);
1258}
1259
1260static inline void gen_op_iwmmxt_orq_M0_wRn(int rn)
1261{
1262 iwmmxt_load_reg(cpu_V1, rn);
1263 tcg_gen_or_i64(cpu_M0, cpu_M0, cpu_V1);
1264}
1265
1266static inline void gen_op_iwmmxt_andq_M0_wRn(int rn)
1267{
1268 iwmmxt_load_reg(cpu_V1, rn);
1269 tcg_gen_and_i64(cpu_M0, cpu_M0, cpu_V1);
1270}
1271
1272static inline void gen_op_iwmmxt_xorq_M0_wRn(int rn)
1273{
1274 iwmmxt_load_reg(cpu_V1, rn);
1275 tcg_gen_xor_i64(cpu_M0, cpu_M0, cpu_V1);
1276}
1277
1278#define IWMMXT_OP(name) \
1279static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1280{ \
1281 iwmmxt_load_reg(cpu_V1, rn); \
1282 gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
1283}
1284
1285#define IWMMXT_OP_ENV(name) \
1286static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
1287{ \
1288 iwmmxt_load_reg(cpu_V1, rn); \
1289 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
1290}
1291
1292#define IWMMXT_OP_ENV_SIZE(name) \
1293IWMMXT_OP_ENV(name##b) \
1294IWMMXT_OP_ENV(name##w) \
1295IWMMXT_OP_ENV(name##l)
1296
1297#define IWMMXT_OP_ENV1(name) \
1298static inline void gen_op_iwmmxt_##name##_M0(void) \
1299{ \
1300 gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
1301}
1302
1303IWMMXT_OP(maddsq)
1304IWMMXT_OP(madduq)
1305IWMMXT_OP(sadb)
1306IWMMXT_OP(sadw)
1307IWMMXT_OP(mulslw)
1308IWMMXT_OP(mulshw)
1309IWMMXT_OP(mululw)
1310IWMMXT_OP(muluhw)
1311IWMMXT_OP(macsw)
1312IWMMXT_OP(macuw)
1313
1314IWMMXT_OP_ENV_SIZE(unpackl)
1315IWMMXT_OP_ENV_SIZE(unpackh)
1316
1317IWMMXT_OP_ENV1(unpacklub)
1318IWMMXT_OP_ENV1(unpackluw)
1319IWMMXT_OP_ENV1(unpacklul)
1320IWMMXT_OP_ENV1(unpackhub)
1321IWMMXT_OP_ENV1(unpackhuw)
1322IWMMXT_OP_ENV1(unpackhul)
1323IWMMXT_OP_ENV1(unpacklsb)
1324IWMMXT_OP_ENV1(unpacklsw)
1325IWMMXT_OP_ENV1(unpacklsl)
1326IWMMXT_OP_ENV1(unpackhsb)
1327IWMMXT_OP_ENV1(unpackhsw)
1328IWMMXT_OP_ENV1(unpackhsl)
1329
1330IWMMXT_OP_ENV_SIZE(cmpeq)
1331IWMMXT_OP_ENV_SIZE(cmpgtu)
1332IWMMXT_OP_ENV_SIZE(cmpgts)
1333
1334IWMMXT_OP_ENV_SIZE(mins)
1335IWMMXT_OP_ENV_SIZE(minu)
1336IWMMXT_OP_ENV_SIZE(maxs)
1337IWMMXT_OP_ENV_SIZE(maxu)
1338
1339IWMMXT_OP_ENV_SIZE(subn)
1340IWMMXT_OP_ENV_SIZE(addn)
1341IWMMXT_OP_ENV_SIZE(subu)
1342IWMMXT_OP_ENV_SIZE(addu)
1343IWMMXT_OP_ENV_SIZE(subs)
1344IWMMXT_OP_ENV_SIZE(adds)
1345
1346IWMMXT_OP_ENV(avgb0)
1347IWMMXT_OP_ENV(avgb1)
1348IWMMXT_OP_ENV(avgw0)
1349IWMMXT_OP_ENV(avgw1)
1350
1351IWMMXT_OP(msadb)
1352
1353IWMMXT_OP_ENV(packuw)
1354IWMMXT_OP_ENV(packul)
1355IWMMXT_OP_ENV(packuq)
1356IWMMXT_OP_ENV(packsw)
1357IWMMXT_OP_ENV(packsl)
1358IWMMXT_OP_ENV(packsq)
1359
1360static inline void gen_op_iwmmxt_muladdsl_M0_T0_T1(void)
1361{
1362 gen_helper_iwmmxt_muladdsl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1363}
1364
1365static inline void gen_op_iwmmxt_muladdsw_M0_T0_T1(void)
1366{
1367 gen_helper_iwmmxt_muladdsw(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1368}
1369
1370static inline void gen_op_iwmmxt_muladdswl_M0_T0_T1(void)
1371{
1372 gen_helper_iwmmxt_muladdswl(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1]);
1373}
1374
1375static inline void gen_op_iwmmxt_align_M0_T0_wRn(int rn)
1376{
1377 iwmmxt_load_reg(cpu_V1, rn);
1378 gen_helper_iwmmxt_align(cpu_M0, cpu_M0, cpu_V1, cpu_T[0]);
1379}
1380
1381static inline void gen_op_iwmmxt_insr_M0_T0_T1(int shift)
1382{
1383 TCGv tmp = tcg_const_i32(shift);
1384 gen_helper_iwmmxt_insr(cpu_M0, cpu_M0, cpu_T[0], cpu_T[1], tmp);
1385}
1386
1387static inline void gen_op_iwmmxt_extrsb_T0_M0(int shift)
1388{
1389 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1390 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1391 tcg_gen_ext8s_i32(cpu_T[0], cpu_T[0]);
1392}
1393
1394static inline void gen_op_iwmmxt_extrsw_T0_M0(int shift)
1395{
1396 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1397 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1398 tcg_gen_ext16s_i32(cpu_T[0], cpu_T[0]);
1399}
1400
1401static inline void gen_op_iwmmxt_extru_T0_M0(int shift, uint32_t mask)
1402{
1403 tcg_gen_shri_i64(cpu_M0, cpu_M0, shift);
1404 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_M0);
1405 if (mask != ~0u)
1406 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
1407}
1408
1409static void gen_op_iwmmxt_set_mup(void)
1410{
1411 TCGv tmp;
1412 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1413 tcg_gen_ori_i32(tmp, tmp, 2);
1414 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1415}
1416
1417static void gen_op_iwmmxt_set_cup(void)
1418{
1419 TCGv tmp;
1420 tmp = load_cpu_field(iwmmxt.cregs[ARM_IWMMXT_wCon]);
1421 tcg_gen_ori_i32(tmp, tmp, 1);
1422 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCon]);
1423}
1424
1425static void gen_op_iwmmxt_setpsr_nz(void)
1426{
1427 TCGv tmp = new_tmp();
1428 gen_helper_iwmmxt_setpsr_nz(tmp, cpu_M0);
1429 store_cpu_field(tmp, iwmmxt.cregs[ARM_IWMMXT_wCASF]);
1430}
1431
1432static inline void gen_op_iwmmxt_addl_M0_wRn(int rn)
1433{
1434 iwmmxt_load_reg(cpu_V1, rn);
86831435 1435 tcg_gen_ext32u_i64(cpu_V1, cpu_V1);
e677137d
PB
1436 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
1437}
1438
1439
1440static void gen_iwmmxt_movl_T0_T1_wRn(int rn)
1441{
1442 iwmmxt_load_reg(cpu_V0, rn);
1443 tcg_gen_trunc_i64_i32(cpu_T[0], cpu_V0);
1444 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
1445 tcg_gen_trunc_i64_i32(cpu_T[1], cpu_V0);
1446}
1447
1448static void gen_iwmmxt_movl_wRn_T0_T1(int rn)
1449{
1450 tcg_gen_extu_i32_i64(cpu_V0, cpu_T[0]);
1451 tcg_gen_extu_i32_i64(cpu_V1, cpu_T[0]);
1452 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
1453 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
1454 iwmmxt_store_reg(cpu_V0, rn);
1455}
1456
18c9b560
AZ
1457static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
1458{
1459 int rd;
1460 uint32_t offset;
1461
1462 rd = (insn >> 16) & 0xf;
1463 gen_movl_T1_reg(s, rd);
1464
1465 offset = (insn & 0xff) << ((insn >> 7) & 2);
1466 if (insn & (1 << 24)) {
1467 /* Pre indexed */
1468 if (insn & (1 << 23))
1469 gen_op_addl_T1_im(offset);
1470 else
1471 gen_op_addl_T1_im(-offset);
1472
1473 if (insn & (1 << 21))
1474 gen_movl_reg_T1(s, rd);
1475 } else if (insn & (1 << 21)) {
1476 /* Post indexed */
1477 if (insn & (1 << 23))
1478 gen_op_movl_T0_im(offset);
1479 else
1480 gen_op_movl_T0_im(- offset);
1481 gen_op_addl_T0_T1();
1482 gen_movl_reg_T0(s, rd);
1483 } else if (!(insn & (1 << 23)))
1484 return 1;
1485 return 0;
1486}
1487
1488static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1489{
1490 int rd = (insn >> 0) & 0xf;
1491
1492 if (insn & (1 << 8))
1493 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1494 return 1;
1495 else
1496 gen_op_iwmmxt_movl_T0_wCx(rd);
1497 else
e677137d 1498 gen_iwmmxt_movl_T0_T1_wRn(rd);
18c9b560
AZ
1499
1500 gen_op_movl_T1_im(mask);
1501 gen_op_andl_T0_T1();
1502 return 0;
1503}
1504
1505/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1506 (ie. an undefined instruction). */
1507static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1508{
1509 int rd, wrd;
1510 int rdhi, rdlo, rd0, rd1, i;
b0109805 1511 TCGv tmp;
18c9b560
AZ
1512
1513 if ((insn & 0x0e000e00) == 0x0c000000) {
1514 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1515 wrd = insn & 0xf;
1516 rdlo = (insn >> 12) & 0xf;
1517 rdhi = (insn >> 16) & 0xf;
1518 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
e677137d 1519 gen_iwmmxt_movl_T0_T1_wRn(wrd);
18c9b560
AZ
1520 gen_movl_reg_T0(s, rdlo);
1521 gen_movl_reg_T1(s, rdhi);
1522 } else { /* TMCRR */
1523 gen_movl_T0_reg(s, rdlo);
1524 gen_movl_T1_reg(s, rdhi);
e677137d 1525 gen_iwmmxt_movl_wRn_T0_T1(wrd);
18c9b560
AZ
1526 gen_op_iwmmxt_set_mup();
1527 }
1528 return 0;
1529 }
1530
1531 wrd = (insn >> 12) & 0xf;
1532 if (gen_iwmmxt_address(s, insn))
1533 return 1;
1534 if (insn & ARM_CP_RW_BIT) {
1535 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1536 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1537 tcg_gen_mov_i32(cpu_T[0], tmp);
1538 dead_tmp(tmp);
18c9b560
AZ
1539 gen_op_iwmmxt_movl_wCx_T0(wrd);
1540 } else {
e677137d
PB
1541 i = 1;
1542 if (insn & (1 << 8)) {
1543 if (insn & (1 << 22)) { /* WLDRD */
1544 tcg_gen_qemu_ld64(cpu_M0, cpu_T[1], IS_USER(s));
1545 i = 0;
1546 } else { /* WLDRW wRd */
1547 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1548 }
1549 } else {
1550 if (insn & (1 << 22)) { /* WLDRH */
1551 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
1552 } else { /* WLDRB */
1553 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
1554 }
1555 }
1556 if (i) {
1557 tcg_gen_extu_i32_i64(cpu_M0, tmp);
1558 dead_tmp(tmp);
1559 }
18c9b560
AZ
1560 gen_op_iwmmxt_movq_wRn_M0(wrd);
1561 }
1562 } else {
1563 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1564 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1565 tmp = new_tmp();
1566 tcg_gen_mov_i32(tmp, cpu_T[0]);
1567 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1568 } else {
1569 gen_op_iwmmxt_movq_M0_wRn(wrd);
e677137d
PB
1570 tmp = new_tmp();
1571 if (insn & (1 << 8)) {
1572 if (insn & (1 << 22)) { /* WSTRD */
1573 dead_tmp(tmp);
1574 tcg_gen_qemu_st64(cpu_M0, cpu_T[1], IS_USER(s));
1575 } else { /* WSTRW wRd */
1576 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1577 gen_st32(tmp, cpu_T[1], IS_USER(s));
1578 }
1579 } else {
1580 if (insn & (1 << 22)) { /* WSTRH */
1581 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1582 gen_st16(tmp, cpu_T[1], IS_USER(s));
1583 } else { /* WSTRB */
1584 tcg_gen_trunc_i64_i32(tmp, cpu_M0);
1585 gen_st8(tmp, cpu_T[1], IS_USER(s));
1586 }
1587 }
18c9b560
AZ
1588 }
1589 }
1590 return 0;
1591 }
1592
1593 if ((insn & 0x0f000000) != 0x0e000000)
1594 return 1;
1595
1596 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1597 case 0x000: /* WOR */
1598 wrd = (insn >> 12) & 0xf;
1599 rd0 = (insn >> 0) & 0xf;
1600 rd1 = (insn >> 16) & 0xf;
1601 gen_op_iwmmxt_movq_M0_wRn(rd0);
1602 gen_op_iwmmxt_orq_M0_wRn(rd1);
1603 gen_op_iwmmxt_setpsr_nz();
1604 gen_op_iwmmxt_movq_wRn_M0(wrd);
1605 gen_op_iwmmxt_set_mup();
1606 gen_op_iwmmxt_set_cup();
1607 break;
1608 case 0x011: /* TMCR */
1609 if (insn & 0xf)
1610 return 1;
1611 rd = (insn >> 12) & 0xf;
1612 wrd = (insn >> 16) & 0xf;
1613 switch (wrd) {
1614 case ARM_IWMMXT_wCID:
1615 case ARM_IWMMXT_wCASF:
1616 break;
1617 case ARM_IWMMXT_wCon:
1618 gen_op_iwmmxt_set_cup();
1619 /* Fall through. */
1620 case ARM_IWMMXT_wCSSF:
1621 gen_op_iwmmxt_movl_T0_wCx(wrd);
1622 gen_movl_T1_reg(s, rd);
1623 gen_op_bicl_T0_T1();
1624 gen_op_iwmmxt_movl_wCx_T0(wrd);
1625 break;
1626 case ARM_IWMMXT_wCGR0:
1627 case ARM_IWMMXT_wCGR1:
1628 case ARM_IWMMXT_wCGR2:
1629 case ARM_IWMMXT_wCGR3:
1630 gen_op_iwmmxt_set_cup();
1631 gen_movl_reg_T0(s, rd);
1632 gen_op_iwmmxt_movl_wCx_T0(wrd);
1633 break;
1634 default:
1635 return 1;
1636 }
1637 break;
1638 case 0x100: /* WXOR */
1639 wrd = (insn >> 12) & 0xf;
1640 rd0 = (insn >> 0) & 0xf;
1641 rd1 = (insn >> 16) & 0xf;
1642 gen_op_iwmmxt_movq_M0_wRn(rd0);
1643 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1644 gen_op_iwmmxt_setpsr_nz();
1645 gen_op_iwmmxt_movq_wRn_M0(wrd);
1646 gen_op_iwmmxt_set_mup();
1647 gen_op_iwmmxt_set_cup();
1648 break;
1649 case 0x111: /* TMRC */
1650 if (insn & 0xf)
1651 return 1;
1652 rd = (insn >> 12) & 0xf;
1653 wrd = (insn >> 16) & 0xf;
1654 gen_op_iwmmxt_movl_T0_wCx(wrd);
1655 gen_movl_reg_T0(s, rd);
1656 break;
1657 case 0x300: /* WANDN */
1658 wrd = (insn >> 12) & 0xf;
1659 rd0 = (insn >> 0) & 0xf;
1660 rd1 = (insn >> 16) & 0xf;
1661 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d 1662 tcg_gen_neg_i64(cpu_M0, cpu_M0);
18c9b560
AZ
1663 gen_op_iwmmxt_andq_M0_wRn(rd1);
1664 gen_op_iwmmxt_setpsr_nz();
1665 gen_op_iwmmxt_movq_wRn_M0(wrd);
1666 gen_op_iwmmxt_set_mup();
1667 gen_op_iwmmxt_set_cup();
1668 break;
1669 case 0x200: /* WAND */
1670 wrd = (insn >> 12) & 0xf;
1671 rd0 = (insn >> 0) & 0xf;
1672 rd1 = (insn >> 16) & 0xf;
1673 gen_op_iwmmxt_movq_M0_wRn(rd0);
1674 gen_op_iwmmxt_andq_M0_wRn(rd1);
1675 gen_op_iwmmxt_setpsr_nz();
1676 gen_op_iwmmxt_movq_wRn_M0(wrd);
1677 gen_op_iwmmxt_set_mup();
1678 gen_op_iwmmxt_set_cup();
1679 break;
1680 case 0x810: case 0xa10: /* WMADD */
1681 wrd = (insn >> 12) & 0xf;
1682 rd0 = (insn >> 0) & 0xf;
1683 rd1 = (insn >> 16) & 0xf;
1684 gen_op_iwmmxt_movq_M0_wRn(rd0);
1685 if (insn & (1 << 21))
1686 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1687 else
1688 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1689 gen_op_iwmmxt_movq_wRn_M0(wrd);
1690 gen_op_iwmmxt_set_mup();
1691 break;
1692 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1693 wrd = (insn >> 12) & 0xf;
1694 rd0 = (insn >> 16) & 0xf;
1695 rd1 = (insn >> 0) & 0xf;
1696 gen_op_iwmmxt_movq_M0_wRn(rd0);
1697 switch ((insn >> 22) & 3) {
1698 case 0:
1699 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1700 break;
1701 case 1:
1702 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1703 break;
1704 case 2:
1705 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1706 break;
1707 case 3:
1708 return 1;
1709 }
1710 gen_op_iwmmxt_movq_wRn_M0(wrd);
1711 gen_op_iwmmxt_set_mup();
1712 gen_op_iwmmxt_set_cup();
1713 break;
1714 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1715 wrd = (insn >> 12) & 0xf;
1716 rd0 = (insn >> 16) & 0xf;
1717 rd1 = (insn >> 0) & 0xf;
1718 gen_op_iwmmxt_movq_M0_wRn(rd0);
1719 switch ((insn >> 22) & 3) {
1720 case 0:
1721 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1722 break;
1723 case 1:
1724 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1725 break;
1726 case 2:
1727 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1728 break;
1729 case 3:
1730 return 1;
1731 }
1732 gen_op_iwmmxt_movq_wRn_M0(wrd);
1733 gen_op_iwmmxt_set_mup();
1734 gen_op_iwmmxt_set_cup();
1735 break;
1736 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1737 wrd = (insn >> 12) & 0xf;
1738 rd0 = (insn >> 16) & 0xf;
1739 rd1 = (insn >> 0) & 0xf;
1740 gen_op_iwmmxt_movq_M0_wRn(rd0);
1741 if (insn & (1 << 22))
1742 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1743 else
1744 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1745 if (!(insn & (1 << 20)))
1746 gen_op_iwmmxt_addl_M0_wRn(wrd);
1747 gen_op_iwmmxt_movq_wRn_M0(wrd);
1748 gen_op_iwmmxt_set_mup();
1749 break;
1750 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1751 wrd = (insn >> 12) & 0xf;
1752 rd0 = (insn >> 16) & 0xf;
1753 rd1 = (insn >> 0) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1755 if (insn & (1 << 21)) {
1756 if (insn & (1 << 20))
1757 gen_op_iwmmxt_mulshw_M0_wRn(rd1);
1758 else
1759 gen_op_iwmmxt_mulslw_M0_wRn(rd1);
1760 } else {
1761 if (insn & (1 << 20))
1762 gen_op_iwmmxt_muluhw_M0_wRn(rd1);
1763 else
1764 gen_op_iwmmxt_mululw_M0_wRn(rd1);
1765 }
18c9b560
AZ
1766 gen_op_iwmmxt_movq_wRn_M0(wrd);
1767 gen_op_iwmmxt_set_mup();
1768 break;
1769 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1770 wrd = (insn >> 12) & 0xf;
1771 rd0 = (insn >> 16) & 0xf;
1772 rd1 = (insn >> 0) & 0xf;
1773 gen_op_iwmmxt_movq_M0_wRn(rd0);
1774 if (insn & (1 << 21))
1775 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1776 else
1777 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1778 if (!(insn & (1 << 20))) {
e677137d
PB
1779 iwmmxt_load_reg(cpu_V1, wrd);
1780 tcg_gen_add_i64(cpu_M0, cpu_M0, cpu_V1);
18c9b560
AZ
1781 }
1782 gen_op_iwmmxt_movq_wRn_M0(wrd);
1783 gen_op_iwmmxt_set_mup();
1784 break;
1785 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1786 wrd = (insn >> 12) & 0xf;
1787 rd0 = (insn >> 16) & 0xf;
1788 rd1 = (insn >> 0) & 0xf;
1789 gen_op_iwmmxt_movq_M0_wRn(rd0);
1790 switch ((insn >> 22) & 3) {
1791 case 0:
1792 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1793 break;
1794 case 1:
1795 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1796 break;
1797 case 2:
1798 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1799 break;
1800 case 3:
1801 return 1;
1802 }
1803 gen_op_iwmmxt_movq_wRn_M0(wrd);
1804 gen_op_iwmmxt_set_mup();
1805 gen_op_iwmmxt_set_cup();
1806 break;
1807 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1808 wrd = (insn >> 12) & 0xf;
1809 rd0 = (insn >> 16) & 0xf;
1810 rd1 = (insn >> 0) & 0xf;
1811 gen_op_iwmmxt_movq_M0_wRn(rd0);
e677137d
PB
1812 if (insn & (1 << 22)) {
1813 if (insn & (1 << 20))
1814 gen_op_iwmmxt_avgw1_M0_wRn(rd1);
1815 else
1816 gen_op_iwmmxt_avgw0_M0_wRn(rd1);
1817 } else {
1818 if (insn & (1 << 20))
1819 gen_op_iwmmxt_avgb1_M0_wRn(rd1);
1820 else
1821 gen_op_iwmmxt_avgb0_M0_wRn(rd1);
1822 }
18c9b560
AZ
1823 gen_op_iwmmxt_movq_wRn_M0(wrd);
1824 gen_op_iwmmxt_set_mup();
1825 gen_op_iwmmxt_set_cup();
1826 break;
1827 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1828 wrd = (insn >> 12) & 0xf;
1829 rd0 = (insn >> 16) & 0xf;
1830 rd1 = (insn >> 0) & 0xf;
1831 gen_op_iwmmxt_movq_M0_wRn(rd0);
1832 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1833 gen_op_movl_T1_im(7);
1834 gen_op_andl_T0_T1();
1835 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1836 gen_op_iwmmxt_movq_wRn_M0(wrd);
1837 gen_op_iwmmxt_set_mup();
1838 break;
1839 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1840 rd = (insn >> 12) & 0xf;
1841 wrd = (insn >> 16) & 0xf;
1842 gen_movl_T0_reg(s, rd);
1843 gen_op_iwmmxt_movq_M0_wRn(wrd);
1844 switch ((insn >> 6) & 3) {
1845 case 0:
1846 gen_op_movl_T1_im(0xff);
1847 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1848 break;
1849 case 1:
1850 gen_op_movl_T1_im(0xffff);
1851 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1852 break;
1853 case 2:
1854 gen_op_movl_T1_im(0xffffffff);
1855 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1856 break;
1857 case 3:
1858 return 1;
1859 }
1860 gen_op_iwmmxt_movq_wRn_M0(wrd);
1861 gen_op_iwmmxt_set_mup();
1862 break;
1863 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1864 rd = (insn >> 12) & 0xf;
1865 wrd = (insn >> 16) & 0xf;
1866 if (rd == 15)
1867 return 1;
1868 gen_op_iwmmxt_movq_M0_wRn(wrd);
1869 switch ((insn >> 22) & 3) {
1870 case 0:
1871 if (insn & 8)
1872 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1873 else {
e677137d 1874 gen_op_iwmmxt_extru_T0_M0((insn & 7) << 3, 0xff);
18c9b560
AZ
1875 }
1876 break;
1877 case 1:
1878 if (insn & 8)
1879 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1880 else {
e677137d 1881 gen_op_iwmmxt_extru_T0_M0((insn & 3) << 4, 0xffff);
18c9b560
AZ
1882 }
1883 break;
1884 case 2:
e677137d 1885 gen_op_iwmmxt_extru_T0_M0((insn & 1) << 5, ~0u);
18c9b560
AZ
1886 break;
1887 case 3:
1888 return 1;
1889 }
b26eefb6 1890 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1891 break;
1892 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1893 if ((insn & 0x000ff008) != 0x0003f000)
1894 return 1;
1895 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1896 switch ((insn >> 22) & 3) {
1897 case 0:
1898 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1899 break;
1900 case 1:
1901 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1902 break;
1903 case 2:
1904 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1905 break;
1906 case 3:
1907 return 1;
1908 }
1909 gen_op_shll_T1_im(28);
d9ba4830 1910 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1911 break;
1912 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1913 rd = (insn >> 12) & 0xf;
1914 wrd = (insn >> 16) & 0xf;
1915 gen_movl_T0_reg(s, rd);
1916 switch ((insn >> 6) & 3) {
1917 case 0:
e677137d 1918 gen_helper_iwmmxt_bcstb(cpu_M0, cpu_T[0]);
18c9b560
AZ
1919 break;
1920 case 1:
e677137d 1921 gen_helper_iwmmxt_bcstw(cpu_M0, cpu_T[0]);
18c9b560
AZ
1922 break;
1923 case 2:
e677137d 1924 gen_helper_iwmmxt_bcstl(cpu_M0, cpu_T[0]);
18c9b560
AZ
1925 break;
1926 case 3:
1927 return 1;
1928 }
1929 gen_op_iwmmxt_movq_wRn_M0(wrd);
1930 gen_op_iwmmxt_set_mup();
1931 break;
1932 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1933 if ((insn & 0x000ff00f) != 0x0003f000)
1934 return 1;
1935 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1936 switch ((insn >> 22) & 3) {
1937 case 0:
1938 for (i = 0; i < 7; i ++) {
1939 gen_op_shll_T1_im(4);
1940 gen_op_andl_T0_T1();
1941 }
1942 break;
1943 case 1:
1944 for (i = 0; i < 3; i ++) {
1945 gen_op_shll_T1_im(8);
1946 gen_op_andl_T0_T1();
1947 }
1948 break;
1949 case 2:
1950 gen_op_shll_T1_im(16);
1951 gen_op_andl_T0_T1();
1952 break;
1953 case 3:
1954 return 1;
1955 }
d9ba4830 1956 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1957 break;
1958 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1959 wrd = (insn >> 12) & 0xf;
1960 rd0 = (insn >> 16) & 0xf;
1961 gen_op_iwmmxt_movq_M0_wRn(rd0);
1962 switch ((insn >> 22) & 3) {
1963 case 0:
e677137d 1964 gen_helper_iwmmxt_addcb(cpu_M0, cpu_M0);
18c9b560
AZ
1965 break;
1966 case 1:
e677137d 1967 gen_helper_iwmmxt_addcw(cpu_M0, cpu_M0);
18c9b560
AZ
1968 break;
1969 case 2:
e677137d 1970 gen_helper_iwmmxt_addcl(cpu_M0, cpu_M0);
18c9b560
AZ
1971 break;
1972 case 3:
1973 return 1;
1974 }
1975 gen_op_iwmmxt_movq_wRn_M0(wrd);
1976 gen_op_iwmmxt_set_mup();
1977 break;
1978 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1979 if ((insn & 0x000ff00f) != 0x0003f000)
1980 return 1;
1981 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1982 switch ((insn >> 22) & 3) {
1983 case 0:
1984 for (i = 0; i < 7; i ++) {
1985 gen_op_shll_T1_im(4);
1986 gen_op_orl_T0_T1();
1987 }
1988 break;
1989 case 1:
1990 for (i = 0; i < 3; i ++) {
1991 gen_op_shll_T1_im(8);
1992 gen_op_orl_T0_T1();
1993 }
1994 break;
1995 case 2:
1996 gen_op_shll_T1_im(16);
1997 gen_op_orl_T0_T1();
1998 break;
1999 case 3:
2000 return 1;
2001 }
d9ba4830 2002 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
2003 break;
2004 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
2005 rd = (insn >> 12) & 0xf;
2006 rd0 = (insn >> 16) & 0xf;
2007 if ((insn & 0xf) != 0)
2008 return 1;
2009 gen_op_iwmmxt_movq_M0_wRn(rd0);
2010 switch ((insn >> 22) & 3) {
2011 case 0:
e677137d 2012 gen_helper_iwmmxt_msbb(cpu_T[0], cpu_M0);
18c9b560
AZ
2013 break;
2014 case 1:
e677137d 2015 gen_helper_iwmmxt_msbw(cpu_T[0], cpu_M0);
18c9b560
AZ
2016 break;
2017 case 2:
e677137d 2018 gen_helper_iwmmxt_msbl(cpu_T[0], cpu_M0);
18c9b560
AZ
2019 break;
2020 case 3:
2021 return 1;
2022 }
2023 gen_movl_reg_T0(s, rd);
2024 break;
2025 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
2026 case 0x906: case 0xb06: case 0xd06: case 0xf06:
2027 wrd = (insn >> 12) & 0xf;
2028 rd0 = (insn >> 16) & 0xf;
2029 rd1 = (insn >> 0) & 0xf;
2030 gen_op_iwmmxt_movq_M0_wRn(rd0);
2031 switch ((insn >> 22) & 3) {
2032 case 0:
2033 if (insn & (1 << 21))
2034 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
2035 else
2036 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
2037 break;
2038 case 1:
2039 if (insn & (1 << 21))
2040 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
2041 else
2042 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
2043 break;
2044 case 2:
2045 if (insn & (1 << 21))
2046 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
2047 else
2048 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
2049 break;
2050 case 3:
2051 return 1;
2052 }
2053 gen_op_iwmmxt_movq_wRn_M0(wrd);
2054 gen_op_iwmmxt_set_mup();
2055 gen_op_iwmmxt_set_cup();
2056 break;
2057 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
2058 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
2059 wrd = (insn >> 12) & 0xf;
2060 rd0 = (insn >> 16) & 0xf;
2061 gen_op_iwmmxt_movq_M0_wRn(rd0);
2062 switch ((insn >> 22) & 3) {
2063 case 0:
2064 if (insn & (1 << 21))
2065 gen_op_iwmmxt_unpacklsb_M0();
2066 else
2067 gen_op_iwmmxt_unpacklub_M0();
2068 break;
2069 case 1:
2070 if (insn & (1 << 21))
2071 gen_op_iwmmxt_unpacklsw_M0();
2072 else
2073 gen_op_iwmmxt_unpackluw_M0();
2074 break;
2075 case 2:
2076 if (insn & (1 << 21))
2077 gen_op_iwmmxt_unpacklsl_M0();
2078 else
2079 gen_op_iwmmxt_unpacklul_M0();
2080 break;
2081 case 3:
2082 return 1;
2083 }
2084 gen_op_iwmmxt_movq_wRn_M0(wrd);
2085 gen_op_iwmmxt_set_mup();
2086 gen_op_iwmmxt_set_cup();
2087 break;
2088 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
2089 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
2090 wrd = (insn >> 12) & 0xf;
2091 rd0 = (insn >> 16) & 0xf;
2092 gen_op_iwmmxt_movq_M0_wRn(rd0);
2093 switch ((insn >> 22) & 3) {
2094 case 0:
2095 if (insn & (1 << 21))
2096 gen_op_iwmmxt_unpackhsb_M0();
2097 else
2098 gen_op_iwmmxt_unpackhub_M0();
2099 break;
2100 case 1:
2101 if (insn & (1 << 21))
2102 gen_op_iwmmxt_unpackhsw_M0();
2103 else
2104 gen_op_iwmmxt_unpackhuw_M0();
2105 break;
2106 case 2:
2107 if (insn & (1 << 21))
2108 gen_op_iwmmxt_unpackhsl_M0();
2109 else
2110 gen_op_iwmmxt_unpackhul_M0();
2111 break;
2112 case 3:
2113 return 1;
2114 }
2115 gen_op_iwmmxt_movq_wRn_M0(wrd);
2116 gen_op_iwmmxt_set_mup();
2117 gen_op_iwmmxt_set_cup();
2118 break;
2119 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
2120 case 0x214: case 0x614: case 0xa14: case 0xe14:
2121 wrd = (insn >> 12) & 0xf;
2122 rd0 = (insn >> 16) & 0xf;
2123 gen_op_iwmmxt_movq_M0_wRn(rd0);
2124 if (gen_iwmmxt_shift(insn, 0xff))
2125 return 1;
2126 switch ((insn >> 22) & 3) {
2127 case 0:
2128 return 1;
2129 case 1:
e677137d 2130 gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2131 break;
2132 case 2:
e677137d 2133 gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2134 break;
2135 case 3:
e677137d 2136 gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2137 break;
2138 }
2139 gen_op_iwmmxt_movq_wRn_M0(wrd);
2140 gen_op_iwmmxt_set_mup();
2141 gen_op_iwmmxt_set_cup();
2142 break;
2143 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
2144 case 0x014: case 0x414: case 0x814: case 0xc14:
2145 wrd = (insn >> 12) & 0xf;
2146 rd0 = (insn >> 16) & 0xf;
2147 gen_op_iwmmxt_movq_M0_wRn(rd0);
2148 if (gen_iwmmxt_shift(insn, 0xff))
2149 return 1;
2150 switch ((insn >> 22) & 3) {
2151 case 0:
2152 return 1;
2153 case 1:
e677137d 2154 gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2155 break;
2156 case 2:
e677137d 2157 gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2158 break;
2159 case 3:
e677137d 2160 gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2161 break;
2162 }
2163 gen_op_iwmmxt_movq_wRn_M0(wrd);
2164 gen_op_iwmmxt_set_mup();
2165 gen_op_iwmmxt_set_cup();
2166 break;
2167 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
2168 case 0x114: case 0x514: case 0x914: case 0xd14:
2169 wrd = (insn >> 12) & 0xf;
2170 rd0 = (insn >> 16) & 0xf;
2171 gen_op_iwmmxt_movq_M0_wRn(rd0);
2172 if (gen_iwmmxt_shift(insn, 0xff))
2173 return 1;
2174 switch ((insn >> 22) & 3) {
2175 case 0:
2176 return 1;
2177 case 1:
e677137d 2178 gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2179 break;
2180 case 2:
e677137d 2181 gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2182 break;
2183 case 3:
e677137d 2184 gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2185 break;
2186 }
2187 gen_op_iwmmxt_movq_wRn_M0(wrd);
2188 gen_op_iwmmxt_set_mup();
2189 gen_op_iwmmxt_set_cup();
2190 break;
2191 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
2192 case 0x314: case 0x714: case 0xb14: case 0xf14:
2193 wrd = (insn >> 12) & 0xf;
2194 rd0 = (insn >> 16) & 0xf;
2195 gen_op_iwmmxt_movq_M0_wRn(rd0);
2196 switch ((insn >> 22) & 3) {
2197 case 0:
2198 return 1;
2199 case 1:
2200 if (gen_iwmmxt_shift(insn, 0xf))
2201 return 1;
e677137d 2202 gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2203 break;
2204 case 2:
2205 if (gen_iwmmxt_shift(insn, 0x1f))
2206 return 1;
e677137d 2207 gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2208 break;
2209 case 3:
2210 if (gen_iwmmxt_shift(insn, 0x3f))
2211 return 1;
e677137d 2212 gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2213 break;
2214 }
2215 gen_op_iwmmxt_movq_wRn_M0(wrd);
2216 gen_op_iwmmxt_set_mup();
2217 gen_op_iwmmxt_set_cup();
2218 break;
2219 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
2220 case 0x916: case 0xb16: case 0xd16: case 0xf16:
2221 wrd = (insn >> 12) & 0xf;
2222 rd0 = (insn >> 16) & 0xf;
2223 rd1 = (insn >> 0) & 0xf;
2224 gen_op_iwmmxt_movq_M0_wRn(rd0);
2225 switch ((insn >> 22) & 3) {
2226 case 0:
2227 if (insn & (1 << 21))
2228 gen_op_iwmmxt_minsb_M0_wRn(rd1);
2229 else
2230 gen_op_iwmmxt_minub_M0_wRn(rd1);
2231 break;
2232 case 1:
2233 if (insn & (1 << 21))
2234 gen_op_iwmmxt_minsw_M0_wRn(rd1);
2235 else
2236 gen_op_iwmmxt_minuw_M0_wRn(rd1);
2237 break;
2238 case 2:
2239 if (insn & (1 << 21))
2240 gen_op_iwmmxt_minsl_M0_wRn(rd1);
2241 else
2242 gen_op_iwmmxt_minul_M0_wRn(rd1);
2243 break;
2244 case 3:
2245 return 1;
2246 }
2247 gen_op_iwmmxt_movq_wRn_M0(wrd);
2248 gen_op_iwmmxt_set_mup();
2249 break;
2250 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
2251 case 0x816: case 0xa16: case 0xc16: case 0xe16:
2252 wrd = (insn >> 12) & 0xf;
2253 rd0 = (insn >> 16) & 0xf;
2254 rd1 = (insn >> 0) & 0xf;
2255 gen_op_iwmmxt_movq_M0_wRn(rd0);
2256 switch ((insn >> 22) & 3) {
2257 case 0:
2258 if (insn & (1 << 21))
2259 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
2260 else
2261 gen_op_iwmmxt_maxub_M0_wRn(rd1);
2262 break;
2263 case 1:
2264 if (insn & (1 << 21))
2265 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
2266 else
2267 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
2268 break;
2269 case 2:
2270 if (insn & (1 << 21))
2271 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
2272 else
2273 gen_op_iwmmxt_maxul_M0_wRn(rd1);
2274 break;
2275 case 3:
2276 return 1;
2277 }
2278 gen_op_iwmmxt_movq_wRn_M0(wrd);
2279 gen_op_iwmmxt_set_mup();
2280 break;
2281 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
2282 case 0x402: case 0x502: case 0x602: case 0x702:
2283 wrd = (insn >> 12) & 0xf;
2284 rd0 = (insn >> 16) & 0xf;
2285 rd1 = (insn >> 0) & 0xf;
2286 gen_op_iwmmxt_movq_M0_wRn(rd0);
2287 gen_op_movl_T0_im((insn >> 20) & 3);
2288 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
2289 gen_op_iwmmxt_movq_wRn_M0(wrd);
2290 gen_op_iwmmxt_set_mup();
2291 break;
2292 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
2293 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
2294 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
2295 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
2296 wrd = (insn >> 12) & 0xf;
2297 rd0 = (insn >> 16) & 0xf;
2298 rd1 = (insn >> 0) & 0xf;
2299 gen_op_iwmmxt_movq_M0_wRn(rd0);
2300 switch ((insn >> 20) & 0xf) {
2301 case 0x0:
2302 gen_op_iwmmxt_subnb_M0_wRn(rd1);
2303 break;
2304 case 0x1:
2305 gen_op_iwmmxt_subub_M0_wRn(rd1);
2306 break;
2307 case 0x3:
2308 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2309 break;
2310 case 0x4:
2311 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2312 break;
2313 case 0x5:
2314 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2315 break;
2316 case 0x7:
2317 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2318 break;
2319 case 0x8:
2320 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2321 break;
2322 case 0x9:
2323 gen_op_iwmmxt_subul_M0_wRn(rd1);
2324 break;
2325 case 0xb:
2326 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2327 break;
2328 default:
2329 return 1;
2330 }
2331 gen_op_iwmmxt_movq_wRn_M0(wrd);
2332 gen_op_iwmmxt_set_mup();
2333 gen_op_iwmmxt_set_cup();
2334 break;
2335 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2336 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2337 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2338 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2339 wrd = (insn >> 12) & 0xf;
2340 rd0 = (insn >> 16) & 0xf;
2341 gen_op_iwmmxt_movq_M0_wRn(rd0);
2342 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
e677137d 2343 gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, cpu_T[0]);
18c9b560
AZ
2344 gen_op_iwmmxt_movq_wRn_M0(wrd);
2345 gen_op_iwmmxt_set_mup();
2346 gen_op_iwmmxt_set_cup();
2347 break;
2348 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2349 case 0x418: case 0x518: case 0x618: case 0x718:
2350 case 0x818: case 0x918: case 0xa18: case 0xb18:
2351 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2352 wrd = (insn >> 12) & 0xf;
2353 rd0 = (insn >> 16) & 0xf;
2354 rd1 = (insn >> 0) & 0xf;
2355 gen_op_iwmmxt_movq_M0_wRn(rd0);
2356 switch ((insn >> 20) & 0xf) {
2357 case 0x0:
2358 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2359 break;
2360 case 0x1:
2361 gen_op_iwmmxt_addub_M0_wRn(rd1);
2362 break;
2363 case 0x3:
2364 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2365 break;
2366 case 0x4:
2367 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2368 break;
2369 case 0x5:
2370 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2371 break;
2372 case 0x7:
2373 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2374 break;
2375 case 0x8:
2376 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2377 break;
2378 case 0x9:
2379 gen_op_iwmmxt_addul_M0_wRn(rd1);
2380 break;
2381 case 0xb:
2382 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2383 break;
2384 default:
2385 return 1;
2386 }
2387 gen_op_iwmmxt_movq_wRn_M0(wrd);
2388 gen_op_iwmmxt_set_mup();
2389 gen_op_iwmmxt_set_cup();
2390 break;
2391 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2392 case 0x408: case 0x508: case 0x608: case 0x708:
2393 case 0x808: case 0x908: case 0xa08: case 0xb08:
2394 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2395 wrd = (insn >> 12) & 0xf;
2396 rd0 = (insn >> 16) & 0xf;
2397 rd1 = (insn >> 0) & 0xf;
2398 gen_op_iwmmxt_movq_M0_wRn(rd0);
2399 if (!(insn & (1 << 20)))
2400 return 1;
2401 switch ((insn >> 22) & 3) {
2402 case 0:
2403 return 1;
2404 case 1:
2405 if (insn & (1 << 21))
2406 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2407 else
2408 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2409 break;
2410 case 2:
2411 if (insn & (1 << 21))
2412 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2413 else
2414 gen_op_iwmmxt_packul_M0_wRn(rd1);
2415 break;
2416 case 3:
2417 if (insn & (1 << 21))
2418 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2419 else
2420 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2421 break;
2422 }
2423 gen_op_iwmmxt_movq_wRn_M0(wrd);
2424 gen_op_iwmmxt_set_mup();
2425 gen_op_iwmmxt_set_cup();
2426 break;
2427 case 0x201: case 0x203: case 0x205: case 0x207:
2428 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2429 case 0x211: case 0x213: case 0x215: case 0x217:
2430 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2431 wrd = (insn >> 5) & 0xf;
2432 rd0 = (insn >> 12) & 0xf;
2433 rd1 = (insn >> 0) & 0xf;
2434 if (rd0 == 0xf || rd1 == 0xf)
2435 return 1;
2436 gen_op_iwmmxt_movq_M0_wRn(wrd);
2437 switch ((insn >> 16) & 0xf) {
2438 case 0x0: /* TMIA */
b26eefb6
PB
2439 gen_movl_T0_reg(s, rd0);
2440 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2441 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2442 break;
2443 case 0x8: /* TMIAPH */
b26eefb6
PB
2444 gen_movl_T0_reg(s, rd0);
2445 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2446 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2447 break;
2448 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2449 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2450 if (insn & (1 << 16))
2451 gen_op_shrl_T1_im(16);
2452 gen_op_movl_T0_T1();
b26eefb6 2453 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2454 if (insn & (1 << 17))
2455 gen_op_shrl_T1_im(16);
2456 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2457 break;
2458 default:
2459 return 1;
2460 }
2461 gen_op_iwmmxt_movq_wRn_M0(wrd);
2462 gen_op_iwmmxt_set_mup();
2463 break;
2464 default:
2465 return 1;
2466 }
2467
2468 return 0;
2469}
2470
2471/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2472 (ie. an undefined instruction). */
2473static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2474{
2475 int acc, rd0, rd1, rdhi, rdlo;
2476
2477 if ((insn & 0x0ff00f10) == 0x0e200010) {
2478 /* Multiply with Internal Accumulate Format */
2479 rd0 = (insn >> 12) & 0xf;
2480 rd1 = insn & 0xf;
2481 acc = (insn >> 5) & 7;
2482
2483 if (acc != 0)
2484 return 1;
2485
2486 switch ((insn >> 16) & 0xf) {
2487 case 0x0: /* MIA */
b26eefb6
PB
2488 gen_movl_T0_reg(s, rd0);
2489 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2490 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2491 break;
2492 case 0x8: /* MIAPH */
b26eefb6
PB
2493 gen_movl_T0_reg(s, rd0);
2494 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2495 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2496 break;
2497 case 0xc: /* MIABB */
2498 case 0xd: /* MIABT */
2499 case 0xe: /* MIATB */
2500 case 0xf: /* MIATT */
b26eefb6 2501 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2502 if (insn & (1 << 16))
2503 gen_op_shrl_T1_im(16);
2504 gen_op_movl_T0_T1();
b26eefb6 2505 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2506 if (insn & (1 << 17))
2507 gen_op_shrl_T1_im(16);
2508 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2509 break;
2510 default:
2511 return 1;
2512 }
2513
2514 gen_op_iwmmxt_movq_wRn_M0(acc);
2515 return 0;
2516 }
2517
2518 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2519 /* Internal Accumulator Access Format */
2520 rdhi = (insn >> 16) & 0xf;
2521 rdlo = (insn >> 12) & 0xf;
2522 acc = insn & 7;
2523
2524 if (acc != 0)
2525 return 1;
2526
2527 if (insn & ARM_CP_RW_BIT) { /* MRA */
e677137d 2528 gen_iwmmxt_movl_T0_T1_wRn(acc);
b26eefb6 2529 gen_movl_reg_T0(s, rdlo);
18c9b560
AZ
2530 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2531 gen_op_andl_T0_T1();
b26eefb6 2532 gen_movl_reg_T0(s, rdhi);
18c9b560 2533 } else { /* MAR */
b26eefb6
PB
2534 gen_movl_T0_reg(s, rdlo);
2535 gen_movl_T1_reg(s, rdhi);
e677137d 2536 gen_iwmmxt_movl_wRn_T0_T1(acc);
18c9b560
AZ
2537 }
2538 return 0;
2539 }
2540
2541 return 1;
2542}
2543
c1713132
AZ
2544/* Disassemble system coprocessor instruction. Return nonzero if
2545 instruction is not defined. */
2546static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2547{
8984bd2e 2548 TCGv tmp;
c1713132
AZ
2549 uint32_t rd = (insn >> 12) & 0xf;
2550 uint32_t cp = (insn >> 8) & 0xf;
2551 if (IS_USER(s)) {
2552 return 1;
2553 }
2554
18c9b560 2555 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2556 if (!env->cp[cp].cp_read)
2557 return 1;
8984bd2e
PB
2558 gen_set_pc_im(s->pc);
2559 tmp = new_tmp();
2560 gen_helper_get_cp(tmp, cpu_env, tcg_const_i32(insn));
2561 store_reg(s, rd, tmp);
c1713132
AZ
2562 } else {
2563 if (!env->cp[cp].cp_write)
2564 return 1;
8984bd2e
PB
2565 gen_set_pc_im(s->pc);
2566 tmp = load_reg(s, rd);
2567 gen_helper_set_cp(cpu_env, tcg_const_i32(insn), tmp);
a60de947 2568 dead_tmp(tmp);
c1713132
AZ
2569 }
2570 return 0;
2571}
2572
9ee6e8bb
PB
2573static int cp15_user_ok(uint32_t insn)
2574{
2575 int cpn = (insn >> 16) & 0xf;
2576 int cpm = insn & 0xf;
2577 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2578
2579 if (cpn == 13 && cpm == 0) {
2580 /* TLS register. */
2581 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2582 return 1;
2583 }
2584 if (cpn == 7) {
2585 /* ISB, DSB, DMB. */
2586 if ((cpm == 5 && op == 4)
2587 || (cpm == 10 && (op == 4 || op == 5)))
2588 return 1;
2589 }
2590 return 0;
2591}
2592
b5ff1b31
FB
2593/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2594 instruction is not defined. */
a90b7318 2595static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2596{
2597 uint32_t rd;
8984bd2e 2598 TCGv tmp;
b5ff1b31 2599
9ee6e8bb
PB
2600 /* M profile cores use memory mapped registers instead of cp15. */
2601 if (arm_feature(env, ARM_FEATURE_M))
2602 return 1;
2603
2604 if ((insn & (1 << 25)) == 0) {
2605 if (insn & (1 << 20)) {
2606 /* mrrc */
2607 return 1;
2608 }
2609 /* mcrr. Used for block cache operations, so implement as no-op. */
2610 return 0;
2611 }
2612 if ((insn & (1 << 4)) == 0) {
2613 /* cdp */
2614 return 1;
2615 }
2616 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2617 return 1;
2618 }
9332f9da
FB
2619 if ((insn & 0x0fff0fff) == 0x0e070f90
2620 || (insn & 0x0fff0fff) == 0x0e070f58) {
2621 /* Wait for interrupt. */
8984bd2e 2622 gen_set_pc_im(s->pc);
9ee6e8bb 2623 s->is_jmp = DISAS_WFI;
9332f9da
FB
2624 return 0;
2625 }
b5ff1b31 2626 rd = (insn >> 12) & 0xf;
18c9b560 2627 if (insn & ARM_CP_RW_BIT) {
8984bd2e
PB
2628 tmp = new_tmp();
2629 gen_helper_get_cp15(tmp, cpu_env, tcg_const_i32(insn));
b5ff1b31
FB
2630 /* If the destination register is r15 then sets condition codes. */
2631 if (rd != 15)
8984bd2e
PB
2632 store_reg(s, rd, tmp);
2633 else
2634 dead_tmp(tmp);
b5ff1b31 2635 } else {
8984bd2e
PB
2636 tmp = load_reg(s, rd);
2637 gen_helper_set_cp15(cpu_env, tcg_const_i32(insn), tmp);
2638 dead_tmp(tmp);
a90b7318
AZ
2639 /* Normally we would always end the TB here, but Linux
2640 * arch/arm/mach-pxa/sleep.S expects two instructions following
2641 * an MMU enable to execute from cache. Imitate this behaviour. */
2642 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2643 (insn & 0x0fff0fff) != 0x0e010f10)
2644 gen_lookup_tb(s);
b5ff1b31 2645 }
b5ff1b31
FB
2646 return 0;
2647}
2648
9ee6e8bb
PB
/* Shift right for positive n, left for negative n; used to place the
   "big" register field of a VFP encoding at bit 1.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: 4-bit field plus the
   low "extra" bit from smallbit.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number.  On VFP3 the extra bit is
   the high bit (D16-D31 exist); pre-VFP3 it must be zero or the insn is
   undefined (hence the embedded "return 1").  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* The D, N and M register fields of a VFP instruction.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
2668
4373f3ce
PB
2669/* Move between integer and VFP cores. */
2670static TCGv gen_vfp_mrs(void)
2671{
2672 TCGv tmp = new_tmp();
2673 tcg_gen_mov_i32(tmp, cpu_F0s);
2674 return tmp;
2675}
2676
2677static void gen_vfp_msr(TCGv tmp)
2678{
2679 tcg_gen_mov_i32(cpu_F0s, tmp);
2680 dead_tmp(tmp);
2681}
2682
9ee6e8bb
PB
2683static inline int
2684vfp_enabled(CPUState * env)
2685{
2686 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2687}
2688
ad69471c
PB
2689static void gen_neon_dup_u8(TCGv var, int shift)
2690{
2691 TCGv tmp = new_tmp();
2692 if (shift)
2693 tcg_gen_shri_i32(var, var, shift);
86831435 2694 tcg_gen_ext8u_i32(var, var);
ad69471c
PB
2695 tcg_gen_shli_i32(tmp, var, 8);
2696 tcg_gen_or_i32(var, var, tmp);
2697 tcg_gen_shli_i32(tmp, var, 16);
2698 tcg_gen_or_i32(var, var, tmp);
2699 dead_tmp(tmp);
2700}
2701
2702static void gen_neon_dup_low16(TCGv var)
2703{
2704 TCGv tmp = new_tmp();
86831435 2705 tcg_gen_ext16u_i32(var, var);
ad69471c
PB
2706 tcg_gen_shli_i32(tmp, var, 16);
2707 tcg_gen_or_i32(var, var, tmp);
2708 dead_tmp(tmp);
2709}
2710
2711static void gen_neon_dup_high16(TCGv var)
2712{
2713 TCGv tmp = new_tmp();
2714 tcg_gen_andi_i32(var, var, 0xffff0000);
2715 tcg_gen_shri_i32(tmp, var, 16);
2716 tcg_gen_or_i32(var, var, tmp);
2717 dead_tmp(tmp);
2718}
2719
b7bcbe95
FB
2720/* Disassemble a VFP instruction. Returns nonzero if an error occured
2721 (ie. an undefined instruction). */
2722static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2723{
2724 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2725 int dp, veclen;
4373f3ce 2726 TCGv tmp;
ad69471c 2727 TCGv tmp2;
b7bcbe95 2728
40f137e1
PB
2729 if (!arm_feature(env, ARM_FEATURE_VFP))
2730 return 1;
2731
9ee6e8bb
PB
2732 if (!vfp_enabled(env)) {
2733 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2734 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2735 return 1;
2736 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2737 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2738 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2739 return 1;
2740 }
b7bcbe95
FB
2741 dp = ((insn & 0xf00) == 0xb00);
2742 switch ((insn >> 24) & 0xf) {
2743 case 0xe:
2744 if (insn & (1 << 4)) {
2745 /* single register transfer */
b7bcbe95
FB
2746 rd = (insn >> 12) & 0xf;
2747 if (dp) {
9ee6e8bb
PB
2748 int size;
2749 int pass;
2750
2751 VFP_DREG_N(rn, insn);
2752 if (insn & 0xf)
b7bcbe95 2753 return 1;
9ee6e8bb
PB
2754 if (insn & 0x00c00060
2755 && !arm_feature(env, ARM_FEATURE_NEON))
2756 return 1;
2757
2758 pass = (insn >> 21) & 1;
2759 if (insn & (1 << 22)) {
2760 size = 0;
2761 offset = ((insn >> 5) & 3) * 8;
2762 } else if (insn & (1 << 5)) {
2763 size = 1;
2764 offset = (insn & (1 << 6)) ? 16 : 0;
2765 } else {
2766 size = 2;
2767 offset = 0;
2768 }
18c9b560 2769 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2770 /* vfp->arm */
ad69471c 2771 tmp = neon_load_reg(rn, pass);
9ee6e8bb
PB
2772 switch (size) {
2773 case 0:
9ee6e8bb 2774 if (offset)
ad69471c 2775 tcg_gen_shri_i32(tmp, tmp, offset);
9ee6e8bb 2776 if (insn & (1 << 23))
ad69471c 2777 gen_uxtb(tmp);
9ee6e8bb 2778 else
ad69471c 2779 gen_sxtb(tmp);
9ee6e8bb
PB
2780 break;
2781 case 1:
9ee6e8bb
PB
2782 if (insn & (1 << 23)) {
2783 if (offset) {
ad69471c 2784 tcg_gen_shri_i32(tmp, tmp, 16);
9ee6e8bb 2785 } else {
ad69471c 2786 gen_uxth(tmp);
9ee6e8bb
PB
2787 }
2788 } else {
2789 if (offset) {
ad69471c 2790 tcg_gen_sari_i32(tmp, tmp, 16);
9ee6e8bb 2791 } else {
ad69471c 2792 gen_sxth(tmp);
9ee6e8bb
PB
2793 }
2794 }
2795 break;
2796 case 2:
9ee6e8bb
PB
2797 break;
2798 }
ad69471c 2799 store_reg(s, rd, tmp);
b7bcbe95
FB
2800 } else {
2801 /* arm->vfp */
ad69471c 2802 tmp = load_reg(s, rd);
9ee6e8bb
PB
2803 if (insn & (1 << 23)) {
2804 /* VDUP */
2805 if (size == 0) {
ad69471c 2806 gen_neon_dup_u8(tmp, 0);
9ee6e8bb 2807 } else if (size == 1) {
ad69471c 2808 gen_neon_dup_low16(tmp);
9ee6e8bb 2809 }
ad69471c
PB
2810 tmp2 = new_tmp();
2811 tcg_gen_mov_i32(tmp2, tmp);
2812 neon_store_reg(rn, 0, tmp2);
2813 neon_store_reg(rn, 0, tmp);
9ee6e8bb
PB
2814 } else {
2815 /* VMOV */
2816 switch (size) {
2817 case 0:
ad69471c
PB
2818 tmp2 = neon_load_reg(rn, pass);
2819 gen_bfi(tmp, tmp2, tmp, offset, 0xff);
2820 dead_tmp(tmp2);
9ee6e8bb
PB
2821 break;
2822 case 1:
ad69471c
PB
2823 tmp2 = neon_load_reg(rn, pass);
2824 gen_bfi(tmp, tmp2, tmp, offset, 0xffff);
2825 dead_tmp(tmp2);
9ee6e8bb
PB
2826 break;
2827 case 2:
9ee6e8bb
PB
2828 break;
2829 }
ad69471c 2830 neon_store_reg(rn, pass, tmp);
9ee6e8bb 2831 }
b7bcbe95 2832 }
9ee6e8bb
PB
2833 } else { /* !dp */
2834 if ((insn & 0x6f) != 0x00)
2835 return 1;
2836 rn = VFP_SREG_N(insn);
18c9b560 2837 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2838 /* vfp->arm */
2839 if (insn & (1 << 21)) {
2840 /* system register */
40f137e1 2841 rn >>= 1;
9ee6e8bb 2842
b7bcbe95 2843 switch (rn) {
40f137e1 2844 case ARM_VFP_FPSID:
4373f3ce 2845 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2846 VFP3 restricts all id registers to privileged
2847 accesses. */
2848 if (IS_USER(s)
2849 && arm_feature(env, ARM_FEATURE_VFP3))
2850 return 1;
4373f3ce 2851 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2852 break;
40f137e1 2853 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2854 if (IS_USER(s))
2855 return 1;
4373f3ce 2856 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2857 break;
40f137e1
PB
2858 case ARM_VFP_FPINST:
2859 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2860 /* Not present in VFP3. */
2861 if (IS_USER(s)
2862 || arm_feature(env, ARM_FEATURE_VFP3))
2863 return 1;
4373f3ce 2864 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2865 break;
40f137e1 2866 case ARM_VFP_FPSCR:
601d70b9 2867 if (rd == 15) {
4373f3ce
PB
2868 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2869 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2870 } else {
2871 tmp = new_tmp();
2872 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2873 }
b7bcbe95 2874 break;
9ee6e8bb
PB
2875 case ARM_VFP_MVFR0:
2876 case ARM_VFP_MVFR1:
2877 if (IS_USER(s)
2878 || !arm_feature(env, ARM_FEATURE_VFP3))
2879 return 1;
4373f3ce 2880 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2881 break;
b7bcbe95
FB
2882 default:
2883 return 1;
2884 }
2885 } else {
2886 gen_mov_F0_vreg(0, rn);
4373f3ce 2887 tmp = gen_vfp_mrs();
b7bcbe95
FB
2888 }
2889 if (rd == 15) {
b5ff1b31 2890 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2891 gen_set_nzcv(tmp);
2892 dead_tmp(tmp);
2893 } else {
2894 store_reg(s, rd, tmp);
2895 }
b7bcbe95
FB
2896 } else {
2897 /* arm->vfp */
4373f3ce 2898 tmp = load_reg(s, rd);
b7bcbe95 2899 if (insn & (1 << 21)) {
40f137e1 2900 rn >>= 1;
b7bcbe95
FB
2901 /* system register */
2902 switch (rn) {
40f137e1 2903 case ARM_VFP_FPSID:
9ee6e8bb
PB
2904 case ARM_VFP_MVFR0:
2905 case ARM_VFP_MVFR1:
b7bcbe95
FB
2906 /* Writes are ignored. */
2907 break;
40f137e1 2908 case ARM_VFP_FPSCR:
4373f3ce
PB
2909 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2910 dead_tmp(tmp);
b5ff1b31 2911 gen_lookup_tb(s);
b7bcbe95 2912 break;
40f137e1 2913 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2914 if (IS_USER(s))
2915 return 1;
4373f3ce 2916 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2917 gen_lookup_tb(s);
2918 break;
2919 case ARM_VFP_FPINST:
2920 case ARM_VFP_FPINST2:
4373f3ce 2921 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2922 break;
b7bcbe95
FB
2923 default:
2924 return 1;
2925 }
2926 } else {
4373f3ce 2927 gen_vfp_msr(tmp);
b7bcbe95
FB
2928 gen_mov_vreg_F0(0, rn);
2929 }
2930 }
2931 }
2932 } else {
2933 /* data processing */
2934 /* The opcode is in bits 23, 21, 20 and 6. */
2935 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2936 if (dp) {
2937 if (op == 15) {
2938 /* rn is opcode */
2939 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2940 } else {
2941 /* rn is register number */
9ee6e8bb 2942 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2943 }
2944
2945 if (op == 15 && (rn == 15 || rn > 17)) {
2946 /* Integer or single precision destination. */
9ee6e8bb 2947 rd = VFP_SREG_D(insn);
b7bcbe95 2948 } else {
9ee6e8bb 2949 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2950 }
2951
2952 if (op == 15 && (rn == 16 || rn == 17)) {
2953 /* Integer source. */
2954 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2955 } else {
9ee6e8bb 2956 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2957 }
2958 } else {
9ee6e8bb 2959 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2960 if (op == 15 && rn == 15) {
2961 /* Double precision destination. */
9ee6e8bb
PB
2962 VFP_DREG_D(rd, insn);
2963 } else {
2964 rd = VFP_SREG_D(insn);
2965 }
2966 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2967 }
2968
2969 veclen = env->vfp.vec_len;
2970 if (op == 15 && rn > 3)
2971 veclen = 0;
2972
2973 /* Shut up compiler warnings. */
2974 delta_m = 0;
2975 delta_d = 0;
2976 bank_mask = 0;
3b46e624 2977
b7bcbe95
FB
2978 if (veclen > 0) {
2979 if (dp)
2980 bank_mask = 0xc;
2981 else
2982 bank_mask = 0x18;
2983
2984 /* Figure out what type of vector operation this is. */
2985 if ((rd & bank_mask) == 0) {
2986 /* scalar */
2987 veclen = 0;
2988 } else {
2989 if (dp)
2990 delta_d = (env->vfp.vec_stride >> 1) + 1;
2991 else
2992 delta_d = env->vfp.vec_stride + 1;
2993
2994 if ((rm & bank_mask) == 0) {
2995 /* mixed scalar/vector */
2996 delta_m = 0;
2997 } else {
2998 /* vector */
2999 delta_m = delta_d;
3000 }
3001 }
3002 }
3003
3004 /* Load the initial operands. */
3005 if (op == 15) {
3006 switch (rn) {
3007 case 16:
3008 case 17:
3009 /* Integer source */
3010 gen_mov_F0_vreg(0, rm);
3011 break;
3012 case 8:
3013 case 9:
3014 /* Compare */
3015 gen_mov_F0_vreg(dp, rd);
3016 gen_mov_F1_vreg(dp, rm);
3017 break;
3018 case 10:
3019 case 11:
3020 /* Compare with zero */
3021 gen_mov_F0_vreg(dp, rd);
3022 gen_vfp_F1_ld0(dp);
3023 break;
9ee6e8bb
PB
3024 case 20:
3025 case 21:
3026 case 22:
3027 case 23:
3028 /* Source and destination the same. */
3029 gen_mov_F0_vreg(dp, rd);
3030 break;
b7bcbe95
FB
3031 default:
3032 /* One source operand. */
3033 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 3034 break;
b7bcbe95
FB
3035 }
3036 } else {
3037 /* Two source operands. */
3038 gen_mov_F0_vreg(dp, rn);
3039 gen_mov_F1_vreg(dp, rm);
3040 }
3041
3042 for (;;) {
3043 /* Perform the calculation. */
3044 switch (op) {
3045 case 0: /* mac: fd + (fn * fm) */
3046 gen_vfp_mul(dp);
3047 gen_mov_F1_vreg(dp, rd);
3048 gen_vfp_add(dp);
3049 break;
3050 case 1: /* nmac: fd - (fn * fm) */
3051 gen_vfp_mul(dp);
3052 gen_vfp_neg(dp);
3053 gen_mov_F1_vreg(dp, rd);
3054 gen_vfp_add(dp);
3055 break;
3056 case 2: /* msc: -fd + (fn * fm) */
3057 gen_vfp_mul(dp);
3058 gen_mov_F1_vreg(dp, rd);
3059 gen_vfp_sub(dp);
3060 break;
3061 case 3: /* nmsc: -fd - (fn * fm) */
3062 gen_vfp_mul(dp);
3063 gen_mov_F1_vreg(dp, rd);
3064 gen_vfp_add(dp);
3065 gen_vfp_neg(dp);
3066 break;
3067 case 4: /* mul: fn * fm */
3068 gen_vfp_mul(dp);
3069 break;
3070 case 5: /* nmul: -(fn * fm) */
3071 gen_vfp_mul(dp);
3072 gen_vfp_neg(dp);
3073 break;
3074 case 6: /* add: fn + fm */
3075 gen_vfp_add(dp);
3076 break;
3077 case 7: /* sub: fn - fm */
3078 gen_vfp_sub(dp);
3079 break;
3080 case 8: /* div: fn / fm */
3081 gen_vfp_div(dp);
3082 break;
9ee6e8bb
PB
3083 case 14: /* fconst */
3084 if (!arm_feature(env, ARM_FEATURE_VFP3))
3085 return 1;
3086
3087 n = (insn << 12) & 0x80000000;
3088 i = ((insn >> 12) & 0x70) | (insn & 0xf);
3089 if (dp) {
3090 if (i & 0x40)
3091 i |= 0x3f80;
3092 else
3093 i |= 0x4000;
3094 n |= i << 16;
4373f3ce 3095 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
3096 } else {
3097 if (i & 0x40)
3098 i |= 0x780;
3099 else
3100 i |= 0x800;
3101 n |= i << 19;
5b340b51 3102 tcg_gen_movi_i32(cpu_F0s, n);
9ee6e8bb 3103 }
9ee6e8bb 3104 break;
b7bcbe95
FB
3105 case 15: /* extension space */
3106 switch (rn) {
3107 case 0: /* cpy */
3108 /* no-op */
3109 break;
3110 case 1: /* abs */
3111 gen_vfp_abs(dp);
3112 break;
3113 case 2: /* neg */
3114 gen_vfp_neg(dp);
3115 break;
3116 case 3: /* sqrt */
3117 gen_vfp_sqrt(dp);
3118 break;
3119 case 8: /* cmp */
3120 gen_vfp_cmp(dp);
3121 break;
3122 case 9: /* cmpe */
3123 gen_vfp_cmpe(dp);
3124 break;
3125 case 10: /* cmpz */
3126 gen_vfp_cmp(dp);
3127 break;
3128 case 11: /* cmpez */
3129 gen_vfp_F1_ld0(dp);
3130 gen_vfp_cmpe(dp);
3131 break;
3132 case 15: /* single<->double conversion */
3133 if (dp)
4373f3ce 3134 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 3135 else
4373f3ce 3136 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
3137 break;
3138 case 16: /* fuito */
3139 gen_vfp_uito(dp);
3140 break;
3141 case 17: /* fsito */
3142 gen_vfp_sito(dp);
3143 break;
9ee6e8bb
PB
3144 case 20: /* fshto */
3145 if (!arm_feature(env, ARM_FEATURE_VFP3))
3146 return 1;
3147 gen_vfp_shto(dp, rm);
3148 break;
3149 case 21: /* fslto */
3150 if (!arm_feature(env, ARM_FEATURE_VFP3))
3151 return 1;
3152 gen_vfp_slto(dp, rm);
3153 break;
3154 case 22: /* fuhto */
3155 if (!arm_feature(env, ARM_FEATURE_VFP3))
3156 return 1;
3157 gen_vfp_uhto(dp, rm);
3158 break;
3159 case 23: /* fulto */
3160 if (!arm_feature(env, ARM_FEATURE_VFP3))
3161 return 1;
3162 gen_vfp_ulto(dp, rm);
3163 break;
b7bcbe95
FB
3164 case 24: /* ftoui */
3165 gen_vfp_toui(dp);
3166 break;
3167 case 25: /* ftouiz */
3168 gen_vfp_touiz(dp);
3169 break;
3170 case 26: /* ftosi */
3171 gen_vfp_tosi(dp);
3172 break;
3173 case 27: /* ftosiz */
3174 gen_vfp_tosiz(dp);
3175 break;
9ee6e8bb
PB
3176 case 28: /* ftosh */
3177 if (!arm_feature(env, ARM_FEATURE_VFP3))
3178 return 1;
3179 gen_vfp_tosh(dp, rm);
3180 break;
3181 case 29: /* ftosl */
3182 if (!arm_feature(env, ARM_FEATURE_VFP3))
3183 return 1;
3184 gen_vfp_tosl(dp, rm);
3185 break;
3186 case 30: /* ftouh */
3187 if (!arm_feature(env, ARM_FEATURE_VFP3))
3188 return 1;
3189 gen_vfp_touh(dp, rm);
3190 break;
3191 case 31: /* ftoul */
3192 if (!arm_feature(env, ARM_FEATURE_VFP3))
3193 return 1;
3194 gen_vfp_toul(dp, rm);
3195 break;
b7bcbe95
FB
3196 default: /* undefined */
3197 printf ("rn:%d\n", rn);
3198 return 1;
3199 }
3200 break;
3201 default: /* undefined */
3202 printf ("op:%d\n", op);
3203 return 1;
3204 }
3205
3206 /* Write back the result. */
3207 if (op == 15 && (rn >= 8 && rn <= 11))
3208 ; /* Comparison, do nothing. */
3209 else if (op == 15 && rn > 17)
3210 /* Integer result. */
3211 gen_mov_vreg_F0(0, rd);
3212 else if (op == 15 && rn == 15)
3213 /* conversion */
3214 gen_mov_vreg_F0(!dp, rd);
3215 else
3216 gen_mov_vreg_F0(dp, rd);
3217
3218 /* break out of the loop if we have finished */
3219 if (veclen == 0)
3220 break;
3221
3222 if (op == 15 && delta_m == 0) {
3223 /* single source one-many */
3224 while (veclen--) {
3225 rd = ((rd + delta_d) & (bank_mask - 1))
3226 | (rd & bank_mask);
3227 gen_mov_vreg_F0(dp, rd);
3228 }
3229 break;
3230 }
3231 /* Setup the next operands. */
3232 veclen--;
3233 rd = ((rd + delta_d) & (bank_mask - 1))
3234 | (rd & bank_mask);
3235
3236 if (op == 15) {
3237 /* One source operand. */
3238 rm = ((rm + delta_m) & (bank_mask - 1))
3239 | (rm & bank_mask);
3240 gen_mov_F0_vreg(dp, rm);
3241 } else {
3242 /* Two source operands. */
3243 rn = ((rn + delta_d) & (bank_mask - 1))
3244 | (rn & bank_mask);
3245 gen_mov_F0_vreg(dp, rn);
3246 if (delta_m) {
3247 rm = ((rm + delta_m) & (bank_mask - 1))
3248 | (rm & bank_mask);
3249 gen_mov_F1_vreg(dp, rm);
3250 }
3251 }
3252 }
3253 }
3254 break;
3255 case 0xc:
3256 case 0xd:
9ee6e8bb 3257 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
3258 /* two-register transfer */
3259 rn = (insn >> 16) & 0xf;
3260 rd = (insn >> 12) & 0xf;
3261 if (dp) {
9ee6e8bb
PB
3262 VFP_DREG_M(rm, insn);
3263 } else {
3264 rm = VFP_SREG_M(insn);
3265 }
b7bcbe95 3266
18c9b560 3267 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
3268 /* vfp->arm */
3269 if (dp) {
4373f3ce
PB
3270 gen_mov_F0_vreg(0, rm * 2);
3271 tmp = gen_vfp_mrs();
3272 store_reg(s, rd, tmp);
3273 gen_mov_F0_vreg(0, rm * 2 + 1);
3274 tmp = gen_vfp_mrs();
3275 store_reg(s, rn, tmp);
b7bcbe95
FB
3276 } else {
3277 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
3278 tmp = gen_vfp_mrs();
3279 store_reg(s, rn, tmp);
b7bcbe95 3280 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
3281 tmp = gen_vfp_mrs();
3282 store_reg(s, rd, tmp);
b7bcbe95
FB
3283 }
3284 } else {
3285 /* arm->vfp */
3286 if (dp) {
4373f3ce
PB
3287 tmp = load_reg(s, rd);
3288 gen_vfp_msr(tmp);
3289 gen_mov_vreg_F0(0, rm * 2);
3290 tmp = load_reg(s, rn);
3291 gen_vfp_msr(tmp);
3292 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 3293 } else {
4373f3ce
PB
3294 tmp = load_reg(s, rn);
3295 gen_vfp_msr(tmp);
b7bcbe95 3296 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
3297 tmp = load_reg(s, rd);
3298 gen_vfp_msr(tmp);
b7bcbe95
FB
3299 gen_mov_vreg_F0(0, rm + 1);
3300 }
3301 }
3302 } else {
3303 /* Load/store */
3304 rn = (insn >> 16) & 0xf;
3305 if (dp)
9ee6e8bb 3306 VFP_DREG_D(rd, insn);
b7bcbe95 3307 else
9ee6e8bb
PB
3308 rd = VFP_SREG_D(insn);
3309 if (s->thumb && rn == 15) {
3310 gen_op_movl_T1_im(s->pc & ~2);
3311 } else {
3312 gen_movl_T1_reg(s, rn);
3313 }
b7bcbe95
FB
3314 if ((insn & 0x01200000) == 0x01000000) {
3315 /* Single load/store */
3316 offset = (insn & 0xff) << 2;
3317 if ((insn & (1 << 23)) == 0)
3318 offset = -offset;
3319 gen_op_addl_T1_im(offset);
3320 if (insn & (1 << 20)) {
b5ff1b31 3321 gen_vfp_ld(s, dp);
b7bcbe95
FB
3322 gen_mov_vreg_F0(dp, rd);
3323 } else {
3324 gen_mov_F0_vreg(dp, rd);
b5ff1b31 3325 gen_vfp_st(s, dp);
b7bcbe95
FB
3326 }
3327 } else {
3328 /* load/store multiple */
3329 if (dp)
3330 n = (insn >> 1) & 0x7f;
3331 else
3332 n = insn & 0xff;
3333
3334 if (insn & (1 << 24)) /* pre-decrement */
3335 gen_op_addl_T1_im(-((insn & 0xff) << 2));
3336
3337 if (dp)
3338 offset = 8;
3339 else
3340 offset = 4;
3341 for (i = 0; i < n; i++) {
18c9b560 3342 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3343 /* load */
b5ff1b31 3344 gen_vfp_ld(s, dp);
b7bcbe95
FB
3345 gen_mov_vreg_F0(dp, rd + i);
3346 } else {
3347 /* store */
3348 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3349 gen_vfp_st(s, dp);
b7bcbe95
FB
3350 }
3351 gen_op_addl_T1_im(offset);
3352 }
3353 if (insn & (1 << 21)) {
3354 /* writeback */
3355 if (insn & (1 << 24))
3356 offset = -offset * n;
3357 else if (dp && (insn & 1))
3358 offset = 4;
3359 else
3360 offset = 0;
3361
3362 if (offset != 0)
3363 gen_op_addl_T1_im(offset);
3364 gen_movl_reg_T1(s, rn);
3365 }
3366 }
3367 }
3368 break;
3369 default:
3370 /* Should never happen. */
3371 return 1;
3372 }
3373 return 0;
3374}
3375
6e256c93 3376static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3377{
6e256c93
FB
3378 TranslationBlock *tb;
3379
3380 tb = s->tb;
3381 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3382 tcg_gen_goto_tb(n);
8984bd2e 3383 gen_set_pc_im(dest);
57fec1fe 3384 tcg_gen_exit_tb((long)tb + n);
6e256c93 3385 } else {
8984bd2e 3386 gen_set_pc_im(dest);
57fec1fe 3387 tcg_gen_exit_tb(0);
6e256c93 3388 }
c53be334
FB
3389}
3390
8aaca4c0
FB
3391static inline void gen_jmp (DisasContext *s, uint32_t dest)
3392{
3393 if (__builtin_expect(s->singlestep_enabled, 0)) {
3394 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3395 if (s->thumb)
d9ba4830
PB
3396 dest |= 1;
3397 gen_bx_im(s, dest);
8aaca4c0 3398 } else {
6e256c93 3399 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3400 s->is_jmp = DISAS_TB_JUMP;
3401 }
3402}
3403
d9ba4830 3404static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3405{
ee097184 3406 if (x)
d9ba4830 3407 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3408 else
d9ba4830 3409 gen_sxth(t0);
ee097184 3410 if (y)
d9ba4830 3411 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3412 else
d9ba4830
PB
3413 gen_sxth(t1);
3414 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3415}
3416
3417/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3418static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3419 uint32_t mask;
3420
3421 mask = 0;
3422 if (flags & (1 << 0))
3423 mask |= 0xff;
3424 if (flags & (1 << 1))
3425 mask |= 0xff00;
3426 if (flags & (1 << 2))
3427 mask |= 0xff0000;
3428 if (flags & (1 << 3))
3429 mask |= 0xff000000;
9ee6e8bb 3430
2ae23e75 3431 /* Mask out undefined bits. */
9ee6e8bb
PB
3432 mask &= ~CPSR_RESERVED;
3433 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3434 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3435 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3436 mask &= ~CPSR_IT;
9ee6e8bb 3437 /* Mask out execution state bits. */
2ae23e75 3438 if (!spsr)
e160c51c 3439 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3440 /* Mask out privileged bits. */
3441 if (IS_USER(s))
9ee6e8bb 3442 mask &= CPSR_USER;
b5ff1b31
FB
3443 return mask;
3444}
3445
3446/* Returns nonzero if access to the PSR is not permitted. */
3447static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3448{
d9ba4830 3449 TCGv tmp;
b5ff1b31
FB
3450 if (spsr) {
3451 /* ??? This is also undefined in system mode. */
3452 if (IS_USER(s))
3453 return 1;
d9ba4830
PB
3454
3455 tmp = load_cpu_field(spsr);
3456 tcg_gen_andi_i32(tmp, tmp, ~mask);
3457 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3458 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3459 store_cpu_field(tmp, spsr);
b5ff1b31 3460 } else {
d9ba4830 3461 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3462 }
3463 gen_lookup_tb(s);
3464 return 0;
3465}
3466
9ee6e8bb 3467/* Generate an old-style exception return. */
b5ff1b31
FB
3468static void gen_exception_return(DisasContext *s)
3469{
d9ba4830 3470 TCGv tmp;
e22f8f39 3471 gen_movl_reg_T0(s, 15);
d9ba4830
PB
3472 tmp = load_cpu_field(spsr);
3473 gen_set_cpsr(tmp, 0xffffffff);
3474 dead_tmp(tmp);
b5ff1b31
FB
3475 s->is_jmp = DISAS_UPDATE;
3476}
3477
b0109805
PB
3478/* Generate a v6 exception return. Marks both values as dead. */
3479static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3480{
b0109805
PB
3481 gen_set_cpsr(cpsr, 0xffffffff);
3482 dead_tmp(cpsr);
3483 store_reg(s, 15, pc);
9ee6e8bb
PB
3484 s->is_jmp = DISAS_UPDATE;
3485}
3b46e624 3486
9ee6e8bb
PB
3487static inline void
3488gen_set_condexec (DisasContext *s)
3489{
3490 if (s->condexec_mask) {
8f01245e
PB
3491 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3492 TCGv tmp = new_tmp();
3493 tcg_gen_movi_i32(tmp, val);
d9ba4830 3494 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3495 }
3496}
3b46e624 3497
9ee6e8bb
PB
3498static void gen_nop_hint(DisasContext *s, int val)
3499{
3500 switch (val) {
3501 case 3: /* wfi */
8984bd2e 3502 gen_set_pc_im(s->pc);
9ee6e8bb
PB
3503 s->is_jmp = DISAS_WFI;
3504 break;
3505 case 2: /* wfe */
3506 case 4: /* sev */
3507 /* TODO: Implement SEV and WFE. May help SMP performance. */
3508 default: /* nop */
3509 break;
3510 }
3511}
99c475ab 3512
ad69471c
PB
3513/* These macros help make the code more readable when migrating from the
3514 old dyngen helpers. They should probably be removed when
3515 T0/T1 are removed. */
3516#define CPU_T001 cpu_T[0], cpu_T[0], cpu_T[1]
3517#define CPU_T0E01 cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]
9ee6e8bb 3518
ad69471c 3519#define CPU_V001 cpu_V0, cpu_V0, cpu_V1
9ee6e8bb
PB
3520
3521static inline int gen_neon_add(int size)
3522{
3523 switch (size) {
ad69471c
PB
3524 case 0: gen_helper_neon_add_u8(CPU_T001); break;
3525 case 1: gen_helper_neon_add_u16(CPU_T001); break;
9ee6e8bb
PB
3526 case 2: gen_op_addl_T0_T1(); break;
3527 default: return 1;
3528 }
3529 return 0;
3530}
3531
ad69471c
PB
3532static inline void gen_neon_rsb(int size)
3533{
3534 switch (size) {
3535 case 0: gen_helper_neon_sub_u8(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3536 case 1: gen_helper_neon_sub_u16(cpu_T[0], cpu_T[1], cpu_T[0]); break;
3537 case 2: gen_op_rsbl_T0_T1(); break;
3538 default: return;
3539 }
3540}
3541
3542/* 32-bit pairwise ops end up the same as the elementwise versions. */
3543#define gen_helper_neon_pmax_s32 gen_helper_neon_max_s32
3544#define gen_helper_neon_pmax_u32 gen_helper_neon_max_u32
3545#define gen_helper_neon_pmin_s32 gen_helper_neon_min_s32
3546#define gen_helper_neon_pmin_u32 gen_helper_neon_min_u32
3547
3548/* FIXME: This is wrong. They set the wrong overflow bit. */
3549#define gen_helper_neon_qadd_s32(a, e, b, c) gen_helper_add_saturate(a, b, c)
3550#define gen_helper_neon_qadd_u32(a, e, b, c) gen_helper_add_usaturate(a, b, c)
3551#define gen_helper_neon_qsub_s32(a, e, b, c) gen_helper_sub_saturate(a, b, c)
3552#define gen_helper_neon_qsub_u32(a, e, b, c) gen_helper_sub_usaturate(a, b, c)
3553
3554#define GEN_NEON_INTEGER_OP_ENV(name) do { \
3555 switch ((size << 1) | u) { \
3556 case 0: \
3557 gen_helper_neon_##name##_s8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3558 break; \
3559 case 1: \
3560 gen_helper_neon_##name##_u8(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3561 break; \
3562 case 2: \
3563 gen_helper_neon_##name##_s16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3564 break; \
3565 case 3: \
3566 gen_helper_neon_##name##_u16(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3567 break; \
3568 case 4: \
3569 gen_helper_neon_##name##_s32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3570 break; \
3571 case 5: \
3572 gen_helper_neon_##name##_u32(cpu_T[0], cpu_env, cpu_T[0], cpu_T[1]); \
3573 break; \
3574 default: return 1; \
3575 }} while (0)
9ee6e8bb
PB
3576
3577#define GEN_NEON_INTEGER_OP(name) do { \
3578 switch ((size << 1) | u) { \
ad69471c
PB
3579 case 0: \
3580 gen_helper_neon_##name##_s8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3581 break; \
3582 case 1: \
3583 gen_helper_neon_##name##_u8(cpu_T[0], cpu_T[0], cpu_T[1]); \
3584 break; \
3585 case 2: \
3586 gen_helper_neon_##name##_s16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3587 break; \
3588 case 3: \
3589 gen_helper_neon_##name##_u16(cpu_T[0], cpu_T[0], cpu_T[1]); \
3590 break; \
3591 case 4: \
3592 gen_helper_neon_##name##_s32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3593 break; \
3594 case 5: \
3595 gen_helper_neon_##name##_u32(cpu_T[0], cpu_T[0], cpu_T[1]); \
3596 break; \
9ee6e8bb
PB
3597 default: return 1; \
3598 }} while (0)
3599
3600static inline void
3601gen_neon_movl_scratch_T0(int scratch)
3602{
3603 uint32_t offset;
3604
3605 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3606 tcg_gen_st_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3607}
3608
3609static inline void
3610gen_neon_movl_scratch_T1(int scratch)
3611{
3612 uint32_t offset;
3613
3614 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3615 tcg_gen_st_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3616}
3617
3618static inline void
3619gen_neon_movl_T0_scratch(int scratch)
3620{
3621 uint32_t offset;
3622
3623 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3624 tcg_gen_ld_i32(cpu_T[0], cpu_env, offset);
9ee6e8bb
PB
3625}
3626
3627static inline void
3628gen_neon_movl_T1_scratch(int scratch)
3629{
3630 uint32_t offset;
3631
3632 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
ad69471c 3633 tcg_gen_ld_i32(cpu_T[1], cpu_env, offset);
9ee6e8bb
PB
3634}
3635
3636static inline void gen_neon_get_scalar(int size, int reg)
3637{
3638 if (size == 1) {
3639 NEON_GET_REG(T0, reg >> 1, reg & 1);
3640 } else {
3641 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3642 if (reg & 1)
ad69471c 3643 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb 3644 else
ad69471c 3645 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb
PB
3646 }
3647}
3648
3649static void gen_neon_unzip(int reg, int q, int tmp, int size)
3650{
3651 int n;
3652
3653 for (n = 0; n < q + 1; n += 2) {
3654 NEON_GET_REG(T0, reg, n);
3655 NEON_GET_REG(T0, reg, n + n);
3656 switch (size) {
ad69471c
PB
3657 case 0: gen_helper_neon_unzip_u8(); break;
3658 case 1: gen_helper_neon_zip_u16(); break; /* zip and unzip are the same. */
9ee6e8bb
PB
3659 case 2: /* no-op */; break;
3660 default: abort();
3661 }
3662 gen_neon_movl_scratch_T0(tmp + n);
3663 gen_neon_movl_scratch_T1(tmp + n + 1);
3664 }
3665}
3666
/* Per-"op" parameters for the NEON load/store multiple-structures
   encodings; indexed by (insn >> 8) & 0xf, values above 10 are rejected
   by the decoder.  */
static struct {
    int nregs;      /* number of registers transferred */
    int interleave; /* element interleave factor */
    int spacing;    /* register index step between structures */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3684
3685/* Translate a NEON load/store element instruction. Return nonzero if the
3686 instruction is invalid. */
3687static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3688{
3689 int rd, rn, rm;
3690 int op;
3691 int nregs;
3692 int interleave;
3693 int stride;
3694 int size;
3695 int reg;
3696 int pass;
3697 int load;
3698 int shift;
9ee6e8bb 3699 int n;
b0109805 3700 TCGv tmp;
8f8e3aa4 3701 TCGv tmp2;
9ee6e8bb
PB
3702
3703 if (!vfp_enabled(env))
3704 return 1;
3705 VFP_DREG_D(rd, insn);
3706 rn = (insn >> 16) & 0xf;
3707 rm = insn & 0xf;
3708 load = (insn & (1 << 21)) != 0;
3709 if ((insn & (1 << 23)) == 0) {
3710 /* Load store all elements. */
3711 op = (insn >> 8) & 0xf;
3712 size = (insn >> 6) & 3;
3713 if (op > 10 || size == 3)
3714 return 1;
3715 nregs = neon_ls_element_type[op].nregs;
3716 interleave = neon_ls_element_type[op].interleave;
3717 gen_movl_T1_reg(s, rn);
3718 stride = (1 << size) * interleave;
3719 for (reg = 0; reg < nregs; reg++) {
3720 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3721 gen_movl_T1_reg(s, rn);
3722 gen_op_addl_T1_im((1 << size) * reg);
3723 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3724 gen_movl_T1_reg(s, rn);
3725 gen_op_addl_T1_im(1 << size);
3726 }
3727 for (pass = 0; pass < 2; pass++) {
3728 if (size == 2) {
3729 if (load) {
b0109805 3730 tmp = gen_ld32(cpu_T[1], IS_USER(s));
ad69471c 3731 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3732 } else {
ad69471c 3733 tmp = neon_load_reg(rd, pass);
b0109805 3734 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3735 }
3736 gen_op_addl_T1_im(stride);
3737 } else if (size == 1) {
3738 if (load) {
b0109805 3739 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3740 gen_op_addl_T1_im(stride);
8f8e3aa4 3741 tmp2 = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb 3742 gen_op_addl_T1_im(stride);
8f8e3aa4
PB
3743 gen_bfi(tmp, tmp, tmp2, 16, 0xffff);
3744 dead_tmp(tmp2);
3745 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3746 } else {
8f8e3aa4
PB
3747 tmp = neon_load_reg(rd, pass);
3748 tmp2 = new_tmp();
3749 tcg_gen_shri_i32(tmp2, tmp, 16);
b0109805 3750 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3751 gen_op_addl_T1_im(stride);
8f8e3aa4 3752 gen_st16(tmp2, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3753 gen_op_addl_T1_im(stride);
3754 }
3755 } else /* size == 0 */ {
3756 if (load) {
9ee6e8bb 3757 for (n = 0; n < 4; n++) {
b0109805 3758 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3759 gen_op_addl_T1_im(stride);
3760 if (n == 0) {
8f8e3aa4 3761 tmp2 = tmp;
9ee6e8bb 3762 } else {
8f8e3aa4
PB
3763 gen_bfi(tmp2, tmp2, tmp, n * 8, 0xff);
3764 dead_tmp(tmp);
9ee6e8bb 3765 }
9ee6e8bb 3766 }
8f8e3aa4 3767 neon_store_reg(rd, pass, tmp2);
9ee6e8bb 3768 } else {
8f8e3aa4 3769 tmp2 = neon_load_reg(rd, pass);
9ee6e8bb 3770 for (n = 0; n < 4; n++) {
8f8e3aa4 3771 tmp = new_tmp();
9ee6e8bb 3772 if (n == 0) {
8f8e3aa4 3773 tcg_gen_mov_i32(tmp, tmp2);
9ee6e8bb 3774 } else {
8f8e3aa4 3775 tcg_gen_shri_i32(tmp, tmp2, n * 8);
9ee6e8bb 3776 }
b0109805 3777 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3778 gen_op_addl_T1_im(stride);
9ee6e8bb 3779 }
8f8e3aa4 3780 dead_tmp(tmp2);
9ee6e8bb
PB
3781 }
3782 }
3783 }
3784 rd += neon_ls_element_type[op].spacing;
3785 }
3786 stride = nregs * 8;
3787 } else {
3788 size = (insn >> 10) & 3;
3789 if (size == 3) {
3790 /* Load single element to all lanes. */
3791 if (!load)
3792 return 1;
3793 size = (insn >> 6) & 3;
3794 nregs = ((insn >> 8) & 3) + 1;
3795 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3796 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3797 for (reg = 0; reg < nregs; reg++) {
3798 switch (size) {
3799 case 0:
b0109805 3800 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
ad69471c 3801 gen_neon_dup_u8(tmp, 0);
9ee6e8bb
PB
3802 break;
3803 case 1:
b0109805 3804 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
ad69471c 3805 gen_neon_dup_low16(tmp);
9ee6e8bb
PB
3806 break;
3807 case 2:
b0109805 3808 tmp = gen_ld32(cpu_T[0], IS_USER(s));
9ee6e8bb
PB
3809 break;
3810 case 3:
3811 return 1;
99c475ab 3812 }
9ee6e8bb 3813 gen_op_addl_T1_im(1 << size);
ad69471c
PB
3814 tmp2 = new_tmp();
3815 tcg_gen_mov_i32(tmp2, tmp);
3816 neon_store_reg(rd, 0, tmp2);
3817 neon_store_reg(rd, 0, tmp);
9ee6e8bb
PB
3818 rd += stride;
3819 }
3820 stride = (1 << size) * nregs;
3821 } else {
3822 /* Single element. */
3823 pass = (insn >> 7) & 1;
3824 switch (size) {
3825 case 0:
3826 shift = ((insn >> 5) & 3) * 8;
9ee6e8bb
PB
3827 stride = 1;
3828 break;
3829 case 1:
3830 shift = ((insn >> 6) & 1) * 16;
9ee6e8bb
PB
3831 stride = (insn & (1 << 5)) ? 2 : 1;
3832 break;
3833 case 2:
3834 shift = 0;
9ee6e8bb
PB
3835 stride = (insn & (1 << 6)) ? 2 : 1;
3836 break;
3837 default:
3838 abort();
3839 }
3840 nregs = ((insn >> 8) & 3) + 1;
3841 gen_movl_T1_reg(s, rn);
3842 for (reg = 0; reg < nregs; reg++) {
3843 if (load) {
9ee6e8bb
PB
3844 switch (size) {
3845 case 0:
b0109805 3846 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3847 break;
3848 case 1:
b0109805 3849 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3850 break;
3851 case 2:
b0109805 3852 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3853 break;
3854 }
3855 if (size != 2) {
8f8e3aa4
PB
3856 tmp2 = neon_load_reg(rd, pass);
3857 gen_bfi(tmp, tmp2, tmp, shift, size ? 0xffff : 0xff);
3858 dead_tmp(tmp2);
9ee6e8bb 3859 }
8f8e3aa4 3860 neon_store_reg(rd, pass, tmp);
9ee6e8bb 3861 } else { /* Store */
8f8e3aa4
PB
3862 tmp = neon_load_reg(rd, pass);
3863 if (shift)
3864 tcg_gen_shri_i32(tmp, tmp, shift);
9ee6e8bb
PB
3865 switch (size) {
3866 case 0:
b0109805 3867 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3868 break;
3869 case 1:
b0109805 3870 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3871 break;
3872 case 2:
b0109805 3873 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3874 break;
99c475ab 3875 }
99c475ab 3876 }
9ee6e8bb
PB
3877 rd += stride;
3878 gen_op_addl_T1_im(1 << size);
99c475ab 3879 }
9ee6e8bb 3880 stride = nregs * (1 << size);
99c475ab 3881 }
9ee6e8bb
PB
3882 }
3883 if (rm != 15) {
b26eefb6
PB
3884 TCGv base;
3885
3886 base = load_reg(s, rn);
9ee6e8bb 3887 if (rm == 13) {
b26eefb6 3888 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3889 } else {
b26eefb6
PB
3890 TCGv index;
3891 index = load_reg(s, rm);
3892 tcg_gen_add_i32(base, base, index);
3893 dead_tmp(index);
9ee6e8bb 3894 }
b26eefb6 3895 store_reg(s, rn, base);
9ee6e8bb
PB
3896 }
3897 return 0;
3898}
3b46e624 3899
8f8e3aa4
PB
3900/* Bitwise select. dest = c ? t : f. Clobbers T and F. */
3901static void gen_neon_bsl(TCGv dest, TCGv t, TCGv f, TCGv c)
3902{
3903 tcg_gen_and_i32(t, t, c);
3904 tcg_gen_bic_i32(f, f, c);
3905 tcg_gen_or_i32(dest, t, f);
3906}
3907
ad69471c
PB
3908static inline void gen_neon_narrow(int size, TCGv dest, TCGv src)
3909{
3910 switch (size) {
3911 case 0: gen_helper_neon_narrow_u8(dest, src); break;
3912 case 1: gen_helper_neon_narrow_u16(dest, src); break;
3913 case 2: tcg_gen_trunc_i64_i32(dest, src); break;
3914 default: abort();
3915 }
3916}
3917
3918static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv src)
3919{
3920 switch (size) {
3921 case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
3922 case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
3923 case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
3924 default: abort();
3925 }
3926}
3927
3928static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv src)
3929{
3930 switch (size) {
3931 case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
3932 case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
3933 case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
3934 default: abort();
3935 }
3936}
3937
3938static inline void gen_neon_shift_narrow(int size, TCGv var, TCGv shift,
3939 int q, int u)
3940{
3941 if (q) {
3942 if (u) {
3943 switch (size) {
3944 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3945 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3946 default: abort();
3947 }
3948 } else {
3949 switch (size) {
3950 case 1: gen_helper_neon_rshl_s16(var, var, shift); break;
3951 case 2: gen_helper_neon_rshl_s32(var, var, shift); break;
3952 default: abort();
3953 }
3954 }
3955 } else {
3956 if (u) {
3957 switch (size) {
3958 case 1: gen_helper_neon_rshl_u16(var, var, shift); break;
3959 case 2: gen_helper_neon_rshl_u32(var, var, shift); break;
3960 default: abort();
3961 }
3962 } else {
3963 switch (size) {
3964 case 1: gen_helper_neon_shl_s16(var, var, shift); break;
3965 case 2: gen_helper_neon_shl_s32(var, var, shift); break;
3966 default: abort();
3967 }
3968 }
3969 }
3970}
3971
3972static inline void gen_neon_widen(TCGv dest, TCGv src, int size, int u)
3973{
3974 if (u) {
3975 switch (size) {
3976 case 0: gen_helper_neon_widen_u8(dest, src); break;
3977 case 1: gen_helper_neon_widen_u16(dest, src); break;
3978 case 2: tcg_gen_extu_i32_i64(dest, src); break;
3979 default: abort();
3980 }
3981 } else {
3982 switch (size) {
3983 case 0: gen_helper_neon_widen_s8(dest, src); break;
3984 case 1: gen_helper_neon_widen_s16(dest, src); break;
3985 case 2: tcg_gen_ext_i32_i64(dest, src); break;
3986 default: abort();
3987 }
3988 }
3989 dead_tmp(src);
3990}
3991
3992static inline void gen_neon_addl(int size)
3993{
3994 switch (size) {
3995 case 0: gen_helper_neon_addl_u16(CPU_V001); break;
3996 case 1: gen_helper_neon_addl_u32(CPU_V001); break;
3997 case 2: tcg_gen_add_i64(CPU_V001); break;
3998 default: abort();
3999 }
4000}
4001
4002static inline void gen_neon_subl(int size)
4003{
4004 switch (size) {
4005 case 0: gen_helper_neon_subl_u16(CPU_V001); break;
4006 case 1: gen_helper_neon_subl_u32(CPU_V001); break;
4007 case 2: tcg_gen_sub_i64(CPU_V001); break;
4008 default: abort();
4009 }
4010}
4011
4012static inline void gen_neon_negl(TCGv var, int size)
4013{
4014 switch (size) {
4015 case 0: gen_helper_neon_negl_u16(var, var); break;
4016 case 1: gen_helper_neon_negl_u32(var, var); break;
4017 case 2: gen_helper_neon_negl_u64(var, var); break;
4018 default: abort();
4019 }
4020}
4021
4022static inline void gen_neon_addl_saturate(TCGv op0, TCGv op1, int size)
4023{
4024 switch (size) {
4025 case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
4026 case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
4027 default: abort();
4028 }
4029}
4030
4031static inline void gen_neon_mull(TCGv dest, TCGv a, TCGv b, int size, int u)
4032{
4033 TCGv tmp;
4034
4035 switch ((size << 1) | u) {
4036 case 0: gen_helper_neon_mull_s8(dest, a, b); break;
4037 case 1: gen_helper_neon_mull_u8(dest, a, b); break;
4038 case 2: gen_helper_neon_mull_s16(dest, a, b); break;
4039 case 3: gen_helper_neon_mull_u16(dest, a, b); break;
4040 case 4:
4041 tmp = gen_muls_i64_i32(a, b);
4042 tcg_gen_mov_i64(dest, tmp);
4043 break;
4044 case 5:
4045 tmp = gen_mulu_i64_i32(a, b);
4046 tcg_gen_mov_i64(dest, tmp);
4047 break;
4048 default: abort();
4049 }
4050 if (size < 2) {
4051 dead_tmp(b);
4052 dead_tmp(a);
4053 }
4054}
4055
9ee6e8bb
PB
4056/* Translate a NEON data processing instruction. Return nonzero if the
4057 instruction is invalid.
ad69471c
PB
4058 We process data in a mixture of 32-bit and 64-bit chunks.
4059 Mostly we use 32-bit chunks so we can use normal scalar instructions. */
2c0262af 4060
9ee6e8bb
PB
4061static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
4062{
4063 int op;
4064 int q;
4065 int rd, rn, rm;
4066 int size;
4067 int shift;
4068 int pass;
4069 int count;
4070 int pairwise;
4071 int u;
4072 int n;
4073 uint32_t imm;
8f8e3aa4
PB
4074 TCGv tmp;
4075 TCGv tmp2;
4076 TCGv tmp3;
9ee6e8bb
PB
4077
4078 if (!vfp_enabled(env))
4079 return 1;
4080 q = (insn & (1 << 6)) != 0;
4081 u = (insn >> 24) & 1;
4082 VFP_DREG_D(rd, insn);
4083 VFP_DREG_N(rn, insn);
4084 VFP_DREG_M(rm, insn);
4085 size = (insn >> 20) & 3;
4086 if ((insn & (1 << 23)) == 0) {
4087 /* Three register same length. */
4088 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
ad69471c
PB
4089 if (size == 3 && (op == 1 || op == 5 || op == 8 || op == 9
4090 || op == 10 || op == 11 || op == 16)) {
4091 /* 64-bit element instructions. */
9ee6e8bb 4092 for (pass = 0; pass < (q ? 2 : 1); pass++) {
ad69471c
PB
4093 neon_load_reg64(cpu_V0, rn + pass);
4094 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb
PB
4095 switch (op) {
4096 case 1: /* VQADD */
4097 if (u) {
ad69471c 4098 gen_helper_neon_add_saturate_u64(CPU_V001);
2c0262af 4099 } else {
ad69471c 4100 gen_helper_neon_add_saturate_s64(CPU_V001);
2c0262af 4101 }
9ee6e8bb
PB
4102 break;
4103 case 5: /* VQSUB */
4104 if (u) {
ad69471c
PB
4105 gen_helper_neon_sub_saturate_u64(CPU_V001);
4106 } else {
4107 gen_helper_neon_sub_saturate_s64(CPU_V001);
4108 }
4109 break;
4110 case 8: /* VSHL */
4111 if (u) {
4112 gen_helper_neon_shl_u64(cpu_V0, cpu_V1, cpu_V0);
4113 } else {
4114 gen_helper_neon_shl_s64(cpu_V0, cpu_V1, cpu_V0);
4115 }
4116 break;
4117 case 9: /* VQSHL */
4118 if (u) {
4119 gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
4120 cpu_V0, cpu_V0);
4121 } else {
4122 gen_helper_neon_qshl_s64(cpu_V1, cpu_env,
4123 cpu_V1, cpu_V0);
4124 }
4125 break;
4126 case 10: /* VRSHL */
4127 if (u) {
4128 gen_helper_neon_rshl_u64(cpu_V0, cpu_V1, cpu_V0);
1e8d4eec 4129 } else {
ad69471c
PB
4130 gen_helper_neon_rshl_s64(cpu_V0, cpu_V1, cpu_V0);
4131 }
4132 break;
4133 case 11: /* VQRSHL */
4134 if (u) {
4135 gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
4136 cpu_V1, cpu_V0);
4137 } else {
4138 gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
4139 cpu_V1, cpu_V0);
1e8d4eec 4140 }
9ee6e8bb
PB
4141 break;
4142 case 16:
4143 if (u) {
ad69471c 4144 tcg_gen_sub_i64(CPU_V001);
9ee6e8bb 4145 } else {
ad69471c 4146 tcg_gen_add_i64(CPU_V001);
9ee6e8bb
PB
4147 }
4148 break;
4149 default:
4150 abort();
2c0262af 4151 }
ad69471c 4152 neon_store_reg64(cpu_V0, rd + pass);
2c0262af 4153 }
9ee6e8bb 4154 return 0;
2c0262af 4155 }
9ee6e8bb
PB
4156 switch (op) {
4157 case 8: /* VSHL */
4158 case 9: /* VQSHL */
4159 case 10: /* VRSHL */
ad69471c 4160 case 11: /* VQRSHL */
9ee6e8bb 4161 {
ad69471c
PB
4162 int rtmp;
4163 /* Shift instruction operands are reversed. */
4164 rtmp = rn;
9ee6e8bb 4165 rn = rm;
ad69471c 4166 rm = rtmp;
9ee6e8bb
PB
4167 pairwise = 0;
4168 }
2c0262af 4169 break;
9ee6e8bb
PB
4170 case 20: /* VPMAX */
4171 case 21: /* VPMIN */
4172 case 23: /* VPADD */
4173 pairwise = 1;
2c0262af 4174 break;
9ee6e8bb
PB
4175 case 26: /* VPADD (float) */
4176 pairwise = (u && size < 2);
2c0262af 4177 break;
9ee6e8bb
PB
4178 case 30: /* VPMIN/VPMAX (float) */
4179 pairwise = u;
2c0262af 4180 break;
9ee6e8bb
PB
4181 default:
4182 pairwise = 0;
2c0262af 4183 break;
9ee6e8bb
PB
4184 }
4185 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4186
4187 if (pairwise) {
4188 /* Pairwise. */
4189 if (q)
4190 n = (pass & 1) * 2;
2c0262af 4191 else
9ee6e8bb
PB
4192 n = 0;
4193 if (pass < q + 1) {
4194 NEON_GET_REG(T0, rn, n);
4195 NEON_GET_REG(T1, rn, n + 1);
4196 } else {
4197 NEON_GET_REG(T0, rm, n);
4198 NEON_GET_REG(T1, rm, n + 1);
4199 }
4200 } else {
4201 /* Elementwise. */
4202 NEON_GET_REG(T0, rn, pass);
4203 NEON_GET_REG(T1, rm, pass);
4204 }
4205 switch (op) {
4206 case 0: /* VHADD */
4207 GEN_NEON_INTEGER_OP(hadd);
4208 break;
4209 case 1: /* VQADD */
ad69471c 4210 GEN_NEON_INTEGER_OP_ENV(qadd);
2c0262af 4211 break;
9ee6e8bb
PB
4212 case 2: /* VRHADD */
4213 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 4214 break;
9ee6e8bb
PB
4215 case 3: /* Logic ops. */
4216 switch ((u << 2) | size) {
4217 case 0: /* VAND */
2c0262af 4218 gen_op_andl_T0_T1();
9ee6e8bb
PB
4219 break;
4220 case 1: /* BIC */
4221 gen_op_bicl_T0_T1();
4222 break;
4223 case 2: /* VORR */
4224 gen_op_orl_T0_T1();
4225 break;
4226 case 3: /* VORN */
4227 gen_op_notl_T1();
4228 gen_op_orl_T0_T1();
4229 break;
4230 case 4: /* VEOR */
4231 gen_op_xorl_T0_T1();
4232 break;
4233 case 5: /* VBSL */
8f8e3aa4
PB
4234 tmp = neon_load_reg(rd, pass);
4235 gen_neon_bsl(cpu_T[0], cpu_T[0], cpu_T[1], tmp);
4236 dead_tmp(tmp);
9ee6e8bb
PB
4237 break;
4238 case 6: /* VBIT */
8f8e3aa4
PB
4239 tmp = neon_load_reg(rd, pass);
4240 gen_neon_bsl(cpu_T[0], cpu_T[0], tmp, cpu_T[1]);
4241 dead_tmp(tmp);
9ee6e8bb
PB
4242 break;
4243 case 7: /* VBIF */
8f8e3aa4
PB
4244 tmp = neon_load_reg(rd, pass);
4245 gen_neon_bsl(cpu_T[0], tmp, cpu_T[0], cpu_T[1]);
4246 dead_tmp(tmp);
9ee6e8bb 4247 break;
2c0262af
FB
4248 }
4249 break;
9ee6e8bb
PB
4250 case 4: /* VHSUB */
4251 GEN_NEON_INTEGER_OP(hsub);
4252 break;
4253 case 5: /* VQSUB */
ad69471c 4254 GEN_NEON_INTEGER_OP_ENV(qsub);
2c0262af 4255 break;
9ee6e8bb
PB
4256 case 6: /* VCGT */
4257 GEN_NEON_INTEGER_OP(cgt);
4258 break;
4259 case 7: /* VCGE */
4260 GEN_NEON_INTEGER_OP(cge);
4261 break;
4262 case 8: /* VSHL */
ad69471c 4263 GEN_NEON_INTEGER_OP(shl);
2c0262af 4264 break;
9ee6e8bb 4265 case 9: /* VQSHL */
ad69471c 4266 GEN_NEON_INTEGER_OP_ENV(qshl);
2c0262af 4267 break;
9ee6e8bb 4268 case 10: /* VRSHL */
ad69471c 4269 GEN_NEON_INTEGER_OP(rshl);
2c0262af 4270 break;
9ee6e8bb 4271 case 11: /* VQRSHL */
ad69471c 4272 GEN_NEON_INTEGER_OP_ENV(qrshl);
9ee6e8bb
PB
4273 break;
4274 case 12: /* VMAX */
4275 GEN_NEON_INTEGER_OP(max);
4276 break;
4277 case 13: /* VMIN */
4278 GEN_NEON_INTEGER_OP(min);
4279 break;
4280 case 14: /* VABD */
4281 GEN_NEON_INTEGER_OP(abd);
4282 break;
4283 case 15: /* VABA */
4284 GEN_NEON_INTEGER_OP(abd);
4285 NEON_GET_REG(T1, rd, pass);
4286 gen_neon_add(size);
4287 break;
4288 case 16:
4289 if (!u) { /* VADD */
4290 if (gen_neon_add(size))
4291 return 1;
4292 } else { /* VSUB */
4293 switch (size) {
ad69471c
PB
4294 case 0: gen_helper_neon_sub_u8(CPU_T001); break;
4295 case 1: gen_helper_neon_sub_u16(CPU_T001); break;
9ee6e8bb
PB
4296 case 2: gen_op_subl_T0_T1(); break;
4297 default: return 1;
4298 }
4299 }
4300 break;
4301 case 17:
4302 if (!u) { /* VTST */
4303 switch (size) {
ad69471c
PB
4304 case 0: gen_helper_neon_tst_u8(CPU_T001); break;
4305 case 1: gen_helper_neon_tst_u16(CPU_T001); break;
4306 case 2: gen_helper_neon_tst_u32(CPU_T001); break;
9ee6e8bb
PB
4307 default: return 1;
4308 }
4309 } else { /* VCEQ */
4310 switch (size) {
ad69471c
PB
4311 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
4312 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
4313 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
4314 default: return 1;
4315 }
4316 }
4317 break;
4318 case 18: /* Multiply. */
4319 switch (size) {
ad69471c
PB
4320 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4321 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4322 case 2: gen_op_mul_T0_T1(); break;
4323 default: return 1;
4324 }
4325 NEON_GET_REG(T1, rd, pass);
4326 if (u) { /* VMLS */
ad69471c 4327 gen_neon_rsb(size);
9ee6e8bb
PB
4328 } else { /* VMLA */
4329 gen_neon_add(size);
4330 }
4331 break;
4332 case 19: /* VMUL */
4333 if (u) { /* polynomial */
ad69471c 4334 gen_helper_neon_mul_p8(CPU_T001);
9ee6e8bb
PB
4335 } else { /* Integer */
4336 switch (size) {
ad69471c
PB
4337 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
4338 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
4339 case 2: gen_op_mul_T0_T1(); break;
4340 default: return 1;
4341 }
4342 }
4343 break;
4344 case 20: /* VPMAX */
4345 GEN_NEON_INTEGER_OP(pmax);
4346 break;
4347 case 21: /* VPMIN */
4348 GEN_NEON_INTEGER_OP(pmin);
4349 break;
4350 case 22: /* Multiply high. */
4351 if (!u) { /* VQDMULH */
4352 switch (size) {
ad69471c
PB
4353 case 1: gen_helper_neon_qdmulh_s16(CPU_T0E01); break;
4354 case 2: gen_helper_neon_qdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4355 default: return 1;
4356 }
4357 } else { /* VQRDMULH */
4358 switch (size) {
ad69471c
PB
4359 case 1: gen_helper_neon_qrdmulh_s16(CPU_T0E01); break;
4360 case 2: gen_helper_neon_qrdmulh_s32(CPU_T0E01); break;
9ee6e8bb
PB
4361 default: return 1;
4362 }
4363 }
4364 break;
4365 case 23: /* VPADD */
4366 if (u)
4367 return 1;
4368 switch (size) {
ad69471c
PB
4369 case 0: gen_helper_neon_padd_u8(CPU_T001); break;
4370 case 1: gen_helper_neon_padd_u16(CPU_T001); break;
9ee6e8bb
PB
4371 case 2: gen_op_addl_T0_T1(); break;
4372 default: return 1;
4373 }
4374 break;
4375 case 26: /* Floating point arithmetic. */
4376 switch ((u << 2) | size) {
4377 case 0: /* VADD */
ad69471c 4378 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4379 break;
4380 case 2: /* VSUB */
ad69471c 4381 gen_helper_neon_sub_f32(CPU_T001);
9ee6e8bb
PB
4382 break;
4383 case 4: /* VPADD */
ad69471c 4384 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
4385 break;
4386 case 6: /* VABD */
ad69471c 4387 gen_helper_neon_abd_f32(CPU_T001);
9ee6e8bb
PB
4388 break;
4389 default:
4390 return 1;
4391 }
4392 break;
4393 case 27: /* Float multiply. */
ad69471c 4394 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
4395 if (!u) {
4396 NEON_GET_REG(T1, rd, pass);
4397 if (size == 0) {
ad69471c 4398 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb 4399 } else {
ad69471c 4400 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
4401 }
4402 }
4403 break;
4404 case 28: /* Float compare. */
4405 if (!u) {
ad69471c 4406 gen_helper_neon_ceq_f32(CPU_T001);
b5ff1b31 4407 } else {
9ee6e8bb 4408 if (size == 0)
ad69471c 4409 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb 4410 else
ad69471c 4411 gen_helper_neon_cgt_f32(CPU_T001);
b5ff1b31 4412 }
2c0262af 4413 break;
9ee6e8bb
PB
4414 case 29: /* Float compare absolute. */
4415 if (!u)
4416 return 1;
4417 if (size == 0)
ad69471c 4418 gen_helper_neon_acge_f32(CPU_T001);
9ee6e8bb 4419 else
ad69471c 4420 gen_helper_neon_acgt_f32(CPU_T001);
2c0262af 4421 break;
9ee6e8bb
PB
4422 case 30: /* Float min/max. */
4423 if (size == 0)
ad69471c 4424 gen_helper_neon_max_f32(CPU_T001);
9ee6e8bb 4425 else
ad69471c 4426 gen_helper_neon_min_f32(CPU_T001);
9ee6e8bb
PB
4427 break;
4428 case 31:
4429 if (size == 0)
4373f3ce 4430 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4431 else
4373f3ce 4432 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4433 break;
9ee6e8bb
PB
4434 default:
4435 abort();
2c0262af 4436 }
9ee6e8bb
PB
4437 /* Save the result. For elementwise operations we can put it
4438 straight into the destination register. For pairwise operations
4439 we have to be careful to avoid clobbering the source operands. */
4440 if (pairwise && rd == rm) {
4441 gen_neon_movl_scratch_T0(pass);
4442 } else {
4443 NEON_SET_REG(T0, rd, pass);
4444 }
4445
4446 } /* for pass */
4447 if (pairwise && rd == rm) {
4448 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4449 gen_neon_movl_T0_scratch(pass);
4450 NEON_SET_REG(T0, rd, pass);
4451 }
4452 }
ad69471c 4453 /* End of 3 register same size operations. */
9ee6e8bb
PB
4454 } else if (insn & (1 << 4)) {
4455 if ((insn & 0x00380080) != 0) {
4456 /* Two registers and shift. */
4457 op = (insn >> 8) & 0xf;
4458 if (insn & (1 << 7)) {
4459 /* 64-bit shift. */
4460 size = 3;
4461 } else {
4462 size = 2;
4463 while ((insn & (1 << (size + 19))) == 0)
4464 size--;
4465 }
4466 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
 4467                 /* To avoid excessive duplication of ops we implement shift
4468 by immediate using the variable shift operations. */
4469 if (op < 8) {
4470 /* Shift by immediate:
4471 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4472 /* Right shifts are encoded as N - shift, where N is the
4473 element size in bits. */
4474 if (op <= 4)
4475 shift = shift - (1 << (size + 3));
9ee6e8bb
PB
4476 if (size == 3) {
4477 count = q + 1;
4478 } else {
4479 count = q ? 4: 2;
4480 }
4481 switch (size) {
4482 case 0:
4483 imm = (uint8_t) shift;
4484 imm |= imm << 8;
4485 imm |= imm << 16;
4486 break;
4487 case 1:
4488 imm = (uint16_t) shift;
4489 imm |= imm << 16;
4490 break;
4491 case 2:
4492 case 3:
4493 imm = shift;
4494 break;
4495 default:
4496 abort();
4497 }
4498
4499 for (pass = 0; pass < count; pass++) {
ad69471c
PB
4500 if (size == 3) {
4501 neon_load_reg64(cpu_V0, rm + pass);
4502 tcg_gen_movi_i64(cpu_V1, imm);
4503 switch (op) {
4504 case 0: /* VSHR */
4505 case 1: /* VSRA */
4506 if (u)
4507 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4508 else
ad69471c 4509 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4510 break;
ad69471c
PB
4511 case 2: /* VRSHR */
4512 case 3: /* VRSRA */
4513 if (u)
4514 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4515 else
ad69471c 4516 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4517 break;
ad69471c
PB
4518 case 4: /* VSRI */
4519 if (!u)
4520 return 1;
4521 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4522 break;
4523 case 5: /* VSHL, VSLI */
4524 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
4525 break;
4526 case 6: /* VQSHL */
4527 if (u)
4528 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4529 else
ad69471c
PB
4530 gen_helper_neon_qshl_s64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
4531 break;
4532 case 7: /* VQSHLU */
4533 gen_helper_neon_qshl_u64(cpu_V0, cpu_env, cpu_V0, cpu_V1);
9ee6e8bb 4534 break;
9ee6e8bb 4535 }
ad69471c
PB
4536 if (op == 1 || op == 3) {
4537 /* Accumulate. */
4538 neon_load_reg64(cpu_V0, rd + pass);
4539 tcg_gen_add_i64(cpu_V0, cpu_V0, cpu_V1);
4540 } else if (op == 4 || (op == 5 && u)) {
4541 /* Insert */
4542 cpu_abort(env, "VS[LR]I.64 not implemented");
4543 }
4544 neon_store_reg64(cpu_V0, rd + pass);
4545 } else { /* size < 3 */
4546 /* Operands in T0 and T1. */
4547 gen_op_movl_T1_im(imm);
4548 NEON_GET_REG(T0, rm, pass);
4549 switch (op) {
4550 case 0: /* VSHR */
4551 case 1: /* VSRA */
4552 GEN_NEON_INTEGER_OP(shl);
4553 break;
4554 case 2: /* VRSHR */
4555 case 3: /* VRSRA */
4556 GEN_NEON_INTEGER_OP(rshl);
4557 break;
4558 case 4: /* VSRI */
4559 if (!u)
4560 return 1;
4561 GEN_NEON_INTEGER_OP(shl);
4562 break;
4563 case 5: /* VSHL, VSLI */
4564 switch (size) {
4565 case 0: gen_helper_neon_shl_u8(CPU_T001); break;
4566 case 1: gen_helper_neon_shl_u16(CPU_T001); break;
4567 case 2: gen_helper_neon_shl_u32(CPU_T001); break;
4568 default: return 1;
4569 }
4570 break;
4571 case 6: /* VQSHL */
4572 GEN_NEON_INTEGER_OP_ENV(qshl);
4573 break;
4574 case 7: /* VQSHLU */
4575 switch (size) {
4576 case 0: gen_helper_neon_qshl_u8(CPU_T0E01); break;
4577 case 1: gen_helper_neon_qshl_u16(CPU_T0E01); break;
4578 case 2: gen_helper_neon_qshl_u32(CPU_T0E01); break;
4579 default: return 1;
4580 }
4581 break;
4582 }
4583
4584 if (op == 1 || op == 3) {
4585 /* Accumulate. */
4586 NEON_GET_REG(T1, rd, pass);
4587 gen_neon_add(size);
4588 } else if (op == 4 || (op == 5 && u)) {
4589 /* Insert */
4590 switch (size) {
4591 case 0:
4592 if (op == 4)
4593 imm = 0xff >> -shift;
4594 else
4595 imm = (uint8_t)(0xff << shift);
4596 imm |= imm << 8;
4597 imm |= imm << 16;
4598 break;
4599 case 1:
4600 if (op == 4)
4601 imm = 0xffff >> -shift;
4602 else
4603 imm = (uint16_t)(0xffff << shift);
4604 imm |= imm << 16;
4605 break;
4606 case 2:
4607 if (op == 4)
4608 imm = 0xffffffffu >> -shift;
4609 else
4610 imm = 0xffffffffu << shift;
4611 break;
4612 default:
4613 abort();
4614 }
4615 tmp = neon_load_reg(rd, pass);
4616 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], imm);
4617 tcg_gen_andi_i32(tmp, tmp, ~imm);
4618 tcg_gen_or_i32(cpu_T[0], cpu_T[0], tmp);
4619 }
9ee6e8bb
PB
4620 NEON_SET_REG(T0, rd, pass);
4621 }
4622 } /* for pass */
4623 } else if (op < 10) {
ad69471c 4624 /* Shift by immediate and narrow:
9ee6e8bb
PB
4625 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4626 shift = shift - (1 << (size + 3));
4627 size++;
9ee6e8bb
PB
4628 switch (size) {
4629 case 1:
ad69471c 4630 imm = (uint16_t)shift;
9ee6e8bb 4631 imm |= imm << 16;
ad69471c 4632 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
4633 break;
4634 case 2:
ad69471c
PB
4635 imm = (uint32_t)shift;
4636 tmp2 = tcg_const_i32(imm);
9ee6e8bb 4637 case 3:
ad69471c 4638 tmp2 = tcg_const_i64(shift);
9ee6e8bb
PB
4639 break;
4640 default:
4641 abort();
4642 }
4643
ad69471c
PB
4644 for (pass = 0; pass < 2; pass++) {
4645 if (size == 3) {
4646 neon_load_reg64(cpu_V0, rm + pass);
4647 if (q) {
4648 if (u)
4649 gen_helper_neon_rshl_u64(cpu_V0, cpu_V0, tmp2);
4650 else
4651 gen_helper_neon_rshl_s64(cpu_V0, cpu_V0, tmp2);
4652 } else {
4653 if (u)
4654 gen_helper_neon_shl_u64(cpu_V0, cpu_V0, tmp2);
4655 else
4656 gen_helper_neon_shl_s64(cpu_V0, cpu_V0, tmp2);
4657 }
2c0262af 4658 } else {
ad69471c
PB
4659 tmp = neon_load_reg(rm + pass, 0);
4660 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4661 tcg_gen_extu_i32_i64(cpu_V0, tmp);
4662 dead_tmp(tmp);
4663 tmp = neon_load_reg(rm + pass, 1);
4664 gen_neon_shift_narrow(size, tmp, tmp2, q, u);
4665 tcg_gen_extu_i32_i64(cpu_V1, tmp);
4666 dead_tmp(tmp);
4667 tcg_gen_shli_i64(cpu_V1, cpu_V1, 32);
4668 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
9ee6e8bb 4669 }
ad69471c
PB
4670 tmp = new_tmp();
4671 if (op == 8 && !u) {
4672 gen_neon_narrow(size - 1, tmp, cpu_V0);
9ee6e8bb 4673 } else {
ad69471c
PB
4674 if (op == 8)
4675 gen_neon_narrow_sats(size - 1, tmp, cpu_V0);
9ee6e8bb 4676 else
ad69471c
PB
4677 gen_neon_narrow_satu(size - 1, tmp, cpu_V0);
4678 }
4679 if (pass == 0) {
4680 tmp2 = tmp;
4681 } else {
4682 neon_store_reg(rd, 0, tmp2);
4683 neon_store_reg(rd, 1, tmp);
9ee6e8bb
PB
4684 }
4685 } /* for pass */
4686 } else if (op == 10) {
4687 /* VSHLL */
ad69471c 4688 if (q || size == 3)
9ee6e8bb 4689 return 1;
ad69471c
PB
4690 tmp = neon_load_reg(rm, 0);
4691 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 4692 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4693 if (pass == 1)
4694 tmp = tmp2;
4695
4696 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb 4697
9ee6e8bb
PB
4698 if (shift != 0) {
4699 /* The shift is less than the width of the source
ad69471c
PB
4700 type, so we can just shift the whole register. */
4701 tcg_gen_shli_i64(cpu_V0, cpu_V0, shift);
4702 if (size < 2 || !u) {
4703 uint64_t imm64;
4704 if (size == 0) {
4705 imm = (0xffu >> (8 - shift));
4706 imm |= imm << 16;
4707 } else {
4708 imm = 0xffff >> (16 - shift);
9ee6e8bb 4709 }
ad69471c
PB
4710 imm64 = imm | (((uint64_t)imm) << 32);
4711 tcg_gen_andi_i64(cpu_V0, cpu_V0, imm64);
9ee6e8bb
PB
4712 }
4713 }
ad69471c 4714 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4715 }
4716 } else if (op == 15 || op == 16) {
4717 /* VCVT fixed-point. */
4718 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4719 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4720 if (op & 1) {
4721 if (u)
4373f3ce 4722 gen_vfp_ulto(0, shift);
9ee6e8bb 4723 else
4373f3ce 4724 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4725 } else {
4726 if (u)
4373f3ce 4727 gen_vfp_toul(0, shift);
9ee6e8bb 4728 else
4373f3ce 4729 gen_vfp_tosl(0, shift);
2c0262af 4730 }
4373f3ce 4731 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4732 }
4733 } else {
9ee6e8bb
PB
4734 return 1;
4735 }
4736 } else { /* (insn & 0x00380080) == 0 */
4737 int invert;
4738
4739 op = (insn >> 8) & 0xf;
4740 /* One register and immediate. */
4741 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4742 invert = (insn & (1 << 5)) != 0;
4743 switch (op) {
4744 case 0: case 1:
4745 /* no-op */
4746 break;
4747 case 2: case 3:
4748 imm <<= 8;
4749 break;
4750 case 4: case 5:
4751 imm <<= 16;
4752 break;
4753 case 6: case 7:
4754 imm <<= 24;
4755 break;
4756 case 8: case 9:
4757 imm |= imm << 16;
4758 break;
4759 case 10: case 11:
4760 imm = (imm << 8) | (imm << 24);
4761 break;
4762 case 12:
4763 imm = (imm < 8) | 0xff;
4764 break;
4765 case 13:
4766 imm = (imm << 16) | 0xffff;
4767 break;
4768 case 14:
4769 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4770 if (invert)
4771 imm = ~imm;
4772 break;
4773 case 15:
4774 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4775 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4776 break;
4777 }
4778 if (invert)
4779 imm = ~imm;
4780
4781 if (op != 14 || !invert)
4782 gen_op_movl_T1_im(imm);
4783
4784 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4785 if (op & 1 && op < 12) {
ad69471c 4786 tmp = neon_load_reg(rd, pass);
9ee6e8bb
PB
4787 if (invert) {
4788 /* The immediate value has already been inverted, so
4789 BIC becomes AND. */
ad69471c 4790 tcg_gen_andi_i32(tmp, tmp, imm);
9ee6e8bb 4791 } else {
ad69471c 4792 tcg_gen_ori_i32(tmp, tmp, imm);
9ee6e8bb 4793 }
9ee6e8bb 4794 } else {
ad69471c
PB
4795 /* VMOV, VMVN. */
4796 tmp = new_tmp();
9ee6e8bb 4797 if (op == 14 && invert) {
ad69471c
PB
4798 uint32_t val;
4799 val = 0;
9ee6e8bb
PB
4800 for (n = 0; n < 4; n++) {
4801 if (imm & (1 << (n + (pass & 1) * 4)))
ad69471c 4802 val |= 0xff << (n * 8);
9ee6e8bb 4803 }
ad69471c
PB
4804 tcg_gen_movi_i32(tmp, val);
4805 } else {
4806 tcg_gen_movi_i32(tmp, imm);
9ee6e8bb 4807 }
9ee6e8bb 4808 }
ad69471c 4809 neon_store_reg(rd, pass, tmp);
9ee6e8bb
PB
4810 }
4811 }
4812 } else { /* (insn & 0x00800010 == 0x00800010) */
4813 if (size != 3) {
4814 op = (insn >> 8) & 0xf;
4815 if ((insn & (1 << 6)) == 0) {
4816 /* Three registers of different lengths. */
4817 int src1_wide;
4818 int src2_wide;
4819 int prewiden;
4820 /* prewiden, src1_wide, src2_wide */
4821 static const int neon_3reg_wide[16][3] = {
4822 {1, 0, 0}, /* VADDL */
4823 {1, 1, 0}, /* VADDW */
4824 {1, 0, 0}, /* VSUBL */
4825 {1, 1, 0}, /* VSUBW */
4826 {0, 1, 1}, /* VADDHN */
4827 {0, 0, 0}, /* VABAL */
4828 {0, 1, 1}, /* VSUBHN */
4829 {0, 0, 0}, /* VABDL */
4830 {0, 0, 0}, /* VMLAL */
4831 {0, 0, 0}, /* VQDMLAL */
4832 {0, 0, 0}, /* VMLSL */
4833 {0, 0, 0}, /* VQDMLSL */
4834 {0, 0, 0}, /* Integer VMULL */
4835 {0, 0, 0}, /* VQDMULL */
4836 {0, 0, 0} /* Polynomial VMULL */
4837 };
4838
4839 prewiden = neon_3reg_wide[op][0];
4840 src1_wide = neon_3reg_wide[op][1];
4841 src2_wide = neon_3reg_wide[op][2];
4842
ad69471c
PB
4843 if (size == 0 && (op == 9 || op == 11 || op == 13))
4844 return 1;
4845
9ee6e8bb
PB
4846 /* Avoid overlapping operands. Wide source operands are
4847 always aligned so will never overlap with wide
4848 destinations in problematic ways. */
8f8e3aa4
PB
4849 if (rd == rm && !src2_wide) {
4850 NEON_GET_REG(T0, rm, 1);
4851 gen_neon_movl_scratch_T0(2);
4852 } else if (rd == rn && !src1_wide) {
4853 NEON_GET_REG(T0, rn, 1);
4854 gen_neon_movl_scratch_T0(2);
9ee6e8bb
PB
4855 }
4856 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
4857 if (src1_wide) {
4858 neon_load_reg64(cpu_V0, rn + pass);
9ee6e8bb 4859 } else {
ad69471c
PB
4860 if (pass == 1 && rd == rn) {
4861 gen_neon_movl_T0_scratch(2);
4862 tmp = new_tmp();
4863 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb 4864 } else {
ad69471c
PB
4865 tmp = neon_load_reg(rn, pass);
4866 }
4867 if (prewiden) {
4868 gen_neon_widen(cpu_V0, tmp, size, u);
9ee6e8bb
PB
4869 }
4870 }
ad69471c
PB
4871 if (src2_wide) {
4872 neon_load_reg64(cpu_V1, rm + pass);
9ee6e8bb 4873 } else {
ad69471c 4874 if (pass == 1 && rd == rm) {
8f8e3aa4 4875 gen_neon_movl_T0_scratch(2);
ad69471c
PB
4876 tmp2 = new_tmp();
4877 tcg_gen_mov_i32(tmp2, cpu_T[0]);
9ee6e8bb 4878 } else {
ad69471c
PB
4879 tmp2 = neon_load_reg(rm, pass);
4880 }
4881 if (prewiden) {
4882 gen_neon_widen(cpu_V1, tmp2, size, u);
9ee6e8bb 4883 }
9ee6e8bb
PB
4884 }
4885 switch (op) {
4886 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
ad69471c 4887 gen_neon_addl(size);
9ee6e8bb
PB
4888 break;
 4889                         case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
ad69471c 4890 gen_neon_subl(size);
9ee6e8bb
PB
4891 break;
4892 case 5: case 7: /* VABAL, VABDL */
4893 switch ((size << 1) | u) {
ad69471c
PB
4894 case 0:
4895 gen_helper_neon_abdl_s16(cpu_V0, tmp, tmp2);
4896 break;
4897 case 1:
4898 gen_helper_neon_abdl_u16(cpu_V0, tmp, tmp2);
4899 break;
4900 case 2:
4901 gen_helper_neon_abdl_s32(cpu_V0, tmp, tmp2);
4902 break;
4903 case 3:
4904 gen_helper_neon_abdl_u32(cpu_V0, tmp, tmp2);
4905 break;
4906 case 4:
4907 gen_helper_neon_abdl_s64(cpu_V0, tmp, tmp2);
4908 break;
4909 case 5:
4910 gen_helper_neon_abdl_u64(cpu_V0, tmp, tmp2);
4911 break;
9ee6e8bb
PB
4912 default: abort();
4913 }
ad69471c
PB
4914 dead_tmp(tmp2);
4915 dead_tmp(tmp);
9ee6e8bb
PB
4916 break;
4917 case 8: case 9: case 10: case 11: case 12: case 13:
4918 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
ad69471c 4919 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb
PB
4920 break;
4921 case 14: /* Polynomial VMULL */
4922 cpu_abort(env, "Polynomial VMULL not implemented");
4923
4924 default: /* 15 is RESERVED. */
4925 return 1;
4926 }
4927 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4928 /* Accumulate. */
4929 if (op == 10 || op == 11) {
ad69471c 4930 gen_neon_negl(cpu_V0, size);
9ee6e8bb
PB
4931 }
4932
9ee6e8bb 4933 if (op != 13) {
ad69471c 4934 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb
PB
4935 }
4936
4937 switch (op) {
4938 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
ad69471c 4939 gen_neon_addl(size);
9ee6e8bb
PB
4940 break;
4941 case 9: case 11: /* VQDMLAL, VQDMLSL */
ad69471c
PB
4942 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
4943 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
4944 break;
9ee6e8bb
PB
4945 /* Fall through. */
4946 case 13: /* VQDMULL */
ad69471c 4947 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
4948 break;
4949 default:
4950 abort();
4951 }
ad69471c 4952 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4953 } else if (op == 4 || op == 6) {
4954 /* Narrowing operation. */
ad69471c 4955 tmp = new_tmp();
9ee6e8bb
PB
4956 if (u) {
4957 switch (size) {
ad69471c
PB
4958 case 0:
4959 gen_helper_neon_narrow_high_u8(tmp, cpu_V0);
4960 break;
4961 case 1:
4962 gen_helper_neon_narrow_high_u16(tmp, cpu_V0);
4963 break;
4964 case 2:
4965 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4966 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4967 break;
9ee6e8bb
PB
4968 default: abort();
4969 }
4970 } else {
4971 switch (size) {
ad69471c
PB
4972 case 0:
4973 gen_helper_neon_narrow_round_high_u8(tmp, cpu_V0);
4974 break;
4975 case 1:
4976 gen_helper_neon_narrow_round_high_u16(tmp, cpu_V0);
4977 break;
4978 case 2:
4979 tcg_gen_addi_i64(cpu_V0, cpu_V0, 1u << 31);
4980 tcg_gen_shri_i64(cpu_V0, cpu_V0, 32);
4981 tcg_gen_trunc_i64_i32(tmp, cpu_V0);
4982 break;
9ee6e8bb
PB
4983 default: abort();
4984 }
4985 }
ad69471c
PB
4986 if (pass == 0) {
4987 tmp3 = tmp;
4988 } else {
4989 neon_store_reg(rd, 0, tmp3);
4990 neon_store_reg(rd, 1, tmp);
4991 }
9ee6e8bb
PB
4992 } else {
4993 /* Write back the result. */
ad69471c 4994 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
4995 }
4996 }
4997 } else {
4998 /* Two registers and a scalar. */
4999 switch (op) {
5000 case 0: /* Integer VMLA scalar */
5001 case 1: /* Float VMLA scalar */
5002 case 4: /* Integer VMLS scalar */
5003 case 5: /* Floating point VMLS scalar */
5004 case 8: /* Integer VMUL scalar */
5005 case 9: /* Floating point VMUL scalar */
5006 case 12: /* VQDMULH scalar */
5007 case 13: /* VQRDMULH scalar */
5008 gen_neon_get_scalar(size, rm);
8f8e3aa4 5009 gen_neon_movl_scratch_T0(0);
9ee6e8bb
PB
5010 for (pass = 0; pass < (u ? 4 : 2); pass++) {
5011 if (pass != 0)
8f8e3aa4 5012 gen_neon_movl_T0_scratch(0);
9ee6e8bb
PB
5013 NEON_GET_REG(T1, rn, pass);
5014 if (op == 12) {
5015 if (size == 1) {
ad69471c 5016 gen_helper_neon_qdmulh_s16(CPU_T0E01);
9ee6e8bb 5017 } else {
ad69471c 5018 gen_helper_neon_qdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5019 }
5020 } else if (op == 13) {
5021 if (size == 1) {
ad69471c 5022 gen_helper_neon_qrdmulh_s16(CPU_T0E01);
9ee6e8bb 5023 } else {
ad69471c 5024 gen_helper_neon_qrdmulh_s32(CPU_T0E01);
9ee6e8bb
PB
5025 }
5026 } else if (op & 1) {
ad69471c 5027 gen_helper_neon_mul_f32(CPU_T001);
9ee6e8bb
PB
5028 } else {
5029 switch (size) {
ad69471c
PB
5030 case 0: gen_helper_neon_mul_u8(CPU_T001); break;
5031 case 1: gen_helper_neon_mul_u16(CPU_T001); break;
9ee6e8bb
PB
5032 case 2: gen_op_mul_T0_T1(); break;
5033 default: return 1;
5034 }
5035 }
5036 if (op < 8) {
5037 /* Accumulate. */
5038 NEON_GET_REG(T1, rd, pass);
5039 switch (op) {
5040 case 0:
5041 gen_neon_add(size);
5042 break;
5043 case 1:
ad69471c 5044 gen_helper_neon_add_f32(CPU_T001);
9ee6e8bb
PB
5045 break;
5046 case 4:
ad69471c 5047 gen_neon_rsb(size);
9ee6e8bb
PB
5048 break;
5049 case 5:
ad69471c 5050 gen_helper_neon_sub_f32(cpu_T[0], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
5051 break;
5052 default:
5053 abort();
5054 }
5055 }
5056 NEON_SET_REG(T0, rd, pass);
5057 }
5058 break;
 5059                         case 2: /* VMLAL scalar */
5060 case 3: /* VQDMLAL scalar */
5061 case 6: /* VMLSL scalar */
5062 case 7: /* VQDMLSL scalar */
5063 case 10: /* VMULL scalar */
5064 case 11: /* VQDMULL scalar */
ad69471c
PB
5065 if (size == 0 && (op == 3 || op == 7 || op == 11))
5066 return 1;
5067
9ee6e8bb 5068 gen_neon_get_scalar(size, rm);
ad69471c
PB
5069 NEON_GET_REG(T1, rn, 1);
5070
9ee6e8bb 5071 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5072 if (pass == 0) {
5073 tmp = neon_load_reg(rn, 0);
9ee6e8bb 5074 } else {
ad69471c
PB
5075 tmp = new_tmp();
5076 tcg_gen_mov_i32(tmp, cpu_T[1]);
9ee6e8bb 5077 }
ad69471c
PB
5078 tmp2 = new_tmp();
5079 tcg_gen_mov_i32(tmp2, cpu_T[0]);
5080 gen_neon_mull(cpu_V0, tmp, tmp2, size, u);
9ee6e8bb 5081 if (op == 6 || op == 7) {
ad69471c
PB
5082 gen_neon_negl(cpu_V0, size);
5083 }
5084 if (op != 11) {
5085 neon_load_reg64(cpu_V1, rd + pass);
9ee6e8bb 5086 }
9ee6e8bb
PB
5087 switch (op) {
5088 case 2: case 6:
ad69471c 5089 gen_neon_addl(size);
9ee6e8bb
PB
5090 break;
5091 case 3: case 7:
ad69471c
PB
5092 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
5093 gen_neon_addl_saturate(cpu_V0, cpu_V1, size);
9ee6e8bb
PB
5094 break;
5095 case 10:
5096 /* no-op */
5097 break;
5098 case 11:
ad69471c 5099 gen_neon_addl_saturate(cpu_V0, cpu_V0, size);
9ee6e8bb
PB
5100 break;
5101 default:
5102 abort();
5103 }
ad69471c 5104 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5105 }
5106 break;
5107 default: /* 14 and 15 are RESERVED */
5108 return 1;
5109 }
5110 }
5111 } else { /* size == 3 */
5112 if (!u) {
5113 /* Extract. */
9ee6e8bb 5114 imm = (insn >> 8) & 0xf;
ad69471c
PB
5115 count = q + 1;
5116
5117 if (imm > 7 && !q)
5118 return 1;
5119
5120 if (imm == 0) {
5121 neon_load_reg64(cpu_V0, rn);
5122 if (q) {
5123 neon_load_reg64(cpu_V1, rn + 1);
9ee6e8bb 5124 }
ad69471c
PB
5125 } else if (imm == 8) {
5126 neon_load_reg64(cpu_V0, rn + 1);
5127 if (q) {
5128 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5129 }
ad69471c
PB
5130 } else if (q) {
5131 tmp = tcg_temp_new(TCG_TYPE_I64);
5132 if (imm < 8) {
5133 neon_load_reg64(cpu_V0, rn);
5134 neon_load_reg64(tmp, rn + 1);
5135 } else {
5136 neon_load_reg64(cpu_V0, rn + 1);
5137 neon_load_reg64(tmp, rm);
5138 }
5139 tcg_gen_shri_i64(cpu_V0, cpu_V0, (imm & 7) * 8);
5140 tcg_gen_shli_i64(cpu_V1, tmp, 64 - ((imm & 7) * 8));
5141 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5142 if (imm < 8) {
5143 neon_load_reg64(cpu_V1, rm);
9ee6e8bb 5144 } else {
ad69471c
PB
5145 neon_load_reg64(cpu_V1, rm + 1);
5146 imm -= 8;
9ee6e8bb 5147 }
ad69471c
PB
5148 tcg_gen_shli_i64(cpu_V1, cpu_V1, 64 - (imm * 8));
5149 tcg_gen_shri_i64(tmp, tmp, imm * 8);
5150 tcg_gen_or_i64(cpu_V1, cpu_V1, tmp);
5151 } else {
5152 neon_load_reg64(cpu_V0, rn);
5153 tcg_gen_shri_i32(cpu_V0, cpu_V0, imm * 8);
5154 neon_load_reg64(cpu_V1, rm);
5155 tcg_gen_shli_i32(cpu_V1, cpu_V1, 64 - (imm * 8));
5156 tcg_gen_or_i64(cpu_V0, cpu_V0, cpu_V1);
5157 }
5158 neon_store_reg64(cpu_V0, rd);
5159 if (q) {
5160 neon_store_reg64(cpu_V1, rd + 1);
9ee6e8bb
PB
5161 }
5162 } else if ((insn & (1 << 11)) == 0) {
5163 /* Two register misc. */
5164 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
5165 size = (insn >> 18) & 3;
5166 switch (op) {
5167 case 0: /* VREV64 */
5168 if (size == 3)
5169 return 1;
5170 for (pass = 0; pass < (q ? 2 : 1); pass++) {
5171 NEON_GET_REG(T0, rm, pass * 2);
5172 NEON_GET_REG(T1, rm, pass * 2 + 1);
5173 switch (size) {
b0109805 5174 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5175 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5176 case 2: /* no-op */ break;
5177 default: abort();
5178 }
5179 NEON_SET_REG(T0, rd, pass * 2 + 1);
5180 if (size == 2) {
5181 NEON_SET_REG(T1, rd, pass * 2);
5182 } else {
5183 gen_op_movl_T0_T1();
5184 switch (size) {
b0109805 5185 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5186 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5187 default: abort();
5188 }
5189 NEON_SET_REG(T0, rd, pass * 2);
5190 }
5191 }
5192 break;
5193 case 4: case 5: /* VPADDL */
5194 case 12: case 13: /* VPADAL */
9ee6e8bb
PB
5195 if (size == 3)
5196 return 1;
ad69471c
PB
5197 for (pass = 0; pass < q + 1; pass++) {
5198 tmp = neon_load_reg(rm, pass * 2);
5199 gen_neon_widen(cpu_V0, tmp, size, op & 1);
5200 tmp = neon_load_reg(rm, pass * 2 + 1);
5201 gen_neon_widen(cpu_V1, tmp, size, op & 1);
5202 switch (size) {
5203 case 0: gen_helper_neon_paddl_u16(CPU_V001); break;
5204 case 1: gen_helper_neon_paddl_u32(CPU_V001); break;
5205 case 2: tcg_gen_add_i64(CPU_V001); break;
5206 default: abort();
5207 }
9ee6e8bb
PB
5208 if (op >= 12) {
5209 /* Accumulate. */
ad69471c
PB
5210 neon_load_reg64(cpu_V1, rd + pass);
5211 gen_neon_addl(size);
9ee6e8bb 5212 }
ad69471c 5213 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5214 }
5215 break;
5216 case 33: /* VTRN */
5217 if (size == 2) {
5218 for (n = 0; n < (q ? 4 : 2); n += 2) {
5219 NEON_GET_REG(T0, rm, n);
5220 NEON_GET_REG(T1, rd, n + 1);
5221 NEON_SET_REG(T1, rm, n);
5222 NEON_SET_REG(T0, rd, n + 1);
5223 }
5224 } else {
5225 goto elementwise;
5226 }
5227 break;
5228 case 34: /* VUZP */
5229 /* Reg Before After
5230 Rd A3 A2 A1 A0 B2 B0 A2 A0
5231 Rm B3 B2 B1 B0 B3 B1 A3 A1
5232 */
5233 if (size == 3)
5234 return 1;
5235 gen_neon_unzip(rd, q, 0, size);
5236 gen_neon_unzip(rm, q, 4, size);
5237 if (q) {
5238 static int unzip_order_q[8] =
5239 {0, 2, 4, 6, 1, 3, 5, 7};
5240 for (n = 0; n < 8; n++) {
5241 int reg = (n < 4) ? rd : rm;
5242 gen_neon_movl_T0_scratch(unzip_order_q[n]);
5243 NEON_SET_REG(T0, reg, n % 4);
5244 }
5245 } else {
5246 static int unzip_order[4] =
5247 {0, 4, 1, 5};
5248 for (n = 0; n < 4; n++) {
5249 int reg = (n < 2) ? rd : rm;
5250 gen_neon_movl_T0_scratch(unzip_order[n]);
5251 NEON_SET_REG(T0, reg, n % 2);
5252 }
5253 }
5254 break;
5255 case 35: /* VZIP */
5256 /* Reg Before After
5257 Rd A3 A2 A1 A0 B1 A1 B0 A0
5258 Rm B3 B2 B1 B0 B3 A3 B2 A2
5259 */
5260 if (size == 3)
5261 return 1;
5262 count = (q ? 4 : 2);
5263 for (n = 0; n < count; n++) {
5264 NEON_GET_REG(T0, rd, n);
5265 NEON_GET_REG(T1, rd, n);
5266 switch (size) {
ad69471c
PB
5267 case 0: gen_helper_neon_zip_u8(); break;
5268 case 1: gen_helper_neon_zip_u16(); break;
9ee6e8bb
PB
5269 case 2: /* no-op */; break;
5270 default: abort();
5271 }
5272 gen_neon_movl_scratch_T0(n * 2);
5273 gen_neon_movl_scratch_T1(n * 2 + 1);
5274 }
5275 for (n = 0; n < count * 2; n++) {
5276 int reg = (n < count) ? rd : rm;
5277 gen_neon_movl_T0_scratch(n);
5278 NEON_SET_REG(T0, reg, n % count);
5279 }
5280 break;
5281 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
ad69471c
PB
5282 if (size == 3)
5283 return 1;
9ee6e8bb 5284 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5285 neon_load_reg64(cpu_V0, rm + pass);
5286 tmp = new_tmp();
9ee6e8bb 5287 if (op == 36 && q == 0) {
ad69471c 5288 gen_neon_narrow(size, tmp, cpu_V0);
9ee6e8bb 5289 } else if (q) {
ad69471c 5290 gen_neon_narrow_satu(size, tmp, cpu_V0);
9ee6e8bb 5291 } else {
ad69471c
PB
5292 gen_neon_narrow_sats(size, tmp, cpu_V0);
5293 }
5294 if (pass == 0) {
5295 tmp2 = tmp;
5296 } else {
5297 neon_store_reg(rd, 0, tmp2);
5298 neon_store_reg(rd, 1, tmp);
9ee6e8bb 5299 }
9ee6e8bb
PB
5300 }
5301 break;
5302 case 38: /* VSHLL */
ad69471c 5303 if (q || size == 3)
9ee6e8bb 5304 return 1;
ad69471c
PB
5305 tmp = neon_load_reg(rm, 0);
5306 tmp2 = neon_load_reg(rm, 1);
9ee6e8bb 5307 for (pass = 0; pass < 2; pass++) {
ad69471c
PB
5308 if (pass == 1)
5309 tmp = tmp2;
5310 gen_neon_widen(cpu_V0, tmp, size, 1);
5311 neon_store_reg64(cpu_V0, rd + pass);
9ee6e8bb
PB
5312 }
5313 break;
5314 default:
5315 elementwise:
5316 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5317 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5318 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5319 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5320 } else {
5321 NEON_GET_REG(T0, rm, pass);
5322 }
5323 switch (op) {
5324 case 1: /* VREV32 */
5325 switch (size) {
b0109805 5326 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5327 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5328 default: return 1;
5329 }
5330 break;
5331 case 2: /* VREV16 */
5332 if (size != 0)
5333 return 1;
3670669c 5334 gen_rev16(cpu_T[0]);
9ee6e8bb 5335 break;
9ee6e8bb
PB
5336 case 8: /* CLS */
5337 switch (size) {
ad69471c
PB
5338 case 0: gen_helper_neon_cls_s8(cpu_T[0], cpu_T[0]); break;
5339 case 1: gen_helper_neon_cls_s16(cpu_T[0], cpu_T[0]); break;
5340 case 2: gen_helper_neon_cls_s32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5341 default: return 1;
5342 }
5343 break;
5344 case 9: /* CLZ */
5345 switch (size) {
ad69471c
PB
5346 case 0: gen_helper_neon_clz_u8(cpu_T[0], cpu_T[0]); break;
5347 case 1: gen_helper_neon_clz_u16(cpu_T[0], cpu_T[0]); break;
1497c961 5348 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5349 default: return 1;
5350 }
5351 break;
5352 case 10: /* CNT */
5353 if (size != 0)
5354 return 1;
ad69471c 5355 gen_helper_neon_cnt_u8(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5356 break;
5357 case 11: /* VNOT */
5358 if (size != 0)
5359 return 1;
5360 gen_op_notl_T0();
5361 break;
5362 case 14: /* VQABS */
5363 switch (size) {
ad69471c
PB
5364 case 0: gen_helper_neon_qabs_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5365 case 1: gen_helper_neon_qabs_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5366 case 2: gen_helper_neon_qabs_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5367 default: return 1;
5368 }
5369 break;
5370 case 15: /* VQNEG */
5371 switch (size) {
ad69471c
PB
5372 case 0: gen_helper_neon_qneg_s8(cpu_T[0], cpu_env, cpu_T[0]); break;
5373 case 1: gen_helper_neon_qneg_s16(cpu_T[0], cpu_env, cpu_T[0]); break;
5374 case 2: gen_helper_neon_qneg_s32(cpu_T[0], cpu_env, cpu_T[0]); break;
9ee6e8bb
PB
5375 default: return 1;
5376 }
5377 break;
5378 case 16: case 19: /* VCGT #0, VCLE #0 */
5379 gen_op_movl_T1_im(0);
5380 switch(size) {
ad69471c
PB
5381 case 0: gen_helper_neon_cgt_s8(CPU_T001); break;
5382 case 1: gen_helper_neon_cgt_s16(CPU_T001); break;
5383 case 2: gen_helper_neon_cgt_s32(CPU_T001); break;
9ee6e8bb
PB
5384 default: return 1;
5385 }
5386 if (op == 19)
5387 gen_op_notl_T0();
5388 break;
5389 case 17: case 20: /* VCGE #0, VCLT #0 */
5390 gen_op_movl_T1_im(0);
5391 switch(size) {
ad69471c
PB
5392 case 0: gen_helper_neon_cge_s8(CPU_T001); break;
5393 case 1: gen_helper_neon_cge_s16(CPU_T001); break;
5394 case 2: gen_helper_neon_cge_s32(CPU_T001); break;
9ee6e8bb
PB
5395 default: return 1;
5396 }
5397 if (op == 20)
5398 gen_op_notl_T0();
5399 break;
5400 case 18: /* VCEQ #0 */
5401 gen_op_movl_T1_im(0);
5402 switch(size) {
ad69471c
PB
5403 case 0: gen_helper_neon_ceq_u8(CPU_T001); break;
5404 case 1: gen_helper_neon_ceq_u16(CPU_T001); break;
5405 case 2: gen_helper_neon_ceq_u32(CPU_T001); break;
9ee6e8bb
PB
5406 default: return 1;
5407 }
5408 break;
5409 case 22: /* VABS */
5410 switch(size) {
ad69471c
PB
5411 case 0: gen_helper_neon_abs_s8(cpu_T[0], cpu_T[0]); break;
5412 case 1: gen_helper_neon_abs_s16(cpu_T[0], cpu_T[0]); break;
5413 case 2: tcg_gen_abs_i32(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5414 default: return 1;
5415 }
5416 break;
5417 case 23: /* VNEG */
5418 gen_op_movl_T1_im(0);
ad69471c
PB
5419 if (size == 3)
5420 return 1;
5421 gen_neon_rsb(size);
9ee6e8bb
PB
5422 break;
5423 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5424 gen_op_movl_T1_im(0);
ad69471c 5425 gen_helper_neon_cgt_f32(CPU_T001);
9ee6e8bb
PB
5426 if (op == 27)
5427 gen_op_notl_T0();
5428 break;
5429 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5430 gen_op_movl_T1_im(0);
ad69471c 5431 gen_helper_neon_cge_f32(CPU_T001);
9ee6e8bb
PB
5432 if (op == 28)
5433 gen_op_notl_T0();
5434 break;
5435 case 26: /* Float VCEQ #0 */
5436 gen_op_movl_T1_im(0);
ad69471c 5437 gen_helper_neon_ceq_f32(CPU_T001);
9ee6e8bb
PB
5438 break;
5439 case 30: /* Float VABS */
4373f3ce 5440 gen_vfp_abs(0);
9ee6e8bb
PB
5441 break;
5442 case 31: /* Float VNEG */
4373f3ce 5443 gen_vfp_neg(0);
9ee6e8bb
PB
5444 break;
5445 case 32: /* VSWP */
5446 NEON_GET_REG(T1, rd, pass);
5447 NEON_SET_REG(T1, rm, pass);
5448 break;
5449 case 33: /* VTRN */
5450 NEON_GET_REG(T1, rd, pass);
5451 switch (size) {
ad69471c
PB
5452 case 0: gen_helper_neon_trn_u8(); break;
5453 case 1: gen_helper_neon_trn_u16(); break;
9ee6e8bb
PB
5454 case 2: abort();
5455 default: return 1;
5456 }
5457 NEON_SET_REG(T1, rm, pass);
5458 break;
5459 case 56: /* Integer VRECPE */
4373f3ce 5460 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5461 break;
5462 case 57: /* Integer VRSQRTE */
4373f3ce 5463 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5464 break;
5465 case 58: /* Float VRECPE */
4373f3ce 5466 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5467 break;
5468 case 59: /* Float VRSQRTE */
4373f3ce 5469 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5470 break;
5471 case 60: /* VCVT.F32.S32 */
4373f3ce 5472 gen_vfp_tosiz(0);
9ee6e8bb
PB
5473 break;
5474 case 61: /* VCVT.F32.U32 */
4373f3ce 5475 gen_vfp_touiz(0);
9ee6e8bb
PB
5476 break;
5477 case 62: /* VCVT.S32.F32 */
4373f3ce 5478 gen_vfp_sito(0);
9ee6e8bb
PB
5479 break;
5480 case 63: /* VCVT.U32.F32 */
4373f3ce 5481 gen_vfp_uito(0);
9ee6e8bb
PB
5482 break;
5483 default:
5484 /* Reserved: 21, 29, 39-56 */
5485 return 1;
5486 }
5487 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5488 tcg_gen_st_f32(cpu_F0s, cpu_env,
5489 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5490 } else {
5491 NEON_SET_REG(T0, rd, pass);
5492 }
5493 }
5494 break;
5495 }
5496 } else if ((insn & (1 << 10)) == 0) {
5497 /* VTBL, VTBX. */
5498 n = (insn >> 5) & 0x18;
9ee6e8bb 5499 if (insn & (1 << 6)) {
8f8e3aa4 5500 tmp = neon_load_reg(rd, 0);
9ee6e8bb 5501 } else {
8f8e3aa4
PB
5502 tmp = new_tmp();
5503 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5504 }
8f8e3aa4
PB
5505 tmp2 = neon_load_reg(rm, 0);
5506 gen_helper_neon_tbl(tmp2, tmp2, tmp, tcg_const_i32(rn),
5507 tcg_const_i32(n));
9ee6e8bb 5508 if (insn & (1 << 6)) {
8f8e3aa4 5509 tmp = neon_load_reg(rd, 1);
9ee6e8bb 5510 } else {
8f8e3aa4
PB
5511 tmp = new_tmp();
5512 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 5513 }
8f8e3aa4
PB
5514 tmp3 = neon_load_reg(rm, 1);
5515 gen_helper_neon_tbl(tmp3, tmp3, tmp, tcg_const_i32(rn),
5516 tcg_const_i32(n));
5517 neon_store_reg(rd, 0, tmp2);
5518 neon_store_reg(rd, 1, tmp2);
9ee6e8bb
PB
5519 } else if ((insn & 0x380) == 0) {
5520 /* VDUP */
5521 if (insn & (1 << 19)) {
5522 NEON_SET_REG(T0, rm, 1);
5523 } else {
5524 NEON_SET_REG(T0, rm, 0);
5525 }
5526 if (insn & (1 << 16)) {
ad69471c 5527 gen_neon_dup_u8(cpu_T[0], ((insn >> 17) & 3) * 8);
9ee6e8bb
PB
5528 } else if (insn & (1 << 17)) {
5529 if ((insn >> 18) & 1)
ad69471c 5530 gen_neon_dup_high16(cpu_T[0]);
9ee6e8bb 5531 else
ad69471c 5532 gen_neon_dup_low16(cpu_T[0]);
9ee6e8bb
PB
5533 }
5534 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5535 NEON_SET_REG(T0, rd, pass);
5536 }
5537 } else {
5538 return 1;
5539 }
5540 }
5541 }
5542 return 0;
5543}
5544
5545static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5546{
5547 int cpnum;
5548
5549 cpnum = (insn >> 8) & 0xf;
5550 if (arm_feature(env, ARM_FEATURE_XSCALE)
5551 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5552 return 1;
5553
5554 switch (cpnum) {
5555 case 0:
5556 case 1:
5557 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5558 return disas_iwmmxt_insn(env, s, insn);
5559 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5560 return disas_dsp_insn(env, s, insn);
5561 }
5562 return 1;
5563 case 10:
5564 case 11:
5565 return disas_vfp_insn (env, s, insn);
5566 case 15:
5567 return disas_cp15_insn (env, s, insn);
5568 default:
5569 /* Unknown coprocessor. See if the board has hooked it. */
5570 return disas_cp_insn (env, s, insn);
5571 }
5572}
5573
5e3f878a
PB
5574
5575/* Store a 64-bit value to a register pair. Clobbers val. */
5576static void gen_storeq_reg(DisasContext *s, int rlow, int rhigh, TCGv val)
5577{
5578 TCGv tmp;
5579 tmp = new_tmp();
5580 tcg_gen_trunc_i64_i32(tmp, val);
5581 store_reg(s, rlow, tmp);
5582 tmp = new_tmp();
5583 tcg_gen_shri_i64(val, val, 32);
5584 tcg_gen_trunc_i64_i32(tmp, val);
5585 store_reg(s, rhigh, tmp);
5586}
5587
5588/* load a 32-bit value from a register and perform a 64-bit accumulate. */
5589static void gen_addq_lo(DisasContext *s, TCGv val, int rlow)
5590{
5591 TCGv tmp;
5592 TCGv tmp2;
5593
5594 /* Load 64-bit value rd:rn. */
5595 tmp = tcg_temp_new(TCG_TYPE_I64);
5596 tmp2 = load_reg(s, rlow);
5597 tcg_gen_extu_i32_i64(tmp, tmp2);
5598 dead_tmp(tmp2);
5599 tcg_gen_add_i64(val, val, tmp);
5600}
5601
5602/* load and add a 64-bit value from a register pair. */
5603static void gen_addq(DisasContext *s, TCGv val, int rlow, int rhigh)
5604{
5605 TCGv tmp;
5606 TCGv tmp2;
5607
5608 /* Load 64-bit value rd:rn. */
5609 tmp = tcg_temp_new(TCG_TYPE_I64);
5610 tmp2 = load_reg(s, rhigh);
5611 tcg_gen_extu_i32_i64(tmp, tmp2);
5612 dead_tmp(tmp2);
5613 tcg_gen_shli_i64(tmp, tmp, 32);
5614 tcg_gen_add_i64(val, val, tmp);
5615
5616 tmp2 = load_reg(s, rlow);
5617 tcg_gen_extu_i32_i64(tmp, tmp2);
5618 dead_tmp(tmp2);
5619 tcg_gen_add_i64(val, val, tmp);
5620}
5621
5622/* Set N and Z flags from a 64-bit value. */
5623static void gen_logicq_cc(TCGv val)
5624{
5625 TCGv tmp = new_tmp();
5626 gen_helper_logicq_cc(tmp, val);
6fbe23d5
PB
5627 gen_logic_CC(tmp);
5628 dead_tmp(tmp);
5e3f878a
PB
5629}
5630
9ee6e8bb
PB
5631static void disas_arm_insn(CPUState * env, DisasContext *s)
5632{
5633 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5634 TCGv tmp;
3670669c 5635 TCGv tmp2;
6ddbc6e4 5636 TCGv tmp3;
b0109805 5637 TCGv addr;
9ee6e8bb
PB
5638
5639 insn = ldl_code(s->pc);
5640 s->pc += 4;
5641
5642 /* M variants do not implement ARM mode. */
5643 if (IS_M(env))
5644 goto illegal_op;
5645 cond = insn >> 28;
5646 if (cond == 0xf){
5647 /* Unconditional instructions. */
5648 if (((insn >> 25) & 7) == 1) {
5649 /* NEON Data processing. */
5650 if (!arm_feature(env, ARM_FEATURE_NEON))
5651 goto illegal_op;
5652
5653 if (disas_neon_data_insn(env, s, insn))
5654 goto illegal_op;
5655 return;
5656 }
5657 if ((insn & 0x0f100000) == 0x04000000) {
5658 /* NEON load/store. */
5659 if (!arm_feature(env, ARM_FEATURE_NEON))
5660 goto illegal_op;
5661
5662 if (disas_neon_ls_insn(env, s, insn))
5663 goto illegal_op;
5664 return;
5665 }
5666 if ((insn & 0x0d70f000) == 0x0550f000)
5667 return; /* PLD */
5668 else if ((insn & 0x0ffffdff) == 0x01010000) {
5669 ARCH(6);
5670 /* setend */
5671 if (insn & (1 << 9)) {
5672 /* BE8 mode not implemented. */
5673 goto illegal_op;
5674 }
5675 return;
5676 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5677 switch ((insn >> 4) & 0xf) {
5678 case 1: /* clrex */
5679 ARCH(6K);
8f8e3aa4 5680 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
5681 return;
5682 case 4: /* dsb */
5683 case 5: /* dmb */
5684 case 6: /* isb */
5685 ARCH(7);
5686 /* We don't emulate caches so these are a no-op. */
5687 return;
5688 default:
5689 goto illegal_op;
5690 }
5691 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5692 /* srs */
5693 uint32_t offset;
5694 if (IS_USER(s))
5695 goto illegal_op;
5696 ARCH(6);
5697 op1 = (insn & 0x1f);
5698 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5699 addr = load_reg(s, 13);
9ee6e8bb 5700 } else {
b0109805
PB
5701 addr = new_tmp();
5702 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5703 }
5704 i = (insn >> 23) & 3;
5705 switch (i) {
5706 case 0: offset = -4; break; /* DA */
5707 case 1: offset = -8; break; /* DB */
5708 case 2: offset = 0; break; /* IA */
5709 case 3: offset = 4; break; /* IB */
5710 default: abort();
5711 }
5712 if (offset)
b0109805
PB
5713 tcg_gen_addi_i32(addr, addr, offset);
5714 tmp = load_reg(s, 14);
5715 gen_st32(tmp, addr, 0);
5716 tmp = new_tmp();
5717 gen_helper_cpsr_read(tmp);
5718 tcg_gen_addi_i32(addr, addr, 4);
5719 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5720 if (insn & (1 << 21)) {
5721 /* Base writeback. */
5722 switch (i) {
5723 case 0: offset = -8; break;
5724 case 1: offset = -4; break;
5725 case 2: offset = 4; break;
5726 case 3: offset = 0; break;
5727 default: abort();
5728 }
5729 if (offset)
b0109805 5730 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5731 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5732 gen_movl_reg_T1(s, 13);
5733 } else {
b0109805 5734 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5735 }
b0109805
PB
5736 } else {
5737 dead_tmp(addr);
9ee6e8bb
PB
5738 }
5739 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5740 /* rfe */
5741 uint32_t offset;
5742 if (IS_USER(s))
5743 goto illegal_op;
5744 ARCH(6);
5745 rn = (insn >> 16) & 0xf;
b0109805 5746 addr = load_reg(s, rn);
9ee6e8bb
PB
5747 i = (insn >> 23) & 3;
5748 switch (i) {
b0109805
PB
5749 case 0: offset = -4; break; /* DA */
5750 case 1: offset = -8; break; /* DB */
5751 case 2: offset = 0; break; /* IA */
5752 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5753 default: abort();
5754 }
5755 if (offset)
b0109805
PB
5756 tcg_gen_addi_i32(addr, addr, offset);
5757 /* Load PC into tmp and CPSR into tmp2. */
5758 tmp = gen_ld32(addr, 0);
5759 tcg_gen_addi_i32(addr, addr, 4);
5760 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5761 if (insn & (1 << 21)) {
5762 /* Base writeback. */
5763 switch (i) {
b0109805
PB
5764 case 0: offset = -8; break;
5765 case 1: offset = -4; break;
5766 case 2: offset = 4; break;
5767 case 3: offset = 0; break;
9ee6e8bb
PB
5768 default: abort();
5769 }
5770 if (offset)
b0109805
PB
5771 tcg_gen_addi_i32(addr, addr, offset);
5772 store_reg(s, rn, addr);
5773 } else {
5774 dead_tmp(addr);
9ee6e8bb 5775 }
b0109805 5776 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5777 } else if ((insn & 0x0e000000) == 0x0a000000) {
5778 /* branch link and change to thumb (blx <offset>) */
5779 int32_t offset;
5780
5781 val = (uint32_t)s->pc;
d9ba4830
PB
5782 tmp = new_tmp();
5783 tcg_gen_movi_i32(tmp, val);
5784 store_reg(s, 14, tmp);
9ee6e8bb
PB
5785 /* Sign-extend the 24-bit offset */
5786 offset = (((int32_t)insn) << 8) >> 8;
5787 /* offset * 4 + bit24 * 2 + (thumb bit) */
5788 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5789 /* pipeline offset */
5790 val += 4;
d9ba4830 5791 gen_bx_im(s, val);
9ee6e8bb
PB
5792 return;
5793 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5794 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5795 /* iWMMXt register transfer. */
5796 if (env->cp15.c15_cpar & (1 << 1))
5797 if (!disas_iwmmxt_insn(env, s, insn))
5798 return;
5799 }
5800 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5801 /* Coprocessor double register transfer. */
5802 } else if ((insn & 0x0f000010) == 0x0e000010) {
5803 /* Additional coprocessor register transfer. */
5804 } else if ((insn & 0x0ff10010) == 0x01000000) {
5805 uint32_t mask;
5806 uint32_t val;
5807 /* cps (privileged) */
5808 if (IS_USER(s))
5809 return;
5810 mask = val = 0;
5811 if (insn & (1 << 19)) {
5812 if (insn & (1 << 8))
5813 mask |= CPSR_A;
5814 if (insn & (1 << 7))
5815 mask |= CPSR_I;
5816 if (insn & (1 << 6))
5817 mask |= CPSR_F;
5818 if (insn & (1 << 18))
5819 val |= mask;
5820 }
5821 if (insn & (1 << 14)) {
5822 mask |= CPSR_M;
5823 val |= (insn & 0x1f);
5824 }
5825 if (mask) {
5826 gen_op_movl_T0_im(val);
5827 gen_set_psr_T0(s, mask, 0);
5828 }
5829 return;
5830 }
5831 goto illegal_op;
5832 }
5833 if (cond != 0xe) {
5834 /* if not always execute, we generate a conditional jump to
5835 next instruction */
5836 s->condlabel = gen_new_label();
d9ba4830 5837 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5838 s->condjmp = 1;
5839 }
5840 if ((insn & 0x0f900000) == 0x03000000) {
5841 if ((insn & (1 << 21)) == 0) {
5842 ARCH(6T2);
5843 rd = (insn >> 12) & 0xf;
5844 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5845 if ((insn & (1 << 22)) == 0) {
5846 /* MOVW */
5e3f878a
PB
5847 tmp = new_tmp();
5848 tcg_gen_movi_i32(tmp, val);
9ee6e8bb
PB
5849 } else {
5850 /* MOVT */
5e3f878a 5851 tmp = load_reg(s, rd);
86831435 5852 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 5853 tcg_gen_ori_i32(tmp, tmp, val << 16);
9ee6e8bb 5854 }
5e3f878a 5855 store_reg(s, rd, tmp);
9ee6e8bb
PB
5856 } else {
5857 if (((insn >> 12) & 0xf) != 0xf)
5858 goto illegal_op;
5859 if (((insn >> 16) & 0xf) == 0) {
5860 gen_nop_hint(s, insn & 0xff);
5861 } else {
5862 /* CPSR = immediate */
5863 val = insn & 0xff;
5864 shift = ((insn >> 8) & 0xf) * 2;
5865 if (shift)
5866 val = (val >> shift) | (val << (32 - shift));
5867 gen_op_movl_T0_im(val);
5868 i = ((insn & (1 << 22)) != 0);
5869 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5870 goto illegal_op;
5871 }
5872 }
5873 } else if ((insn & 0x0f900000) == 0x01000000
5874 && (insn & 0x00000090) != 0x00000090) {
5875 /* miscellaneous instructions */
5876 op1 = (insn >> 21) & 3;
5877 sh = (insn >> 4) & 0xf;
5878 rm = insn & 0xf;
5879 switch (sh) {
5880 case 0x0: /* move program status register */
5881 if (op1 & 1) {
5882 /* PSR = reg */
5883 gen_movl_T0_reg(s, rm);
5884 i = ((op1 & 2) != 0);
5885 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5886 goto illegal_op;
5887 } else {
5888 /* reg = PSR */
5889 rd = (insn >> 12) & 0xf;
5890 if (op1 & 2) {
5891 if (IS_USER(s))
5892 goto illegal_op;
d9ba4830 5893 tmp = load_cpu_field(spsr);
9ee6e8bb 5894 } else {
d9ba4830
PB
5895 tmp = new_tmp();
5896 gen_helper_cpsr_read(tmp);
9ee6e8bb 5897 }
d9ba4830 5898 store_reg(s, rd, tmp);
9ee6e8bb
PB
5899 }
5900 break;
5901 case 0x1:
5902 if (op1 == 1) {
5903 /* branch/exchange thumb (bx). */
d9ba4830
PB
5904 tmp = load_reg(s, rm);
5905 gen_bx(s, tmp);
9ee6e8bb
PB
5906 } else if (op1 == 3) {
5907 /* clz */
5908 rd = (insn >> 12) & 0xf;
1497c961
PB
5909 tmp = load_reg(s, rm);
5910 gen_helper_clz(tmp, tmp);
5911 store_reg(s, rd, tmp);
9ee6e8bb
PB
5912 } else {
5913 goto illegal_op;
5914 }
5915 break;
5916 case 0x2:
5917 if (op1 == 1) {
5918 ARCH(5J); /* bxj */
5919 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5920 tmp = load_reg(s, rm);
5921 gen_bx(s, tmp);
9ee6e8bb
PB
5922 } else {
5923 goto illegal_op;
5924 }
5925 break;
5926 case 0x3:
5927 if (op1 != 1)
5928 goto illegal_op;
5929
5930 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5931 tmp = load_reg(s, rm);
5932 tmp2 = new_tmp();
5933 tcg_gen_movi_i32(tmp2, s->pc);
5934 store_reg(s, 14, tmp2);
5935 gen_bx(s, tmp);
9ee6e8bb
PB
5936 break;
5937 case 0x5: /* saturating add/subtract */
5938 rd = (insn >> 12) & 0xf;
5939 rn = (insn >> 16) & 0xf;
5e3f878a
PB
5940 tmp = load_reg(s, rn);
5941 tmp2 = load_reg(s, rn);
9ee6e8bb 5942 if (op1 & 2)
5e3f878a 5943 gen_helper_double_saturate(tmp2, tmp2);
9ee6e8bb 5944 if (op1 & 1)
5e3f878a 5945 gen_helper_sub_saturate(tmp, tmp, tmp2);
9ee6e8bb 5946 else
5e3f878a
PB
5947 gen_helper_add_saturate(tmp, tmp, tmp2);
5948 dead_tmp(tmp2);
5949 store_reg(s, rd, tmp);
9ee6e8bb
PB
5950 break;
5951 case 7: /* bkpt */
5952 gen_set_condexec(s);
5e3f878a 5953 gen_set_pc_im(s->pc - 4);
d9ba4830 5954 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5955 s->is_jmp = DISAS_JUMP;
5956 break;
5957 case 0x8: /* signed multiply */
5958 case 0xa:
5959 case 0xc:
5960 case 0xe:
5961 rs = (insn >> 8) & 0xf;
5962 rn = (insn >> 12) & 0xf;
5963 rd = (insn >> 16) & 0xf;
5964 if (op1 == 1) {
5965 /* (32 * 16) >> 16 */
5e3f878a
PB
5966 tmp = load_reg(s, rm);
5967 tmp2 = load_reg(s, rs);
9ee6e8bb 5968 if (sh & 4)
5e3f878a 5969 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 5970 else
5e3f878a
PB
5971 gen_sxth(tmp2);
5972 tmp2 = gen_muls_i64_i32(tmp, tmp2);
5973 tcg_gen_shri_i64(tmp2, tmp2, 16);
5974 tmp = new_tmp();
5975 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 5976 if ((sh & 2) == 0) {
5e3f878a
PB
5977 tmp2 = load_reg(s, rn);
5978 gen_helper_add_setq(tmp, tmp, tmp2);
5979 dead_tmp(tmp2);
9ee6e8bb 5980 }
5e3f878a 5981 store_reg(s, rd, tmp);
9ee6e8bb
PB
5982 } else {
5983 /* 16 * 16 */
5e3f878a
PB
5984 tmp = load_reg(s, rm);
5985 tmp2 = load_reg(s, rs);
5986 gen_mulxy(tmp, tmp2, sh & 2, sh & 4);
5987 dead_tmp(tmp2);
9ee6e8bb 5988 if (op1 == 2) {
5e3f878a
PB
5989 tmp = tcg_temp_new(TCG_TYPE_I64);
5990 tcg_gen_ext_i32_i64(tmp, cpu_T[0]);
5991 gen_addq(s, tmp, rn, rd);
5992 gen_storeq_reg(s, rn, rd, tmp);
9ee6e8bb
PB
5993 } else {
5994 if (op1 == 0) {
5e3f878a
PB
5995 tmp2 = load_reg(s, rn);
5996 gen_helper_add_setq(tmp, tmp, tmp2);
5997 dead_tmp(tmp2);
9ee6e8bb 5998 }
5e3f878a 5999 store_reg(s, rd, tmp);
9ee6e8bb
PB
6000 }
6001 }
6002 break;
6003 default:
6004 goto illegal_op;
6005 }
6006 } else if (((insn & 0x0e000000) == 0 &&
6007 (insn & 0x00000090) != 0x90) ||
6008 ((insn & 0x0e000000) == (1 << 25))) {
6009 int set_cc, logic_cc, shiftop;
6010
6011 op1 = (insn >> 21) & 0xf;
6012 set_cc = (insn >> 20) & 1;
6013 logic_cc = table_logic_cc[op1] & set_cc;
6014
6015 /* data processing instruction */
6016 if (insn & (1 << 25)) {
6017 /* immediate operand */
6018 val = insn & 0xff;
6019 shift = ((insn >> 8) & 0xf) * 2;
6020 if (shift)
6021 val = (val >> shift) | (val << (32 - shift));
6022 gen_op_movl_T1_im(val);
6023 if (logic_cc && shift)
b26eefb6 6024 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6025 } else {
6026 /* register */
6027 rm = (insn) & 0xf;
6028 gen_movl_T1_reg(s, rm);
6029 shiftop = (insn >> 5) & 3;
6030 if (!(insn & (1 << 4))) {
6031 shift = (insn >> 7) & 0x1f;
9a119ff6 6032 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6033 } else {
6034 rs = (insn >> 8) & 0xf;
8984bd2e
PB
6035 tmp = load_reg(s, rs);
6036 gen_arm_shift_reg(cpu_T[1], shiftop, tmp, logic_cc);
9ee6e8bb
PB
6037 }
6038 }
6039 if (op1 != 0x0f && op1 != 0x0d) {
6040 rn = (insn >> 16) & 0xf;
6041 gen_movl_T0_reg(s, rn);
6042 }
6043 rd = (insn >> 12) & 0xf;
6044 switch(op1) {
6045 case 0x00:
6046 gen_op_andl_T0_T1();
6047 gen_movl_reg_T0(s, rd);
6048 if (logic_cc)
6049 gen_op_logic_T0_cc();
6050 break;
6051 case 0x01:
6052 gen_op_xorl_T0_T1();
6053 gen_movl_reg_T0(s, rd);
6054 if (logic_cc)
6055 gen_op_logic_T0_cc();
6056 break;
6057 case 0x02:
6058 if (set_cc && rd == 15) {
6059 /* SUBS r15, ... is used for exception return. */
6060 if (IS_USER(s))
6061 goto illegal_op;
6062 gen_op_subl_T0_T1_cc();
6063 gen_exception_return(s);
6064 } else {
6065 if (set_cc)
6066 gen_op_subl_T0_T1_cc();
6067 else
6068 gen_op_subl_T0_T1();
6069 gen_movl_reg_T0(s, rd);
6070 }
6071 break;
6072 case 0x03:
6073 if (set_cc)
6074 gen_op_rsbl_T0_T1_cc();
6075 else
6076 gen_op_rsbl_T0_T1();
6077 gen_movl_reg_T0(s, rd);
6078 break;
6079 case 0x04:
6080 if (set_cc)
6081 gen_op_addl_T0_T1_cc();
6082 else
6083 gen_op_addl_T0_T1();
6084 gen_movl_reg_T0(s, rd);
6085 break;
6086 case 0x05:
6087 if (set_cc)
6088 gen_op_adcl_T0_T1_cc();
6089 else
b26eefb6 6090 gen_adc_T0_T1();
9ee6e8bb
PB
6091 gen_movl_reg_T0(s, rd);
6092 break;
6093 case 0x06:
6094 if (set_cc)
6095 gen_op_sbcl_T0_T1_cc();
6096 else
3670669c 6097 gen_sbc_T0_T1();
9ee6e8bb
PB
6098 gen_movl_reg_T0(s, rd);
6099 break;
6100 case 0x07:
6101 if (set_cc)
6102 gen_op_rscl_T0_T1_cc();
6103 else
3670669c 6104 gen_rsc_T0_T1();
9ee6e8bb
PB
6105 gen_movl_reg_T0(s, rd);
6106 break;
6107 case 0x08:
6108 if (set_cc) {
6109 gen_op_andl_T0_T1();
6110 gen_op_logic_T0_cc();
6111 }
6112 break;
6113 case 0x09:
6114 if (set_cc) {
6115 gen_op_xorl_T0_T1();
6116 gen_op_logic_T0_cc();
6117 }
6118 break;
6119 case 0x0a:
6120 if (set_cc) {
6121 gen_op_subl_T0_T1_cc();
6122 }
6123 break;
6124 case 0x0b:
6125 if (set_cc) {
6126 gen_op_addl_T0_T1_cc();
6127 }
6128 break;
6129 case 0x0c:
6130 gen_op_orl_T0_T1();
6131 gen_movl_reg_T0(s, rd);
6132 if (logic_cc)
6133 gen_op_logic_T0_cc();
6134 break;
6135 case 0x0d:
6136 if (logic_cc && rd == 15) {
6137 /* MOVS r15, ... is used for exception return. */
6138 if (IS_USER(s))
6139 goto illegal_op;
6140 gen_op_movl_T0_T1();
6141 gen_exception_return(s);
6142 } else {
6143 gen_movl_reg_T1(s, rd);
6144 if (logic_cc)
6145 gen_op_logic_T1_cc();
6146 }
6147 break;
6148 case 0x0e:
6149 gen_op_bicl_T0_T1();
6150 gen_movl_reg_T0(s, rd);
6151 if (logic_cc)
6152 gen_op_logic_T0_cc();
6153 break;
6154 default:
6155 case 0x0f:
6156 gen_op_notl_T1();
6157 gen_movl_reg_T1(s, rd);
6158 if (logic_cc)
6159 gen_op_logic_T1_cc();
6160 break;
6161 }
6162 } else {
6163 /* other instructions */
6164 op1 = (insn >> 24) & 0xf;
6165 switch(op1) {
6166 case 0x0:
6167 case 0x1:
6168 /* multiplies, extra load/stores */
6169 sh = (insn >> 5) & 3;
6170 if (sh == 0) {
6171 if (op1 == 0x0) {
6172 rd = (insn >> 16) & 0xf;
6173 rn = (insn >> 12) & 0xf;
6174 rs = (insn >> 8) & 0xf;
6175 rm = (insn) & 0xf;
6176 op1 = (insn >> 20) & 0xf;
6177 switch (op1) {
6178 case 0: case 1: case 2: case 3: case 6:
6179 /* 32 bit mul */
5e3f878a
PB
6180 tmp = load_reg(s, rs);
6181 tmp2 = load_reg(s, rm);
6182 tcg_gen_mul_i32(tmp, tmp, tmp2);
6183 dead_tmp(tmp2);
9ee6e8bb
PB
6184 if (insn & (1 << 22)) {
6185 /* Subtract (mls) */
6186 ARCH(6T2);
5e3f878a
PB
6187 tmp2 = load_reg(s, rn);
6188 tcg_gen_sub_i32(tmp, tmp2, tmp);
6189 dead_tmp(tmp2);
9ee6e8bb
PB
6190 } else if (insn & (1 << 21)) {
6191 /* Add */
5e3f878a
PB
6192 tmp2 = load_reg(s, rn);
6193 tcg_gen_add_i32(tmp, tmp, tmp2);
6194 dead_tmp(tmp2);
9ee6e8bb
PB
6195 }
6196 if (insn & (1 << 20))
5e3f878a
PB
6197 gen_logic_CC(tmp);
6198 store_reg(s, rd, tmp);
9ee6e8bb
PB
6199 break;
6200 default:
6201 /* 64 bit mul */
5e3f878a
PB
6202 tmp = load_reg(s, rs);
6203 tmp2 = load_reg(s, rm);
9ee6e8bb 6204 if (insn & (1 << 22))
5e3f878a 6205 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6206 else
5e3f878a 6207 tmp = gen_mulu_i64_i32(tmp, tmp2);
9ee6e8bb 6208 if (insn & (1 << 21)) /* mult accumulate */
5e3f878a 6209 gen_addq(s, tmp, rn, rd);
9ee6e8bb
PB
6210 if (!(insn & (1 << 23))) { /* double accumulate */
6211 ARCH(6);
5e3f878a
PB
6212 gen_addq_lo(s, tmp, rn);
6213 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
6214 }
6215 if (insn & (1 << 20))
5e3f878a
PB
6216 gen_logicq_cc(tmp);
6217 gen_storeq_reg(s, rn, rd, tmp);
9ee6e8bb
PB
6218 break;
6219 }
6220 } else {
6221 rn = (insn >> 16) & 0xf;
6222 rd = (insn >> 12) & 0xf;
6223 if (insn & (1 << 23)) {
6224 /* load/store exclusive */
6225 gen_movl_T1_reg(s, rn);
72f1c62f 6226 addr = cpu_T[1];
9ee6e8bb 6227 if (insn & (1 << 20)) {
8f8e3aa4
PB
6228 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6229 tmp = gen_ld32(addr, IS_USER(s));
6230 store_reg(s, rd, tmp);
9ee6e8bb 6231 } else {
8f8e3aa4 6232 int label = gen_new_label();
9ee6e8bb 6233 rm = insn & 0xf;
8f8e3aa4 6234 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6235 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6236 0, label);
8f8e3aa4
PB
6237 tmp = load_reg(s,rm);
6238 gen_st32(tmp, cpu_T[1], IS_USER(s));
2637a3be 6239 gen_set_label(label);
8f8e3aa4 6240 gen_movl_reg_T0(s, rd);
9ee6e8bb 6241 }
9ee6e8bb
PB
6242 } else {
6243 /* SWP instruction */
6244 rm = (insn) & 0xf;
6245
8984bd2e
PB
6246 /* ??? This is not really atomic. However we know
6247 we never have multiple CPUs running in parallel,
6248 so it is good enough. */
6249 addr = load_reg(s, rn);
6250 tmp = load_reg(s, rm);
9ee6e8bb 6251 if (insn & (1 << 22)) {
8984bd2e
PB
6252 tmp2 = gen_ld8u(addr, IS_USER(s));
6253 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb 6254 } else {
8984bd2e
PB
6255 tmp2 = gen_ld32(addr, IS_USER(s));
6256 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6257 }
8984bd2e
PB
6258 dead_tmp(addr);
6259 store_reg(s, rd, tmp2);
9ee6e8bb
PB
6260 }
6261 }
6262 } else {
6263 int address_offset;
6264 int load;
6265 /* Misc load/store */
6266 rn = (insn >> 16) & 0xf;
6267 rd = (insn >> 12) & 0xf;
b0109805 6268 addr = load_reg(s, rn);
9ee6e8bb 6269 if (insn & (1 << 24))
b0109805 6270 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
6271 address_offset = 0;
6272 if (insn & (1 << 20)) {
6273 /* load */
6274 switch(sh) {
6275 case 1:
b0109805 6276 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
6277 break;
6278 case 2:
b0109805 6279 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
6280 break;
6281 default:
6282 case 3:
b0109805 6283 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
6284 break;
6285 }
6286 load = 1;
6287 } else if (sh & 2) {
6288 /* doubleword */
6289 if (sh & 1) {
6290 /* store */
b0109805
PB
6291 tmp = load_reg(s, rd);
6292 gen_st32(tmp, addr, IS_USER(s));
6293 tcg_gen_addi_i32(addr, addr, 4);
6294 tmp = load_reg(s, rd + 1);
6295 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6296 load = 0;
6297 } else {
6298 /* load */
b0109805
PB
6299 tmp = gen_ld32(addr, IS_USER(s));
6300 store_reg(s, rd, tmp);
6301 tcg_gen_addi_i32(addr, addr, 4);
6302 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
6303 rd++;
6304 load = 1;
6305 }
6306 address_offset = -4;
6307 } else {
6308 /* store */
b0109805
PB
6309 tmp = load_reg(s, rd);
6310 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6311 load = 0;
6312 }
6313 /* Perform base writeback before the loaded value to
6314 ensure correct behavior with overlapping index registers.
6315 ldrd with base writeback is is undefined if the
6316 destination and index registers overlap. */
6317 if (!(insn & (1 << 24))) {
b0109805
PB
6318 gen_add_datah_offset(s, insn, address_offset, addr);
6319 store_reg(s, rn, addr);
9ee6e8bb
PB
6320 } else if (insn & (1 << 21)) {
6321 if (address_offset)
b0109805
PB
6322 tcg_gen_addi_i32(addr, addr, address_offset);
6323 store_reg(s, rn, addr);
6324 } else {
6325 dead_tmp(addr);
9ee6e8bb
PB
6326 }
6327 if (load) {
6328 /* Complete the load. */
b0109805 6329 store_reg(s, rd, tmp);
9ee6e8bb
PB
6330 }
6331 }
6332 break;
6333 case 0x4:
6334 case 0x5:
6335 goto do_ldst;
6336 case 0x6:
6337 case 0x7:
6338 if (insn & (1 << 4)) {
6339 ARCH(6);
6340 /* Armv6 Media instructions. */
6341 rm = insn & 0xf;
6342 rn = (insn >> 16) & 0xf;
2c0262af 6343 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
6344 rs = (insn >> 8) & 0xf;
6345 switch ((insn >> 23) & 3) {
6346 case 0: /* Parallel add/subtract. */
6347 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
6348 tmp = load_reg(s, rn);
6349 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6350 sh = (insn >> 5) & 7;
6351 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
6352 goto illegal_op;
6ddbc6e4
PB
6353 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6354 dead_tmp(tmp2);
6355 store_reg(s, rd, tmp);
9ee6e8bb
PB
6356 break;
6357 case 1:
6358 if ((insn & 0x00700020) == 0) {
6c95676b 6359 /* Halfword pack. */
3670669c
PB
6360 tmp = load_reg(s, rn);
6361 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6362 shift = (insn >> 7) & 0x1f;
6363 if (shift)
3670669c
PB
6364 tcg_gen_shli_i32(tmp2, tmp2, shift);
6365 if (insn & (1 << 6)) {
6366 /* pkhtb */
6367 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
86831435 6368 tcg_gen_ext16u_i32(tmp2, tmp2);
3670669c
PB
6369 } else {
6370 /* pkhbt */
86831435 6371 tcg_gen_ext16u_i32(tmp, tmp);
3670669c
PB
6372 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6373 }
6374 tcg_gen_or_i32(tmp, tmp, tmp2);
6375 store_reg(s, rd, tmp);
9ee6e8bb
PB
6376 } else if ((insn & 0x00200020) == 0x00200000) {
6377 /* [us]sat */
6ddbc6e4 6378 tmp = load_reg(s, rm);
9ee6e8bb
PB
6379 shift = (insn >> 7) & 0x1f;
6380 if (insn & (1 << 6)) {
6381 if (shift == 0)
6382 shift = 31;
6ddbc6e4 6383 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6384 } else {
6ddbc6e4 6385 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6386 }
6387 sh = (insn >> 16) & 0x1f;
6388 if (sh != 0) {
6389 if (insn & (1 << 22))
6ddbc6e4 6390 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6391 else
6ddbc6e4 6392 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6393 }
6ddbc6e4 6394 store_reg(s, rd, tmp);
9ee6e8bb
PB
6395 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6396 /* [us]sat16 */
6ddbc6e4 6397 tmp = load_reg(s, rm);
9ee6e8bb
PB
6398 sh = (insn >> 16) & 0x1f;
6399 if (sh != 0) {
6400 if (insn & (1 << 22))
6ddbc6e4 6401 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6402 else
6ddbc6e4 6403 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6404 }
6ddbc6e4 6405 store_reg(s, rd, tmp);
9ee6e8bb
PB
6406 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6407 /* Select bytes. */
6ddbc6e4
PB
6408 tmp = load_reg(s, rn);
6409 tmp2 = load_reg(s, rm);
6410 tmp3 = new_tmp();
6411 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6412 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6413 dead_tmp(tmp3);
6414 dead_tmp(tmp2);
6415 store_reg(s, rd, tmp);
9ee6e8bb 6416 } else if ((insn & 0x000003e0) == 0x00000060) {
5e3f878a 6417 tmp = load_reg(s, rm);
9ee6e8bb
PB
6418 shift = (insn >> 10) & 3;
6419 /* ??? In many cases it's not neccessary to do a
6420 rotate, a shift is sufficient. */
6421 if (shift != 0)
5e3f878a 6422 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
6423 op1 = (insn >> 20) & 7;
6424 switch (op1) {
5e3f878a
PB
6425 case 0: gen_sxtb16(tmp); break;
6426 case 2: gen_sxtb(tmp); break;
6427 case 3: gen_sxth(tmp); break;
6428 case 4: gen_uxtb16(tmp); break;
6429 case 6: gen_uxtb(tmp); break;
6430 case 7: gen_uxth(tmp); break;
9ee6e8bb
PB
6431 default: goto illegal_op;
6432 }
6433 if (rn != 15) {
5e3f878a 6434 tmp2 = load_reg(s, rn);
9ee6e8bb 6435 if ((op1 & 3) == 0) {
5e3f878a 6436 gen_add16(tmp, tmp2);
9ee6e8bb 6437 } else {
5e3f878a
PB
6438 tcg_gen_add_i32(tmp, tmp, tmp2);
6439 dead_tmp(tmp2);
9ee6e8bb
PB
6440 }
6441 }
6c95676b 6442 store_reg(s, rd, tmp);
9ee6e8bb
PB
6443 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6444 /* rev */
b0109805 6445 tmp = load_reg(s, rm);
9ee6e8bb
PB
6446 if (insn & (1 << 22)) {
6447 if (insn & (1 << 7)) {
b0109805 6448 gen_revsh(tmp);
9ee6e8bb
PB
6449 } else {
6450 ARCH(6T2);
b0109805 6451 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6452 }
6453 } else {
6454 if (insn & (1 << 7))
b0109805 6455 gen_rev16(tmp);
9ee6e8bb 6456 else
b0109805 6457 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6458 }
b0109805 6459 store_reg(s, rd, tmp);
9ee6e8bb
PB
6460 } else {
6461 goto illegal_op;
6462 }
6463 break;
6464 case 2: /* Multiplies (Type 3). */
5e3f878a
PB
6465 tmp = load_reg(s, rm);
6466 tmp2 = load_reg(s, rs);
9ee6e8bb
PB
6467 if (insn & (1 << 20)) {
6468 /* Signed multiply most significant [accumulate]. */
5e3f878a 6469 tmp2 = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 6470 if (insn & (1 << 5))
5e3f878a
PB
6471 tcg_gen_addi_i64(tmp2, tmp2, 0x80000000u);
6472 tcg_gen_shri_i64(tmp2, tmp2, 32);
6473 tmp = new_tmp();
6474 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb 6475 if (rn != 15) {
5e3f878a 6476 tmp2 = load_reg(s, rn);
9ee6e8bb 6477 if (insn & (1 << 6)) {
5e3f878a 6478 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6479 } else {
5e3f878a 6480 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6481 }
5e3f878a 6482 dead_tmp(tmp2);
9ee6e8bb 6483 }
5e3f878a 6484 store_reg(s, rd, tmp);
9ee6e8bb
PB
6485 } else {
6486 if (insn & (1 << 5))
5e3f878a
PB
6487 gen_swap_half(tmp2);
6488 gen_smul_dual(tmp, tmp2);
6489 /* This addition cannot overflow. */
6490 if (insn & (1 << 6)) {
6491 tcg_gen_sub_i32(tmp, tmp, tmp2);
6492 } else {
6493 tcg_gen_add_i32(tmp, tmp, tmp2);
6494 }
6495 dead_tmp(tmp2);
9ee6e8bb 6496 if (insn & (1 << 22)) {
5e3f878a
PB
6497 /* smlald, smlsld */
6498 tmp2 = tcg_temp_new(TCG_TYPE_I64);
6499 tcg_gen_ext_i32_i64(tmp2, tmp);
6500 dead_tmp(tmp);
6501 gen_addq(s, tmp2, rn, rd);
6502 gen_storeq_reg(s, rn, rd, tmp2);
9ee6e8bb 6503 } else {
5e3f878a 6504 /* smuad, smusd, smlad, smlsd */
9ee6e8bb
PB
6505 if (rn != 15)
6506 {
5e3f878a
PB
6507 tmp2 = load_reg(s, rn);
6508 gen_helper_add_setq(tmp, tmp, tmp2);
6509 dead_tmp(tmp2);
9ee6e8bb 6510 }
5e3f878a 6511 store_reg(s, rd, tmp);
9ee6e8bb
PB
6512 }
6513 }
6514 break;
6515 case 3:
6516 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6517 switch (op1) {
6518 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6519 ARCH(6);
6520 tmp = load_reg(s, rm);
6521 tmp2 = load_reg(s, rs);
6522 gen_helper_usad8(tmp, tmp, tmp2);
6523 dead_tmp(tmp2);
9ee6e8bb 6524 if (rn != 15) {
6ddbc6e4
PB
6525 tmp2 = load_reg(s, rn);
6526 tcg_gen_add_i32(tmp, tmp, tmp2);
6527 dead_tmp(tmp2);
9ee6e8bb 6528 }
6ddbc6e4 6529 store_reg(s, rd, tmp);
9ee6e8bb
PB
6530 break;
6531 case 0x20: case 0x24: case 0x28: case 0x2c:
6532 /* Bitfield insert/clear. */
6533 ARCH(6T2);
6534 shift = (insn >> 7) & 0x1f;
6535 i = (insn >> 16) & 0x1f;
6536 i = i + 1 - shift;
6537 if (rm == 15) {
5e3f878a
PB
6538 tmp = new_tmp();
6539 tcg_gen_movi_i32(tmp, 0);
9ee6e8bb 6540 } else {
5e3f878a 6541 tmp = load_reg(s, rm);
9ee6e8bb
PB
6542 }
6543 if (i != 32) {
5e3f878a 6544 tmp2 = load_reg(s, rd);
8f8e3aa4 6545 gen_bfi(tmp, tmp2, tmp, shift, (1u << i) - 1);
5e3f878a 6546 dead_tmp(tmp2);
9ee6e8bb 6547 }
5e3f878a 6548 store_reg(s, rd, tmp);
9ee6e8bb
PB
6549 break;
6550 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6551 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5e3f878a 6552 tmp = load_reg(s, rm);
9ee6e8bb
PB
6553 shift = (insn >> 7) & 0x1f;
6554 i = ((insn >> 16) & 0x1f) + 1;
6555 if (shift + i > 32)
6556 goto illegal_op;
6557 if (i < 32) {
6558 if (op1 & 0x20) {
5e3f878a 6559 gen_ubfx(tmp, shift, (1u << i) - 1);
9ee6e8bb 6560 } else {
5e3f878a 6561 gen_sbfx(tmp, shift, i);
9ee6e8bb
PB
6562 }
6563 }
5e3f878a 6564 store_reg(s, rd, tmp);
9ee6e8bb
PB
6565 break;
6566 default:
6567 goto illegal_op;
6568 }
6569 break;
6570 }
6571 break;
6572 }
6573 do_ldst:
6574 /* Check for undefined extension instructions
6575 * per the ARM Bible IE:
6576 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6577 */
6578 sh = (0xf << 20) | (0xf << 4);
6579 if (op1 == 0x7 && ((insn & sh) == sh))
6580 {
6581 goto illegal_op;
6582 }
6583 /* load/store byte/word */
6584 rn = (insn >> 16) & 0xf;
6585 rd = (insn >> 12) & 0xf;
b0109805 6586 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6587 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6588 if (insn & (1 << 24))
b0109805 6589 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6590 if (insn & (1 << 20)) {
6591 /* load */
6592 s->is_mem = 1;
9ee6e8bb 6593 if (insn & (1 << 22)) {
b0109805 6594 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6595 } else {
b0109805 6596 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6597 }
9ee6e8bb
PB
6598 } else {
6599 /* store */
b0109805 6600 tmp = load_reg(s, rd);
9ee6e8bb 6601 if (insn & (1 << 22))
b0109805 6602 gen_st8(tmp, tmp2, i);
9ee6e8bb 6603 else
b0109805 6604 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6605 }
6606 if (!(insn & (1 << 24))) {
b0109805
PB
6607 gen_add_data_offset(s, insn, tmp2);
6608 store_reg(s, rn, tmp2);
6609 } else if (insn & (1 << 21)) {
6610 store_reg(s, rn, tmp2);
6611 } else {
6612 dead_tmp(tmp2);
9ee6e8bb
PB
6613 }
6614 if (insn & (1 << 20)) {
6615 /* Complete the load. */
6616 if (rd == 15)
b0109805 6617 gen_bx(s, tmp);
9ee6e8bb 6618 else
b0109805 6619 store_reg(s, rd, tmp);
9ee6e8bb
PB
6620 }
6621 break;
6622 case 0x08:
6623 case 0x09:
6624 {
6625 int j, n, user, loaded_base;
b0109805 6626 TCGv loaded_var;
9ee6e8bb
PB
6627 /* load/store multiple words */
6628 /* XXX: store correct base if write back */
6629 user = 0;
6630 if (insn & (1 << 22)) {
6631 if (IS_USER(s))
6632 goto illegal_op; /* only usable in supervisor mode */
6633
6634 if ((insn & (1 << 15)) == 0)
6635 user = 1;
6636 }
6637 rn = (insn >> 16) & 0xf;
b0109805 6638 addr = load_reg(s, rn);
9ee6e8bb
PB
6639
6640 /* compute total size */
6641 loaded_base = 0;
6642 n = 0;
6643 for(i=0;i<16;i++) {
6644 if (insn & (1 << i))
6645 n++;
6646 }
6647 /* XXX: test invalid n == 0 case ? */
6648 if (insn & (1 << 23)) {
6649 if (insn & (1 << 24)) {
6650 /* pre increment */
b0109805 6651 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6652 } else {
6653 /* post increment */
6654 }
6655 } else {
6656 if (insn & (1 << 24)) {
6657 /* pre decrement */
b0109805 6658 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6659 } else {
6660 /* post decrement */
6661 if (n != 1)
b0109805 6662 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6663 }
6664 }
6665 j = 0;
6666 for(i=0;i<16;i++) {
6667 if (insn & (1 << i)) {
6668 if (insn & (1 << 20)) {
6669 /* load */
b0109805 6670 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6671 if (i == 15) {
b0109805 6672 gen_bx(s, tmp);
9ee6e8bb 6673 } else if (user) {
b0109805
PB
6674 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6675 dead_tmp(tmp);
9ee6e8bb 6676 } else if (i == rn) {
b0109805 6677 loaded_var = tmp;
9ee6e8bb
PB
6678 loaded_base = 1;
6679 } else {
b0109805 6680 store_reg(s, i, tmp);
9ee6e8bb
PB
6681 }
6682 } else {
6683 /* store */
6684 if (i == 15) {
6685 /* special case: r15 = PC + 8 */
6686 val = (long)s->pc + 4;
b0109805
PB
6687 tmp = new_tmp();
6688 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6689 } else if (user) {
b0109805
PB
6690 tmp = new_tmp();
6691 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6692 } else {
b0109805 6693 tmp = load_reg(s, i);
9ee6e8bb 6694 }
b0109805 6695 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6696 }
6697 j++;
6698 /* no need to add after the last transfer */
6699 if (j != n)
b0109805 6700 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6701 }
6702 }
6703 if (insn & (1 << 21)) {
6704 /* write back */
6705 if (insn & (1 << 23)) {
6706 if (insn & (1 << 24)) {
6707 /* pre increment */
6708 } else {
6709 /* post increment */
b0109805 6710 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6711 }
6712 } else {
6713 if (insn & (1 << 24)) {
6714 /* pre decrement */
6715 if (n != 1)
b0109805 6716 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6717 } else {
6718 /* post decrement */
b0109805 6719 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6720 }
6721 }
b0109805
PB
6722 store_reg(s, rn, addr);
6723 } else {
6724 dead_tmp(addr);
9ee6e8bb
PB
6725 }
6726 if (loaded_base) {
b0109805 6727 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6728 }
6729 if ((insn & (1 << 22)) && !user) {
6730 /* Restore CPSR from SPSR. */
d9ba4830
PB
6731 tmp = load_cpu_field(spsr);
6732 gen_set_cpsr(tmp, 0xffffffff);
6733 dead_tmp(tmp);
9ee6e8bb
PB
6734 s->is_jmp = DISAS_UPDATE;
6735 }
6736 }
6737 break;
6738 case 0xa:
6739 case 0xb:
6740 {
6741 int32_t offset;
6742
6743 /* branch (and link) */
6744 val = (int32_t)s->pc;
6745 if (insn & (1 << 24)) {
5e3f878a
PB
6746 tmp = new_tmp();
6747 tcg_gen_movi_i32(tmp, val);
6748 store_reg(s, 14, tmp);
9ee6e8bb
PB
6749 }
6750 offset = (((int32_t)insn << 8) >> 8);
6751 val += (offset << 2) + 4;
6752 gen_jmp(s, val);
6753 }
6754 break;
6755 case 0xc:
6756 case 0xd:
6757 case 0xe:
6758 /* Coprocessor. */
6759 if (disas_coproc_insn(env, s, insn))
6760 goto illegal_op;
6761 break;
6762 case 0xf:
6763 /* swi */
5e3f878a 6764 gen_set_pc_im(s->pc);
9ee6e8bb
PB
6765 s->is_jmp = DISAS_SWI;
6766 break;
6767 default:
6768 illegal_op:
6769 gen_set_condexec(s);
5e3f878a 6770 gen_set_pc_im(s->pc - 4);
d9ba4830 6771 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6772 s->is_jmp = DISAS_JUMP;
6773 break;
6774 }
6775 }
6776}
6777
6778/* Return true if this is a Thumb-2 logical op. */
6779static int
6780thumb2_logic_op(int op)
6781{
6782 return (op < 8);
6783}
6784
6785/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6786 then set condition code flags based on the result of the operation.
6787 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6788 to the high bit of T1.
6789 Returns zero if the opcode is valid. */
6790
6791static int
6792gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6793{
6794 int logic_cc;
6795
6796 logic_cc = 0;
6797 switch (op) {
6798 case 0: /* and */
6799 gen_op_andl_T0_T1();
6800 logic_cc = conds;
6801 break;
6802 case 1: /* bic */
6803 gen_op_bicl_T0_T1();
6804 logic_cc = conds;
6805 break;
6806 case 2: /* orr */
6807 gen_op_orl_T0_T1();
6808 logic_cc = conds;
6809 break;
6810 case 3: /* orn */
6811 gen_op_notl_T1();
6812 gen_op_orl_T0_T1();
6813 logic_cc = conds;
6814 break;
6815 case 4: /* eor */
6816 gen_op_xorl_T0_T1();
6817 logic_cc = conds;
6818 break;
6819 case 8: /* add */
6820 if (conds)
6821 gen_op_addl_T0_T1_cc();
6822 else
6823 gen_op_addl_T0_T1();
6824 break;
6825 case 10: /* adc */
6826 if (conds)
6827 gen_op_adcl_T0_T1_cc();
6828 else
b26eefb6 6829 gen_adc_T0_T1();
9ee6e8bb
PB
6830 break;
6831 case 11: /* sbc */
6832 if (conds)
6833 gen_op_sbcl_T0_T1_cc();
6834 else
3670669c 6835 gen_sbc_T0_T1();
9ee6e8bb
PB
6836 break;
6837 case 13: /* sub */
6838 if (conds)
6839 gen_op_subl_T0_T1_cc();
6840 else
6841 gen_op_subl_T0_T1();
6842 break;
6843 case 14: /* rsb */
6844 if (conds)
6845 gen_op_rsbl_T0_T1_cc();
6846 else
6847 gen_op_rsbl_T0_T1();
6848 break;
6849 default: /* 5, 6, 7, 9, 12, 15. */
6850 return 1;
6851 }
6852 if (logic_cc) {
6853 gen_op_logic_T0_cc();
6854 if (shifter_out)
b26eefb6 6855 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6856 }
6857 return 0;
6858}
6859
6860/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6861 is not legal. */
6862static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6863{
b0109805 6864 uint32_t insn, imm, shift, offset;
9ee6e8bb 6865 uint32_t rd, rn, rm, rs;
b26eefb6 6866 TCGv tmp;
6ddbc6e4
PB
6867 TCGv tmp2;
6868 TCGv tmp3;
b0109805 6869 TCGv addr;
9ee6e8bb
PB
6870 int op;
6871 int shiftop;
6872 int conds;
6873 int logic_cc;
6874
6875 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6876 || arm_feature (env, ARM_FEATURE_M))) {
601d70b9 6877 /* Thumb-1 cores may need to treat bl and blx as a pair of
9ee6e8bb
PB
6878 16-bit instructions to get correct prefetch abort behavior. */
6879 insn = insn_hw1;
6880 if ((insn & (1 << 12)) == 0) {
6881 /* Second half of blx. */
6882 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6883 tmp = load_reg(s, 14);
6884 tcg_gen_addi_i32(tmp, tmp, offset);
6885 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 6886
d9ba4830 6887 tmp2 = new_tmp();
b0109805 6888 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6889 store_reg(s, 14, tmp2);
6890 gen_bx(s, tmp);
9ee6e8bb
PB
6891 return 0;
6892 }
6893 if (insn & (1 << 11)) {
6894 /* Second half of bl. */
6895 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830 6896 tmp = load_reg(s, 14);
6a0d8a1d 6897 tcg_gen_addi_i32(tmp, tmp, offset);
9ee6e8bb 6898
d9ba4830 6899 tmp2 = new_tmp();
b0109805 6900 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6901 store_reg(s, 14, tmp2);
6902 gen_bx(s, tmp);
9ee6e8bb
PB
6903 return 0;
6904 }
6905 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6906 /* Instruction spans a page boundary. Implement it as two
6907 16-bit instructions in case the second half causes an
6908 prefetch abort. */
6909 offset = ((int32_t)insn << 21) >> 9;
b0109805 6910 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
6911 gen_movl_reg_T0(s, 14);
6912 return 0;
6913 }
6914 /* Fall through to 32-bit decode. */
6915 }
6916
6917 insn = lduw_code(s->pc);
6918 s->pc += 2;
6919 insn |= (uint32_t)insn_hw1 << 16;
6920
6921 if ((insn & 0xf800e800) != 0xf000e800) {
6922 ARCH(6T2);
6923 }
6924
6925 rn = (insn >> 16) & 0xf;
6926 rs = (insn >> 12) & 0xf;
6927 rd = (insn >> 8) & 0xf;
6928 rm = insn & 0xf;
6929 switch ((insn >> 25) & 0xf) {
6930 case 0: case 1: case 2: case 3:
6931 /* 16-bit instructions. Should never happen. */
6932 abort();
6933 case 4:
6934 if (insn & (1 << 22)) {
6935 /* Other load/store, table branch. */
6936 if (insn & 0x01200000) {
6937 /* Load/store doubleword. */
6938 if (rn == 15) {
b0109805
PB
6939 addr = new_tmp();
6940 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 6941 } else {
b0109805 6942 addr = load_reg(s, rn);
9ee6e8bb
PB
6943 }
6944 offset = (insn & 0xff) * 4;
6945 if ((insn & (1 << 23)) == 0)
6946 offset = -offset;
6947 if (insn & (1 << 24)) {
b0109805 6948 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
6949 offset = 0;
6950 }
6951 if (insn & (1 << 20)) {
6952 /* ldrd */
b0109805
PB
6953 tmp = gen_ld32(addr, IS_USER(s));
6954 store_reg(s, rs, tmp);
6955 tcg_gen_addi_i32(addr, addr, 4);
6956 tmp = gen_ld32(addr, IS_USER(s));
6957 store_reg(s, rd, tmp);
9ee6e8bb
PB
6958 } else {
6959 /* strd */
b0109805
PB
6960 tmp = load_reg(s, rs);
6961 gen_st32(tmp, addr, IS_USER(s));
6962 tcg_gen_addi_i32(addr, addr, 4);
6963 tmp = load_reg(s, rd);
6964 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6965 }
6966 if (insn & (1 << 21)) {
6967 /* Base writeback. */
6968 if (rn == 15)
6969 goto illegal_op;
b0109805
PB
6970 tcg_gen_addi_i32(addr, addr, offset - 4);
6971 store_reg(s, rn, addr);
6972 } else {
6973 dead_tmp(addr);
9ee6e8bb
PB
6974 }
6975 } else if ((insn & (1 << 23)) == 0) {
6976 /* Load/store exclusive word. */
2c0262af 6977 gen_movl_T1_reg(s, rn);
72f1c62f 6978 addr = cpu_T[1];
2c0262af 6979 if (insn & (1 << 20)) {
8f8e3aa4
PB
6980 gen_helper_mark_exclusive(cpu_env, cpu_T[1]);
6981 tmp = gen_ld32(addr, IS_USER(s));
6982 store_reg(s, rd, tmp);
9ee6e8bb 6983 } else {
8f8e3aa4
PB
6984 int label = gen_new_label();
6985 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a
PB
6986 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0],
6987 0, label);
8f8e3aa4
PB
6988 tmp = load_reg(s, rs);
6989 gen_st32(tmp, cpu_T[1], IS_USER(s));
6990 gen_set_label(label);
6991 gen_movl_reg_T0(s, rd);
9ee6e8bb 6992 }
9ee6e8bb
PB
6993 } else if ((insn & (1 << 6)) == 0) {
6994 /* Table Branch. */
6995 if (rn == 15) {
b0109805
PB
6996 addr = new_tmp();
6997 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 6998 } else {
b0109805 6999 addr = load_reg(s, rn);
9ee6e8bb 7000 }
b26eefb6 7001 tmp = load_reg(s, rm);
b0109805 7002 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
7003 if (insn & (1 << 4)) {
7004 /* tbh */
b0109805 7005 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7006 dead_tmp(tmp);
b0109805 7007 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 7008 } else { /* tbb */
b26eefb6 7009 dead_tmp(tmp);
b0109805 7010 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7011 }
b0109805
PB
7012 dead_tmp(addr);
7013 tcg_gen_shli_i32(tmp, tmp, 1);
7014 tcg_gen_addi_i32(tmp, tmp, s->pc);
7015 store_reg(s, 15, tmp);
9ee6e8bb
PB
7016 } else {
7017 /* Load/store exclusive byte/halfword/doubleword. */
8f8e3aa4
PB
7018 /* ??? These are not really atomic. However we know
7019 we never have multiple CPUs running in parallel,
7020 so it is good enough. */
9ee6e8bb 7021 op = (insn >> 4) & 0x3;
8f8e3aa4
PB
7022 /* Must use a global reg for the address because we have
7023 a conditional branch in the store instruction. */
9ee6e8bb 7024 gen_movl_T1_reg(s, rn);
8f8e3aa4 7025 addr = cpu_T[1];
9ee6e8bb 7026 if (insn & (1 << 20)) {
8f8e3aa4 7027 gen_helper_mark_exclusive(cpu_env, addr);
9ee6e8bb
PB
7028 switch (op) {
7029 case 0:
8f8e3aa4 7030 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 7031 break;
2c0262af 7032 case 1:
8f8e3aa4 7033 tmp = gen_ld16u(addr, IS_USER(s));
2c0262af 7034 break;
9ee6e8bb 7035 case 3:
8f8e3aa4
PB
7036 tmp = gen_ld32(addr, IS_USER(s));
7037 tcg_gen_addi_i32(addr, addr, 4);
7038 tmp2 = gen_ld32(addr, IS_USER(s));
7039 store_reg(s, rd, tmp2);
2c0262af
FB
7040 break;
7041 default:
9ee6e8bb
PB
7042 goto illegal_op;
7043 }
8f8e3aa4 7044 store_reg(s, rs, tmp);
9ee6e8bb 7045 } else {
8f8e3aa4
PB
7046 int label = gen_new_label();
7047 /* Must use a global that is not killed by the branch. */
7048 gen_helper_test_exclusive(cpu_T[0], cpu_env, addr);
cb63669a 7049 tcg_gen_brcondi_i32(TCG_COND_NE, cpu_T[0], 0, label);
8f8e3aa4 7050 tmp = load_reg(s, rs);
9ee6e8bb
PB
7051 switch (op) {
7052 case 0:
8f8e3aa4 7053 gen_st8(tmp, addr, IS_USER(s));
9ee6e8bb
PB
7054 break;
7055 case 1:
8f8e3aa4 7056 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb 7057 break;
2c0262af 7058 case 3:
8f8e3aa4
PB
7059 gen_st32(tmp, addr, IS_USER(s));
7060 tcg_gen_addi_i32(addr, addr, 4);
7061 tmp = load_reg(s, rd);
7062 gen_st32(tmp, addr, IS_USER(s));
2c0262af 7063 break;
9ee6e8bb
PB
7064 default:
7065 goto illegal_op;
2c0262af 7066 }
8f8e3aa4 7067 gen_set_label(label);
9ee6e8bb
PB
7068 gen_movl_reg_T0(s, rm);
7069 }
7070 }
7071 } else {
7072 /* Load/store multiple, RFE, SRS. */
7073 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
7074 /* Not available in user mode. */
b0109805 7075 if (IS_USER(s))
9ee6e8bb
PB
7076 goto illegal_op;
7077 if (insn & (1 << 20)) {
7078 /* rfe */
b0109805
PB
7079 addr = load_reg(s, rn);
7080 if ((insn & (1 << 24)) == 0)
7081 tcg_gen_addi_i32(addr, addr, -8);
7082 /* Load PC into tmp and CPSR into tmp2. */
7083 tmp = gen_ld32(addr, 0);
7084 tcg_gen_addi_i32(addr, addr, 4);
7085 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
7086 if (insn & (1 << 21)) {
7087 /* Base writeback. */
b0109805
PB
7088 if (insn & (1 << 24)) {
7089 tcg_gen_addi_i32(addr, addr, 4);
7090 } else {
7091 tcg_gen_addi_i32(addr, addr, -4);
7092 }
7093 store_reg(s, rn, addr);
7094 } else {
7095 dead_tmp(addr);
9ee6e8bb 7096 }
b0109805 7097 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
7098 } else {
7099 /* srs */
7100 op = (insn & 0x1f);
7101 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7102 addr = load_reg(s, 13);
9ee6e8bb 7103 } else {
b0109805
PB
7104 addr = new_tmp();
7105 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
7106 }
7107 if ((insn & (1 << 24)) == 0) {
b0109805 7108 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 7109 }
b0109805
PB
7110 tmp = load_reg(s, 14);
7111 gen_st32(tmp, addr, 0);
7112 tcg_gen_addi_i32(addr, addr, 4);
7113 tmp = new_tmp();
7114 gen_helper_cpsr_read(tmp);
7115 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
7116 if (insn & (1 << 21)) {
7117 if ((insn & (1 << 24)) == 0) {
b0109805 7118 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 7119 } else {
b0109805 7120 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7121 }
7122 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 7123 store_reg(s, 13, addr);
9ee6e8bb 7124 } else {
b0109805
PB
7125 gen_helper_set_r13_banked(cpu_env,
7126 tcg_const_i32(op), addr);
9ee6e8bb 7127 }
b0109805
PB
7128 } else {
7129 dead_tmp(addr);
9ee6e8bb
PB
7130 }
7131 }
7132 } else {
7133 int i;
7134 /* Load/store multiple. */
b0109805 7135 addr = load_reg(s, rn);
9ee6e8bb
PB
7136 offset = 0;
7137 for (i = 0; i < 16; i++) {
7138 if (insn & (1 << i))
7139 offset += 4;
7140 }
7141 if (insn & (1 << 24)) {
b0109805 7142 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7143 }
7144
7145 for (i = 0; i < 16; i++) {
7146 if ((insn & (1 << i)) == 0)
7147 continue;
7148 if (insn & (1 << 20)) {
7149 /* Load. */
b0109805 7150 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 7151 if (i == 15) {
b0109805 7152 gen_bx(s, tmp);
9ee6e8bb 7153 } else {
b0109805 7154 store_reg(s, i, tmp);
9ee6e8bb
PB
7155 }
7156 } else {
7157 /* Store. */
b0109805
PB
7158 tmp = load_reg(s, i);
7159 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 7160 }
b0109805 7161 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
7162 }
7163 if (insn & (1 << 21)) {
7164 /* Base register writeback. */
7165 if (insn & (1 << 24)) {
b0109805 7166 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
7167 }
7168 /* Fault if writeback register is in register list. */
7169 if (insn & (1 << rn))
7170 goto illegal_op;
b0109805
PB
7171 store_reg(s, rn, addr);
7172 } else {
7173 dead_tmp(addr);
9ee6e8bb
PB
7174 }
7175 }
7176 }
7177 break;
7178 case 5: /* Data processing register constant shift. */
7179 if (rn == 15)
7180 gen_op_movl_T0_im(0);
7181 else
7182 gen_movl_T0_reg(s, rn);
7183 gen_movl_T1_reg(s, rm);
7184 op = (insn >> 21) & 0xf;
7185 shiftop = (insn >> 4) & 3;
7186 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
7187 conds = (insn & (1 << 20)) != 0;
7188 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 7189 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
7190 if (gen_thumb2_data_op(s, op, conds, 0))
7191 goto illegal_op;
7192 if (rd != 15)
7193 gen_movl_reg_T0(s, rd);
7194 break;
7195 case 13: /* Misc data processing. */
7196 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
7197 if (op < 4 && (insn & 0xf000) != 0xf000)
7198 goto illegal_op;
7199 switch (op) {
7200 case 0: /* Register controlled shift. */
8984bd2e
PB
7201 tmp = load_reg(s, rn);
7202 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7203 if ((insn & 0x70) != 0)
7204 goto illegal_op;
7205 op = (insn >> 21) & 3;
8984bd2e
PB
7206 logic_cc = (insn & (1 << 20)) != 0;
7207 gen_arm_shift_reg(tmp, op, tmp2, logic_cc);
7208 if (logic_cc)
7209 gen_logic_CC(tmp);
7210 store_reg(s, rd, tmp);
9ee6e8bb
PB
7211 break;
7212 case 1: /* Sign/zero extend. */
5e3f878a 7213 tmp = load_reg(s, rm);
9ee6e8bb
PB
7214 shift = (insn >> 4) & 3;
7215 /* ??? In many cases it's not neccessary to do a
7216 rotate, a shift is sufficient. */
7217 if (shift != 0)
5e3f878a 7218 tcg_gen_rori_i32(tmp, tmp, shift * 8);
9ee6e8bb
PB
7219 op = (insn >> 20) & 7;
7220 switch (op) {
5e3f878a
PB
7221 case 0: gen_sxth(tmp); break;
7222 case 1: gen_uxth(tmp); break;
7223 case 2: gen_sxtb16(tmp); break;
7224 case 3: gen_uxtb16(tmp); break;
7225 case 4: gen_sxtb(tmp); break;
7226 case 5: gen_uxtb(tmp); break;
9ee6e8bb
PB
7227 default: goto illegal_op;
7228 }
7229 if (rn != 15) {
5e3f878a 7230 tmp2 = load_reg(s, rn);
9ee6e8bb 7231 if ((op >> 1) == 1) {
5e3f878a 7232 gen_add16(tmp, tmp2);
9ee6e8bb 7233 } else {
5e3f878a
PB
7234 tcg_gen_add_i32(tmp, tmp, tmp2);
7235 dead_tmp(tmp2);
9ee6e8bb
PB
7236 }
7237 }
5e3f878a 7238 store_reg(s, rd, tmp);
9ee6e8bb
PB
7239 break;
7240 case 2: /* SIMD add/subtract. */
7241 op = (insn >> 20) & 7;
7242 shift = (insn >> 4) & 7;
7243 if ((op & 3) == 3 || (shift & 3) == 3)
7244 goto illegal_op;
6ddbc6e4
PB
7245 tmp = load_reg(s, rn);
7246 tmp2 = load_reg(s, rm);
7247 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
7248 dead_tmp(tmp2);
7249 store_reg(s, rd, tmp);
9ee6e8bb
PB
7250 break;
7251 case 3: /* Other data processing. */
7252 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
7253 if (op < 4) {
7254 /* Saturating add/subtract. */
d9ba4830
PB
7255 tmp = load_reg(s, rn);
7256 tmp2 = load_reg(s, rm);
9ee6e8bb 7257 if (op & 2)
d9ba4830 7258 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 7259 if (op & 1)
d9ba4830 7260 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 7261 else
d9ba4830
PB
7262 gen_helper_add_saturate(tmp, tmp, tmp2);
7263 dead_tmp(tmp2);
9ee6e8bb 7264 } else {
d9ba4830 7265 tmp = load_reg(s, rn);
9ee6e8bb
PB
7266 switch (op) {
7267 case 0x0a: /* rbit */
d9ba4830 7268 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
7269 break;
7270 case 0x08: /* rev */
d9ba4830 7271 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
7272 break;
7273 case 0x09: /* rev16 */
d9ba4830 7274 gen_rev16(tmp);
9ee6e8bb
PB
7275 break;
7276 case 0x0b: /* revsh */
d9ba4830 7277 gen_revsh(tmp);
9ee6e8bb
PB
7278 break;
7279 case 0x10: /* sel */
d9ba4830 7280 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
7281 tmp3 = new_tmp();
7282 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 7283 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 7284 dead_tmp(tmp3);
d9ba4830 7285 dead_tmp(tmp2);
9ee6e8bb
PB
7286 break;
7287 case 0x18: /* clz */
d9ba4830 7288 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
7289 break;
7290 default:
7291 goto illegal_op;
7292 }
7293 }
d9ba4830 7294 store_reg(s, rd, tmp);
9ee6e8bb
PB
7295 break;
7296 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
7297 op = (insn >> 4) & 0xf;
d9ba4830
PB
7298 tmp = load_reg(s, rn);
7299 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7300 switch ((insn >> 20) & 7) {
7301 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
7302 tcg_gen_mul_i32(tmp, tmp, tmp2);
7303 dead_tmp(tmp2);
9ee6e8bb 7304 if (rs != 15) {
d9ba4830 7305 tmp2 = load_reg(s, rs);
9ee6e8bb 7306 if (op)
d9ba4830 7307 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 7308 else
d9ba4830
PB
7309 tcg_gen_add_i32(tmp, tmp, tmp2);
7310 dead_tmp(tmp2);
9ee6e8bb 7311 }
9ee6e8bb
PB
7312 break;
7313 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
7314 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7315 dead_tmp(tmp2);
9ee6e8bb 7316 if (rs != 15) {
d9ba4830
PB
7317 tmp2 = load_reg(s, rs);
7318 gen_helper_add_setq(tmp, tmp, tmp2);
7319 dead_tmp(tmp2);
9ee6e8bb 7320 }
9ee6e8bb
PB
7321 break;
7322 case 2: /* Dual multiply add. */
7323 case 4: /* Dual multiply subtract. */
7324 if (op)
d9ba4830
PB
7325 gen_swap_half(tmp2);
7326 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
7327 /* This addition cannot overflow. */
7328 if (insn & (1 << 22)) {
d9ba4830 7329 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 7330 } else {
d9ba4830 7331 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 7332 }
d9ba4830 7333 dead_tmp(tmp2);
9ee6e8bb
PB
7334 if (rs != 15)
7335 {
d9ba4830
PB
7336 tmp2 = load_reg(s, rs);
7337 gen_helper_add_setq(tmp, tmp, tmp2);
7338 dead_tmp(tmp2);
9ee6e8bb 7339 }
9ee6e8bb
PB
7340 break;
7341 case 3: /* 32 * 16 -> 32msb */
7342 if (op)
d9ba4830 7343 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 7344 else
d9ba4830 7345 gen_sxth(tmp2);
5e3f878a
PB
7346 tmp2 = gen_muls_i64_i32(tmp, tmp2);
7347 tcg_gen_shri_i64(tmp2, tmp2, 16);
7348 tmp = new_tmp();
7349 tcg_gen_trunc_i64_i32(tmp, tmp2);
9ee6e8bb
PB
7350 if (rs != 15)
7351 {
d9ba4830
PB
7352 tmp2 = load_reg(s, rs);
7353 gen_helper_add_setq(tmp, tmp, tmp2);
7354 dead_tmp(tmp2);
9ee6e8bb 7355 }
9ee6e8bb
PB
7356 break;
7357 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
7358 gen_imull(tmp, tmp2);
7359 if (insn & (1 << 5)) {
7360 gen_roundqd(tmp, tmp2);
7361 dead_tmp(tmp2);
7362 } else {
7363 dead_tmp(tmp);
7364 tmp = tmp2;
7365 }
9ee6e8bb 7366 if (rs != 15) {
d9ba4830 7367 tmp2 = load_reg(s, rs);
9ee6e8bb 7368 if (insn & (1 << 21)) {
d9ba4830 7369 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 7370 } else {
d9ba4830 7371 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 7372 }
d9ba4830 7373 dead_tmp(tmp2);
2c0262af 7374 }
9ee6e8bb
PB
7375 break;
7376 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
7377 gen_helper_usad8(tmp, tmp, tmp2);
7378 dead_tmp(tmp2);
9ee6e8bb 7379 if (rs != 15) {
d9ba4830
PB
7380 tmp2 = load_reg(s, rs);
7381 tcg_gen_add_i32(tmp, tmp, tmp2);
7382 dead_tmp(tmp2);
5fd46862 7383 }
9ee6e8bb 7384 break;
2c0262af 7385 }
d9ba4830 7386 store_reg(s, rd, tmp);
2c0262af 7387 break;
9ee6e8bb
PB
7388 case 6: case 7: /* 64-bit multiply, Divide. */
7389 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
5e3f878a
PB
7390 tmp = load_reg(s, rn);
7391 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
7392 if ((op & 0x50) == 0x10) {
7393 /* sdiv, udiv */
7394 if (!arm_feature(env, ARM_FEATURE_DIV))
7395 goto illegal_op;
7396 if (op & 0x20)
5e3f878a 7397 gen_helper_udiv(tmp, tmp, tmp2);
2c0262af 7398 else
5e3f878a
PB
7399 gen_helper_sdiv(tmp, tmp, tmp2);
7400 dead_tmp(tmp2);
7401 store_reg(s, rd, tmp);
9ee6e8bb
PB
7402 } else if ((op & 0xe) == 0xc) {
7403 /* Dual multiply accumulate long. */
7404 if (op & 1)
5e3f878a
PB
7405 gen_swap_half(tmp2);
7406 gen_smul_dual(tmp, tmp2);
9ee6e8bb 7407 if (op & 0x10) {
5e3f878a 7408 tcg_gen_sub_i32(tmp, tmp, tmp2);
b5ff1b31 7409 } else {
5e3f878a 7410 tcg_gen_add_i32(tmp, tmp, tmp2);
b5ff1b31 7411 }
5e3f878a
PB
7412 dead_tmp(tmp2);
7413 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7414 gen_addq(s, tmp, rs, rd);
7415 gen_storeq_reg(s, rs, rd, tmp);
2c0262af 7416 } else {
9ee6e8bb
PB
7417 if (op & 0x20) {
7418 /* Unsigned 64-bit multiply */
5e3f878a 7419 tmp = gen_mulu_i64_i32(tmp, tmp2);
b5ff1b31 7420 } else {
9ee6e8bb
PB
7421 if (op & 8) {
7422 /* smlalxy */
5e3f878a
PB
7423 gen_mulxy(tmp, tmp2, op & 2, op & 1);
7424 dead_tmp(tmp2);
7425 tmp2 = tcg_temp_new(TCG_TYPE_I64);
7426 tcg_gen_ext_i32_i64(tmp2, tmp);
7427 dead_tmp(tmp);
7428 tmp = tmp2;
9ee6e8bb
PB
7429 } else {
7430 /* Signed 64-bit multiply */
5e3f878a 7431 tmp = gen_muls_i64_i32(tmp, tmp2);
9ee6e8bb 7432 }
b5ff1b31 7433 }
9ee6e8bb
PB
7434 if (op & 4) {
7435 /* umaal */
5e3f878a
PB
7436 gen_addq_lo(s, tmp, rs);
7437 gen_addq_lo(s, tmp, rd);
9ee6e8bb
PB
7438 } else if (op & 0x40) {
7439 /* 64-bit accumulate. */
5e3f878a 7440 gen_addq(s, tmp, rs, rd);
9ee6e8bb 7441 }
5e3f878a 7442 gen_storeq_reg(s, rs, rd, tmp);
5fd46862 7443 }
2c0262af 7444 break;
9ee6e8bb
PB
7445 }
7446 break;
7447 case 6: case 7: case 14: case 15:
7448 /* Coprocessor. */
7449 if (((insn >> 24) & 3) == 3) {
7450 /* Translate into the equivalent ARM encoding. */
7451 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7452 if (disas_neon_data_insn(env, s, insn))
7453 goto illegal_op;
7454 } else {
7455 if (insn & (1 << 28))
7456 goto illegal_op;
7457 if (disas_coproc_insn (env, s, insn))
7458 goto illegal_op;
7459 }
7460 break;
7461 case 8: case 9: case 10: case 11:
7462 if (insn & (1 << 15)) {
7463 /* Branches, misc control. */
7464 if (insn & 0x5000) {
7465 /* Unconditional branch. */
7466 /* signextend(hw1[10:0]) -> offset[:12]. */
7467 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7468 /* hw1[10:0] -> offset[11:1]. */
7469 offset |= (insn & 0x7ff) << 1;
7470 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7471 offset[24:22] already have the same value because of the
7472 sign extension above. */
7473 offset ^= ((~insn) & (1 << 13)) << 10;
7474 offset ^= ((~insn) & (1 << 11)) << 11;
7475
9ee6e8bb
PB
7476 if (insn & (1 << 14)) {
7477 /* Branch and link. */
b0109805 7478 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7479 gen_movl_reg_T1(s, 14);
b5ff1b31 7480 }
3b46e624 7481
b0109805 7482 offset += s->pc;
9ee6e8bb
PB
7483 if (insn & (1 << 12)) {
7484 /* b/bl */
b0109805 7485 gen_jmp(s, offset);
9ee6e8bb
PB
7486 } else {
7487 /* blx */
b0109805
PB
7488 offset &= ~(uint32_t)2;
7489 gen_bx_im(s, offset);
2c0262af 7490 }
9ee6e8bb
PB
7491 } else if (((insn >> 23) & 7) == 7) {
7492 /* Misc control */
7493 if (insn & (1 << 13))
7494 goto illegal_op;
7495
7496 if (insn & (1 << 26)) {
7497 /* Secure monitor call (v6Z) */
7498 goto illegal_op; /* not implemented. */
2c0262af 7499 } else {
9ee6e8bb
PB
7500 op = (insn >> 20) & 7;
7501 switch (op) {
7502 case 0: /* msr cpsr. */
7503 if (IS_M(env)) {
8984bd2e
PB
7504 tmp = load_reg(s, rn);
7505 addr = tcg_const_i32(insn & 0xff);
7506 gen_helper_v7m_msr(cpu_env, addr, tmp);
9ee6e8bb
PB
7507 gen_lookup_tb(s);
7508 break;
7509 }
7510 /* fall through */
7511 case 1: /* msr spsr. */
7512 if (IS_M(env))
7513 goto illegal_op;
7514 gen_movl_T0_reg(s, rn);
7515 if (gen_set_psr_T0(s,
7516 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7517 op == 1))
7518 goto illegal_op;
7519 break;
7520 case 2: /* cps, nop-hint. */
7521 if (((insn >> 8) & 7) == 0) {
7522 gen_nop_hint(s, insn & 0xff);
7523 }
7524 /* Implemented as NOP in user mode. */
7525 if (IS_USER(s))
7526 break;
7527 offset = 0;
7528 imm = 0;
7529 if (insn & (1 << 10)) {
7530 if (insn & (1 << 7))
7531 offset |= CPSR_A;
7532 if (insn & (1 << 6))
7533 offset |= CPSR_I;
7534 if (insn & (1 << 5))
7535 offset |= CPSR_F;
7536 if (insn & (1 << 9))
7537 imm = CPSR_A | CPSR_I | CPSR_F;
7538 }
7539 if (insn & (1 << 8)) {
7540 offset |= 0x1f;
7541 imm |= (insn & 0x1f);
7542 }
7543 if (offset) {
7544 gen_op_movl_T0_im(imm);
7545 gen_set_psr_T0(s, offset, 0);
7546 }
7547 break;
7548 case 3: /* Special control operations. */
7549 op = (insn >> 4) & 0xf;
7550 switch (op) {
7551 case 2: /* clrex */
8f8e3aa4 7552 gen_helper_clrex(cpu_env);
9ee6e8bb
PB
7553 break;
7554 case 4: /* dsb */
7555 case 5: /* dmb */
7556 case 6: /* isb */
7557 /* These execute as NOPs. */
7558 ARCH(7);
7559 break;
7560 default:
7561 goto illegal_op;
7562 }
7563 break;
7564 case 4: /* bxj */
7565 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7566 tmp = load_reg(s, rn);
7567 gen_bx(s, tmp);
9ee6e8bb
PB
7568 break;
7569 case 5: /* Exception return. */
7570 /* Unpredictable in user mode. */
7571 goto illegal_op;
7572 case 6: /* mrs cpsr. */
8984bd2e 7573 tmp = new_tmp();
9ee6e8bb 7574 if (IS_M(env)) {
8984bd2e
PB
7575 addr = tcg_const_i32(insn & 0xff);
7576 gen_helper_v7m_mrs(tmp, cpu_env, addr);
9ee6e8bb 7577 } else {
8984bd2e 7578 gen_helper_cpsr_read(tmp);
9ee6e8bb 7579 }
8984bd2e 7580 store_reg(s, rd, tmp);
9ee6e8bb
PB
7581 break;
7582 case 7: /* mrs spsr. */
7583 /* Not accessible in user mode. */
7584 if (IS_USER(s) || IS_M(env))
7585 goto illegal_op;
d9ba4830
PB
7586 tmp = load_cpu_field(spsr);
7587 store_reg(s, rd, tmp);
9ee6e8bb 7588 break;
2c0262af
FB
7589 }
7590 }
9ee6e8bb
PB
7591 } else {
7592 /* Conditional branch. */
7593 op = (insn >> 22) & 0xf;
7594 /* Generate a conditional jump to next instruction. */
7595 s->condlabel = gen_new_label();
d9ba4830 7596 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7597 s->condjmp = 1;
7598
7599 /* offset[11:1] = insn[10:0] */
7600 offset = (insn & 0x7ff) << 1;
7601 /* offset[17:12] = insn[21:16]. */
7602 offset |= (insn & 0x003f0000) >> 4;
7603 /* offset[31:20] = insn[26]. */
7604 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7605 /* offset[18] = insn[13]. */
7606 offset |= (insn & (1 << 13)) << 5;
7607 /* offset[19] = insn[11]. */
7608 offset |= (insn & (1 << 11)) << 8;
7609
7610 /* jump to the offset */
b0109805 7611 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7612 }
7613 } else {
7614 /* Data processing immediate. */
7615 if (insn & (1 << 25)) {
7616 if (insn & (1 << 24)) {
7617 if (insn & (1 << 20))
7618 goto illegal_op;
7619 /* Bitfield/Saturate. */
7620 op = (insn >> 21) & 7;
7621 imm = insn & 0x1f;
7622 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7623 if (rn == 15) {
7624 tmp = new_tmp();
7625 tcg_gen_movi_i32(tmp, 0);
7626 } else {
7627 tmp = load_reg(s, rn);
7628 }
9ee6e8bb
PB
7629 switch (op) {
7630 case 2: /* Signed bitfield extract. */
7631 imm++;
7632 if (shift + imm > 32)
7633 goto illegal_op;
7634 if (imm < 32)
6ddbc6e4 7635 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7636 break;
7637 case 6: /* Unsigned bitfield extract. */
7638 imm++;
7639 if (shift + imm > 32)
7640 goto illegal_op;
7641 if (imm < 32)
6ddbc6e4 7642 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7643 break;
7644 case 3: /* Bitfield insert/clear. */
7645 if (imm < shift)
7646 goto illegal_op;
7647 imm = imm + 1 - shift;
7648 if (imm != 32) {
6ddbc6e4 7649 tmp2 = load_reg(s, rd);
8f8e3aa4 7650 gen_bfi(tmp, tmp2, tmp, shift, (1u << imm) - 1);
6ddbc6e4 7651 dead_tmp(tmp2);
9ee6e8bb
PB
7652 }
7653 break;
7654 case 7:
7655 goto illegal_op;
7656 default: /* Saturate. */
9ee6e8bb
PB
7657 if (shift) {
7658 if (op & 1)
6ddbc6e4 7659 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7660 else
6ddbc6e4 7661 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7662 }
6ddbc6e4 7663 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7664 if (op & 4) {
7665 /* Unsigned. */
9ee6e8bb 7666 if ((op & 1) && shift == 0)
6ddbc6e4 7667 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7668 else
6ddbc6e4 7669 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7670 } else {
9ee6e8bb 7671 /* Signed. */
9ee6e8bb 7672 if ((op & 1) && shift == 0)
6ddbc6e4 7673 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7674 else
6ddbc6e4 7675 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7676 }
9ee6e8bb 7677 break;
2c0262af 7678 }
6ddbc6e4 7679 store_reg(s, rd, tmp);
9ee6e8bb
PB
7680 } else {
7681 imm = ((insn & 0x04000000) >> 15)
7682 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7683 if (insn & (1 << 22)) {
7684 /* 16-bit immediate. */
7685 imm |= (insn >> 4) & 0xf000;
7686 if (insn & (1 << 23)) {
7687 /* movt */
5e3f878a 7688 tmp = load_reg(s, rd);
86831435 7689 tcg_gen_ext16u_i32(tmp, tmp);
5e3f878a 7690 tcg_gen_ori_i32(tmp, tmp, imm << 16);
2c0262af 7691 } else {
9ee6e8bb 7692 /* movw */
5e3f878a
PB
7693 tmp = new_tmp();
7694 tcg_gen_movi_i32(tmp, imm);
2c0262af
FB
7695 }
7696 } else {
9ee6e8bb
PB
7697 /* Add/sub 12-bit immediate. */
7698 if (rn == 15) {
b0109805 7699 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7700 if (insn & (1 << 23))
b0109805 7701 offset -= imm;
9ee6e8bb 7702 else
b0109805 7703 offset += imm;
5e3f878a
PB
7704 tmp = new_tmp();
7705 tcg_gen_movi_i32(tmp, offset);
2c0262af 7706 } else {
5e3f878a 7707 tmp = load_reg(s, rn);
9ee6e8bb 7708 if (insn & (1 << 23))
5e3f878a 7709 tcg_gen_subi_i32(tmp, tmp, imm);
9ee6e8bb 7710 else
5e3f878a 7711 tcg_gen_addi_i32(tmp, tmp, imm);
2c0262af 7712 }
9ee6e8bb 7713 }
5e3f878a 7714 store_reg(s, rd, tmp);
191abaa2 7715 }
9ee6e8bb
PB
7716 } else {
7717 int shifter_out = 0;
7718 /* modified 12-bit immediate. */
7719 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7720 imm = (insn & 0xff);
7721 switch (shift) {
7722 case 0: /* XY */
7723 /* Nothing to do. */
7724 break;
7725 case 1: /* 00XY00XY */
7726 imm |= imm << 16;
7727 break;
7728 case 2: /* XY00XY00 */
7729 imm |= imm << 16;
7730 imm <<= 8;
7731 break;
7732 case 3: /* XYXYXYXY */
7733 imm |= imm << 16;
7734 imm |= imm << 8;
7735 break;
7736 default: /* Rotated constant. */
7737 shift = (shift << 1) | (imm >> 7);
7738 imm |= 0x80;
7739 imm = imm << (32 - shift);
7740 shifter_out = 1;
7741 break;
b5ff1b31 7742 }
9ee6e8bb
PB
7743 gen_op_movl_T1_im(imm);
7744 rn = (insn >> 16) & 0xf;
7745 if (rn == 15)
7746 gen_op_movl_T0_im(0);
7747 else
7748 gen_movl_T0_reg(s, rn);
7749 op = (insn >> 21) & 0xf;
7750 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7751 shifter_out))
7752 goto illegal_op;
7753 rd = (insn >> 8) & 0xf;
7754 if (rd != 15) {
7755 gen_movl_reg_T0(s, rd);
2c0262af 7756 }
2c0262af 7757 }
9ee6e8bb
PB
7758 }
7759 break;
7760 case 12: /* Load/store single data item. */
7761 {
7762 int postinc = 0;
7763 int writeback = 0;
b0109805 7764 int user;
9ee6e8bb
PB
7765 if ((insn & 0x01100000) == 0x01000000) {
7766 if (disas_neon_ls_insn(env, s, insn))
c1713132 7767 goto illegal_op;
9ee6e8bb
PB
7768 break;
7769 }
b0109805 7770 user = IS_USER(s);
9ee6e8bb 7771 if (rn == 15) {
b0109805 7772 addr = new_tmp();
9ee6e8bb
PB
7773 /* PC relative. */
7774 /* s->pc has already been incremented by 4. */
7775 imm = s->pc & 0xfffffffc;
7776 if (insn & (1 << 23))
7777 imm += insn & 0xfff;
7778 else
7779 imm -= insn & 0xfff;
b0109805 7780 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7781 } else {
b0109805 7782 addr = load_reg(s, rn);
9ee6e8bb
PB
7783 if (insn & (1 << 23)) {
7784 /* Positive offset. */
7785 imm = insn & 0xfff;
b0109805 7786 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7787 } else {
7788 op = (insn >> 8) & 7;
7789 imm = insn & 0xff;
7790 switch (op) {
7791 case 0: case 8: /* Shifted Register. */
7792 shift = (insn >> 4) & 0xf;
7793 if (shift > 3)
18c9b560 7794 goto illegal_op;
b26eefb6 7795 tmp = load_reg(s, rm);
9ee6e8bb 7796 if (shift)
b26eefb6 7797 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7798 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7799 dead_tmp(tmp);
9ee6e8bb
PB
7800 break;
7801 case 4: /* Negative offset. */
b0109805 7802 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7803 break;
7804 case 6: /* User privilege. */
b0109805
PB
7805 tcg_gen_addi_i32(addr, addr, imm);
7806 user = 1;
9ee6e8bb
PB
7807 break;
7808 case 1: /* Post-decrement. */
7809 imm = -imm;
7810 /* Fall through. */
7811 case 3: /* Post-increment. */
9ee6e8bb
PB
7812 postinc = 1;
7813 writeback = 1;
7814 break;
7815 case 5: /* Pre-decrement. */
7816 imm = -imm;
7817 /* Fall through. */
7818 case 7: /* Pre-increment. */
b0109805 7819 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7820 writeback = 1;
7821 break;
7822 default:
b7bcbe95 7823 goto illegal_op;
9ee6e8bb
PB
7824 }
7825 }
7826 }
7827 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7828 if (insn & (1 << 20)) {
7829 /* Load. */
7830 if (rs == 15 && op != 2) {
7831 if (op & 2)
b5ff1b31 7832 goto illegal_op;
9ee6e8bb
PB
7833 /* Memory hint. Implemented as NOP. */
7834 } else {
7835 switch (op) {
b0109805
PB
7836 case 0: tmp = gen_ld8u(addr, user); break;
7837 case 4: tmp = gen_ld8s(addr, user); break;
7838 case 1: tmp = gen_ld16u(addr, user); break;
7839 case 5: tmp = gen_ld16s(addr, user); break;
7840 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7841 default: goto illegal_op;
7842 }
7843 if (rs == 15) {
b0109805 7844 gen_bx(s, tmp);
9ee6e8bb 7845 } else {
b0109805 7846 store_reg(s, rs, tmp);
9ee6e8bb
PB
7847 }
7848 }
7849 } else {
7850 /* Store. */
7851 if (rs == 15)
b7bcbe95 7852 goto illegal_op;
b0109805 7853 tmp = load_reg(s, rs);
9ee6e8bb 7854 switch (op) {
b0109805
PB
7855 case 0: gen_st8(tmp, addr, user); break;
7856 case 1: gen_st16(tmp, addr, user); break;
7857 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7858 default: goto illegal_op;
b7bcbe95 7859 }
2c0262af 7860 }
9ee6e8bb 7861 if (postinc)
b0109805
PB
7862 tcg_gen_addi_i32(addr, addr, imm);
7863 if (writeback) {
7864 store_reg(s, rn, addr);
7865 } else {
7866 dead_tmp(addr);
7867 }
9ee6e8bb
PB
7868 }
7869 break;
7870 default:
7871 goto illegal_op;
2c0262af 7872 }
9ee6e8bb
PB
7873 return 0;
7874illegal_op:
7875 return 1;
2c0262af
FB
7876}
7877
9ee6e8bb 7878static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
7879{
7880 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7881 int32_t offset;
7882 int i;
b26eefb6 7883 TCGv tmp;
d9ba4830 7884 TCGv tmp2;
b0109805 7885 TCGv addr;
99c475ab 7886
9ee6e8bb
PB
7887 if (s->condexec_mask) {
7888 cond = s->condexec_cond;
7889 s->condlabel = gen_new_label();
d9ba4830 7890 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7891 s->condjmp = 1;
7892 }
7893
b5ff1b31 7894 insn = lduw_code(s->pc);
99c475ab 7895 s->pc += 2;
b5ff1b31 7896
99c475ab
FB
7897 switch (insn >> 12) {
7898 case 0: case 1:
7899 rd = insn & 7;
7900 op = (insn >> 11) & 3;
7901 if (op == 3) {
7902 /* add/subtract */
7903 rn = (insn >> 3) & 7;
7904 gen_movl_T0_reg(s, rn);
7905 if (insn & (1 << 10)) {
7906 /* immediate */
7907 gen_op_movl_T1_im((insn >> 6) & 7);
7908 } else {
7909 /* reg */
7910 rm = (insn >> 6) & 7;
7911 gen_movl_T1_reg(s, rm);
7912 }
9ee6e8bb
PB
7913 if (insn & (1 << 9)) {
7914 if (s->condexec_mask)
7915 gen_op_subl_T0_T1();
7916 else
7917 gen_op_subl_T0_T1_cc();
7918 } else {
7919 if (s->condexec_mask)
7920 gen_op_addl_T0_T1();
7921 else
7922 gen_op_addl_T0_T1_cc();
7923 }
99c475ab
FB
7924 gen_movl_reg_T0(s, rd);
7925 } else {
7926 /* shift immediate */
7927 rm = (insn >> 3) & 7;
7928 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
7929 tmp = load_reg(s, rm);
7930 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7931 if (!s->condexec_mask)
7932 gen_logic_CC(tmp);
7933 store_reg(s, rd, tmp);
99c475ab
FB
7934 }
7935 break;
7936 case 2: case 3:
7937 /* arithmetic large immediate */
7938 op = (insn >> 11) & 3;
7939 rd = (insn >> 8) & 0x7;
7940 if (op == 0) {
7941 gen_op_movl_T0_im(insn & 0xff);
7942 } else {
7943 gen_movl_T0_reg(s, rd);
7944 gen_op_movl_T1_im(insn & 0xff);
7945 }
7946 switch (op) {
7947 case 0: /* mov */
9ee6e8bb
PB
7948 if (!s->condexec_mask)
7949 gen_op_logic_T0_cc();
99c475ab
FB
7950 break;
7951 case 1: /* cmp */
7952 gen_op_subl_T0_T1_cc();
7953 break;
7954 case 2: /* add */
9ee6e8bb
PB
7955 if (s->condexec_mask)
7956 gen_op_addl_T0_T1();
7957 else
7958 gen_op_addl_T0_T1_cc();
99c475ab
FB
7959 break;
7960 case 3: /* sub */
9ee6e8bb
PB
7961 if (s->condexec_mask)
7962 gen_op_subl_T0_T1();
7963 else
7964 gen_op_subl_T0_T1_cc();
99c475ab
FB
7965 break;
7966 }
7967 if (op != 1)
7968 gen_movl_reg_T0(s, rd);
7969 break;
7970 case 4:
7971 if (insn & (1 << 11)) {
7972 rd = (insn >> 8) & 7;
5899f386
FB
7973 /* load pc-relative. Bit 1 of PC is ignored. */
7974 val = s->pc + 2 + ((insn & 0xff) * 4);
7975 val &= ~(uint32_t)2;
b0109805
PB
7976 addr = new_tmp();
7977 tcg_gen_movi_i32(addr, val);
7978 tmp = gen_ld32(addr, IS_USER(s));
7979 dead_tmp(addr);
7980 store_reg(s, rd, tmp);
99c475ab
FB
7981 break;
7982 }
7983 if (insn & (1 << 10)) {
7984 /* data processing extended or blx */
7985 rd = (insn & 7) | ((insn >> 4) & 8);
7986 rm = (insn >> 3) & 0xf;
7987 op = (insn >> 8) & 3;
7988 switch (op) {
7989 case 0: /* add */
7990 gen_movl_T0_reg(s, rd);
7991 gen_movl_T1_reg(s, rm);
7992 gen_op_addl_T0_T1();
7993 gen_movl_reg_T0(s, rd);
7994 break;
7995 case 1: /* cmp */
7996 gen_movl_T0_reg(s, rd);
7997 gen_movl_T1_reg(s, rm);
7998 gen_op_subl_T0_T1_cc();
7999 break;
8000 case 2: /* mov/cpy */
8001 gen_movl_T0_reg(s, rm);
8002 gen_movl_reg_T0(s, rd);
8003 break;
8004 case 3:/* branch [and link] exchange thumb register */
b0109805 8005 tmp = load_reg(s, rm);
99c475ab
FB
8006 if (insn & (1 << 7)) {
8007 val = (uint32_t)s->pc | 1;
b0109805
PB
8008 tmp2 = new_tmp();
8009 tcg_gen_movi_i32(tmp2, val);
8010 store_reg(s, 14, tmp2);
99c475ab 8011 }
d9ba4830 8012 gen_bx(s, tmp);
99c475ab
FB
8013 break;
8014 }
8015 break;
8016 }
8017
8018 /* data processing register */
8019 rd = insn & 7;
8020 rm = (insn >> 3) & 7;
8021 op = (insn >> 6) & 0xf;
8022 if (op == 2 || op == 3 || op == 4 || op == 7) {
8023 /* the shift/rotate ops want the operands backwards */
8024 val = rm;
8025 rm = rd;
8026 rd = val;
8027 val = 1;
8028 } else {
8029 val = 0;
8030 }
8031
8032 if (op == 9) /* neg */
8033 gen_op_movl_T0_im(0);
8034 else if (op != 0xf) /* mvn doesn't read its first operand */
8035 gen_movl_T0_reg(s, rd);
8036
8037 gen_movl_T1_reg(s, rm);
5899f386 8038 switch (op) {
99c475ab
FB
8039 case 0x0: /* and */
8040 gen_op_andl_T0_T1();
9ee6e8bb
PB
8041 if (!s->condexec_mask)
8042 gen_op_logic_T0_cc();
99c475ab
FB
8043 break;
8044 case 0x1: /* eor */
8045 gen_op_xorl_T0_T1();
9ee6e8bb
PB
8046 if (!s->condexec_mask)
8047 gen_op_logic_T0_cc();
99c475ab
FB
8048 break;
8049 case 0x2: /* lsl */
9ee6e8bb 8050 if (s->condexec_mask) {
8984bd2e 8051 gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8052 } else {
8984bd2e 8053 gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8054 gen_op_logic_T1_cc();
8055 }
99c475ab
FB
8056 break;
8057 case 0x3: /* lsr */
9ee6e8bb 8058 if (s->condexec_mask) {
8984bd2e 8059 gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8060 } else {
8984bd2e 8061 gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8062 gen_op_logic_T1_cc();
8063 }
99c475ab
FB
8064 break;
8065 case 0x4: /* asr */
9ee6e8bb 8066 if (s->condexec_mask) {
8984bd2e 8067 gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8068 } else {
8984bd2e 8069 gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8070 gen_op_logic_T1_cc();
8071 }
99c475ab
FB
8072 break;
8073 case 0x5: /* adc */
9ee6e8bb 8074 if (s->condexec_mask)
b26eefb6 8075 gen_adc_T0_T1();
9ee6e8bb
PB
8076 else
8077 gen_op_adcl_T0_T1_cc();
99c475ab
FB
8078 break;
8079 case 0x6: /* sbc */
9ee6e8bb 8080 if (s->condexec_mask)
3670669c 8081 gen_sbc_T0_T1();
9ee6e8bb
PB
8082 else
8083 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
8084 break;
8085 case 0x7: /* ror */
9ee6e8bb 8086 if (s->condexec_mask) {
8984bd2e 8087 gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb 8088 } else {
8984bd2e 8089 gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]);
9ee6e8bb
PB
8090 gen_op_logic_T1_cc();
8091 }
99c475ab
FB
8092 break;
8093 case 0x8: /* tst */
8094 gen_op_andl_T0_T1();
8095 gen_op_logic_T0_cc();
8096 rd = 16;
5899f386 8097 break;
99c475ab 8098 case 0x9: /* neg */
9ee6e8bb 8099 if (s->condexec_mask)
390efc54 8100 tcg_gen_neg_i32(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
8101 else
8102 gen_op_subl_T0_T1_cc();
99c475ab
FB
8103 break;
8104 case 0xa: /* cmp */
8105 gen_op_subl_T0_T1_cc();
8106 rd = 16;
8107 break;
8108 case 0xb: /* cmn */
8109 gen_op_addl_T0_T1_cc();
8110 rd = 16;
8111 break;
8112 case 0xc: /* orr */
8113 gen_op_orl_T0_T1();
9ee6e8bb
PB
8114 if (!s->condexec_mask)
8115 gen_op_logic_T0_cc();
99c475ab
FB
8116 break;
8117 case 0xd: /* mul */
8118 gen_op_mull_T0_T1();
9ee6e8bb
PB
8119 if (!s->condexec_mask)
8120 gen_op_logic_T0_cc();
99c475ab
FB
8121 break;
8122 case 0xe: /* bic */
8123 gen_op_bicl_T0_T1();
9ee6e8bb
PB
8124 if (!s->condexec_mask)
8125 gen_op_logic_T0_cc();
99c475ab
FB
8126 break;
8127 case 0xf: /* mvn */
8128 gen_op_notl_T1();
9ee6e8bb
PB
8129 if (!s->condexec_mask)
8130 gen_op_logic_T1_cc();
99c475ab 8131 val = 1;
5899f386 8132 rm = rd;
99c475ab
FB
8133 break;
8134 }
8135 if (rd != 16) {
8136 if (val)
5899f386 8137 gen_movl_reg_T1(s, rm);
99c475ab
FB
8138 else
8139 gen_movl_reg_T0(s, rd);
8140 }
8141 break;
8142
8143 case 5:
8144 /* load/store register offset. */
8145 rd = insn & 7;
8146 rn = (insn >> 3) & 7;
8147 rm = (insn >> 6) & 7;
8148 op = (insn >> 9) & 7;
b0109805 8149 addr = load_reg(s, rn);
b26eefb6 8150 tmp = load_reg(s, rm);
b0109805 8151 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 8152 dead_tmp(tmp);
99c475ab
FB
8153
8154 if (op < 3) /* store */
b0109805 8155 tmp = load_reg(s, rd);
99c475ab
FB
8156
8157 switch (op) {
8158 case 0: /* str */
b0109805 8159 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
8160 break;
8161 case 1: /* strh */
b0109805 8162 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
8163 break;
8164 case 2: /* strb */
b0109805 8165 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
8166 break;
8167 case 3: /* ldrsb */
b0109805 8168 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
8169 break;
8170 case 4: /* ldr */
b0109805 8171 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8172 break;
8173 case 5: /* ldrh */
b0109805 8174 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
8175 break;
8176 case 6: /* ldrb */
b0109805 8177 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
8178 break;
8179 case 7: /* ldrsh */
b0109805 8180 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
8181 break;
8182 }
8183 if (op >= 3) /* load */
b0109805
PB
8184 store_reg(s, rd, tmp);
8185 dead_tmp(addr);
99c475ab
FB
8186 break;
8187
8188 case 6:
8189 /* load/store word immediate offset */
8190 rd = insn & 7;
8191 rn = (insn >> 3) & 7;
b0109805 8192 addr = load_reg(s, rn);
99c475ab 8193 val = (insn >> 4) & 0x7c;
b0109805 8194 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8195
8196 if (insn & (1 << 11)) {
8197 /* load */
b0109805
PB
8198 tmp = gen_ld32(addr, IS_USER(s));
8199 store_reg(s, rd, tmp);
99c475ab
FB
8200 } else {
8201 /* store */
b0109805
PB
8202 tmp = load_reg(s, rd);
8203 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8204 }
b0109805 8205 dead_tmp(addr);
99c475ab
FB
8206 break;
8207
8208 case 7:
8209 /* load/store byte immediate offset */
8210 rd = insn & 7;
8211 rn = (insn >> 3) & 7;
b0109805 8212 addr = load_reg(s, rn);
99c475ab 8213 val = (insn >> 6) & 0x1f;
b0109805 8214 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8215
8216 if (insn & (1 << 11)) {
8217 /* load */
b0109805
PB
8218 tmp = gen_ld8u(addr, IS_USER(s));
8219 store_reg(s, rd, tmp);
99c475ab
FB
8220 } else {
8221 /* store */
b0109805
PB
8222 tmp = load_reg(s, rd);
8223 gen_st8(tmp, addr, IS_USER(s));
99c475ab 8224 }
b0109805 8225 dead_tmp(addr);
99c475ab
FB
8226 break;
8227
8228 case 8:
8229 /* load/store halfword immediate offset */
8230 rd = insn & 7;
8231 rn = (insn >> 3) & 7;
b0109805 8232 addr = load_reg(s, rn);
99c475ab 8233 val = (insn >> 5) & 0x3e;
b0109805 8234 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8235
8236 if (insn & (1 << 11)) {
8237 /* load */
b0109805
PB
8238 tmp = gen_ld16u(addr, IS_USER(s));
8239 store_reg(s, rd, tmp);
99c475ab
FB
8240 } else {
8241 /* store */
b0109805
PB
8242 tmp = load_reg(s, rd);
8243 gen_st16(tmp, addr, IS_USER(s));
99c475ab 8244 }
b0109805 8245 dead_tmp(addr);
99c475ab
FB
8246 break;
8247
8248 case 9:
8249 /* load/store from stack */
8250 rd = (insn >> 8) & 7;
b0109805 8251 addr = load_reg(s, 13);
99c475ab 8252 val = (insn & 0xff) * 4;
b0109805 8253 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
8254
8255 if (insn & (1 << 11)) {
8256 /* load */
b0109805
PB
8257 tmp = gen_ld32(addr, IS_USER(s));
8258 store_reg(s, rd, tmp);
99c475ab
FB
8259 } else {
8260 /* store */
b0109805
PB
8261 tmp = load_reg(s, rd);
8262 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8263 }
b0109805 8264 dead_tmp(addr);
99c475ab
FB
8265 break;
8266
8267 case 10:
8268 /* add to high reg */
8269 rd = (insn >> 8) & 7;
5899f386
FB
8270 if (insn & (1 << 11)) {
8271 /* SP */
5e3f878a 8272 tmp = load_reg(s, 13);
5899f386
FB
8273 } else {
8274 /* PC. bit 1 is ignored. */
5e3f878a
PB
8275 tmp = new_tmp();
8276 tcg_gen_movi_i32(tmp, (s->pc + 2) & ~(uint32_t)2);
5899f386 8277 }
99c475ab 8278 val = (insn & 0xff) * 4;
5e3f878a
PB
8279 tcg_gen_addi_i32(tmp, tmp, val);
8280 store_reg(s, rd, tmp);
99c475ab
FB
8281 break;
8282
8283 case 11:
8284 /* misc */
8285 op = (insn >> 8) & 0xf;
8286 switch (op) {
8287 case 0:
8288 /* adjust stack pointer */
b26eefb6 8289 tmp = load_reg(s, 13);
99c475ab
FB
8290 val = (insn & 0x7f) * 4;
8291 if (insn & (1 << 7))
6a0d8a1d 8292 val = -(int32_t)val;
b26eefb6
PB
8293 tcg_gen_addi_i32(tmp, tmp, val);
8294 store_reg(s, 13, tmp);
99c475ab
FB
8295 break;
8296
9ee6e8bb
PB
8297 case 2: /* sign/zero extend. */
8298 ARCH(6);
8299 rd = insn & 7;
8300 rm = (insn >> 3) & 7;
b0109805 8301 tmp = load_reg(s, rm);
9ee6e8bb 8302 switch ((insn >> 6) & 3) {
b0109805
PB
8303 case 0: gen_sxth(tmp); break;
8304 case 1: gen_sxtb(tmp); break;
8305 case 2: gen_uxth(tmp); break;
8306 case 3: gen_uxtb(tmp); break;
9ee6e8bb 8307 }
b0109805 8308 store_reg(s, rd, tmp);
9ee6e8bb 8309 break;
99c475ab
FB
8310 case 4: case 5: case 0xc: case 0xd:
8311 /* push/pop */
b0109805 8312 addr = load_reg(s, 13);
5899f386
FB
8313 if (insn & (1 << 8))
8314 offset = 4;
99c475ab 8315 else
5899f386
FB
8316 offset = 0;
8317 for (i = 0; i < 8; i++) {
8318 if (insn & (1 << i))
8319 offset += 4;
8320 }
8321 if ((insn & (1 << 11)) == 0) {
b0109805 8322 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8323 }
99c475ab
FB
8324 for (i = 0; i < 8; i++) {
8325 if (insn & (1 << i)) {
8326 if (insn & (1 << 11)) {
8327 /* pop */
b0109805
PB
8328 tmp = gen_ld32(addr, IS_USER(s));
8329 store_reg(s, i, tmp);
99c475ab
FB
8330 } else {
8331 /* push */
b0109805
PB
8332 tmp = load_reg(s, i);
8333 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8334 }
5899f386 8335 /* advance to the next address. */
b0109805 8336 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8337 }
8338 }
8339 if (insn & (1 << 8)) {
8340 if (insn & (1 << 11)) {
8341 /* pop pc */
b0109805 8342 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
8343 /* don't set the pc until the rest of the instruction
8344 has completed */
8345 } else {
8346 /* push lr */
b0109805
PB
8347 tmp = load_reg(s, 14);
8348 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8349 }
b0109805 8350 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 8351 }
5899f386 8352 if ((insn & (1 << 11)) == 0) {
b0109805 8353 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 8354 }
99c475ab 8355 /* write back the new stack pointer */
b0109805 8356 store_reg(s, 13, addr);
99c475ab
FB
8357 /* set the new PC value */
8358 if ((insn & 0x0900) == 0x0900)
b0109805 8359 gen_bx(s, tmp);
99c475ab
FB
8360 break;
8361
9ee6e8bb
PB
8362 case 1: case 3: case 9: case 11: /* czb */
8363 rm = insn & 7;
d9ba4830 8364 tmp = load_reg(s, rm);
9ee6e8bb
PB
8365 s->condlabel = gen_new_label();
8366 s->condjmp = 1;
8367 if (insn & (1 << 11))
cb63669a 8368 tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, s->condlabel);
9ee6e8bb 8369 else
cb63669a 8370 tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, s->condlabel);
d9ba4830 8371 dead_tmp(tmp);
9ee6e8bb
PB
8372 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
8373 val = (uint32_t)s->pc + 2;
8374 val += offset;
8375 gen_jmp(s, val);
8376 break;
8377
8378 case 15: /* IT, nop-hint. */
8379 if ((insn & 0xf) == 0) {
8380 gen_nop_hint(s, (insn >> 4) & 0xf);
8381 break;
8382 }
8383 /* If Then. */
8384 s->condexec_cond = (insn >> 4) & 0xe;
8385 s->condexec_mask = insn & 0x1f;
8386 /* No actual code generated for this insn, just setup state. */
8387 break;
8388
06c949e6 8389 case 0xe: /* bkpt */
9ee6e8bb 8390 gen_set_condexec(s);
5e3f878a 8391 gen_set_pc_im(s->pc - 2);
d9ba4830 8392 gen_exception(EXCP_BKPT);
06c949e6
PB
8393 s->is_jmp = DISAS_JUMP;
8394 break;
8395
9ee6e8bb
PB
8396 case 0xa: /* rev */
8397 ARCH(6);
8398 rn = (insn >> 3) & 0x7;
8399 rd = insn & 0x7;
b0109805 8400 tmp = load_reg(s, rn);
9ee6e8bb 8401 switch ((insn >> 6) & 3) {
b0109805
PB
8402 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8403 case 1: gen_rev16(tmp); break;
8404 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8405 default: goto illegal_op;
8406 }
b0109805 8407 store_reg(s, rd, tmp);
9ee6e8bb
PB
8408 break;
8409
8410 case 6: /* cps */
8411 ARCH(6);
8412 if (IS_USER(s))
8413 break;
8414 if (IS_M(env)) {
8984bd2e 8415 tmp = tcg_const_i32((insn & (1 << 4)) != 0);
9ee6e8bb 8416 /* PRIMASK */
8984bd2e
PB
8417 if (insn & 1) {
8418 addr = tcg_const_i32(16);
8419 gen_helper_v7m_msr(cpu_env, addr, tmp);
8420 }
9ee6e8bb 8421 /* FAULTMASK */
8984bd2e
PB
8422 if (insn & 2) {
8423 addr = tcg_const_i32(17);
8424 gen_helper_v7m_msr(cpu_env, addr, tmp);
8425 }
9ee6e8bb
PB
8426 gen_lookup_tb(s);
8427 } else {
8428 if (insn & (1 << 4))
8429 shift = CPSR_A | CPSR_I | CPSR_F;
8430 else
8431 shift = 0;
8432
8433 val = ((insn & 7) << 6) & shift;
8434 gen_op_movl_T0_im(val);
8435 gen_set_psr_T0(s, shift, 0);
8436 }
8437 break;
8438
99c475ab
FB
8439 default:
8440 goto undef;
8441 }
8442 break;
8443
8444 case 12:
8445 /* load/store multiple */
8446 rn = (insn >> 8) & 0x7;
b0109805 8447 addr = load_reg(s, rn);
99c475ab
FB
8448 for (i = 0; i < 8; i++) {
8449 if (insn & (1 << i)) {
99c475ab
FB
8450 if (insn & (1 << 11)) {
8451 /* load */
b0109805
PB
8452 tmp = gen_ld32(addr, IS_USER(s));
8453 store_reg(s, i, tmp);
99c475ab
FB
8454 } else {
8455 /* store */
b0109805
PB
8456 tmp = load_reg(s, i);
8457 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8458 }
5899f386 8459 /* advance to the next address */
b0109805 8460 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8461 }
8462 }
5899f386 8463 /* Base register writeback. */
b0109805
PB
8464 if ((insn & (1 << rn)) == 0) {
8465 store_reg(s, rn, addr);
8466 } else {
8467 dead_tmp(addr);
8468 }
99c475ab
FB
8469 break;
8470
8471 case 13:
8472 /* conditional branch or swi */
8473 cond = (insn >> 8) & 0xf;
8474 if (cond == 0xe)
8475 goto undef;
8476
8477 if (cond == 0xf) {
8478 /* swi */
9ee6e8bb 8479 gen_set_condexec(s);
422ebf69 8480 gen_set_pc_im(s->pc);
9ee6e8bb 8481 s->is_jmp = DISAS_SWI;
99c475ab
FB
8482 break;
8483 }
8484 /* generate a conditional jump to next instruction */
e50e6a20 8485 s->condlabel = gen_new_label();
d9ba4830 8486 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8487 s->condjmp = 1;
99c475ab
FB
8488 gen_movl_T1_reg(s, 15);
8489
8490 /* jump to the offset */
5899f386 8491 val = (uint32_t)s->pc + 2;
99c475ab 8492 offset = ((int32_t)insn << 24) >> 24;
5899f386 8493 val += offset << 1;
8aaca4c0 8494 gen_jmp(s, val);
99c475ab
FB
8495 break;
8496
8497 case 14:
358bf29e 8498 if (insn & (1 << 11)) {
9ee6e8bb
PB
8499 if (disas_thumb2_insn(env, s, insn))
8500 goto undef32;
358bf29e
PB
8501 break;
8502 }
9ee6e8bb 8503 /* unconditional branch */
99c475ab
FB
8504 val = (uint32_t)s->pc;
8505 offset = ((int32_t)insn << 21) >> 21;
8506 val += (offset << 1) + 2;
8aaca4c0 8507 gen_jmp(s, val);
99c475ab
FB
8508 break;
8509
8510 case 15:
9ee6e8bb 8511 if (disas_thumb2_insn(env, s, insn))
6a0d8a1d 8512 goto undef32;
9ee6e8bb 8513 break;
99c475ab
FB
8514 }
8515 return;
9ee6e8bb
PB
8516undef32:
8517 gen_set_condexec(s);
5e3f878a 8518 gen_set_pc_im(s->pc - 4);
d9ba4830 8519 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8520 s->is_jmp = DISAS_JUMP;
8521 return;
8522illegal_op:
99c475ab 8523undef:
9ee6e8bb 8524 gen_set_condexec(s);
5e3f878a 8525 gen_set_pc_im(s->pc - 2);
d9ba4830 8526 gen_exception(EXCP_UDEF);
99c475ab
FB
8527 s->is_jmp = DISAS_JUMP;
8528}
8529
2c0262af
FB
8530/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8531 basic block 'tb'. If search_pc is TRUE, also generate PC
8532 information for each intermediate instruction. */
5fafdf24
TS
8533static inline int gen_intermediate_code_internal(CPUState *env,
8534 TranslationBlock *tb,
2c0262af
FB
8535 int search_pc)
8536{
8537 DisasContext dc1, *dc = &dc1;
8538 uint16_t *gen_opc_end;
8539 int j, lj;
0fa85d43 8540 target_ulong pc_start;
b5ff1b31 8541 uint32_t next_page_start;
3b46e624 8542
2c0262af 8543 /* generate intermediate code */
b26eefb6
PB
8544 num_temps = 0;
8545 memset(temps, 0, sizeof(temps));
8546
0fa85d43 8547 pc_start = tb->pc;
3b46e624 8548
2c0262af
FB
8549 dc->tb = tb;
8550
2c0262af 8551 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8552
8553 dc->is_jmp = DISAS_NEXT;
8554 dc->pc = pc_start;
8aaca4c0 8555 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8556 dc->condjmp = 0;
5899f386 8557 dc->thumb = env->thumb;
9ee6e8bb
PB
8558 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8559 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 8560 dc->is_mem = 0;
b5ff1b31 8561#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8562 if (IS_M(env)) {
8563 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8564 } else {
8565 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8566 }
b5ff1b31 8567#endif
4373f3ce
PB
8568 cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8569 cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8570 cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8571 cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
ad69471c
PB
8572 cpu_V0 = cpu_F0d;
8573 cpu_V1 = cpu_F1d;
e677137d
PB
8574 /* FIXME: cpu_M0 can probably be the same as cpu_V0. */
8575 cpu_M0 = tcg_temp_new(TCG_TYPE_I64);
b5ff1b31 8576 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8577 lj = -1;
9ee6e8bb
PB
8578 /* Reset the conditional execution bits immediately. This avoids
8579 complications trying to do it at the end of the block. */
8580 if (env->condexec_bits)
8f01245e
PB
8581 {
8582 TCGv tmp = new_tmp();
8583 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8584 store_cpu_field(tmp, condexec_bits);
8f01245e 8585 }
2c0262af 8586 do {
fbb4a2e3
PB
8587#ifdef CONFIG_USER_ONLY
8588 /* Intercept jump to the magic kernel page. */
8589 if (dc->pc >= 0xffff0000) {
8590 /* We always get here via a jump, so know we are not in a
8591 conditional execution block. */
8592 gen_exception(EXCP_KERNEL_TRAP);
8593 dc->is_jmp = DISAS_UPDATE;
8594 break;
8595 }
8596#else
9ee6e8bb
PB
8597 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8598 /* We always get here via a jump, so know we are not in a
8599 conditional execution block. */
d9ba4830 8600 gen_exception(EXCP_EXCEPTION_EXIT);
9ee6e8bb
PB
8601 }
8602#endif
8603
1fddef4b
FB
8604 if (env->nb_breakpoints > 0) {
8605 for(j = 0; j < env->nb_breakpoints; j++) {
8606 if (env->breakpoints[j] == dc->pc) {
9ee6e8bb 8607 gen_set_condexec(dc);
5e3f878a 8608 gen_set_pc_im(dc->pc);
d9ba4830 8609 gen_exception(EXCP_DEBUG);
1fddef4b 8610 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8611 /* Advance PC so that clearing the breakpoint will
8612 invalidate this TB. */
8613 dc->pc += 2;
8614 goto done_generating;
1fddef4b
FB
8615 break;
8616 }
8617 }
8618 }
2c0262af
FB
8619 if (search_pc) {
8620 j = gen_opc_ptr - gen_opc_buf;
8621 if (lj < j) {
8622 lj++;
8623 while (lj < j)
8624 gen_opc_instr_start[lj++] = 0;
8625 }
0fa85d43 8626 gen_opc_pc[lj] = dc->pc;
2c0262af
FB
8627 gen_opc_instr_start[lj] = 1;
8628 }
e50e6a20 8629
9ee6e8bb
PB
8630 if (env->thumb) {
8631 disas_thumb_insn(env, dc);
8632 if (dc->condexec_mask) {
8633 dc->condexec_cond = (dc->condexec_cond & 0xe)
8634 | ((dc->condexec_mask >> 4) & 1);
8635 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8636 if (dc->condexec_mask == 0) {
8637 dc->condexec_cond = 0;
8638 }
8639 }
8640 } else {
8641 disas_arm_insn(env, dc);
8642 }
b26eefb6
PB
8643 if (num_temps) {
8644 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8645 num_temps = 0;
8646 }
e50e6a20
FB
8647
8648 if (dc->condjmp && !dc->is_jmp) {
8649 gen_set_label(dc->condlabel);
8650 dc->condjmp = 0;
8651 }
6658ffb8
PB
8652 /* Terminate the TB on memory ops if watchpoints are present. */
8653 /* FIXME: This should be replacd by the deterministic execution
8654 * IRQ raising bits. */
8655 if (dc->is_mem && env->nb_watchpoints)
8656 break;
8657
e50e6a20
FB
8658 /* Translation stops when a conditional branch is enoutered.
8659 * Otherwise the subsequent code could get translated several times.
b5ff1b31
FB
8660 * Also stop translation when a page boundary is reached. This
8661 * ensures prefech aborts occur at the right place. */
1fddef4b
FB
8662 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8663 !env->singlestep_enabled &&
b5ff1b31 8664 dc->pc < next_page_start);
9ee6e8bb 8665
b5ff1b31 8666 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8667 instruction was a conditional branch or trap, and the PC has
8668 already been written. */
8aaca4c0
FB
8669 if (__builtin_expect(env->singlestep_enabled, 0)) {
8670 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8671 if (dc->condjmp) {
9ee6e8bb
PB
8672 gen_set_condexec(dc);
8673 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8674 gen_exception(EXCP_SWI);
9ee6e8bb 8675 } else {
d9ba4830 8676 gen_exception(EXCP_DEBUG);
9ee6e8bb 8677 }
e50e6a20
FB
8678 gen_set_label(dc->condlabel);
8679 }
8680 if (dc->condjmp || !dc->is_jmp) {
5e3f878a 8681 gen_set_pc_im(dc->pc);
e50e6a20 8682 dc->condjmp = 0;
8aaca4c0 8683 }
9ee6e8bb
PB
8684 gen_set_condexec(dc);
8685 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8686 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8687 } else {
8688 /* FIXME: Single stepping a WFI insn will not halt
8689 the CPU. */
d9ba4830 8690 gen_exception(EXCP_DEBUG);
9ee6e8bb 8691 }
8aaca4c0 8692 } else {
9ee6e8bb
PB
8693 /* While branches must always occur at the end of an IT block,
8694 there are a few other things that can cause us to terminate
8695 the TB in the middel of an IT block:
8696 - Exception generating instructions (bkpt, swi, undefined).
8697 - Page boundaries.
8698 - Hardware watchpoints.
8699 Hardware breakpoints have already been handled and skip this code.
8700 */
8701 gen_set_condexec(dc);
8aaca4c0 8702 switch(dc->is_jmp) {
8aaca4c0 8703 case DISAS_NEXT:
6e256c93 8704 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8705 break;
8706 default:
8707 case DISAS_JUMP:
8708 case DISAS_UPDATE:
8709 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8710 tcg_gen_exit_tb(0);
8aaca4c0
FB
8711 break;
8712 case DISAS_TB_JUMP:
8713 /* nothing more to generate */
8714 break;
9ee6e8bb 8715 case DISAS_WFI:
d9ba4830 8716 gen_helper_wfi();
9ee6e8bb
PB
8717 break;
8718 case DISAS_SWI:
d9ba4830 8719 gen_exception(EXCP_SWI);
9ee6e8bb 8720 break;
8aaca4c0 8721 }
e50e6a20
FB
8722 if (dc->condjmp) {
8723 gen_set_label(dc->condlabel);
9ee6e8bb 8724 gen_set_condexec(dc);
6e256c93 8725 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8726 dc->condjmp = 0;
8727 }
2c0262af 8728 }
9ee6e8bb 8729done_generating:
2c0262af
FB
8730 *gen_opc_ptr = INDEX_op_end;
8731
8732#ifdef DEBUG_DISAS
e19e89a5 8733 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8734 fprintf(logfile, "----------------\n");
8735 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8736 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8737 fprintf(logfile, "\n");
8738 }
8739#endif
b5ff1b31
FB
8740 if (search_pc) {
8741 j = gen_opc_ptr - gen_opc_buf;
8742 lj++;
8743 while (lj <= j)
8744 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8745 } else {
2c0262af 8746 tb->size = dc->pc - pc_start;
b5ff1b31 8747 }
2c0262af
FB
8748 return 0;
8749}
8750
8751int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8752{
8753 return gen_intermediate_code_internal(env, tb, 0);
8754}
8755
8756int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8757{
8758 return gen_intermediate_code_internal(env, tb, 1);
8759}
8760
b5ff1b31
FB
8761static const char *cpu_mode_names[16] = {
8762 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8763 "???", "???", "???", "und", "???", "???", "???", "sys"
8764};
9ee6e8bb 8765
5fafdf24 8766void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8767 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8768 int flags)
2c0262af
FB
8769{
8770 int i;
bc380d17 8771 union {
b7bcbe95
FB
8772 uint32_t i;
8773 float s;
8774 } s0, s1;
8775 CPU_DoubleU d;
a94a6abf
PB
8776 /* ??? This assumes float64 and double have the same layout.
8777 Oh well, it's only debug dumps. */
8778 union {
8779 float64 f64;
8780 double d;
8781 } d0;
b5ff1b31 8782 uint32_t psr;
2c0262af
FB
8783
8784 for(i=0;i<16;i++) {
7fe48483 8785 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8786 if ((i % 4) == 3)
7fe48483 8787 cpu_fprintf(f, "\n");
2c0262af 8788 else
7fe48483 8789 cpu_fprintf(f, " ");
2c0262af 8790 }
b5ff1b31 8791 psr = cpsr_read(env);
687fa640
TS
8792 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8793 psr,
b5ff1b31
FB
8794 psr & (1 << 31) ? 'N' : '-',
8795 psr & (1 << 30) ? 'Z' : '-',
8796 psr & (1 << 29) ? 'C' : '-',
8797 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8798 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8799 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95 8800
5e3f878a 8801#if 0
b7bcbe95 8802 for (i = 0; i < 16; i++) {
8e96005d
FB
8803 d.d = env->vfp.regs[i];
8804 s0.i = d.l.lower;
8805 s1.i = d.l.upper;
a94a6abf
PB
8806 d0.f64 = d.d;
8807 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8808 i * 2, (int)s0.i, s0.s,
a94a6abf 8809 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8810 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8811 d0.d);
b7bcbe95 8812 }
40f137e1 8813 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
5e3f878a 8814#endif
2c0262af 8815}
a6b025d3 8816
d2856f1a
AJ
8817void gen_pc_load(CPUState *env, TranslationBlock *tb,
8818 unsigned long searched_pc, int pc_pos, void *puc)
8819{
8820 env->regs[15] = gen_opc_pc[pc_pos];
8821}