]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
ARM TCG conversion 10/16.
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
1497c961
PB
32
33#define GEN_HELPER 1
b26eefb6 34#include "helpers.h"
2c0262af 35
9ee6e8bb
PB
36#define ENABLE_ARCH_5J 0
37#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
38#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
39#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
40#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31
FB
41
42#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
43
2c0262af
FB
44/* internal defines */
45typedef struct DisasContext {
0fa85d43 46 target_ulong pc;
2c0262af 47 int is_jmp;
e50e6a20
FB
48 /* Nonzero if this instruction has been conditionally skipped. */
49 int condjmp;
50 /* The label that will be jumped to when the instruction is skipped. */
51 int condlabel;
9ee6e8bb
PB
52 /* Thumb-2 condtional execution bits. */
53 int condexec_mask;
54 int condexec_cond;
2c0262af 55 struct TranslationBlock *tb;
8aaca4c0 56 int singlestep_enabled;
5899f386 57 int thumb;
6658ffb8 58 int is_mem;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af
FB
74
75/* XXX: move that elsewhere */
2c0262af
FB
76extern FILE *logfile;
77extern int loglevel;
78
b26eefb6
PB
79static TCGv cpu_env;
80/* FIXME: These should be removed. */
81static TCGv cpu_T[3];
4373f3ce 82static TCGv cpu_F0s, cpu_F1s, cpu_F0d, cpu_F1d;
b26eefb6
PB
83
84/* initialize TCG globals. */
85void arm_translate_init(void)
86{
87 cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");
88
89 cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
90 cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
91 cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
92}
93
94/* The code generator doesn't like lots of temporaries, so maintain our own
95 cache for reuse within a function. */
96#define MAX_TEMPS 8
97static int num_temps;
98static TCGv temps[MAX_TEMPS];
99
100/* Allocate a temporary variable. */
101static TCGv new_tmp(void)
102{
103 TCGv tmp;
104 if (num_temps == MAX_TEMPS)
105 abort();
106
107 if (GET_TCGV(temps[num_temps]))
108 return temps[num_temps++];
109
110 tmp = tcg_temp_new(TCG_TYPE_I32);
111 temps[num_temps++] = tmp;
112 return tmp;
113}
114
115/* Release a temporary variable. */
116static void dead_tmp(TCGv tmp)
117{
118 int i;
119 num_temps--;
120 i = num_temps;
121 if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
122 return;
123
124 /* Shuffle this temp to the last slot. */
125 while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
126 i--;
127 while (i < num_temps) {
128 temps[i] = temps[i + 1];
129 i++;
130 }
131 temps[i] = tmp;
132}
133
d9ba4830
PB
134static inline TCGv load_cpu_offset(int offset)
135{
136 TCGv tmp = new_tmp();
137 tcg_gen_ld_i32(tmp, cpu_env, offset);
138 return tmp;
139}
140
141#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
142
143static inline void store_cpu_offset(TCGv var, int offset)
144{
145 tcg_gen_st_i32(var, cpu_env, offset);
146 dead_tmp(var);
147}
148
149#define store_cpu_field(var, name) \
150 store_cpu_offset(var, offsetof(CPUState, name))
151
b26eefb6
PB
152/* Set a variable to the value of a CPU register. */
153static void load_reg_var(DisasContext *s, TCGv var, int reg)
154{
155 if (reg == 15) {
156 uint32_t addr;
157 /* normaly, since we updated PC, we need only to add one insn */
158 if (s->thumb)
159 addr = (long)s->pc + 2;
160 else
161 addr = (long)s->pc + 4;
162 tcg_gen_movi_i32(var, addr);
163 } else {
164 tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
165 }
166}
167
168/* Create a new temporary and set it to the value of a CPU register. */
169static inline TCGv load_reg(DisasContext *s, int reg)
170{
171 TCGv tmp = new_tmp();
172 load_reg_var(s, tmp, reg);
173 return tmp;
174}
175
176/* Set a CPU register. The source must be a temporary and will be
177 marked as dead. */
178static void store_reg(DisasContext *s, int reg, TCGv var)
179{
180 if (reg == 15) {
181 tcg_gen_andi_i32(var, var, ~1);
182 s->is_jmp = DISAS_JUMP;
183 }
184 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
185 dead_tmp(var);
186}
187
188
189/* Basic operations. */
190#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
191#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2])
192#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
193#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2])
194#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0])
195#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
196#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
197#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im)
198
199#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
200#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
201#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
202#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
203
204#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
205#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
207#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
208#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
209#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
210#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
211
212#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
213#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
214#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
215#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
216#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
217
218/* Value extensions. */
219#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
220#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
221#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
222#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
223
1497c961
PB
224#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
225#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
226
227#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 228
1497c961
PB
229#define gen_op_addl_T0_T1_setq() \
230 gen_helper_add_setq(cpu_T[0], cpu_T[0], cpu_T[1])
231#define gen_op_addl_T0_T1_saturate() \
232 gen_helper_add_saturate(cpu_T[0], cpu_T[0], cpu_T[1])
233#define gen_op_subl_T0_T1_saturate() \
234 gen_helper_sub_saturate(cpu_T[0], cpu_T[0], cpu_T[1])
235#define gen_op_addl_T0_T1_usaturate() \
236 gen_helper_add_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
237#define gen_op_subl_T0_T1_usaturate() \
238 gen_helper_sub_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
f51bbbfe 239
3670669c
PB
240/* Copy the most significant bit of T0 to all bits of T1. */
241#define gen_op_signbit_T1_T0() tcg_gen_sari_i32(cpu_T[1], cpu_T[0], 31)
242
d9ba4830
PB
243#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
244/* Set NZCV flags from the high 4 bits of var. */
245#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
246
247static void gen_exception(int excp)
248{
249 TCGv tmp = new_tmp();
250 tcg_gen_movi_i32(tmp, excp);
251 gen_helper_exception(tmp);
252 dead_tmp(tmp);
253}
254
3670669c
PB
255static void gen_smul_dual(TCGv a, TCGv b)
256{
257 TCGv tmp1 = new_tmp();
258 TCGv tmp2 = new_tmp();
3670669c
PB
259 tcg_gen_ext8s_i32(tmp1, a);
260 tcg_gen_ext8s_i32(tmp2, b);
261 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
262 dead_tmp(tmp2);
263 tcg_gen_sari_i32(a, a, 16);
264 tcg_gen_sari_i32(b, b, 16);
265 tcg_gen_mul_i32(b, b, a);
266 tcg_gen_mov_i32(a, tmp1);
267 dead_tmp(tmp1);
268}
269
270/* Byteswap each halfword. */
271static void gen_rev16(TCGv var)
272{
273 TCGv tmp = new_tmp();
274 tcg_gen_shri_i32(tmp, var, 8);
275 tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);
276 tcg_gen_shli_i32(var, var, 8);
277 tcg_gen_andi_i32(var, var, 0xff00ff00);
278 tcg_gen_or_i32(var, var, tmp);
279 dead_tmp(tmp);
280}
281
282/* Byteswap low halfword and sign extend. */
283static void gen_revsh(TCGv var)
284{
285 TCGv tmp = new_tmp();
286 tcg_gen_shri_i32(tmp, var, 8);
287 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
288 tcg_gen_shli_i32(var, var, 8);
289 tcg_gen_ext8s_i32(var, var);
290 tcg_gen_or_i32(var, var, tmp);
291 dead_tmp(tmp);
292}
293
294/* Unsigned bitfield extract. */
295static void gen_ubfx(TCGv var, int shift, uint32_t mask)
296{
297 if (shift)
298 tcg_gen_shri_i32(var, var, shift);
299 tcg_gen_andi_i32(var, var, mask);
300}
301
302/* Signed bitfield extract. */
303static void gen_sbfx(TCGv var, int shift, int width)
304{
305 uint32_t signbit;
306
307 if (shift)
308 tcg_gen_sari_i32(var, var, shift);
309 if (shift + width < 32) {
310 signbit = 1u << (width - 1);
311 tcg_gen_andi_i32(var, var, (1u << width) - 1);
312 tcg_gen_xori_i32(var, var, signbit);
313 tcg_gen_subi_i32(var, var, signbit);
314 }
315}
316
317/* Bitfield insertion. Insert val into base. Clobbers base and val. */
318static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
319{
320 tcg_gen_shli_i32(val, val, shift);
321 tcg_gen_andi_i32(val, val, mask);
322 tcg_gen_andi_i32(base, base, ~mask);
323 tcg_gen_or_i32(dest, base, val);
324}
325
d9ba4830
PB
326/* Round the top 32 bits of a 64-bit value. */
327static void gen_roundqd(TCGv a, TCGv b)
3670669c 328{
d9ba4830
PB
329 tcg_gen_shri_i32(a, a, 31);
330 tcg_gen_add_i32(a, a, b);
3670669c
PB
331}
332
8f01245e
PB
333/* FIXME: Most targets have native widening multiplication.
334 It would be good to use that instead of a full wide multiply. */
335/* Unsigned 32x32->64 multiply. */
336static void gen_op_mull_T0_T1(void)
337{
338 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
339 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
340
341 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
342 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
343 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
344 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
345 tcg_gen_shri_i64(tmp1, tmp1, 32);
346 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
347}
348
349/* Signed 32x32->64 multiply. */
d9ba4830 350static void gen_imull(TCGv a, TCGv b)
8f01245e
PB
351{
352 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
353 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
354
d9ba4830
PB
355 tcg_gen_ext_i32_i64(tmp1, a);
356 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 357 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 358 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 359 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
360 tcg_gen_trunc_i64_i32(b, tmp1);
361}
362#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
363
364/* Signed 32x16 multiply, top 32 bits. */
365static void gen_imulw(TCGv a, TCGv b)
366{
367 gen_imull(a, b);
368 tcg_gen_shri_i32(a, a, 16);
369 tcg_gen_shli_i32(b, b, 16);
370 tcg_gen_or_i32(a, a, b);
8f01245e
PB
371}
372
373/* Swap low and high halfwords. */
374static void gen_swap_half(TCGv var)
375{
376 TCGv tmp = new_tmp();
377 tcg_gen_shri_i32(tmp, var, 16);
378 tcg_gen_shli_i32(var, var, 16);
379 tcg_gen_or_i32(var, var, tmp);
3670669c 380 dead_tmp(tmp);
8f01245e
PB
381}
382
b26eefb6
PB
383/* Dual 16-bit add. Result placed in t0 and t1 is marked as dead.
384 tmp = (t0 ^ t1) & 0x8000;
385 t0 &= ~0x8000;
386 t1 &= ~0x8000;
387 t0 = (t0 + t1) ^ tmp;
388 */
389
390static void gen_add16(TCGv t0, TCGv t1)
391{
392 TCGv tmp = new_tmp();
393 tcg_gen_xor_i32(tmp, t0, t1);
394 tcg_gen_andi_i32(tmp, tmp, 0x8000);
395 tcg_gen_andi_i32(t0, t0, ~0x8000);
396 tcg_gen_andi_i32(t1, t1, ~0x8000);
397 tcg_gen_add_i32(t0, t0, t1);
398 tcg_gen_xor_i32(t0, t0, tmp);
399 dead_tmp(tmp);
400 dead_tmp(t1);
401}
402
9a119ff6
PB
403#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
404
b26eefb6
PB
405/* Set CF to the top bit of var. */
406static void gen_set_CF_bit31(TCGv var)
407{
408 TCGv tmp = new_tmp();
409 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 410 gen_set_CF(var);
b26eefb6
PB
411 dead_tmp(tmp);
412}
413
414/* Set N and Z flags from var. */
415static inline void gen_logic_CC(TCGv var)
416{
417 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NZF));
418}
419
420/* T0 += T1 + CF. */
421static void gen_adc_T0_T1(void)
422{
d9ba4830 423 TCGv tmp;
b26eefb6 424 gen_op_addl_T0_T1();
d9ba4830 425 tmp = load_cpu_field(CF);
b26eefb6
PB
426 tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
427 dead_tmp(tmp);
428}
429
3670669c
PB
430/* dest = T0 - T1 + CF - 1. */
431static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
432{
d9ba4830 433 TCGv tmp;
3670669c 434 tcg_gen_sub_i32(dest, t0, t1);
d9ba4830 435 tmp = load_cpu_field(CF);
3670669c
PB
436 tcg_gen_add_i32(dest, dest, tmp);
437 tcg_gen_subi_i32(dest, dest, 1);
438 dead_tmp(tmp);
439}
440
441#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
442#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
443
b26eefb6
PB
444/* FIXME: Implement this natively. */
445static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
446{
447 tcg_gen_xori_i32(t0, t1, ~0);
448}
449
450/* T0 &= ~T1. Clobbers T1. */
451/* FIXME: Implement bic natively. */
452static inline void gen_op_bicl_T0_T1(void)
453{
454 gen_op_notl_T1();
455 gen_op_andl_T0_T1();
456}
457
458/* FIXME: Implement this natively. */
459static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
460{
461 TCGv tmp;
462
463 if (i == 0)
464 return;
465
466 tmp = new_tmp();
467 tcg_gen_shri_i32(tmp, t1, i);
468 tcg_gen_shli_i32(t1, t1, 32 - i);
469 tcg_gen_or_i32(t0, t1, tmp);
470 dead_tmp(tmp);
471}
472
9a119ff6 473static void shifter_out_im(TCGv var, int shift)
b26eefb6 474{
9a119ff6
PB
475 TCGv tmp = new_tmp();
476 if (shift == 0) {
477 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 478 } else {
9a119ff6
PB
479 tcg_gen_shri_i32(tmp, var, shift);
480 if (shift != 31);
481 tcg_gen_andi_i32(tmp, tmp, 1);
482 }
483 gen_set_CF(tmp);
484 dead_tmp(tmp);
485}
b26eefb6 486
9a119ff6
PB
487/* Shift by immediate. Includes special handling for shift == 0. */
488static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
489{
490 switch (shiftop) {
491 case 0: /* LSL */
492 if (shift != 0) {
493 if (flags)
494 shifter_out_im(var, 32 - shift);
495 tcg_gen_shli_i32(var, var, shift);
496 }
497 break;
498 case 1: /* LSR */
499 if (shift == 0) {
500 if (flags) {
501 tcg_gen_shri_i32(var, var, 31);
502 gen_set_CF(var);
503 }
504 tcg_gen_movi_i32(var, 0);
505 } else {
506 if (flags)
507 shifter_out_im(var, shift - 1);
508 tcg_gen_shri_i32(var, var, shift);
509 }
510 break;
511 case 2: /* ASR */
512 if (shift == 0)
513 shift = 32;
514 if (flags)
515 shifter_out_im(var, shift - 1);
516 if (shift == 32)
517 shift = 31;
518 tcg_gen_sari_i32(var, var, shift);
519 break;
520 case 3: /* ROR/RRX */
521 if (shift != 0) {
522 if (flags)
523 shifter_out_im(var, shift - 1);
524 tcg_gen_rori_i32(var, var, shift); break;
525 } else {
d9ba4830 526 TCGv tmp = load_cpu_field(CF);
9a119ff6
PB
527 if (flags)
528 shifter_out_im(var, 0);
529 tcg_gen_shri_i32(var, var, 1);
b26eefb6
PB
530 tcg_gen_shli_i32(tmp, tmp, 31);
531 tcg_gen_or_i32(var, var, tmp);
532 dead_tmp(tmp);
b26eefb6
PB
533 }
534 }
535};
536
6ddbc6e4
PB
537#define PAS_OP(pfx) \
538 switch (op2) { \
539 case 0: gen_pas_helper(glue(pfx,add16)); break; \
540 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
541 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
542 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
543 case 4: gen_pas_helper(glue(pfx,add8)); break; \
544 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
545 }
d9ba4830 546static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
547{
548 TCGv tmp;
549
550 switch (op1) {
551#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
552 case 1:
553 tmp = tcg_temp_new(TCG_TYPE_PTR);
554 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
555 PAS_OP(s)
556 break;
557 case 5:
558 tmp = tcg_temp_new(TCG_TYPE_PTR);
559 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
560 PAS_OP(u)
561 break;
562#undef gen_pas_helper
563#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
564 case 2:
565 PAS_OP(q);
566 break;
567 case 3:
568 PAS_OP(sh);
569 break;
570 case 6:
571 PAS_OP(uq);
572 break;
573 case 7:
574 PAS_OP(uh);
575 break;
576#undef gen_pas_helper
577 }
578}
9ee6e8bb
PB
579#undef PAS_OP
580
6ddbc6e4
PB
581/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
582#define PAS_OP(pfx) \
583 switch (op2) { \
584 case 0: gen_pas_helper(glue(pfx,add8)); break; \
585 case 1: gen_pas_helper(glue(pfx,add16)); break; \
586 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
587 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
588 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
589 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
590 }
d9ba4830 591static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
592{
593 TCGv tmp;
594
595 switch (op1) {
596#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
597 case 0:
598 tmp = tcg_temp_new(TCG_TYPE_PTR);
599 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
600 PAS_OP(s)
601 break;
602 case 4:
603 tmp = tcg_temp_new(TCG_TYPE_PTR);
604 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
605 PAS_OP(u)
606 break;
607#undef gen_pas_helper
608#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
609 case 1:
610 PAS_OP(q);
611 break;
612 case 2:
613 PAS_OP(sh);
614 break;
615 case 5:
616 PAS_OP(uq);
617 break;
618 case 6:
619 PAS_OP(uh);
620 break;
621#undef gen_pas_helper
622 }
623}
9ee6e8bb
PB
624#undef PAS_OP
625
d9ba4830
PB
626static void gen_test_cc(int cc, int label)
627{
628 TCGv tmp;
629 TCGv tmp2;
630 TCGv zero;
631 int inv;
632
633 zero = tcg_const_i32(0);
634 switch (cc) {
635 case 0: /* eq: Z */
636 tmp = load_cpu_field(NZF);
637 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
638 break;
639 case 1: /* ne: !Z */
640 tmp = load_cpu_field(NZF);
641 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
642 break;
643 case 2: /* cs: C */
644 tmp = load_cpu_field(CF);
645 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
646 break;
647 case 3: /* cc: !C */
648 tmp = load_cpu_field(CF);
649 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
650 break;
651 case 4: /* mi: N */
652 tmp = load_cpu_field(NZF);
653 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
654 break;
655 case 5: /* pl: !N */
656 tmp = load_cpu_field(NZF);
657 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
658 break;
659 case 6: /* vs: V */
660 tmp = load_cpu_field(VF);
661 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
662 break;
663 case 7: /* vc: !V */
664 tmp = load_cpu_field(VF);
665 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
666 break;
667 case 8: /* hi: C && !Z */
668 inv = gen_new_label();
669 tmp = load_cpu_field(CF);
670 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
671 dead_tmp(tmp);
672 tmp = load_cpu_field(NZF);
673 tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
674 gen_set_label(inv);
675 break;
676 case 9: /* ls: !C || Z */
677 tmp = load_cpu_field(CF);
678 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
679 dead_tmp(tmp);
680 tmp = load_cpu_field(NZF);
681 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
682 break;
683 case 10: /* ge: N == V -> N ^ V == 0 */
684 tmp = load_cpu_field(VF);
685 tmp2 = load_cpu_field(NZF);
686 tcg_gen_xor_i32(tmp, tmp, tmp2);
687 dead_tmp(tmp2);
688 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
689 break;
690 case 11: /* lt: N != V -> N ^ V != 0 */
691 tmp = load_cpu_field(VF);
692 tmp2 = load_cpu_field(NZF);
693 tcg_gen_xor_i32(tmp, tmp, tmp2);
694 dead_tmp(tmp2);
695 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
696 break;
697 case 12: /* gt: !Z && N == V */
698 inv = gen_new_label();
699 tmp = load_cpu_field(NZF);
700 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
701 dead_tmp(tmp);
702 tmp = load_cpu_field(VF);
703 tmp2 = load_cpu_field(NZF);
704 tcg_gen_xor_i32(tmp, tmp, tmp2);
705 dead_tmp(tmp2);
706 tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
707 gen_set_label(inv);
708 break;
709 case 13: /* le: Z || N != V */
710 tmp = load_cpu_field(NZF);
711 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
712 dead_tmp(tmp);
713 tmp = load_cpu_field(VF);
714 tmp2 = load_cpu_field(NZF);
715 tcg_gen_xor_i32(tmp, tmp, tmp2);
716 dead_tmp(tmp2);
717 tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
718 break;
719 default:
720 fprintf(stderr, "Bad condition code 0x%x\n", cc);
721 abort();
722 }
723 dead_tmp(tmp);
724}
2c0262af
FB
725
726const uint8_t table_logic_cc[16] = {
727 1, /* and */
728 1, /* xor */
729 0, /* sub */
730 0, /* rsb */
731 0, /* add */
732 0, /* adc */
733 0, /* sbc */
734 0, /* rsc */
735 1, /* andl */
736 1, /* xorl */
737 0, /* cmp */
738 0, /* cmn */
739 1, /* orr */
740 1, /* mov */
741 1, /* bic */
742 1, /* mvn */
743};
3b46e624 744
2c0262af
FB
745static GenOpFunc *gen_shift_T1_T0[4] = {
746 gen_op_shll_T1_T0,
747 gen_op_shrl_T1_T0,
748 gen_op_sarl_T1_T0,
749 gen_op_rorl_T1_T0,
750};
751
752static GenOpFunc *gen_shift_T1_T0_cc[4] = {
753 gen_op_shll_T1_T0_cc,
754 gen_op_shrl_T1_T0_cc,
755 gen_op_sarl_T1_T0_cc,
756 gen_op_rorl_T1_T0_cc,
757};
758
d9ba4830
PB
759/* Set PC and Thumb state from an immediate address. */
760static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 761{
b26eefb6 762 TCGv tmp;
99c475ab 763
b26eefb6
PB
764 s->is_jmp = DISAS_UPDATE;
765 tmp = new_tmp();
d9ba4830
PB
766 if (s->thumb != (addr & 1)) {
767 tcg_gen_movi_i32(tmp, addr & 1);
768 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
769 }
770 tcg_gen_movi_i32(tmp, addr & ~1);
771 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 772 dead_tmp(tmp);
d9ba4830
PB
773}
774
775/* Set PC and Thumb state from var. var is marked as dead. */
776static inline void gen_bx(DisasContext *s, TCGv var)
777{
778 TCGv tmp;
779
780 s->is_jmp = DISAS_UPDATE;
781 tmp = new_tmp();
782 tcg_gen_andi_i32(tmp, var, 1);
783 store_cpu_field(tmp, thumb);
784 tcg_gen_andi_i32(var, var, ~1);
785 store_cpu_field(var, regs[15]);
786}
787
788/* TODO: This should be removed. Use gen_bx instead. */
789static inline void gen_bx_T0(DisasContext *s)
790{
791 TCGv tmp = new_tmp();
792 tcg_gen_mov_i32(tmp, cpu_T[0]);
793 gen_bx(s, tmp);
b26eefb6 794}
b5ff1b31
FB
795
796#if defined(CONFIG_USER_ONLY)
797#define gen_ldst(name, s) gen_op_##name##_raw()
798#else
799#define gen_ldst(name, s) do { \
6658ffb8 800 s->is_mem = 1; \
b5ff1b31
FB
801 if (IS_USER(s)) \
802 gen_op_##name##_user(); \
803 else \
804 gen_op_##name##_kernel(); \
805 } while (0)
806#endif
b0109805
PB
807static inline TCGv gen_ld8s(TCGv addr, int index)
808{
809 TCGv tmp = new_tmp();
810 tcg_gen_qemu_ld8s(tmp, addr, index);
811 return tmp;
812}
813static inline TCGv gen_ld8u(TCGv addr, int index)
814{
815 TCGv tmp = new_tmp();
816 tcg_gen_qemu_ld8u(tmp, addr, index);
817 return tmp;
818}
819static inline TCGv gen_ld16s(TCGv addr, int index)
820{
821 TCGv tmp = new_tmp();
822 tcg_gen_qemu_ld16s(tmp, addr, index);
823 return tmp;
824}
825static inline TCGv gen_ld16u(TCGv addr, int index)
826{
827 TCGv tmp = new_tmp();
828 tcg_gen_qemu_ld16u(tmp, addr, index);
829 return tmp;
830}
831static inline TCGv gen_ld32(TCGv addr, int index)
832{
833 TCGv tmp = new_tmp();
834 tcg_gen_qemu_ld32u(tmp, addr, index);
835 return tmp;
836}
837static inline void gen_st8(TCGv val, TCGv addr, int index)
838{
839 tcg_gen_qemu_st8(val, addr, index);
840 dead_tmp(val);
841}
842static inline void gen_st16(TCGv val, TCGv addr, int index)
843{
844 tcg_gen_qemu_st16(val, addr, index);
845 dead_tmp(val);
846}
847static inline void gen_st32(TCGv val, TCGv addr, int index)
848{
849 tcg_gen_qemu_st32(val, addr, index);
850 dead_tmp(val);
851}
b5ff1b31 852
2c0262af
FB
853static inline void gen_movl_T0_reg(DisasContext *s, int reg)
854{
b26eefb6 855 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
856}
857
858static inline void gen_movl_T1_reg(DisasContext *s, int reg)
859{
b26eefb6 860 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
861}
862
863static inline void gen_movl_T2_reg(DisasContext *s, int reg)
864{
b26eefb6
PB
865 load_reg_var(s, cpu_T[2], reg);
866}
867
868static inline void gen_set_pc_T0(void)
869{
870 tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
2c0262af
FB
871}
872
873static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
874{
b26eefb6
PB
875 TCGv tmp;
876 if (reg == 15) {
877 tmp = new_tmp();
878 tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
879 } else {
880 tmp = cpu_T[t];
881 }
882 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
2c0262af 883 if (reg == 15) {
b26eefb6 884 dead_tmp(tmp);
2c0262af
FB
885 s->is_jmp = DISAS_JUMP;
886 }
887}
888
889static inline void gen_movl_reg_T0(DisasContext *s, int reg)
890{
891 gen_movl_reg_TN(s, reg, 0);
892}
893
894static inline void gen_movl_reg_T1(DisasContext *s, int reg)
895{
896 gen_movl_reg_TN(s, reg, 1);
897}
898
b5ff1b31
FB
899/* Force a TB lookup after an instruction that changes the CPU state. */
900static inline void gen_lookup_tb(DisasContext *s)
901{
902 gen_op_movl_T0_im(s->pc);
903 gen_movl_reg_T0(s, 15);
904 s->is_jmp = DISAS_UPDATE;
905}
906
b0109805
PB
907static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
908 TCGv var)
2c0262af 909{
1e8d4eec 910 int val, rm, shift, shiftop;
b26eefb6 911 TCGv offset;
2c0262af
FB
912
913 if (!(insn & (1 << 25))) {
914 /* immediate */
915 val = insn & 0xfff;
916 if (!(insn & (1 << 23)))
917 val = -val;
537730b9 918 if (val != 0)
b0109805 919 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
920 } else {
921 /* shift/register */
922 rm = (insn) & 0xf;
923 shift = (insn >> 7) & 0x1f;
1e8d4eec 924 shiftop = (insn >> 5) & 3;
b26eefb6 925 offset = load_reg(s, rm);
9a119ff6 926 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 927 if (!(insn & (1 << 23)))
b0109805 928 tcg_gen_sub_i32(var, var, offset);
2c0262af 929 else
b0109805 930 tcg_gen_add_i32(var, var, offset);
b26eefb6 931 dead_tmp(offset);
2c0262af
FB
932 }
933}
934
191f9a93 935static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
b0109805 936 int extra, TCGv var)
2c0262af
FB
937{
938 int val, rm;
b26eefb6 939 TCGv offset;
3b46e624 940
2c0262af
FB
941 if (insn & (1 << 22)) {
942 /* immediate */
943 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
944 if (!(insn & (1 << 23)))
945 val = -val;
18acad92 946 val += extra;
537730b9 947 if (val != 0)
b0109805 948 tcg_gen_addi_i32(var, var, val);
2c0262af
FB
949 } else {
950 /* register */
191f9a93 951 if (extra)
b0109805 952 tcg_gen_addi_i32(var, var, extra);
2c0262af 953 rm = (insn) & 0xf;
b26eefb6 954 offset = load_reg(s, rm);
2c0262af 955 if (!(insn & (1 << 23)))
b0109805 956 tcg_gen_sub_i32(var, var, offset);
2c0262af 957 else
b0109805 958 tcg_gen_add_i32(var, var, offset);
b26eefb6 959 dead_tmp(offset);
2c0262af
FB
960 }
961}
962
4373f3ce
PB
963#define VFP_OP2(name) \
964static inline void gen_vfp_##name(int dp) \
965{ \
966 if (dp) \
967 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
968 else \
969 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
b7bcbe95
FB
970}
971
4373f3ce 972#define VFP_OP1i(name) \
9ee6e8bb
PB
973static inline void gen_vfp_##name(int dp, int arg) \
974{ \
975 if (dp) \
976 gen_op_vfp_##name##d(arg); \
977 else \
978 gen_op_vfp_##name##s(arg); \
979}
980
4373f3ce
PB
981VFP_OP2(add)
982VFP_OP2(sub)
983VFP_OP2(mul)
984VFP_OP2(div)
985
986#undef VFP_OP2
987
988static inline void gen_vfp_abs(int dp)
989{
990 if (dp)
991 gen_helper_vfp_absd(cpu_F0d, cpu_F0d);
992 else
993 gen_helper_vfp_abss(cpu_F0s, cpu_F0s);
994}
995
996static inline void gen_vfp_neg(int dp)
997{
998 if (dp)
999 gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
1000 else
1001 gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
1002}
1003
1004static inline void gen_vfp_sqrt(int dp)
1005{
1006 if (dp)
1007 gen_helper_vfp_sqrtd(cpu_F0d, cpu_F0d, cpu_env);
1008 else
1009 gen_helper_vfp_sqrts(cpu_F0s, cpu_F0s, cpu_env);
1010}
1011
1012static inline void gen_vfp_cmp(int dp)
1013{
1014 if (dp)
1015 gen_helper_vfp_cmpd(cpu_F0d, cpu_F1d, cpu_env);
1016 else
1017 gen_helper_vfp_cmps(cpu_F0s, cpu_F1s, cpu_env);
1018}
1019
1020static inline void gen_vfp_cmpe(int dp)
1021{
1022 if (dp)
1023 gen_helper_vfp_cmped(cpu_F0d, cpu_F1d, cpu_env);
1024 else
1025 gen_helper_vfp_cmpes(cpu_F0s, cpu_F1s, cpu_env);
1026}
1027
1028static inline void gen_vfp_F1_ld0(int dp)
1029{
1030 if (dp)
1031 tcg_gen_movi_i64(cpu_F0d, 0);
1032 else
1033 tcg_gen_movi_i32(cpu_F0s, 0);
1034}
1035
1036static inline void gen_vfp_uito(int dp)
1037{
1038 if (dp)
1039 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1040 else
1041 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1042}
1043
1044static inline void gen_vfp_sito(int dp)
1045{
1046 if (dp)
1047 gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
1048 else
1049 gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
1050}
1051
1052static inline void gen_vfp_toui(int dp)
1053{
1054 if (dp)
1055 gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
1056 else
1057 gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
1058}
1059
1060static inline void gen_vfp_touiz(int dp)
1061{
1062 if (dp)
1063 gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
1064 else
1065 gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
1066}
1067
1068static inline void gen_vfp_tosi(int dp)
1069{
1070 if (dp)
1071 gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
1072 else
1073 gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
1074}
1075
1076static inline void gen_vfp_tosiz(int dp)
9ee6e8bb
PB
1077{
1078 if (dp)
4373f3ce 1079 gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
9ee6e8bb 1080 else
4373f3ce
PB
1081 gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
1082}
1083
1084#define VFP_GEN_FIX(name) \
1085static inline void gen_vfp_##name(int dp, int shift) \
1086{ \
1087 if (dp) \
1088 gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tcg_const_i32(shift), cpu_env);\
1089 else \
1090 gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tcg_const_i32(shift), cpu_env);\
9ee6e8bb 1091}
4373f3ce
PB
1092VFP_GEN_FIX(tosh)
1093VFP_GEN_FIX(tosl)
1094VFP_GEN_FIX(touh)
1095VFP_GEN_FIX(toul)
1096VFP_GEN_FIX(shto)
1097VFP_GEN_FIX(slto)
1098VFP_GEN_FIX(uhto)
1099VFP_GEN_FIX(ulto)
1100#undef VFP_GEN_FIX
9ee6e8bb 1101
b5ff1b31
FB
1102static inline void gen_vfp_ld(DisasContext *s, int dp)
1103{
1104 if (dp)
4373f3ce 1105 tcg_gen_qemu_ld64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1106 else
4373f3ce 1107 tcg_gen_qemu_ld32u(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1108}
1109
1110static inline void gen_vfp_st(DisasContext *s, int dp)
1111{
1112 if (dp)
4373f3ce 1113 tcg_gen_qemu_st64(cpu_F0d, cpu_T[1], IS_USER(s));
b5ff1b31 1114 else
4373f3ce 1115 tcg_gen_qemu_st32(cpu_F0s, cpu_T[1], IS_USER(s));
b5ff1b31
FB
1116}
1117
8e96005d
FB
1118static inline long
1119vfp_reg_offset (int dp, int reg)
1120{
1121 if (dp)
1122 return offsetof(CPUARMState, vfp.regs[reg]);
1123 else if (reg & 1) {
1124 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1125 + offsetof(CPU_DoubleU, l.upper);
1126 } else {
1127 return offsetof(CPUARMState, vfp.regs[reg >> 1])
1128 + offsetof(CPU_DoubleU, l.lower);
1129 }
1130}
9ee6e8bb
PB
1131
1132/* Return the offset of a 32-bit piece of a NEON register.
1133 zero is the least significant end of the register. */
/* Return the offset of a 32-bit piece of a NEON register.
   zero is the least significant end of the register.  */
static inline long
neon_reg_offset (int reg, int n)
{
    /* Piece n of NEON register reg is single-precision slot reg * 2 + n.  */
    return vfp_reg_offset(0, reg * 2 + n);
}
1141
/* Read/write a 32-bit piece <reg,n> of a NEON register through the
   generated getreg/setreg ops; T selects the op's type suffix.  */
#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
1144
4373f3ce
PB
/* Single-precision values live in i32 TCG registers and doubles in i64,
   so the f32/f64 load/store ops are plain aliases of the integer ones.  */
#define tcg_gen_ld_f32 tcg_gen_ld_i32
#define tcg_gen_ld_f64 tcg_gen_ld_i64
#define tcg_gen_st_f32 tcg_gen_st_i32
#define tcg_gen_st_f64 tcg_gen_st_i64
1149
b7bcbe95
FB
1150static inline void gen_mov_F0_vreg(int dp, int reg)
1151{
1152 if (dp)
4373f3ce 1153 tcg_gen_ld_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1154 else
4373f3ce 1155 tcg_gen_ld_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1156}
1157
1158static inline void gen_mov_F1_vreg(int dp, int reg)
1159{
1160 if (dp)
4373f3ce 1161 tcg_gen_ld_f64(cpu_F1d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1162 else
4373f3ce 1163 tcg_gen_ld_f32(cpu_F1s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1164}
1165
1166static inline void gen_mov_vreg_F0(int dp, int reg)
1167{
1168 if (dp)
4373f3ce 1169 tcg_gen_st_f64(cpu_F0d, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95 1170 else
4373f3ce 1171 tcg_gen_st_f32(cpu_F0s, cpu_env, vfp_reg_offset(dp, reg));
b7bcbe95
FB
1172}
1173
18c9b560
AZ
/* Bit 20 of a coprocessor instruction: set for reads (MRC/MRRC/TMRRC),
   clear for writes (MCR/MCRR/TMCRR).  */
#define ARM_CP_RW_BIT (1 << 20)
1175
/* Compute the effective address for an iwMMXt load/store, leaving it in
   T1 and performing base-register writeback when requested.  Returns
   nonzero for an invalid addressing mode combination.  */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;            /* base register */
    gen_movl_T1_reg(s, rd);

    /* 8-bit immediate, shifted left by 2 (i.e. scaled by 4) when bit 8
       is set.  Bit 23 selects add vs. subtract.  */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))           /* writeback */
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed: access at the base, write base +/- offset back.  */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;                       /* offset mode must add */
    return 0;
}
1206
1207static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1208{
1209 int rd = (insn >> 0) & 0xf;
1210
1211 if (insn & (1 << 8))
1212 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1213 return 1;
1214 else
1215 gen_op_iwmmxt_movl_T0_wCx(rd);
1216 else
1217 gen_op_iwmmxt_movl_T0_T1_wRn(rd);
1218
1219 gen_op_movl_T1_im(mask);
1220 gen_op_andl_T0_T1();
1221 return 0;
1222}
1223
1224/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1225 (ie. an undefined instruction). */
1226static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1227{
1228 int rd, wrd;
1229 int rdhi, rdlo, rd0, rd1, i;
b0109805 1230 TCGv tmp;
18c9b560
AZ
1231
1232 if ((insn & 0x0e000e00) == 0x0c000000) {
1233 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1234 wrd = insn & 0xf;
1235 rdlo = (insn >> 12) & 0xf;
1236 rdhi = (insn >> 16) & 0xf;
1237 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1238 gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
1239 gen_movl_reg_T0(s, rdlo);
1240 gen_movl_reg_T1(s, rdhi);
1241 } else { /* TMCRR */
1242 gen_movl_T0_reg(s, rdlo);
1243 gen_movl_T1_reg(s, rdhi);
1244 gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
1245 gen_op_iwmmxt_set_mup();
1246 }
1247 return 0;
1248 }
1249
1250 wrd = (insn >> 12) & 0xf;
1251 if (gen_iwmmxt_address(s, insn))
1252 return 1;
1253 if (insn & ARM_CP_RW_BIT) {
1254 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
b0109805
PB
1255 tmp = gen_ld32(cpu_T[1], IS_USER(s));
1256 tcg_gen_mov_i32(cpu_T[0], tmp);
1257 dead_tmp(tmp);
18c9b560
AZ
1258 gen_op_iwmmxt_movl_wCx_T0(wrd);
1259 } else {
1260 if (insn & (1 << 8))
1261 if (insn & (1 << 22)) /* WLDRD */
1262 gen_ldst(iwmmxt_ldq, s);
1263 else /* WLDRW wRd */
1264 gen_ldst(iwmmxt_ldl, s);
1265 else
1266 if (insn & (1 << 22)) /* WLDRH */
1267 gen_ldst(iwmmxt_ldw, s);
1268 else /* WLDRB */
1269 gen_ldst(iwmmxt_ldb, s);
1270 gen_op_iwmmxt_movq_wRn_M0(wrd);
1271 }
1272 } else {
1273 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1274 gen_op_iwmmxt_movl_T0_wCx(wrd);
b0109805
PB
1275 tmp = new_tmp();
1276 tcg_gen_mov_i32(tmp, cpu_T[0]);
1277 gen_st32(tmp, cpu_T[1], IS_USER(s));
18c9b560
AZ
1278 } else {
1279 gen_op_iwmmxt_movq_M0_wRn(wrd);
1280 if (insn & (1 << 8))
1281 if (insn & (1 << 22)) /* WSTRD */
1282 gen_ldst(iwmmxt_stq, s);
1283 else /* WSTRW wRd */
1284 gen_ldst(iwmmxt_stl, s);
1285 else
1286 if (insn & (1 << 22)) /* WSTRH */
1287 gen_ldst(iwmmxt_ldw, s);
1288 else /* WSTRB */
1289 gen_ldst(iwmmxt_stb, s);
1290 }
1291 }
1292 return 0;
1293 }
1294
1295 if ((insn & 0x0f000000) != 0x0e000000)
1296 return 1;
1297
1298 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1299 case 0x000: /* WOR */
1300 wrd = (insn >> 12) & 0xf;
1301 rd0 = (insn >> 0) & 0xf;
1302 rd1 = (insn >> 16) & 0xf;
1303 gen_op_iwmmxt_movq_M0_wRn(rd0);
1304 gen_op_iwmmxt_orq_M0_wRn(rd1);
1305 gen_op_iwmmxt_setpsr_nz();
1306 gen_op_iwmmxt_movq_wRn_M0(wrd);
1307 gen_op_iwmmxt_set_mup();
1308 gen_op_iwmmxt_set_cup();
1309 break;
1310 case 0x011: /* TMCR */
1311 if (insn & 0xf)
1312 return 1;
1313 rd = (insn >> 12) & 0xf;
1314 wrd = (insn >> 16) & 0xf;
1315 switch (wrd) {
1316 case ARM_IWMMXT_wCID:
1317 case ARM_IWMMXT_wCASF:
1318 break;
1319 case ARM_IWMMXT_wCon:
1320 gen_op_iwmmxt_set_cup();
1321 /* Fall through. */
1322 case ARM_IWMMXT_wCSSF:
1323 gen_op_iwmmxt_movl_T0_wCx(wrd);
1324 gen_movl_T1_reg(s, rd);
1325 gen_op_bicl_T0_T1();
1326 gen_op_iwmmxt_movl_wCx_T0(wrd);
1327 break;
1328 case ARM_IWMMXT_wCGR0:
1329 case ARM_IWMMXT_wCGR1:
1330 case ARM_IWMMXT_wCGR2:
1331 case ARM_IWMMXT_wCGR3:
1332 gen_op_iwmmxt_set_cup();
1333 gen_movl_reg_T0(s, rd);
1334 gen_op_iwmmxt_movl_wCx_T0(wrd);
1335 break;
1336 default:
1337 return 1;
1338 }
1339 break;
1340 case 0x100: /* WXOR */
1341 wrd = (insn >> 12) & 0xf;
1342 rd0 = (insn >> 0) & 0xf;
1343 rd1 = (insn >> 16) & 0xf;
1344 gen_op_iwmmxt_movq_M0_wRn(rd0);
1345 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1346 gen_op_iwmmxt_setpsr_nz();
1347 gen_op_iwmmxt_movq_wRn_M0(wrd);
1348 gen_op_iwmmxt_set_mup();
1349 gen_op_iwmmxt_set_cup();
1350 break;
1351 case 0x111: /* TMRC */
1352 if (insn & 0xf)
1353 return 1;
1354 rd = (insn >> 12) & 0xf;
1355 wrd = (insn >> 16) & 0xf;
1356 gen_op_iwmmxt_movl_T0_wCx(wrd);
1357 gen_movl_reg_T0(s, rd);
1358 break;
1359 case 0x300: /* WANDN */
1360 wrd = (insn >> 12) & 0xf;
1361 rd0 = (insn >> 0) & 0xf;
1362 rd1 = (insn >> 16) & 0xf;
1363 gen_op_iwmmxt_movq_M0_wRn(rd0);
1364 gen_op_iwmmxt_negq_M0();
1365 gen_op_iwmmxt_andq_M0_wRn(rd1);
1366 gen_op_iwmmxt_setpsr_nz();
1367 gen_op_iwmmxt_movq_wRn_M0(wrd);
1368 gen_op_iwmmxt_set_mup();
1369 gen_op_iwmmxt_set_cup();
1370 break;
1371 case 0x200: /* WAND */
1372 wrd = (insn >> 12) & 0xf;
1373 rd0 = (insn >> 0) & 0xf;
1374 rd1 = (insn >> 16) & 0xf;
1375 gen_op_iwmmxt_movq_M0_wRn(rd0);
1376 gen_op_iwmmxt_andq_M0_wRn(rd1);
1377 gen_op_iwmmxt_setpsr_nz();
1378 gen_op_iwmmxt_movq_wRn_M0(wrd);
1379 gen_op_iwmmxt_set_mup();
1380 gen_op_iwmmxt_set_cup();
1381 break;
1382 case 0x810: case 0xa10: /* WMADD */
1383 wrd = (insn >> 12) & 0xf;
1384 rd0 = (insn >> 0) & 0xf;
1385 rd1 = (insn >> 16) & 0xf;
1386 gen_op_iwmmxt_movq_M0_wRn(rd0);
1387 if (insn & (1 << 21))
1388 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1389 else
1390 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1391 gen_op_iwmmxt_movq_wRn_M0(wrd);
1392 gen_op_iwmmxt_set_mup();
1393 break;
1394 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1395 wrd = (insn >> 12) & 0xf;
1396 rd0 = (insn >> 16) & 0xf;
1397 rd1 = (insn >> 0) & 0xf;
1398 gen_op_iwmmxt_movq_M0_wRn(rd0);
1399 switch ((insn >> 22) & 3) {
1400 case 0:
1401 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1402 break;
1403 case 1:
1404 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1405 break;
1406 case 2:
1407 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1408 break;
1409 case 3:
1410 return 1;
1411 }
1412 gen_op_iwmmxt_movq_wRn_M0(wrd);
1413 gen_op_iwmmxt_set_mup();
1414 gen_op_iwmmxt_set_cup();
1415 break;
1416 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1417 wrd = (insn >> 12) & 0xf;
1418 rd0 = (insn >> 16) & 0xf;
1419 rd1 = (insn >> 0) & 0xf;
1420 gen_op_iwmmxt_movq_M0_wRn(rd0);
1421 switch ((insn >> 22) & 3) {
1422 case 0:
1423 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1424 break;
1425 case 1:
1426 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1427 break;
1428 case 2:
1429 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1430 break;
1431 case 3:
1432 return 1;
1433 }
1434 gen_op_iwmmxt_movq_wRn_M0(wrd);
1435 gen_op_iwmmxt_set_mup();
1436 gen_op_iwmmxt_set_cup();
1437 break;
1438 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1439 wrd = (insn >> 12) & 0xf;
1440 rd0 = (insn >> 16) & 0xf;
1441 rd1 = (insn >> 0) & 0xf;
1442 gen_op_iwmmxt_movq_M0_wRn(rd0);
1443 if (insn & (1 << 22))
1444 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1445 else
1446 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1447 if (!(insn & (1 << 20)))
1448 gen_op_iwmmxt_addl_M0_wRn(wrd);
1449 gen_op_iwmmxt_movq_wRn_M0(wrd);
1450 gen_op_iwmmxt_set_mup();
1451 break;
1452 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1453 wrd = (insn >> 12) & 0xf;
1454 rd0 = (insn >> 16) & 0xf;
1455 rd1 = (insn >> 0) & 0xf;
1456 gen_op_iwmmxt_movq_M0_wRn(rd0);
1457 if (insn & (1 << 21))
1458 gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
1459 else
1460 gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
1461 gen_op_iwmmxt_movq_wRn_M0(wrd);
1462 gen_op_iwmmxt_set_mup();
1463 break;
1464 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1465 wrd = (insn >> 12) & 0xf;
1466 rd0 = (insn >> 16) & 0xf;
1467 rd1 = (insn >> 0) & 0xf;
1468 gen_op_iwmmxt_movq_M0_wRn(rd0);
1469 if (insn & (1 << 21))
1470 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1471 else
1472 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1473 if (!(insn & (1 << 20))) {
1474 if (insn & (1 << 21))
1475 gen_op_iwmmxt_addsq_M0_wRn(wrd);
1476 else
1477 gen_op_iwmmxt_adduq_M0_wRn(wrd);
1478 }
1479 gen_op_iwmmxt_movq_wRn_M0(wrd);
1480 gen_op_iwmmxt_set_mup();
1481 break;
1482 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1483 wrd = (insn >> 12) & 0xf;
1484 rd0 = (insn >> 16) & 0xf;
1485 rd1 = (insn >> 0) & 0xf;
1486 gen_op_iwmmxt_movq_M0_wRn(rd0);
1487 switch ((insn >> 22) & 3) {
1488 case 0:
1489 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1490 break;
1491 case 1:
1492 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1493 break;
1494 case 2:
1495 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1496 break;
1497 case 3:
1498 return 1;
1499 }
1500 gen_op_iwmmxt_movq_wRn_M0(wrd);
1501 gen_op_iwmmxt_set_mup();
1502 gen_op_iwmmxt_set_cup();
1503 break;
1504 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1505 wrd = (insn >> 12) & 0xf;
1506 rd0 = (insn >> 16) & 0xf;
1507 rd1 = (insn >> 0) & 0xf;
1508 gen_op_iwmmxt_movq_M0_wRn(rd0);
1509 if (insn & (1 << 22))
1510 gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
1511 else
1512 gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
1513 gen_op_iwmmxt_movq_wRn_M0(wrd);
1514 gen_op_iwmmxt_set_mup();
1515 gen_op_iwmmxt_set_cup();
1516 break;
1517 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1518 wrd = (insn >> 12) & 0xf;
1519 rd0 = (insn >> 16) & 0xf;
1520 rd1 = (insn >> 0) & 0xf;
1521 gen_op_iwmmxt_movq_M0_wRn(rd0);
1522 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1523 gen_op_movl_T1_im(7);
1524 gen_op_andl_T0_T1();
1525 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 break;
1529 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1530 rd = (insn >> 12) & 0xf;
1531 wrd = (insn >> 16) & 0xf;
1532 gen_movl_T0_reg(s, rd);
1533 gen_op_iwmmxt_movq_M0_wRn(wrd);
1534 switch ((insn >> 6) & 3) {
1535 case 0:
1536 gen_op_movl_T1_im(0xff);
1537 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1538 break;
1539 case 1:
1540 gen_op_movl_T1_im(0xffff);
1541 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1542 break;
1543 case 2:
1544 gen_op_movl_T1_im(0xffffffff);
1545 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1546 break;
1547 case 3:
1548 return 1;
1549 }
1550 gen_op_iwmmxt_movq_wRn_M0(wrd);
1551 gen_op_iwmmxt_set_mup();
1552 break;
1553 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1554 rd = (insn >> 12) & 0xf;
1555 wrd = (insn >> 16) & 0xf;
1556 if (rd == 15)
1557 return 1;
1558 gen_op_iwmmxt_movq_M0_wRn(wrd);
1559 switch ((insn >> 22) & 3) {
1560 case 0:
1561 if (insn & 8)
1562 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1563 else {
1564 gen_op_movl_T1_im(0xff);
1565 gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
1566 }
1567 break;
1568 case 1:
1569 if (insn & 8)
1570 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1571 else {
1572 gen_op_movl_T1_im(0xffff);
1573 gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
1574 }
1575 break;
1576 case 2:
1577 gen_op_movl_T1_im(0xffffffff);
1578 gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
1579 break;
1580 case 3:
1581 return 1;
1582 }
b26eefb6 1583 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1584 break;
1585 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1586 if ((insn & 0x000ff008) != 0x0003f000)
1587 return 1;
1588 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1589 switch ((insn >> 22) & 3) {
1590 case 0:
1591 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1592 break;
1593 case 1:
1594 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1595 break;
1596 case 2:
1597 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1598 break;
1599 case 3:
1600 return 1;
1601 }
1602 gen_op_shll_T1_im(28);
d9ba4830 1603 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1604 break;
1605 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1606 rd = (insn >> 12) & 0xf;
1607 wrd = (insn >> 16) & 0xf;
1608 gen_movl_T0_reg(s, rd);
1609 switch ((insn >> 6) & 3) {
1610 case 0:
1611 gen_op_iwmmxt_bcstb_M0_T0();
1612 break;
1613 case 1:
1614 gen_op_iwmmxt_bcstw_M0_T0();
1615 break;
1616 case 2:
1617 gen_op_iwmmxt_bcstl_M0_T0();
1618 break;
1619 case 3:
1620 return 1;
1621 }
1622 gen_op_iwmmxt_movq_wRn_M0(wrd);
1623 gen_op_iwmmxt_set_mup();
1624 break;
1625 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1626 if ((insn & 0x000ff00f) != 0x0003f000)
1627 return 1;
1628 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1629 switch ((insn >> 22) & 3) {
1630 case 0:
1631 for (i = 0; i < 7; i ++) {
1632 gen_op_shll_T1_im(4);
1633 gen_op_andl_T0_T1();
1634 }
1635 break;
1636 case 1:
1637 for (i = 0; i < 3; i ++) {
1638 gen_op_shll_T1_im(8);
1639 gen_op_andl_T0_T1();
1640 }
1641 break;
1642 case 2:
1643 gen_op_shll_T1_im(16);
1644 gen_op_andl_T0_T1();
1645 break;
1646 case 3:
1647 return 1;
1648 }
d9ba4830 1649 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1650 break;
1651 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1652 wrd = (insn >> 12) & 0xf;
1653 rd0 = (insn >> 16) & 0xf;
1654 gen_op_iwmmxt_movq_M0_wRn(rd0);
1655 switch ((insn >> 22) & 3) {
1656 case 0:
1657 gen_op_iwmmxt_addcb_M0();
1658 break;
1659 case 1:
1660 gen_op_iwmmxt_addcw_M0();
1661 break;
1662 case 2:
1663 gen_op_iwmmxt_addcl_M0();
1664 break;
1665 case 3:
1666 return 1;
1667 }
1668 gen_op_iwmmxt_movq_wRn_M0(wrd);
1669 gen_op_iwmmxt_set_mup();
1670 break;
1671 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1672 if ((insn & 0x000ff00f) != 0x0003f000)
1673 return 1;
1674 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1675 switch ((insn >> 22) & 3) {
1676 case 0:
1677 for (i = 0; i < 7; i ++) {
1678 gen_op_shll_T1_im(4);
1679 gen_op_orl_T0_T1();
1680 }
1681 break;
1682 case 1:
1683 for (i = 0; i < 3; i ++) {
1684 gen_op_shll_T1_im(8);
1685 gen_op_orl_T0_T1();
1686 }
1687 break;
1688 case 2:
1689 gen_op_shll_T1_im(16);
1690 gen_op_orl_T0_T1();
1691 break;
1692 case 3:
1693 return 1;
1694 }
d9ba4830 1695 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1696 break;
1697 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1698 rd = (insn >> 12) & 0xf;
1699 rd0 = (insn >> 16) & 0xf;
1700 if ((insn & 0xf) != 0)
1701 return 1;
1702 gen_op_iwmmxt_movq_M0_wRn(rd0);
1703 switch ((insn >> 22) & 3) {
1704 case 0:
1705 gen_op_iwmmxt_msbb_T0_M0();
1706 break;
1707 case 1:
1708 gen_op_iwmmxt_msbw_T0_M0();
1709 break;
1710 case 2:
1711 gen_op_iwmmxt_msbl_T0_M0();
1712 break;
1713 case 3:
1714 return 1;
1715 }
1716 gen_movl_reg_T0(s, rd);
1717 break;
1718 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1719 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1720 wrd = (insn >> 12) & 0xf;
1721 rd0 = (insn >> 16) & 0xf;
1722 rd1 = (insn >> 0) & 0xf;
1723 gen_op_iwmmxt_movq_M0_wRn(rd0);
1724 switch ((insn >> 22) & 3) {
1725 case 0:
1726 if (insn & (1 << 21))
1727 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1728 else
1729 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1730 break;
1731 case 1:
1732 if (insn & (1 << 21))
1733 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1734 else
1735 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1736 break;
1737 case 2:
1738 if (insn & (1 << 21))
1739 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1740 else
1741 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1742 break;
1743 case 3:
1744 return 1;
1745 }
1746 gen_op_iwmmxt_movq_wRn_M0(wrd);
1747 gen_op_iwmmxt_set_mup();
1748 gen_op_iwmmxt_set_cup();
1749 break;
1750 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1751 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1752 wrd = (insn >> 12) & 0xf;
1753 rd0 = (insn >> 16) & 0xf;
1754 gen_op_iwmmxt_movq_M0_wRn(rd0);
1755 switch ((insn >> 22) & 3) {
1756 case 0:
1757 if (insn & (1 << 21))
1758 gen_op_iwmmxt_unpacklsb_M0();
1759 else
1760 gen_op_iwmmxt_unpacklub_M0();
1761 break;
1762 case 1:
1763 if (insn & (1 << 21))
1764 gen_op_iwmmxt_unpacklsw_M0();
1765 else
1766 gen_op_iwmmxt_unpackluw_M0();
1767 break;
1768 case 2:
1769 if (insn & (1 << 21))
1770 gen_op_iwmmxt_unpacklsl_M0();
1771 else
1772 gen_op_iwmmxt_unpacklul_M0();
1773 break;
1774 case 3:
1775 return 1;
1776 }
1777 gen_op_iwmmxt_movq_wRn_M0(wrd);
1778 gen_op_iwmmxt_set_mup();
1779 gen_op_iwmmxt_set_cup();
1780 break;
1781 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1782 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1783 wrd = (insn >> 12) & 0xf;
1784 rd0 = (insn >> 16) & 0xf;
1785 gen_op_iwmmxt_movq_M0_wRn(rd0);
1786 switch ((insn >> 22) & 3) {
1787 case 0:
1788 if (insn & (1 << 21))
1789 gen_op_iwmmxt_unpackhsb_M0();
1790 else
1791 gen_op_iwmmxt_unpackhub_M0();
1792 break;
1793 case 1:
1794 if (insn & (1 << 21))
1795 gen_op_iwmmxt_unpackhsw_M0();
1796 else
1797 gen_op_iwmmxt_unpackhuw_M0();
1798 break;
1799 case 2:
1800 if (insn & (1 << 21))
1801 gen_op_iwmmxt_unpackhsl_M0();
1802 else
1803 gen_op_iwmmxt_unpackhul_M0();
1804 break;
1805 case 3:
1806 return 1;
1807 }
1808 gen_op_iwmmxt_movq_wRn_M0(wrd);
1809 gen_op_iwmmxt_set_mup();
1810 gen_op_iwmmxt_set_cup();
1811 break;
1812 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1813 case 0x214: case 0x614: case 0xa14: case 0xe14:
1814 wrd = (insn >> 12) & 0xf;
1815 rd0 = (insn >> 16) & 0xf;
1816 gen_op_iwmmxt_movq_M0_wRn(rd0);
1817 if (gen_iwmmxt_shift(insn, 0xff))
1818 return 1;
1819 switch ((insn >> 22) & 3) {
1820 case 0:
1821 return 1;
1822 case 1:
1823 gen_op_iwmmxt_srlw_M0_T0();
1824 break;
1825 case 2:
1826 gen_op_iwmmxt_srll_M0_T0();
1827 break;
1828 case 3:
1829 gen_op_iwmmxt_srlq_M0_T0();
1830 break;
1831 }
1832 gen_op_iwmmxt_movq_wRn_M0(wrd);
1833 gen_op_iwmmxt_set_mup();
1834 gen_op_iwmmxt_set_cup();
1835 break;
1836 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1837 case 0x014: case 0x414: case 0x814: case 0xc14:
1838 wrd = (insn >> 12) & 0xf;
1839 rd0 = (insn >> 16) & 0xf;
1840 gen_op_iwmmxt_movq_M0_wRn(rd0);
1841 if (gen_iwmmxt_shift(insn, 0xff))
1842 return 1;
1843 switch ((insn >> 22) & 3) {
1844 case 0:
1845 return 1;
1846 case 1:
1847 gen_op_iwmmxt_sraw_M0_T0();
1848 break;
1849 case 2:
1850 gen_op_iwmmxt_sral_M0_T0();
1851 break;
1852 case 3:
1853 gen_op_iwmmxt_sraq_M0_T0();
1854 break;
1855 }
1856 gen_op_iwmmxt_movq_wRn_M0(wrd);
1857 gen_op_iwmmxt_set_mup();
1858 gen_op_iwmmxt_set_cup();
1859 break;
1860 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1861 case 0x114: case 0x514: case 0x914: case 0xd14:
1862 wrd = (insn >> 12) & 0xf;
1863 rd0 = (insn >> 16) & 0xf;
1864 gen_op_iwmmxt_movq_M0_wRn(rd0);
1865 if (gen_iwmmxt_shift(insn, 0xff))
1866 return 1;
1867 switch ((insn >> 22) & 3) {
1868 case 0:
1869 return 1;
1870 case 1:
1871 gen_op_iwmmxt_sllw_M0_T0();
1872 break;
1873 case 2:
1874 gen_op_iwmmxt_slll_M0_T0();
1875 break;
1876 case 3:
1877 gen_op_iwmmxt_sllq_M0_T0();
1878 break;
1879 }
1880 gen_op_iwmmxt_movq_wRn_M0(wrd);
1881 gen_op_iwmmxt_set_mup();
1882 gen_op_iwmmxt_set_cup();
1883 break;
1884 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1885 case 0x314: case 0x714: case 0xb14: case 0xf14:
1886 wrd = (insn >> 12) & 0xf;
1887 rd0 = (insn >> 16) & 0xf;
1888 gen_op_iwmmxt_movq_M0_wRn(rd0);
1889 switch ((insn >> 22) & 3) {
1890 case 0:
1891 return 1;
1892 case 1:
1893 if (gen_iwmmxt_shift(insn, 0xf))
1894 return 1;
1895 gen_op_iwmmxt_rorw_M0_T0();
1896 break;
1897 case 2:
1898 if (gen_iwmmxt_shift(insn, 0x1f))
1899 return 1;
1900 gen_op_iwmmxt_rorl_M0_T0();
1901 break;
1902 case 3:
1903 if (gen_iwmmxt_shift(insn, 0x3f))
1904 return 1;
1905 gen_op_iwmmxt_rorq_M0_T0();
1906 break;
1907 }
1908 gen_op_iwmmxt_movq_wRn_M0(wrd);
1909 gen_op_iwmmxt_set_mup();
1910 gen_op_iwmmxt_set_cup();
1911 break;
1912 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1913 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1914 wrd = (insn >> 12) & 0xf;
1915 rd0 = (insn >> 16) & 0xf;
1916 rd1 = (insn >> 0) & 0xf;
1917 gen_op_iwmmxt_movq_M0_wRn(rd0);
1918 switch ((insn >> 22) & 3) {
1919 case 0:
1920 if (insn & (1 << 21))
1921 gen_op_iwmmxt_minsb_M0_wRn(rd1);
1922 else
1923 gen_op_iwmmxt_minub_M0_wRn(rd1);
1924 break;
1925 case 1:
1926 if (insn & (1 << 21))
1927 gen_op_iwmmxt_minsw_M0_wRn(rd1);
1928 else
1929 gen_op_iwmmxt_minuw_M0_wRn(rd1);
1930 break;
1931 case 2:
1932 if (insn & (1 << 21))
1933 gen_op_iwmmxt_minsl_M0_wRn(rd1);
1934 else
1935 gen_op_iwmmxt_minul_M0_wRn(rd1);
1936 break;
1937 case 3:
1938 return 1;
1939 }
1940 gen_op_iwmmxt_movq_wRn_M0(wrd);
1941 gen_op_iwmmxt_set_mup();
1942 break;
1943 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1944 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1945 wrd = (insn >> 12) & 0xf;
1946 rd0 = (insn >> 16) & 0xf;
1947 rd1 = (insn >> 0) & 0xf;
1948 gen_op_iwmmxt_movq_M0_wRn(rd0);
1949 switch ((insn >> 22) & 3) {
1950 case 0:
1951 if (insn & (1 << 21))
1952 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1953 else
1954 gen_op_iwmmxt_maxub_M0_wRn(rd1);
1955 break;
1956 case 1:
1957 if (insn & (1 << 21))
1958 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1959 else
1960 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1961 break;
1962 case 2:
1963 if (insn & (1 << 21))
1964 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1965 else
1966 gen_op_iwmmxt_maxul_M0_wRn(rd1);
1967 break;
1968 case 3:
1969 return 1;
1970 }
1971 gen_op_iwmmxt_movq_wRn_M0(wrd);
1972 gen_op_iwmmxt_set_mup();
1973 break;
1974 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1975 case 0x402: case 0x502: case 0x602: case 0x702:
1976 wrd = (insn >> 12) & 0xf;
1977 rd0 = (insn >> 16) & 0xf;
1978 rd1 = (insn >> 0) & 0xf;
1979 gen_op_iwmmxt_movq_M0_wRn(rd0);
1980 gen_op_movl_T0_im((insn >> 20) & 3);
1981 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1982 gen_op_iwmmxt_movq_wRn_M0(wrd);
1983 gen_op_iwmmxt_set_mup();
1984 break;
1985 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1986 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1987 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1988 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1989 wrd = (insn >> 12) & 0xf;
1990 rd0 = (insn >> 16) & 0xf;
1991 rd1 = (insn >> 0) & 0xf;
1992 gen_op_iwmmxt_movq_M0_wRn(rd0);
1993 switch ((insn >> 20) & 0xf) {
1994 case 0x0:
1995 gen_op_iwmmxt_subnb_M0_wRn(rd1);
1996 break;
1997 case 0x1:
1998 gen_op_iwmmxt_subub_M0_wRn(rd1);
1999 break;
2000 case 0x3:
2001 gen_op_iwmmxt_subsb_M0_wRn(rd1);
2002 break;
2003 case 0x4:
2004 gen_op_iwmmxt_subnw_M0_wRn(rd1);
2005 break;
2006 case 0x5:
2007 gen_op_iwmmxt_subuw_M0_wRn(rd1);
2008 break;
2009 case 0x7:
2010 gen_op_iwmmxt_subsw_M0_wRn(rd1);
2011 break;
2012 case 0x8:
2013 gen_op_iwmmxt_subnl_M0_wRn(rd1);
2014 break;
2015 case 0x9:
2016 gen_op_iwmmxt_subul_M0_wRn(rd1);
2017 break;
2018 case 0xb:
2019 gen_op_iwmmxt_subsl_M0_wRn(rd1);
2020 break;
2021 default:
2022 return 1;
2023 }
2024 gen_op_iwmmxt_movq_wRn_M0(wrd);
2025 gen_op_iwmmxt_set_mup();
2026 gen_op_iwmmxt_set_cup();
2027 break;
2028 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
2029 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
2030 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
2031 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
2032 wrd = (insn >> 12) & 0xf;
2033 rd0 = (insn >> 16) & 0xf;
2034 gen_op_iwmmxt_movq_M0_wRn(rd0);
2035 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
2036 gen_op_iwmmxt_shufh_M0_T0();
2037 gen_op_iwmmxt_movq_wRn_M0(wrd);
2038 gen_op_iwmmxt_set_mup();
2039 gen_op_iwmmxt_set_cup();
2040 break;
2041 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
2042 case 0x418: case 0x518: case 0x618: case 0x718:
2043 case 0x818: case 0x918: case 0xa18: case 0xb18:
2044 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
2045 wrd = (insn >> 12) & 0xf;
2046 rd0 = (insn >> 16) & 0xf;
2047 rd1 = (insn >> 0) & 0xf;
2048 gen_op_iwmmxt_movq_M0_wRn(rd0);
2049 switch ((insn >> 20) & 0xf) {
2050 case 0x0:
2051 gen_op_iwmmxt_addnb_M0_wRn(rd1);
2052 break;
2053 case 0x1:
2054 gen_op_iwmmxt_addub_M0_wRn(rd1);
2055 break;
2056 case 0x3:
2057 gen_op_iwmmxt_addsb_M0_wRn(rd1);
2058 break;
2059 case 0x4:
2060 gen_op_iwmmxt_addnw_M0_wRn(rd1);
2061 break;
2062 case 0x5:
2063 gen_op_iwmmxt_adduw_M0_wRn(rd1);
2064 break;
2065 case 0x7:
2066 gen_op_iwmmxt_addsw_M0_wRn(rd1);
2067 break;
2068 case 0x8:
2069 gen_op_iwmmxt_addnl_M0_wRn(rd1);
2070 break;
2071 case 0x9:
2072 gen_op_iwmmxt_addul_M0_wRn(rd1);
2073 break;
2074 case 0xb:
2075 gen_op_iwmmxt_addsl_M0_wRn(rd1);
2076 break;
2077 default:
2078 return 1;
2079 }
2080 gen_op_iwmmxt_movq_wRn_M0(wrd);
2081 gen_op_iwmmxt_set_mup();
2082 gen_op_iwmmxt_set_cup();
2083 break;
2084 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
2085 case 0x408: case 0x508: case 0x608: case 0x708:
2086 case 0x808: case 0x908: case 0xa08: case 0xb08:
2087 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
2088 wrd = (insn >> 12) & 0xf;
2089 rd0 = (insn >> 16) & 0xf;
2090 rd1 = (insn >> 0) & 0xf;
2091 gen_op_iwmmxt_movq_M0_wRn(rd0);
2092 if (!(insn & (1 << 20)))
2093 return 1;
2094 switch ((insn >> 22) & 3) {
2095 case 0:
2096 return 1;
2097 case 1:
2098 if (insn & (1 << 21))
2099 gen_op_iwmmxt_packsw_M0_wRn(rd1);
2100 else
2101 gen_op_iwmmxt_packuw_M0_wRn(rd1);
2102 break;
2103 case 2:
2104 if (insn & (1 << 21))
2105 gen_op_iwmmxt_packsl_M0_wRn(rd1);
2106 else
2107 gen_op_iwmmxt_packul_M0_wRn(rd1);
2108 break;
2109 case 3:
2110 if (insn & (1 << 21))
2111 gen_op_iwmmxt_packsq_M0_wRn(rd1);
2112 else
2113 gen_op_iwmmxt_packuq_M0_wRn(rd1);
2114 break;
2115 }
2116 gen_op_iwmmxt_movq_wRn_M0(wrd);
2117 gen_op_iwmmxt_set_mup();
2118 gen_op_iwmmxt_set_cup();
2119 break;
2120 case 0x201: case 0x203: case 0x205: case 0x207:
2121 case 0x209: case 0x20b: case 0x20d: case 0x20f:
2122 case 0x211: case 0x213: case 0x215: case 0x217:
2123 case 0x219: case 0x21b: case 0x21d: case 0x21f:
2124 wrd = (insn >> 5) & 0xf;
2125 rd0 = (insn >> 12) & 0xf;
2126 rd1 = (insn >> 0) & 0xf;
2127 if (rd0 == 0xf || rd1 == 0xf)
2128 return 1;
2129 gen_op_iwmmxt_movq_M0_wRn(wrd);
2130 switch ((insn >> 16) & 0xf) {
2131 case 0x0: /* TMIA */
b26eefb6
PB
2132 gen_movl_T0_reg(s, rd0);
2133 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2134 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2135 break;
2136 case 0x8: /* TMIAPH */
b26eefb6
PB
2137 gen_movl_T0_reg(s, rd0);
2138 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2139 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2140 break;
2141 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2142 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2143 if (insn & (1 << 16))
2144 gen_op_shrl_T1_im(16);
2145 gen_op_movl_T0_T1();
b26eefb6 2146 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2147 if (insn & (1 << 17))
2148 gen_op_shrl_T1_im(16);
2149 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2150 break;
2151 default:
2152 return 1;
2153 }
2154 gen_op_iwmmxt_movq_wRn_M0(wrd);
2155 gen_op_iwmmxt_set_mup();
2156 break;
2157 default:
2158 return 1;
2159 }
2160
2161 return 0;
2162}
2163
/* Disassemble an XScale DSP instruction.  Returns nonzero if an error
   occurred (ie. an undefined instruction).  */
static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    int acc, rd0, rd1, rdhi, rdlo;

    if ((insn & 0x0ff00f10) == 0x0e200010) {
        /* Multiply with Internal Accumulate Format */
        rd0 = (insn >> 12) & 0xf;
        rd1 = insn & 0xf;
        acc = (insn >> 5) & 7;

        /* Only accumulator 0 is supported.  */
        if (acc != 0)
            return 1;

        switch ((insn >> 16) & 0xf) {
        case 0x0:                                       /* MIA */
            /* NOTE(review): unlike the TMIA path in disas_iwmmxt_insn,
               M0 is not loaded from the accumulator before the
               multiply-accumulate ops below — verify this is intended.  */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsl_M0_T0_T1();
            break;
        case 0x8:                                       /* MIAPH */
            gen_movl_T0_reg(s, rd0);
            gen_movl_T1_reg(s, rd1);
            gen_op_iwmmxt_muladdsw_M0_T0_T1();
            break;
        case 0xc:                                       /* MIABB */
        case 0xd:                                       /* MIABT */
        case 0xe:                                       /* MIATB */
        case 0xf:                                       /* MIATT */
            /* Bits 16/17 select the high (shifted) or low halfword of
               each source register.  */
            gen_movl_T1_reg(s, rd0);
            if (insn & (1 << 16))
                gen_op_shrl_T1_im(16);
            gen_op_movl_T0_T1();
            gen_movl_T1_reg(s, rd1);
            if (insn & (1 << 17))
                gen_op_shrl_T1_im(16);
            gen_op_iwmmxt_muladdswl_M0_T0_T1();
            break;
        default:
            return 1;
        }

        gen_op_iwmmxt_movq_wRn_M0(acc);
        return 0;
    }

    if ((insn & 0x0fe00ff8) == 0x0c400000) {
        /* Internal Accumulator Access Format */
        rdhi = (insn >> 16) & 0xf;
        rdlo = (insn >> 12) & 0xf;
        acc = insn & 7;

        if (acc != 0)
            return 1;

        if (insn & ARM_CP_RW_BIT) {                     /* MRA */
            gen_op_iwmmxt_movl_T0_T1_wRn(acc);
            gen_movl_reg_T0(s, rdlo);
            /* The accumulator is 40 bits wide; mask the high word to
               bits [39:32] before writing it to rdhi.  */
            gen_op_movl_T0_im((1 << (40 - 32)) - 1);
            gen_op_andl_T0_T1();
            gen_movl_reg_T0(s, rdhi);
        } else {                                        /* MAR */
            gen_movl_T0_reg(s, rdlo);
            gen_movl_T1_reg(s, rdhi);
            gen_op_iwmmxt_movl_wRn_T0_T1(acc);
        }
        return 0;
    }

    return 1;
}
2236
c1713132
AZ
/* Disassemble system coprocessor instruction.  Return nonzero if
   instruction is not defined.  */
static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
{
    uint32_t rd = (insn >> 12) & 0xf;
    uint32_t cp = (insn >> 8) & 0xf;
    /* Generic coprocessor accesses are privileged.  */
    if (IS_USER(s)) {
        return 1;
    }

    if (insn & ARM_CP_RW_BIT) {
        /* Read (MRC): undefined if no read hook is installed for cp.  */
        if (!env->cp[cp].cp_read)
            return 1;
        /* Make the current PC visible to the coprocessor hook.  */
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_set_pc_T0();
        gen_op_movl_T0_cp(insn);
        gen_movl_reg_T0(s, rd);
    } else {
        /* Write (MCR): undefined if no write hook is installed for cp.  */
        if (!env->cp[cp].cp_write)
            return 1;
        gen_op_movl_T0_im((uint32_t) s->pc);
        gen_set_pc_T0();
        gen_movl_T0_reg(s, rd);
        gen_op_movl_cp_T0(insn);
    }
    return 0;
}
2264
9ee6e8bb
PB
2265static int cp15_user_ok(uint32_t insn)
2266{
2267 int cpn = (insn >> 16) & 0xf;
2268 int cpm = insn & 0xf;
2269 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2270
2271 if (cpn == 13 && cpm == 0) {
2272 /* TLS register. */
2273 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2274 return 1;
2275 }
2276 if (cpn == 7) {
2277 /* ISB, DSB, DMB. */
2278 if ((cpm == 5 && op == 4)
2279 || (cpm == 10 && (op == 4 || op == 5)))
2280 return 1;
2281 }
2282 return 0;
2283}
2284
b5ff1b31
FB
2285/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2286 instruction is not defined. */
a90b7318 2287static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2288{
2289 uint32_t rd;
2290
9ee6e8bb
PB
2291 /* M profile cores use memory mapped registers instead of cp15. */
2292 if (arm_feature(env, ARM_FEATURE_M))
2293 return 1;
2294
2295 if ((insn & (1 << 25)) == 0) {
2296 if (insn & (1 << 20)) {
2297 /* mrrc */
2298 return 1;
2299 }
2300 /* mcrr. Used for block cache operations, so implement as no-op. */
2301 return 0;
2302 }
2303 if ((insn & (1 << 4)) == 0) {
2304 /* cdp */
2305 return 1;
2306 }
2307 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2308 return 1;
2309 }
9332f9da
FB
2310 if ((insn & 0x0fff0fff) == 0x0e070f90
2311 || (insn & 0x0fff0fff) == 0x0e070f58) {
2312 /* Wait for interrupt. */
2313 gen_op_movl_T0_im((long)s->pc);
b26eefb6 2314 gen_set_pc_T0();
9ee6e8bb 2315 s->is_jmp = DISAS_WFI;
9332f9da
FB
2316 return 0;
2317 }
b5ff1b31 2318 rd = (insn >> 12) & 0xf;
18c9b560 2319 if (insn & ARM_CP_RW_BIT) {
b5ff1b31
FB
2320 gen_op_movl_T0_cp15(insn);
2321 /* If the destination register is r15 then sets condition codes. */
2322 if (rd != 15)
2323 gen_movl_reg_T0(s, rd);
2324 } else {
2325 gen_movl_T0_reg(s, rd);
2326 gen_op_movl_cp15_T0(insn);
a90b7318
AZ
2327 /* Normally we would always end the TB here, but Linux
2328 * arch/arm/mach-pxa/sleep.S expects two instructions following
2329 * an MMU enable to execute from cache. Imitate this behaviour. */
2330 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2331 (insn & 0x0fff0fff) != 0x0e010f10)
2332 gen_lookup_tb(s);
b5ff1b31 2333 }
b5ff1b31
FB
2334 return 0;
2335}
2336
/* Extract VFP register numbers from an instruction encoding.
   Single precision registers use a 5-bit index: 4 bits from BIGBIT
   plus one low bit from SMALLBIT.  Double precision registers use the
   4 bits at BIGBIT, with SMALLBIT providing a 5th (high) bit on VFP3;
   pre-VFP3 cores require that bit to be zero (else UNDEF).  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if (insn & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Convenience wrappers for the D (dest), N and M operand fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2356
4373f3ce
PB
2357/* Move between integer and VFP cores. */
2358static TCGv gen_vfp_mrs(void)
2359{
2360 TCGv tmp = new_tmp();
2361 tcg_gen_mov_i32(tmp, cpu_F0s);
2362 return tmp;
2363}
2364
2365static void gen_vfp_msr(TCGv tmp)
2366{
2367 tcg_gen_mov_i32(cpu_F0s, tmp);
2368 dead_tmp(tmp);
2369}
2370
9ee6e8bb
PB
2371static inline int
2372vfp_enabled(CPUState * env)
2373{
2374 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2375}
2376
b7bcbe95
FB
2377/* Disassemble a VFP instruction. Returns nonzero if an error occured
2378 (ie. an undefined instruction). */
2379static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
2380{
2381 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
2382 int dp, veclen;
4373f3ce 2383 TCGv tmp;
b7bcbe95 2384
40f137e1
PB
2385 if (!arm_feature(env, ARM_FEATURE_VFP))
2386 return 1;
2387
9ee6e8bb
PB
2388 if (!vfp_enabled(env)) {
2389 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
2390 if ((insn & 0x0fe00fff) != 0x0ee00a10)
2391 return 1;
2392 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
2393 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
2394 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
2395 return 1;
2396 }
b7bcbe95
FB
2397 dp = ((insn & 0xf00) == 0xb00);
2398 switch ((insn >> 24) & 0xf) {
2399 case 0xe:
2400 if (insn & (1 << 4)) {
2401 /* single register transfer */
b7bcbe95
FB
2402 rd = (insn >> 12) & 0xf;
2403 if (dp) {
9ee6e8bb
PB
2404 int size;
2405 int pass;
2406
2407 VFP_DREG_N(rn, insn);
2408 if (insn & 0xf)
b7bcbe95 2409 return 1;
9ee6e8bb
PB
2410 if (insn & 0x00c00060
2411 && !arm_feature(env, ARM_FEATURE_NEON))
2412 return 1;
2413
2414 pass = (insn >> 21) & 1;
2415 if (insn & (1 << 22)) {
2416 size = 0;
2417 offset = ((insn >> 5) & 3) * 8;
2418 } else if (insn & (1 << 5)) {
2419 size = 1;
2420 offset = (insn & (1 << 6)) ? 16 : 0;
2421 } else {
2422 size = 2;
2423 offset = 0;
2424 }
18c9b560 2425 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2426 /* vfp->arm */
9ee6e8bb
PB
2427 switch (size) {
2428 case 0:
2429 NEON_GET_REG(T1, rn, pass);
2430 if (offset)
2431 gen_op_shrl_T1_im(offset);
2432 if (insn & (1 << 23))
b26eefb6 2433 gen_uxtb(cpu_T[1]);
9ee6e8bb 2434 else
b26eefb6 2435 gen_sxtb(cpu_T[1]);
9ee6e8bb
PB
2436 break;
2437 case 1:
2438 NEON_GET_REG(T1, rn, pass);
2439 if (insn & (1 << 23)) {
2440 if (offset) {
2441 gen_op_shrl_T1_im(16);
2442 } else {
b26eefb6 2443 gen_uxth(cpu_T[1]);
9ee6e8bb
PB
2444 }
2445 } else {
2446 if (offset) {
2447 gen_op_sarl_T1_im(16);
2448 } else {
b26eefb6 2449 gen_sxth(cpu_T[1]);
9ee6e8bb
PB
2450 }
2451 }
2452 break;
2453 case 2:
2454 NEON_GET_REG(T1, rn, pass);
2455 break;
2456 }
2457 gen_movl_reg_T1(s, rd);
b7bcbe95
FB
2458 } else {
2459 /* arm->vfp */
9ee6e8bb
PB
2460 gen_movl_T0_reg(s, rd);
2461 if (insn & (1 << 23)) {
2462 /* VDUP */
2463 if (size == 0) {
2464 gen_op_neon_dup_u8(0);
2465 } else if (size == 1) {
2466 gen_op_neon_dup_low16();
2467 }
2468 NEON_SET_REG(T0, rn, 0);
2469 NEON_SET_REG(T0, rn, 1);
2470 } else {
2471 /* VMOV */
2472 switch (size) {
2473 case 0:
2474 NEON_GET_REG(T2, rn, pass);
2475 gen_op_movl_T1_im(0xff);
2476 gen_op_andl_T0_T1();
2477 gen_op_neon_insert_elt(offset, ~(0xff << offset));
2478 NEON_SET_REG(T2, rn, pass);
2479 break;
2480 case 1:
2481 NEON_GET_REG(T2, rn, pass);
2482 gen_op_movl_T1_im(0xffff);
2483 gen_op_andl_T0_T1();
2484 bank_mask = offset ? 0xffff : 0xffff0000;
2485 gen_op_neon_insert_elt(offset, bank_mask);
2486 NEON_SET_REG(T2, rn, pass);
2487 break;
2488 case 2:
2489 NEON_SET_REG(T0, rn, pass);
2490 break;
2491 }
2492 }
b7bcbe95 2493 }
9ee6e8bb
PB
2494 } else { /* !dp */
2495 if ((insn & 0x6f) != 0x00)
2496 return 1;
2497 rn = VFP_SREG_N(insn);
18c9b560 2498 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2499 /* vfp->arm */
2500 if (insn & (1 << 21)) {
2501 /* system register */
40f137e1 2502 rn >>= 1;
9ee6e8bb 2503
b7bcbe95 2504 switch (rn) {
40f137e1 2505 case ARM_VFP_FPSID:
4373f3ce 2506 /* VFP2 allows access to FSID from userspace.
9ee6e8bb
PB
2507 VFP3 restricts all id registers to privileged
2508 accesses. */
2509 if (IS_USER(s)
2510 && arm_feature(env, ARM_FEATURE_VFP3))
2511 return 1;
4373f3ce 2512 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2513 break;
40f137e1 2514 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2515 if (IS_USER(s))
2516 return 1;
4373f3ce 2517 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2518 break;
40f137e1
PB
2519 case ARM_VFP_FPINST:
2520 case ARM_VFP_FPINST2:
9ee6e8bb
PB
2521 /* Not present in VFP3. */
2522 if (IS_USER(s)
2523 || arm_feature(env, ARM_FEATURE_VFP3))
2524 return 1;
4373f3ce 2525 tmp = load_cpu_field(vfp.xregs[rn]);
b7bcbe95 2526 break;
40f137e1 2527 case ARM_VFP_FPSCR:
4373f3ce
PB
2528 if (rd == 15) {
2529 tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
2530 tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
2531 } else {
2532 tmp = new_tmp();
2533 gen_helper_vfp_get_fpscr(tmp, cpu_env);
2534 }
b7bcbe95 2535 break;
9ee6e8bb
PB
2536 case ARM_VFP_MVFR0:
2537 case ARM_VFP_MVFR1:
2538 if (IS_USER(s)
2539 || !arm_feature(env, ARM_FEATURE_VFP3))
2540 return 1;
4373f3ce 2541 tmp = load_cpu_field(vfp.xregs[rn]);
9ee6e8bb 2542 break;
b7bcbe95
FB
2543 default:
2544 return 1;
2545 }
2546 } else {
2547 gen_mov_F0_vreg(0, rn);
4373f3ce 2548 tmp = gen_vfp_mrs();
b7bcbe95
FB
2549 }
2550 if (rd == 15) {
b5ff1b31 2551 /* Set the 4 flag bits in the CPSR. */
4373f3ce
PB
2552 gen_set_nzcv(tmp);
2553 dead_tmp(tmp);
2554 } else {
2555 store_reg(s, rd, tmp);
2556 }
b7bcbe95
FB
2557 } else {
2558 /* arm->vfp */
4373f3ce 2559 tmp = load_reg(s, rd);
b7bcbe95 2560 if (insn & (1 << 21)) {
40f137e1 2561 rn >>= 1;
b7bcbe95
FB
2562 /* system register */
2563 switch (rn) {
40f137e1 2564 case ARM_VFP_FPSID:
9ee6e8bb
PB
2565 case ARM_VFP_MVFR0:
2566 case ARM_VFP_MVFR1:
b7bcbe95
FB
2567 /* Writes are ignored. */
2568 break;
40f137e1 2569 case ARM_VFP_FPSCR:
4373f3ce
PB
2570 gen_helper_vfp_set_fpscr(cpu_env, tmp);
2571 dead_tmp(tmp);
b5ff1b31 2572 gen_lookup_tb(s);
b7bcbe95 2573 break;
40f137e1 2574 case ARM_VFP_FPEXC:
9ee6e8bb
PB
2575 if (IS_USER(s))
2576 return 1;
4373f3ce 2577 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1
PB
2578 gen_lookup_tb(s);
2579 break;
2580 case ARM_VFP_FPINST:
2581 case ARM_VFP_FPINST2:
4373f3ce 2582 store_cpu_field(tmp, vfp.xregs[rn]);
40f137e1 2583 break;
b7bcbe95
FB
2584 default:
2585 return 1;
2586 }
2587 } else {
4373f3ce 2588 gen_vfp_msr(tmp);
b7bcbe95
FB
2589 gen_mov_vreg_F0(0, rn);
2590 }
2591 }
2592 }
2593 } else {
2594 /* data processing */
2595 /* The opcode is in bits 23, 21, 20 and 6. */
2596 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
2597 if (dp) {
2598 if (op == 15) {
2599 /* rn is opcode */
2600 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
2601 } else {
2602 /* rn is register number */
9ee6e8bb 2603 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2604 }
2605
2606 if (op == 15 && (rn == 15 || rn > 17)) {
2607 /* Integer or single precision destination. */
9ee6e8bb 2608 rd = VFP_SREG_D(insn);
b7bcbe95 2609 } else {
9ee6e8bb 2610 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2611 }
2612
2613 if (op == 15 && (rn == 16 || rn == 17)) {
2614 /* Integer source. */
2615 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2616 } else {
9ee6e8bb 2617 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2618 }
2619 } else {
9ee6e8bb 2620 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2621 if (op == 15 && rn == 15) {
2622 /* Double precision destination. */
9ee6e8bb
PB
2623 VFP_DREG_D(rd, insn);
2624 } else {
2625 rd = VFP_SREG_D(insn);
2626 }
2627 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2628 }
2629
2630 veclen = env->vfp.vec_len;
2631 if (op == 15 && rn > 3)
2632 veclen = 0;
2633
2634 /* Shut up compiler warnings. */
2635 delta_m = 0;
2636 delta_d = 0;
2637 bank_mask = 0;
3b46e624 2638
b7bcbe95
FB
2639 if (veclen > 0) {
2640 if (dp)
2641 bank_mask = 0xc;
2642 else
2643 bank_mask = 0x18;
2644
2645 /* Figure out what type of vector operation this is. */
2646 if ((rd & bank_mask) == 0) {
2647 /* scalar */
2648 veclen = 0;
2649 } else {
2650 if (dp)
2651 delta_d = (env->vfp.vec_stride >> 1) + 1;
2652 else
2653 delta_d = env->vfp.vec_stride + 1;
2654
2655 if ((rm & bank_mask) == 0) {
2656 /* mixed scalar/vector */
2657 delta_m = 0;
2658 } else {
2659 /* vector */
2660 delta_m = delta_d;
2661 }
2662 }
2663 }
2664
2665 /* Load the initial operands. */
2666 if (op == 15) {
2667 switch (rn) {
2668 case 16:
2669 case 17:
2670 /* Integer source */
2671 gen_mov_F0_vreg(0, rm);
2672 break;
2673 case 8:
2674 case 9:
2675 /* Compare */
2676 gen_mov_F0_vreg(dp, rd);
2677 gen_mov_F1_vreg(dp, rm);
2678 break;
2679 case 10:
2680 case 11:
2681 /* Compare with zero */
2682 gen_mov_F0_vreg(dp, rd);
2683 gen_vfp_F1_ld0(dp);
2684 break;
9ee6e8bb
PB
2685 case 20:
2686 case 21:
2687 case 22:
2688 case 23:
2689 /* Source and destination the same. */
2690 gen_mov_F0_vreg(dp, rd);
2691 break;
b7bcbe95
FB
2692 default:
2693 /* One source operand. */
2694 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2695 break;
b7bcbe95
FB
2696 }
2697 } else {
2698 /* Two source operands. */
2699 gen_mov_F0_vreg(dp, rn);
2700 gen_mov_F1_vreg(dp, rm);
2701 }
2702
2703 for (;;) {
2704 /* Perform the calculation. */
2705 switch (op) {
2706 case 0: /* mac: fd + (fn * fm) */
2707 gen_vfp_mul(dp);
2708 gen_mov_F1_vreg(dp, rd);
2709 gen_vfp_add(dp);
2710 break;
2711 case 1: /* nmac: fd - (fn * fm) */
2712 gen_vfp_mul(dp);
2713 gen_vfp_neg(dp);
2714 gen_mov_F1_vreg(dp, rd);
2715 gen_vfp_add(dp);
2716 break;
2717 case 2: /* msc: -fd + (fn * fm) */
2718 gen_vfp_mul(dp);
2719 gen_mov_F1_vreg(dp, rd);
2720 gen_vfp_sub(dp);
2721 break;
2722 case 3: /* nmsc: -fd - (fn * fm) */
2723 gen_vfp_mul(dp);
2724 gen_mov_F1_vreg(dp, rd);
2725 gen_vfp_add(dp);
2726 gen_vfp_neg(dp);
2727 break;
2728 case 4: /* mul: fn * fm */
2729 gen_vfp_mul(dp);
2730 break;
2731 case 5: /* nmul: -(fn * fm) */
2732 gen_vfp_mul(dp);
2733 gen_vfp_neg(dp);
2734 break;
2735 case 6: /* add: fn + fm */
2736 gen_vfp_add(dp);
2737 break;
2738 case 7: /* sub: fn - fm */
2739 gen_vfp_sub(dp);
2740 break;
2741 case 8: /* div: fn / fm */
2742 gen_vfp_div(dp);
2743 break;
9ee6e8bb
PB
2744 case 14: /* fconst */
2745 if (!arm_feature(env, ARM_FEATURE_VFP3))
2746 return 1;
2747
2748 n = (insn << 12) & 0x80000000;
2749 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2750 if (dp) {
2751 if (i & 0x40)
2752 i |= 0x3f80;
2753 else
2754 i |= 0x4000;
2755 n |= i << 16;
4373f3ce 2756 tcg_gen_movi_i64(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb
PB
2757 } else {
2758 if (i & 0x40)
2759 i |= 0x780;
2760 else
2761 i |= 0x800;
2762 n |= i << 19;
4373f3ce 2763 tcg_gen_movi_i32(cpu_F0d, ((uint64_t)n) << 32);
9ee6e8bb 2764 }
9ee6e8bb 2765 break;
b7bcbe95
FB
2766 case 15: /* extension space */
2767 switch (rn) {
2768 case 0: /* cpy */
2769 /* no-op */
2770 break;
2771 case 1: /* abs */
2772 gen_vfp_abs(dp);
2773 break;
2774 case 2: /* neg */
2775 gen_vfp_neg(dp);
2776 break;
2777 case 3: /* sqrt */
2778 gen_vfp_sqrt(dp);
2779 break;
2780 case 8: /* cmp */
2781 gen_vfp_cmp(dp);
2782 break;
2783 case 9: /* cmpe */
2784 gen_vfp_cmpe(dp);
2785 break;
2786 case 10: /* cmpz */
2787 gen_vfp_cmp(dp);
2788 break;
2789 case 11: /* cmpez */
2790 gen_vfp_F1_ld0(dp);
2791 gen_vfp_cmpe(dp);
2792 break;
2793 case 15: /* single<->double conversion */
2794 if (dp)
4373f3ce 2795 gen_helper_vfp_fcvtsd(cpu_F0s, cpu_F0d, cpu_env);
b7bcbe95 2796 else
4373f3ce 2797 gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
b7bcbe95
FB
2798 break;
2799 case 16: /* fuito */
2800 gen_vfp_uito(dp);
2801 break;
2802 case 17: /* fsito */
2803 gen_vfp_sito(dp);
2804 break;
9ee6e8bb
PB
2805 case 20: /* fshto */
2806 if (!arm_feature(env, ARM_FEATURE_VFP3))
2807 return 1;
2808 gen_vfp_shto(dp, rm);
2809 break;
2810 case 21: /* fslto */
2811 if (!arm_feature(env, ARM_FEATURE_VFP3))
2812 return 1;
2813 gen_vfp_slto(dp, rm);
2814 break;
2815 case 22: /* fuhto */
2816 if (!arm_feature(env, ARM_FEATURE_VFP3))
2817 return 1;
2818 gen_vfp_uhto(dp, rm);
2819 break;
2820 case 23: /* fulto */
2821 if (!arm_feature(env, ARM_FEATURE_VFP3))
2822 return 1;
2823 gen_vfp_ulto(dp, rm);
2824 break;
b7bcbe95
FB
2825 case 24: /* ftoui */
2826 gen_vfp_toui(dp);
2827 break;
2828 case 25: /* ftouiz */
2829 gen_vfp_touiz(dp);
2830 break;
2831 case 26: /* ftosi */
2832 gen_vfp_tosi(dp);
2833 break;
2834 case 27: /* ftosiz */
2835 gen_vfp_tosiz(dp);
2836 break;
9ee6e8bb
PB
2837 case 28: /* ftosh */
2838 if (!arm_feature(env, ARM_FEATURE_VFP3))
2839 return 1;
2840 gen_vfp_tosh(dp, rm);
2841 break;
2842 case 29: /* ftosl */
2843 if (!arm_feature(env, ARM_FEATURE_VFP3))
2844 return 1;
2845 gen_vfp_tosl(dp, rm);
2846 break;
2847 case 30: /* ftouh */
2848 if (!arm_feature(env, ARM_FEATURE_VFP3))
2849 return 1;
2850 gen_vfp_touh(dp, rm);
2851 break;
2852 case 31: /* ftoul */
2853 if (!arm_feature(env, ARM_FEATURE_VFP3))
2854 return 1;
2855 gen_vfp_toul(dp, rm);
2856 break;
b7bcbe95
FB
2857 default: /* undefined */
2858 printf ("rn:%d\n", rn);
2859 return 1;
2860 }
2861 break;
2862 default: /* undefined */
2863 printf ("op:%d\n", op);
2864 return 1;
2865 }
2866
2867 /* Write back the result. */
2868 if (op == 15 && (rn >= 8 && rn <= 11))
2869 ; /* Comparison, do nothing. */
2870 else if (op == 15 && rn > 17)
2871 /* Integer result. */
2872 gen_mov_vreg_F0(0, rd);
2873 else if (op == 15 && rn == 15)
2874 /* conversion */
2875 gen_mov_vreg_F0(!dp, rd);
2876 else
2877 gen_mov_vreg_F0(dp, rd);
2878
2879 /* break out of the loop if we have finished */
2880 if (veclen == 0)
2881 break;
2882
2883 if (op == 15 && delta_m == 0) {
2884 /* single source one-many */
2885 while (veclen--) {
2886 rd = ((rd + delta_d) & (bank_mask - 1))
2887 | (rd & bank_mask);
2888 gen_mov_vreg_F0(dp, rd);
2889 }
2890 break;
2891 }
2892 /* Setup the next operands. */
2893 veclen--;
2894 rd = ((rd + delta_d) & (bank_mask - 1))
2895 | (rd & bank_mask);
2896
2897 if (op == 15) {
2898 /* One source operand. */
2899 rm = ((rm + delta_m) & (bank_mask - 1))
2900 | (rm & bank_mask);
2901 gen_mov_F0_vreg(dp, rm);
2902 } else {
2903 /* Two source operands. */
2904 rn = ((rn + delta_d) & (bank_mask - 1))
2905 | (rn & bank_mask);
2906 gen_mov_F0_vreg(dp, rn);
2907 if (delta_m) {
2908 rm = ((rm + delta_m) & (bank_mask - 1))
2909 | (rm & bank_mask);
2910 gen_mov_F1_vreg(dp, rm);
2911 }
2912 }
2913 }
2914 }
2915 break;
2916 case 0xc:
2917 case 0xd:
9ee6e8bb 2918 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
2919 /* two-register transfer */
2920 rn = (insn >> 16) & 0xf;
2921 rd = (insn >> 12) & 0xf;
2922 if (dp) {
9ee6e8bb
PB
2923 VFP_DREG_M(rm, insn);
2924 } else {
2925 rm = VFP_SREG_M(insn);
2926 }
b7bcbe95 2927
18c9b560 2928 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2929 /* vfp->arm */
2930 if (dp) {
4373f3ce
PB
2931 gen_mov_F0_vreg(0, rm * 2);
2932 tmp = gen_vfp_mrs();
2933 store_reg(s, rd, tmp);
2934 gen_mov_F0_vreg(0, rm * 2 + 1);
2935 tmp = gen_vfp_mrs();
2936 store_reg(s, rn, tmp);
b7bcbe95
FB
2937 } else {
2938 gen_mov_F0_vreg(0, rm);
4373f3ce
PB
2939 tmp = gen_vfp_mrs();
2940 store_reg(s, rn, tmp);
b7bcbe95 2941 gen_mov_F0_vreg(0, rm + 1);
4373f3ce
PB
2942 tmp = gen_vfp_mrs();
2943 store_reg(s, rd, tmp);
b7bcbe95
FB
2944 }
2945 } else {
2946 /* arm->vfp */
2947 if (dp) {
4373f3ce
PB
2948 tmp = load_reg(s, rd);
2949 gen_vfp_msr(tmp);
2950 gen_mov_vreg_F0(0, rm * 2);
2951 tmp = load_reg(s, rn);
2952 gen_vfp_msr(tmp);
2953 gen_mov_vreg_F0(0, rm * 2 + 1);
b7bcbe95 2954 } else {
4373f3ce
PB
2955 tmp = load_reg(s, rn);
2956 gen_vfp_msr(tmp);
b7bcbe95 2957 gen_mov_vreg_F0(0, rm);
4373f3ce
PB
2958 tmp = load_reg(s, rd);
2959 gen_vfp_msr(tmp);
b7bcbe95
FB
2960 gen_mov_vreg_F0(0, rm + 1);
2961 }
2962 }
2963 } else {
2964 /* Load/store */
2965 rn = (insn >> 16) & 0xf;
2966 if (dp)
9ee6e8bb 2967 VFP_DREG_D(rd, insn);
b7bcbe95 2968 else
9ee6e8bb
PB
2969 rd = VFP_SREG_D(insn);
2970 if (s->thumb && rn == 15) {
2971 gen_op_movl_T1_im(s->pc & ~2);
2972 } else {
2973 gen_movl_T1_reg(s, rn);
2974 }
b7bcbe95
FB
2975 if ((insn & 0x01200000) == 0x01000000) {
2976 /* Single load/store */
2977 offset = (insn & 0xff) << 2;
2978 if ((insn & (1 << 23)) == 0)
2979 offset = -offset;
2980 gen_op_addl_T1_im(offset);
2981 if (insn & (1 << 20)) {
b5ff1b31 2982 gen_vfp_ld(s, dp);
b7bcbe95
FB
2983 gen_mov_vreg_F0(dp, rd);
2984 } else {
2985 gen_mov_F0_vreg(dp, rd);
b5ff1b31 2986 gen_vfp_st(s, dp);
b7bcbe95
FB
2987 }
2988 } else {
2989 /* load/store multiple */
2990 if (dp)
2991 n = (insn >> 1) & 0x7f;
2992 else
2993 n = insn & 0xff;
2994
2995 if (insn & (1 << 24)) /* pre-decrement */
2996 gen_op_addl_T1_im(-((insn & 0xff) << 2));
2997
2998 if (dp)
2999 offset = 8;
3000 else
3001 offset = 4;
3002 for (i = 0; i < n; i++) {
18c9b560 3003 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 3004 /* load */
b5ff1b31 3005 gen_vfp_ld(s, dp);
b7bcbe95
FB
3006 gen_mov_vreg_F0(dp, rd + i);
3007 } else {
3008 /* store */
3009 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 3010 gen_vfp_st(s, dp);
b7bcbe95
FB
3011 }
3012 gen_op_addl_T1_im(offset);
3013 }
3014 if (insn & (1 << 21)) {
3015 /* writeback */
3016 if (insn & (1 << 24))
3017 offset = -offset * n;
3018 else if (dp && (insn & 1))
3019 offset = 4;
3020 else
3021 offset = 0;
3022
3023 if (offset != 0)
3024 gen_op_addl_T1_im(offset);
3025 gen_movl_reg_T1(s, rn);
3026 }
3027 }
3028 }
3029 break;
3030 default:
3031 /* Should never happen. */
3032 return 1;
3033 }
3034 return 0;
3035}
3036
6e256c93 3037static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 3038{
6e256c93
FB
3039 TranslationBlock *tb;
3040
3041 tb = s->tb;
3042 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 3043 tcg_gen_goto_tb(n);
6e256c93 3044 gen_op_movl_T0_im(dest);
b26eefb6 3045 gen_set_pc_T0();
57fec1fe 3046 tcg_gen_exit_tb((long)tb + n);
6e256c93
FB
3047 } else {
3048 gen_op_movl_T0_im(dest);
b26eefb6 3049 gen_set_pc_T0();
57fec1fe 3050 tcg_gen_exit_tb(0);
6e256c93 3051 }
c53be334
FB
3052}
3053
8aaca4c0
FB
3054static inline void gen_jmp (DisasContext *s, uint32_t dest)
3055{
3056 if (__builtin_expect(s->singlestep_enabled, 0)) {
3057 /* An indirect jump so that we still trigger the debug exception. */
5899f386 3058 if (s->thumb)
d9ba4830
PB
3059 dest |= 1;
3060 gen_bx_im(s, dest);
8aaca4c0 3061 } else {
6e256c93 3062 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
3063 s->is_jmp = DISAS_TB_JUMP;
3064 }
3065}
3066
d9ba4830 3067static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
b5ff1b31 3068{
ee097184 3069 if (x)
d9ba4830 3070 tcg_gen_sari_i32(t0, t0, 16);
b5ff1b31 3071 else
d9ba4830 3072 gen_sxth(t0);
ee097184 3073 if (y)
d9ba4830 3074 tcg_gen_sari_i32(t1, t1, 16);
b5ff1b31 3075 else
d9ba4830
PB
3076 gen_sxth(t1);
3077 tcg_gen_mul_i32(t0, t0, t1);
b5ff1b31
FB
3078}
3079
3080/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 3081static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
3082 uint32_t mask;
3083
3084 mask = 0;
3085 if (flags & (1 << 0))
3086 mask |= 0xff;
3087 if (flags & (1 << 1))
3088 mask |= 0xff00;
3089 if (flags & (1 << 2))
3090 mask |= 0xff0000;
3091 if (flags & (1 << 3))
3092 mask |= 0xff000000;
9ee6e8bb 3093
2ae23e75 3094 /* Mask out undefined bits. */
9ee6e8bb
PB
3095 mask &= ~CPSR_RESERVED;
3096 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 3097 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 3098 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 3099 mask &= ~CPSR_IT;
9ee6e8bb 3100 /* Mask out execution state bits. */
2ae23e75 3101 if (!spsr)
e160c51c 3102 mask &= ~CPSR_EXEC;
b5ff1b31
FB
3103 /* Mask out privileged bits. */
3104 if (IS_USER(s))
9ee6e8bb 3105 mask &= CPSR_USER;
b5ff1b31
FB
3106 return mask;
3107}
3108
3109/* Returns nonzero if access to the PSR is not permitted. */
3110static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
3111{
d9ba4830 3112 TCGv tmp;
b5ff1b31
FB
3113 if (spsr) {
3114 /* ??? This is also undefined in system mode. */
3115 if (IS_USER(s))
3116 return 1;
d9ba4830
PB
3117
3118 tmp = load_cpu_field(spsr);
3119 tcg_gen_andi_i32(tmp, tmp, ~mask);
3120 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
3121 tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
3122 store_cpu_field(tmp, spsr);
b5ff1b31 3123 } else {
d9ba4830 3124 gen_set_cpsr(cpu_T[0], mask);
b5ff1b31
FB
3125 }
3126 gen_lookup_tb(s);
3127 return 0;
3128}
3129
9ee6e8bb 3130/* Generate an old-style exception return. */
b5ff1b31
FB
3131static void gen_exception_return(DisasContext *s)
3132{
d9ba4830 3133 TCGv tmp;
b26eefb6 3134 gen_set_pc_T0();
d9ba4830
PB
3135 tmp = load_cpu_field(spsr);
3136 gen_set_cpsr(tmp, 0xffffffff);
3137 dead_tmp(tmp);
b5ff1b31
FB
3138 s->is_jmp = DISAS_UPDATE;
3139}
3140
b0109805
PB
3141/* Generate a v6 exception return. Marks both values as dead. */
3142static void gen_rfe(DisasContext *s, TCGv pc, TCGv cpsr)
2c0262af 3143{
b0109805
PB
3144 gen_set_cpsr(cpsr, 0xffffffff);
3145 dead_tmp(cpsr);
3146 store_reg(s, 15, pc);
9ee6e8bb
PB
3147 s->is_jmp = DISAS_UPDATE;
3148}
3b46e624 3149
9ee6e8bb
PB
3150static inline void
3151gen_set_condexec (DisasContext *s)
3152{
3153 if (s->condexec_mask) {
8f01245e
PB
3154 uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
3155 TCGv tmp = new_tmp();
3156 tcg_gen_movi_i32(tmp, val);
d9ba4830 3157 store_cpu_field(tmp, condexec_bits);
9ee6e8bb
PB
3158 }
3159}
3b46e624 3160
9ee6e8bb
PB
3161static void gen_nop_hint(DisasContext *s, int val)
3162{
3163 switch (val) {
3164 case 3: /* wfi */
3165 gen_op_movl_T0_im((long)s->pc);
b26eefb6 3166 gen_set_pc_T0();
9ee6e8bb
PB
3167 s->is_jmp = DISAS_WFI;
3168 break;
3169 case 2: /* wfe */
3170 case 4: /* sev */
3171 /* TODO: Implement SEV and WFE. May help SMP performance. */
3172 default: /* nop */
3173 break;
3174 }
3175}
99c475ab 3176
9ee6e8bb
PB
3177/* Neon shift by constant. The actual ops are the same as used for variable
3178 shifts. [OP][U][SIZE] */
3179static GenOpFunc *gen_neon_shift_im[8][2][4] = {
3180 { /* 0 */ /* VSHR */
3181 {
3182 gen_op_neon_shl_u8,
3183 gen_op_neon_shl_u16,
3184 gen_op_neon_shl_u32,
3185 gen_op_neon_shl_u64
3186 }, {
3187 gen_op_neon_shl_s8,
3188 gen_op_neon_shl_s16,
3189 gen_op_neon_shl_s32,
3190 gen_op_neon_shl_s64
3191 }
3192 }, { /* 1 */ /* VSRA */
3193 {
3194 gen_op_neon_shl_u8,
3195 gen_op_neon_shl_u16,
3196 gen_op_neon_shl_u32,
3197 gen_op_neon_shl_u64
3198 }, {
3199 gen_op_neon_shl_s8,
3200 gen_op_neon_shl_s16,
3201 gen_op_neon_shl_s32,
3202 gen_op_neon_shl_s64
3203 }
3204 }, { /* 2 */ /* VRSHR */
3205 {
3206 gen_op_neon_rshl_u8,
3207 gen_op_neon_rshl_u16,
3208 gen_op_neon_rshl_u32,
3209 gen_op_neon_rshl_u64
3210 }, {
3211 gen_op_neon_rshl_s8,
3212 gen_op_neon_rshl_s16,
3213 gen_op_neon_rshl_s32,
3214 gen_op_neon_rshl_s64
3215 }
3216 }, { /* 3 */ /* VRSRA */
3217 {
3218 gen_op_neon_rshl_u8,
3219 gen_op_neon_rshl_u16,
3220 gen_op_neon_rshl_u32,
3221 gen_op_neon_rshl_u64
3222 }, {
3223 gen_op_neon_rshl_s8,
3224 gen_op_neon_rshl_s16,
3225 gen_op_neon_rshl_s32,
3226 gen_op_neon_rshl_s64
3227 }
3228 }, { /* 4 */
3229 {
3230 NULL, NULL, NULL, NULL
3231 }, { /* VSRI */
3232 gen_op_neon_shl_u8,
3233 gen_op_neon_shl_u16,
3234 gen_op_neon_shl_u32,
3235 gen_op_neon_shl_u64,
3236 }
3237 }, { /* 5 */
3238 { /* VSHL */
3239 gen_op_neon_shl_u8,
3240 gen_op_neon_shl_u16,
3241 gen_op_neon_shl_u32,
3242 gen_op_neon_shl_u64,
3243 }, { /* VSLI */
3244 gen_op_neon_shl_u8,
3245 gen_op_neon_shl_u16,
3246 gen_op_neon_shl_u32,
3247 gen_op_neon_shl_u64,
3248 }
3249 }, { /* 6 */ /* VQSHL */
3250 {
3251 gen_op_neon_qshl_u8,
3252 gen_op_neon_qshl_u16,
3253 gen_op_neon_qshl_u32,
3254 gen_op_neon_qshl_u64
3255 }, {
3256 gen_op_neon_qshl_s8,
3257 gen_op_neon_qshl_s16,
3258 gen_op_neon_qshl_s32,
3259 gen_op_neon_qshl_s64
3260 }
3261 }, { /* 7 */ /* VQSHLU */
3262 {
3263 gen_op_neon_qshl_u8,
3264 gen_op_neon_qshl_u16,
3265 gen_op_neon_qshl_u32,
3266 gen_op_neon_qshl_u64
3267 }, {
3268 gen_op_neon_qshl_u8,
3269 gen_op_neon_qshl_u16,
3270 gen_op_neon_qshl_u32,
3271 gen_op_neon_qshl_u64
3272 }
99c475ab 3273 }
9ee6e8bb
PB
3274};
3275
3276/* [R][U][size - 1] */
3277static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
3278 {
3279 {
3280 gen_op_neon_shl_u16,
3281 gen_op_neon_shl_u32,
3282 gen_op_neon_shl_u64
3283 }, {
3284 gen_op_neon_shl_s16,
3285 gen_op_neon_shl_s32,
3286 gen_op_neon_shl_s64
3287 }
3288 }, {
3289 {
3290 gen_op_neon_rshl_u16,
3291 gen_op_neon_rshl_u32,
3292 gen_op_neon_rshl_u64
3293 }, {
3294 gen_op_neon_rshl_s16,
3295 gen_op_neon_rshl_s32,
3296 gen_op_neon_rshl_s64
3297 }
2c0262af 3298 }
9ee6e8bb 3299};
99c475ab 3300
/* Narrowing a 32-bit element to 32 bits is the identity operation.
   Provided for uniformity with the 8/16-bit narrow helpers so it can
   appear in the gen_neon_narrow dispatch table.
   Fix: declare with (void) -- the original empty parameter list "()"
   is an old-style unprototyped declaration in C.  */
static inline void
gen_op_neon_narrow_u32 (void)
{
    /* No-op.  */
}
3306
3307static GenOpFunc *gen_neon_narrow[3] = {
3308 gen_op_neon_narrow_u8,
3309 gen_op_neon_narrow_u16,
3310 gen_op_neon_narrow_u32
3311};
3312
3313static GenOpFunc *gen_neon_narrow_satu[3] = {
3314 gen_op_neon_narrow_sat_u8,
3315 gen_op_neon_narrow_sat_u16,
3316 gen_op_neon_narrow_sat_u32
3317};
3318
3319static GenOpFunc *gen_neon_narrow_sats[3] = {
3320 gen_op_neon_narrow_sat_s8,
3321 gen_op_neon_narrow_sat_s16,
3322 gen_op_neon_narrow_sat_s32
3323};
3324
/* Emit an element-wise add (T0 += T1) for the given element size.
   Returns nonzero for an unsupported size.  */
static inline int gen_neon_add(int size)
{
    switch (size) {
    case 0: gen_op_neon_add_u8(); break;
    case 1: gen_op_neon_add_u16(); break;
    case 2: gen_op_addl_T0_T1(); break;  /* 32-bit: plain add.  */
    default: return 1;
    }
    return 0;
}
3335
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_op_neon_pmax_s32  gen_op_neon_max_s32
#define gen_op_neon_pmax_u32  gen_op_neon_max_u32
#define gen_op_neon_pmin_s32  gen_op_neon_min_s32
#define gen_op_neon_pmin_u32  gen_op_neon_min_u32

/* Dispatch an integer Neon op on (size, u): size selects the element
   width (8/16/32), u selects the unsigned variant.  Returns 1 from the
   enclosing function for unsupported combinations.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_op_neon_##name##_s8(); break; \
    case 1: gen_op_neon_##name##_u8(); break; \
    case 2: gen_op_neon_##name##_s16(); break; \
    case 3: gen_op_neon_##name##_u16(); break; \
    case 4: gen_op_neon_##name##_s32(); break; \
    case 5: gen_op_neon_##name##_u32(); break; \
    default: return 1; \
    }} while (0)
3352
3353static inline void
3354gen_neon_movl_scratch_T0(int scratch)
3355{
3356 uint32_t offset;
3357
3358 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3359 gen_op_neon_setreg_T0(offset);
3360}
3361
3362static inline void
3363gen_neon_movl_scratch_T1(int scratch)
3364{
3365 uint32_t offset;
3366
3367 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3368 gen_op_neon_setreg_T1(offset);
3369}
3370
3371static inline void
3372gen_neon_movl_T0_scratch(int scratch)
3373{
3374 uint32_t offset;
3375
3376 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3377 gen_op_neon_getreg_T0(offset);
3378}
3379
3380static inline void
3381gen_neon_movl_T1_scratch(int scratch)
3382{
3383 uint32_t offset;
3384
3385 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3386 gen_op_neon_getreg_T1(offset);
3387}
3388
/* Unsigned widen of a 32-bit element: the high half (T1) is zero.  */
static inline void gen_op_neon_widen_u32(void)
{
    gen_op_movl_T1_im(0);
}
3393
3394static inline void gen_neon_get_scalar(int size, int reg)
3395{
3396 if (size == 1) {
3397 NEON_GET_REG(T0, reg >> 1, reg & 1);
3398 } else {
3399 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
3400 if (reg & 1)
3401 gen_op_neon_dup_low16();
3402 else
3403 gen_op_neon_dup_high16();
3404 }
3405}
3406
3407static void gen_neon_unzip(int reg, int q, int tmp, int size)
3408{
3409 int n;
3410
3411 for (n = 0; n < q + 1; n += 2) {
3412 NEON_GET_REG(T0, reg, n);
3413 NEON_GET_REG(T0, reg, n + n);
3414 switch (size) {
3415 case 0: gen_op_neon_unzip_u8(); break;
3416 case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same. */
3417 case 2: /* no-op */; break;
3418 default: abort();
3419 }
3420 gen_neon_movl_scratch_T0(tmp + n);
3421 gen_neon_movl_scratch_T1(tmp + n + 1);
3422 }
3423}
3424
/* Layout of each VLDn/VSTn "type" encoding: how many registers are
   transferred, the interleave factor, and the register spacing.  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    { .nregs = 4, .interleave = 4, .spacing = 1 },
    { .nregs = 4, .interleave = 4, .spacing = 2 },
    { .nregs = 4, .interleave = 1, .spacing = 1 },
    { .nregs = 4, .interleave = 2, .spacing = 1 },
    { .nregs = 3, .interleave = 3, .spacing = 1 },
    { .nregs = 3, .interleave = 3, .spacing = 2 },
    { .nregs = 3, .interleave = 1, .spacing = 1 },
    { .nregs = 1, .interleave = 1, .spacing = 1 },
    { .nregs = 2, .interleave = 2, .spacing = 1 },
    { .nregs = 2, .interleave = 2, .spacing = 2 },
    { .nregs = 2, .interleave = 1, .spacing = 1 }
};
3442
3443/* Translate a NEON load/store element instruction. Return nonzero if the
3444 instruction is invalid. */
3445static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
3446{
3447 int rd, rn, rm;
3448 int op;
3449 int nregs;
3450 int interleave;
3451 int stride;
3452 int size;
3453 int reg;
3454 int pass;
3455 int load;
3456 int shift;
3457 uint32_t mask;
3458 int n;
b0109805 3459 TCGv tmp;
9ee6e8bb
PB
3460
3461 if (!vfp_enabled(env))
3462 return 1;
3463 VFP_DREG_D(rd, insn);
3464 rn = (insn >> 16) & 0xf;
3465 rm = insn & 0xf;
3466 load = (insn & (1 << 21)) != 0;
3467 if ((insn & (1 << 23)) == 0) {
3468 /* Load store all elements. */
3469 op = (insn >> 8) & 0xf;
3470 size = (insn >> 6) & 3;
3471 if (op > 10 || size == 3)
3472 return 1;
3473 nregs = neon_ls_element_type[op].nregs;
3474 interleave = neon_ls_element_type[op].interleave;
3475 gen_movl_T1_reg(s, rn);
3476 stride = (1 << size) * interleave;
3477 for (reg = 0; reg < nregs; reg++) {
3478 if (interleave > 2 || (interleave == 2 && nregs == 2)) {
3479 gen_movl_T1_reg(s, rn);
3480 gen_op_addl_T1_im((1 << size) * reg);
3481 } else if (interleave == 2 && nregs == 4 && reg == 2) {
3482 gen_movl_T1_reg(s, rn);
3483 gen_op_addl_T1_im(1 << size);
3484 }
3485 for (pass = 0; pass < 2; pass++) {
3486 if (size == 2) {
3487 if (load) {
b0109805
PB
3488 tmp = gen_ld32(cpu_T[1], IS_USER(s));
3489 tcg_gen_mov_i32(cpu_T[0], tmp);
3490 dead_tmp(tmp);
9ee6e8bb
PB
3491 NEON_SET_REG(T0, rd, pass);
3492 } else {
3493 NEON_GET_REG(T0, rd, pass);
b0109805
PB
3494 tmp = new_tmp();
3495 tcg_gen_mov_i32(tmp, cpu_T[0]);
3496 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3497 }
3498 gen_op_addl_T1_im(stride);
3499 } else if (size == 1) {
3500 if (load) {
b0109805
PB
3501 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3502 tcg_gen_mov_i32(cpu_T[0], tmp);
3503 dead_tmp(tmp);
9ee6e8bb
PB
3504 gen_op_addl_T1_im(stride);
3505 gen_op_movl_T2_T0();
b0109805
PB
3506 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3507 tcg_gen_mov_i32(cpu_T[0], tmp);
3508 dead_tmp(tmp);
9ee6e8bb
PB
3509 gen_op_addl_T1_im(stride);
3510 gen_op_neon_insert_elt(16, 0xffff);
3511 NEON_SET_REG(T2, rd, pass);
3512 } else {
3513 NEON_GET_REG(T2, rd, pass);
3514 gen_op_movl_T0_T2();
b0109805
PB
3515 tmp = new_tmp();
3516 tcg_gen_mov_i32(tmp, cpu_T[0]);
3517 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3518 gen_op_addl_T1_im(stride);
3519 gen_op_neon_extract_elt(16, 0xffff0000);
b0109805
PB
3520 tmp = new_tmp();
3521 tcg_gen_mov_i32(tmp, cpu_T[0]);
3522 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3523 gen_op_addl_T1_im(stride);
3524 }
3525 } else /* size == 0 */ {
3526 if (load) {
3527 mask = 0xff;
3528 for (n = 0; n < 4; n++) {
b0109805
PB
3529 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3530 tcg_gen_mov_i32(cpu_T[0], tmp);
3531 dead_tmp(tmp);
9ee6e8bb
PB
3532 gen_op_addl_T1_im(stride);
3533 if (n == 0) {
3534 gen_op_movl_T2_T0();
3535 } else {
3536 gen_op_neon_insert_elt(n * 8, ~mask);
3537 }
3538 mask <<= 8;
3539 }
3540 NEON_SET_REG(T2, rd, pass);
3541 } else {
3542 NEON_GET_REG(T2, rd, pass);
3543 mask = 0xff;
3544 for (n = 0; n < 4; n++) {
3545 if (n == 0) {
3546 gen_op_movl_T0_T2();
3547 } else {
3548 gen_op_neon_extract_elt(n * 8, mask);
3549 }
b0109805
PB
3550 tmp = new_tmp();
3551 tcg_gen_mov_i32(tmp, cpu_T[0]);
3552 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3553 gen_op_addl_T1_im(stride);
3554 mask <<= 8;
3555 }
3556 }
3557 }
3558 }
3559 rd += neon_ls_element_type[op].spacing;
3560 }
3561 stride = nregs * 8;
3562 } else {
3563 size = (insn >> 10) & 3;
3564 if (size == 3) {
3565 /* Load single element to all lanes. */
3566 if (!load)
3567 return 1;
3568 size = (insn >> 6) & 3;
3569 nregs = ((insn >> 8) & 3) + 1;
3570 stride = (insn & (1 << 5)) ? 2 : 1;
ff8263a9 3571 gen_movl_T1_reg(s, rn);
9ee6e8bb
PB
3572 for (reg = 0; reg < nregs; reg++) {
3573 switch (size) {
3574 case 0:
b0109805
PB
3575 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
3576 tcg_gen_mov_i32(cpu_T[0], tmp);
3577 dead_tmp(tmp);
9ee6e8bb
PB
3578 gen_op_neon_dup_u8(0);
3579 break;
3580 case 1:
b0109805
PB
3581 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
3582 tcg_gen_mov_i32(cpu_T[0], tmp);
3583 dead_tmp(tmp);
9ee6e8bb
PB
3584 gen_op_neon_dup_low16();
3585 break;
3586 case 2:
b0109805
PB
3587 tmp = gen_ld32(cpu_T[0], IS_USER(s));
3588 tcg_gen_mov_i32(cpu_T[0], tmp);
3589 dead_tmp(tmp);
9ee6e8bb
PB
3590 break;
3591 case 3:
3592 return 1;
99c475ab 3593 }
9ee6e8bb
PB
3594 gen_op_addl_T1_im(1 << size);
3595 NEON_SET_REG(T0, rd, 0);
3596 NEON_SET_REG(T0, rd, 1);
3597 rd += stride;
3598 }
3599 stride = (1 << size) * nregs;
3600 } else {
3601 /* Single element. */
3602 pass = (insn >> 7) & 1;
3603 switch (size) {
3604 case 0:
3605 shift = ((insn >> 5) & 3) * 8;
3606 mask = 0xff << shift;
3607 stride = 1;
3608 break;
3609 case 1:
3610 shift = ((insn >> 6) & 1) * 16;
3611 mask = shift ? 0xffff0000 : 0xffff;
3612 stride = (insn & (1 << 5)) ? 2 : 1;
3613 break;
3614 case 2:
3615 shift = 0;
3616 mask = 0xffffffff;
3617 stride = (insn & (1 << 6)) ? 2 : 1;
3618 break;
3619 default:
3620 abort();
3621 }
3622 nregs = ((insn >> 8) & 3) + 1;
3623 gen_movl_T1_reg(s, rn);
3624 for (reg = 0; reg < nregs; reg++) {
3625 if (load) {
3626 if (size != 2) {
3627 NEON_GET_REG(T2, rd, pass);
3628 }
3629 switch (size) {
3630 case 0:
b0109805 3631 tmp = gen_ld8u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3632 break;
3633 case 1:
b0109805 3634 tmp = gen_ld16u(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3635 break;
3636 case 2:
b0109805 3637 tmp = gen_ld32(cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3638 break;
3639 }
b0109805
PB
3640 tcg_gen_mov_i32(cpu_T[0], tmp);
3641 dead_tmp(tmp);
9ee6e8bb
PB
3642 if (size != 2) {
3643 gen_op_neon_insert_elt(shift, ~mask);
3644 NEON_SET_REG(T0, rd, pass);
b0109805
PB
3645 } else {
3646 NEON_SET_REG(T0, rd, pass);
9ee6e8bb
PB
3647 }
3648 } else { /* Store */
3649 if (size == 2) {
3650 NEON_GET_REG(T0, rd, pass);
3651 } else {
3652 NEON_GET_REG(T2, rd, pass);
3653 gen_op_neon_extract_elt(shift, mask);
3654 }
b0109805
PB
3655 tmp = new_tmp();
3656 tcg_gen_mov_i32(tmp, cpu_T[0]);
9ee6e8bb
PB
3657 switch (size) {
3658 case 0:
b0109805 3659 gen_st8(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3660 break;
3661 case 1:
b0109805 3662 gen_st16(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb
PB
3663 break;
3664 case 2:
b0109805 3665 gen_st32(tmp, cpu_T[1], IS_USER(s));
9ee6e8bb 3666 break;
99c475ab 3667 }
99c475ab 3668 }
9ee6e8bb
PB
3669 rd += stride;
3670 gen_op_addl_T1_im(1 << size);
99c475ab 3671 }
9ee6e8bb 3672 stride = nregs * (1 << size);
99c475ab 3673 }
9ee6e8bb
PB
3674 }
3675 if (rm != 15) {
b26eefb6
PB
3676 TCGv base;
3677
3678 base = load_reg(s, rn);
9ee6e8bb 3679 if (rm == 13) {
b26eefb6 3680 tcg_gen_addi_i32(base, base, stride);
9ee6e8bb 3681 } else {
b26eefb6
PB
3682 TCGv index;
3683 index = load_reg(s, rm);
3684 tcg_gen_add_i32(base, base, index);
3685 dead_tmp(index);
9ee6e8bb 3686 }
b26eefb6 3687 store_reg(s, rn, base);
9ee6e8bb
PB
3688 }
3689 return 0;
3690}
3b46e624 3691
/* Translate a NEON data processing instruction.  Return nonzero if the
   instruction is invalid.
   In general we process vectors in 32-bit chunks.  This means we can reuse
   some of the scalar ops, and hopefully the code generated for 32-bit
   hosts won't be too awful.  The downside is that the few 64-bit operations
   (mainly shifts) get complicated.  */
2c0262af 3698
9ee6e8bb
PB
3699static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
3700{
3701 int op;
3702 int q;
3703 int rd, rn, rm;
3704 int size;
3705 int shift;
3706 int pass;
3707 int count;
3708 int pairwise;
3709 int u;
3710 int n;
3711 uint32_t imm;
3712
3713 if (!vfp_enabled(env))
3714 return 1;
3715 q = (insn & (1 << 6)) != 0;
3716 u = (insn >> 24) & 1;
3717 VFP_DREG_D(rd, insn);
3718 VFP_DREG_N(rn, insn);
3719 VFP_DREG_M(rm, insn);
3720 size = (insn >> 20) & 3;
3721 if ((insn & (1 << 23)) == 0) {
3722 /* Three register same length. */
3723 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
3724 if (size == 3 && (op == 1 || op == 5 || op == 16)) {
3725 for (pass = 0; pass < (q ? 2 : 1); pass++) {
3726 NEON_GET_REG(T0, rm, pass * 2);
3727 NEON_GET_REG(T1, rm, pass * 2 + 1);
3728 gen_neon_movl_scratch_T0(0);
3729 gen_neon_movl_scratch_T1(1);
3730 NEON_GET_REG(T0, rn, pass * 2);
3731 NEON_GET_REG(T1, rn, pass * 2 + 1);
3732 switch (op) {
3733 case 1: /* VQADD */
3734 if (u) {
3735 gen_op_neon_addl_saturate_u64();
2c0262af 3736 } else {
9ee6e8bb 3737 gen_op_neon_addl_saturate_s64();
2c0262af 3738 }
9ee6e8bb
PB
3739 break;
3740 case 5: /* VQSUB */
3741 if (u) {
3742 gen_op_neon_subl_saturate_u64();
1e8d4eec 3743 } else {
9ee6e8bb 3744 gen_op_neon_subl_saturate_s64();
1e8d4eec 3745 }
9ee6e8bb
PB
3746 break;
3747 case 16:
3748 if (u) {
3749 gen_op_neon_subl_u64();
3750 } else {
3751 gen_op_neon_addl_u64();
3752 }
3753 break;
3754 default:
3755 abort();
2c0262af 3756 }
9ee6e8bb
PB
3757 NEON_SET_REG(T0, rd, pass * 2);
3758 NEON_SET_REG(T1, rd, pass * 2 + 1);
2c0262af 3759 }
9ee6e8bb 3760 return 0;
2c0262af 3761 }
9ee6e8bb
PB
3762 switch (op) {
3763 case 8: /* VSHL */
3764 case 9: /* VQSHL */
3765 case 10: /* VRSHL */
3766 case 11: /* VQSHL */
3767 /* Shift operations have Rn and Rm reversed. */
3768 {
3769 int tmp;
3770 tmp = rn;
3771 rn = rm;
3772 rm = tmp;
3773 pairwise = 0;
3774 }
2c0262af 3775 break;
9ee6e8bb
PB
3776 case 20: /* VPMAX */
3777 case 21: /* VPMIN */
3778 case 23: /* VPADD */
3779 pairwise = 1;
2c0262af 3780 break;
9ee6e8bb
PB
3781 case 26: /* VPADD (float) */
3782 pairwise = (u && size < 2);
2c0262af 3783 break;
9ee6e8bb
PB
3784 case 30: /* VPMIN/VPMAX (float) */
3785 pairwise = u;
2c0262af 3786 break;
9ee6e8bb
PB
3787 default:
3788 pairwise = 0;
2c0262af 3789 break;
9ee6e8bb
PB
3790 }
3791 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3792
3793 if (pairwise) {
3794 /* Pairwise. */
3795 if (q)
3796 n = (pass & 1) * 2;
2c0262af 3797 else
9ee6e8bb
PB
3798 n = 0;
3799 if (pass < q + 1) {
3800 NEON_GET_REG(T0, rn, n);
3801 NEON_GET_REG(T1, rn, n + 1);
3802 } else {
3803 NEON_GET_REG(T0, rm, n);
3804 NEON_GET_REG(T1, rm, n + 1);
3805 }
3806 } else {
3807 /* Elementwise. */
3808 NEON_GET_REG(T0, rn, pass);
3809 NEON_GET_REG(T1, rm, pass);
3810 }
3811 switch (op) {
3812 case 0: /* VHADD */
3813 GEN_NEON_INTEGER_OP(hadd);
3814 break;
3815 case 1: /* VQADD */
3816 switch (size << 1| u) {
3817 case 0: gen_op_neon_qadd_s8(); break;
3818 case 1: gen_op_neon_qadd_u8(); break;
3819 case 2: gen_op_neon_qadd_s16(); break;
3820 case 3: gen_op_neon_qadd_u16(); break;
3821 case 4: gen_op_addl_T0_T1_saturate(); break;
3822 case 5: gen_op_addl_T0_T1_usaturate(); break;
3823 default: abort();
3824 }
2c0262af 3825 break;
9ee6e8bb
PB
3826 case 2: /* VRHADD */
3827 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 3828 break;
9ee6e8bb
PB
3829 case 3: /* Logic ops. */
3830 switch ((u << 2) | size) {
3831 case 0: /* VAND */
2c0262af 3832 gen_op_andl_T0_T1();
9ee6e8bb
PB
3833 break;
3834 case 1: /* BIC */
3835 gen_op_bicl_T0_T1();
3836 break;
3837 case 2: /* VORR */
3838 gen_op_orl_T0_T1();
3839 break;
3840 case 3: /* VORN */
3841 gen_op_notl_T1();
3842 gen_op_orl_T0_T1();
3843 break;
3844 case 4: /* VEOR */
3845 gen_op_xorl_T0_T1();
3846 break;
3847 case 5: /* VBSL */
3848 NEON_GET_REG(T2, rd, pass);
3849 gen_op_neon_bsl();
3850 break;
3851 case 6: /* VBIT */
3852 NEON_GET_REG(T2, rd, pass);
3853 gen_op_neon_bit();
3854 break;
3855 case 7: /* VBIF */
3856 NEON_GET_REG(T2, rd, pass);
3857 gen_op_neon_bif();
3858 break;
2c0262af
FB
3859 }
3860 break;
9ee6e8bb
PB
3861 case 4: /* VHSUB */
3862 GEN_NEON_INTEGER_OP(hsub);
3863 break;
3864 case 5: /* VQSUB */
3865 switch ((size << 1) | u) {
3866 case 0: gen_op_neon_qsub_s8(); break;
3867 case 1: gen_op_neon_qsub_u8(); break;
3868 case 2: gen_op_neon_qsub_s16(); break;
3869 case 3: gen_op_neon_qsub_u16(); break;
3870 case 4: gen_op_subl_T0_T1_saturate(); break;
3871 case 5: gen_op_subl_T0_T1_usaturate(); break;
3872 default: abort();
2c0262af
FB
3873 }
3874 break;
9ee6e8bb
PB
3875 case 6: /* VCGT */
3876 GEN_NEON_INTEGER_OP(cgt);
3877 break;
3878 case 7: /* VCGE */
3879 GEN_NEON_INTEGER_OP(cge);
3880 break;
3881 case 8: /* VSHL */
3882 switch ((size << 1) | u) {
3883 case 0: gen_op_neon_shl_s8(); break;
3884 case 1: gen_op_neon_shl_u8(); break;
3885 case 2: gen_op_neon_shl_s16(); break;
3886 case 3: gen_op_neon_shl_u16(); break;
3887 case 4: gen_op_neon_shl_s32(); break;
3888 case 5: gen_op_neon_shl_u32(); break;
3889#if 0
3890 /* ??? Implementing these is tricky because the vector ops work
3891 on 32-bit pieces. */
3892 case 6: gen_op_neon_shl_s64(); break;
3893 case 7: gen_op_neon_shl_u64(); break;
3894#else
3895 case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
3896#endif
2c0262af
FB
3897 }
3898 break;
9ee6e8bb
PB
3899 case 9: /* VQSHL */
3900 switch ((size << 1) | u) {
3901 case 0: gen_op_neon_qshl_s8(); break;
3902 case 1: gen_op_neon_qshl_u8(); break;
3903 case 2: gen_op_neon_qshl_s16(); break;
3904 case 3: gen_op_neon_qshl_u16(); break;
3905 case 4: gen_op_neon_qshl_s32(); break;
3906 case 5: gen_op_neon_qshl_u32(); break;
3907#if 0
3908 /* ??? Implementing these is tricky because the vector ops work
3909 on 32-bit pieces. */
3910 case 6: gen_op_neon_qshl_s64(); break;
3911 case 7: gen_op_neon_qshl_u64(); break;
3912#else
3913 case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
3914#endif
2c0262af
FB
3915 }
3916 break;
9ee6e8bb
PB
3917 case 10: /* VRSHL */
3918 switch ((size << 1) | u) {
3919 case 0: gen_op_neon_rshl_s8(); break;
3920 case 1: gen_op_neon_rshl_u8(); break;
3921 case 2: gen_op_neon_rshl_s16(); break;
3922 case 3: gen_op_neon_rshl_u16(); break;
3923 case 4: gen_op_neon_rshl_s32(); break;
3924 case 5: gen_op_neon_rshl_u32(); break;
3925#if 0
3926 /* ??? Implementing these is tricky because the vector ops work
3927 on 32-bit pieces. */
3928 case 6: gen_op_neon_rshl_s64(); break;
3929 case 7: gen_op_neon_rshl_u64(); break;
3930#else
3931 case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
3932#endif
3933 }
2c0262af 3934 break;
9ee6e8bb
PB
3935 case 11: /* VQRSHL */
3936 switch ((size << 1) | u) {
3937 case 0: gen_op_neon_qrshl_s8(); break;
3938 case 1: gen_op_neon_qrshl_u8(); break;
3939 case 2: gen_op_neon_qrshl_s16(); break;
3940 case 3: gen_op_neon_qrshl_u16(); break;
3941 case 4: gen_op_neon_qrshl_s32(); break;
3942 case 5: gen_op_neon_qrshl_u32(); break;
3943#if 0
3944 /* ??? Implementing these is tricky because the vector ops work
3945 on 32-bit pieces. */
3946 case 6: gen_op_neon_qrshl_s64(); break;
3947 case 7: gen_op_neon_qrshl_u64(); break;
3948#else
3949 case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
3950#endif
3951 }
3952 break;
3953 case 12: /* VMAX */
3954 GEN_NEON_INTEGER_OP(max);
3955 break;
3956 case 13: /* VMIN */
3957 GEN_NEON_INTEGER_OP(min);
3958 break;
3959 case 14: /* VABD */
3960 GEN_NEON_INTEGER_OP(abd);
3961 break;
3962 case 15: /* VABA */
3963 GEN_NEON_INTEGER_OP(abd);
3964 NEON_GET_REG(T1, rd, pass);
3965 gen_neon_add(size);
3966 break;
3967 case 16:
3968 if (!u) { /* VADD */
3969 if (gen_neon_add(size))
3970 return 1;
3971 } else { /* VSUB */
3972 switch (size) {
3973 case 0: gen_op_neon_sub_u8(); break;
3974 case 1: gen_op_neon_sub_u16(); break;
3975 case 2: gen_op_subl_T0_T1(); break;
3976 default: return 1;
3977 }
3978 }
3979 break;
3980 case 17:
3981 if (!u) { /* VTST */
3982 switch (size) {
3983 case 0: gen_op_neon_tst_u8(); break;
3984 case 1: gen_op_neon_tst_u16(); break;
3985 case 2: gen_op_neon_tst_u32(); break;
3986 default: return 1;
3987 }
3988 } else { /* VCEQ */
3989 switch (size) {
3990 case 0: gen_op_neon_ceq_u8(); break;
3991 case 1: gen_op_neon_ceq_u16(); break;
3992 case 2: gen_op_neon_ceq_u32(); break;
3993 default: return 1;
3994 }
3995 }
3996 break;
3997 case 18: /* Multiply. */
3998 switch (size) {
3999 case 0: gen_op_neon_mul_u8(); break;
4000 case 1: gen_op_neon_mul_u16(); break;
4001 case 2: gen_op_mul_T0_T1(); break;
4002 default: return 1;
4003 }
4004 NEON_GET_REG(T1, rd, pass);
4005 if (u) { /* VMLS */
4006 switch (size) {
4007 case 0: gen_op_neon_rsb_u8(); break;
4008 case 1: gen_op_neon_rsb_u16(); break;
4009 case 2: gen_op_rsbl_T0_T1(); break;
4010 default: return 1;
4011 }
4012 } else { /* VMLA */
4013 gen_neon_add(size);
4014 }
4015 break;
4016 case 19: /* VMUL */
4017 if (u) { /* polynomial */
4018 gen_op_neon_mul_p8();
4019 } else { /* Integer */
4020 switch (size) {
4021 case 0: gen_op_neon_mul_u8(); break;
4022 case 1: gen_op_neon_mul_u16(); break;
4023 case 2: gen_op_mul_T0_T1(); break;
4024 default: return 1;
4025 }
4026 }
4027 break;
4028 case 20: /* VPMAX */
4029 GEN_NEON_INTEGER_OP(pmax);
4030 break;
4031 case 21: /* VPMIN */
4032 GEN_NEON_INTEGER_OP(pmin);
4033 break;
4034 case 22: /* Hultiply high. */
4035 if (!u) { /* VQDMULH */
4036 switch (size) {
4037 case 1: gen_op_neon_qdmulh_s16(); break;
4038 case 2: gen_op_neon_qdmulh_s32(); break;
4039 default: return 1;
4040 }
4041 } else { /* VQRDHMUL */
4042 switch (size) {
4043 case 1: gen_op_neon_qrdmulh_s16(); break;
4044 case 2: gen_op_neon_qrdmulh_s32(); break;
4045 default: return 1;
4046 }
4047 }
4048 break;
4049 case 23: /* VPADD */
4050 if (u)
4051 return 1;
4052 switch (size) {
4053 case 0: gen_op_neon_padd_u8(); break;
4054 case 1: gen_op_neon_padd_u16(); break;
4055 case 2: gen_op_addl_T0_T1(); break;
4056 default: return 1;
4057 }
4058 break;
4059 case 26: /* Floating point arithnetic. */
4060 switch ((u << 2) | size) {
4061 case 0: /* VADD */
4062 gen_op_neon_add_f32();
4063 break;
4064 case 2: /* VSUB */
4065 gen_op_neon_sub_f32();
4066 break;
4067 case 4: /* VPADD */
4068 gen_op_neon_add_f32();
4069 break;
4070 case 6: /* VABD */
4071 gen_op_neon_abd_f32();
4072 break;
4073 default:
4074 return 1;
4075 }
4076 break;
4077 case 27: /* Float multiply. */
4078 gen_op_neon_mul_f32();
4079 if (!u) {
4080 NEON_GET_REG(T1, rd, pass);
4081 if (size == 0) {
4082 gen_op_neon_add_f32();
4083 } else {
4084 gen_op_neon_rsb_f32();
4085 }
4086 }
4087 break;
4088 case 28: /* Float compare. */
4089 if (!u) {
4090 gen_op_neon_ceq_f32();
b5ff1b31 4091 } else {
9ee6e8bb
PB
4092 if (size == 0)
4093 gen_op_neon_cge_f32();
4094 else
4095 gen_op_neon_cgt_f32();
b5ff1b31 4096 }
2c0262af 4097 break;
9ee6e8bb
PB
4098 case 29: /* Float compare absolute. */
4099 if (!u)
4100 return 1;
4101 if (size == 0)
4102 gen_op_neon_acge_f32();
4103 else
4104 gen_op_neon_acgt_f32();
2c0262af 4105 break;
9ee6e8bb
PB
4106 case 30: /* Float min/max. */
4107 if (size == 0)
4108 gen_op_neon_max_f32();
4109 else
4110 gen_op_neon_min_f32();
4111 break;
4112 case 31:
4113 if (size == 0)
4373f3ce 4114 gen_helper_recps_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
9ee6e8bb 4115 else
4373f3ce 4116 gen_helper_rsqrts_f32(cpu_T[0], cpu_T[0], cpu_T[1], cpu_env);
2c0262af 4117 break;
9ee6e8bb
PB
4118 default:
4119 abort();
2c0262af 4120 }
9ee6e8bb
PB
4121 /* Save the result. For elementwise operations we can put it
4122 straight into the destination register. For pairwise operations
4123 we have to be careful to avoid clobbering the source operands. */
4124 if (pairwise && rd == rm) {
4125 gen_neon_movl_scratch_T0(pass);
4126 } else {
4127 NEON_SET_REG(T0, rd, pass);
4128 }
4129
4130 } /* for pass */
4131 if (pairwise && rd == rm) {
4132 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4133 gen_neon_movl_T0_scratch(pass);
4134 NEON_SET_REG(T0, rd, pass);
4135 }
4136 }
4137 } else if (insn & (1 << 4)) {
4138 if ((insn & 0x00380080) != 0) {
4139 /* Two registers and shift. */
4140 op = (insn >> 8) & 0xf;
4141 if (insn & (1 << 7)) {
4142 /* 64-bit shift. */
4143 size = 3;
4144 } else {
4145 size = 2;
4146 while ((insn & (1 << (size + 19))) == 0)
4147 size--;
4148 }
4149 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
4150 /* To avoid excessive dumplication of ops we implement shift
4151 by immediate using the variable shift operations. */
4152 if (op < 8) {
4153 /* Shift by immediate:
4154 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
4155 /* Right shifts are encoded as N - shift, where N is the
4156 element size in bits. */
4157 if (op <= 4)
4158 shift = shift - (1 << (size + 3));
4159 else
4160 shift++;
4161 if (size == 3) {
4162 count = q + 1;
4163 } else {
4164 count = q ? 4: 2;
4165 }
4166 switch (size) {
4167 case 0:
4168 imm = (uint8_t) shift;
4169 imm |= imm << 8;
4170 imm |= imm << 16;
4171 break;
4172 case 1:
4173 imm = (uint16_t) shift;
4174 imm |= imm << 16;
4175 break;
4176 case 2:
4177 case 3:
4178 imm = shift;
4179 break;
4180 default:
4181 abort();
4182 }
4183
4184 for (pass = 0; pass < count; pass++) {
4185 if (size < 3) {
4186 /* Operands in T0 and T1. */
4187 gen_op_movl_T1_im(imm);
4188 NEON_GET_REG(T0, rm, pass);
2c0262af 4189 } else {
9ee6e8bb
PB
4190 /* Operands in {T0, T1} and env->vfp.scratch. */
4191 gen_op_movl_T0_im(imm);
4192 gen_neon_movl_scratch_T0(0);
4193 gen_op_movl_T0_im((int32_t)imm >> 31);
4194 gen_neon_movl_scratch_T0(1);
4195 NEON_GET_REG(T0, rm, pass * 2);
4196 NEON_GET_REG(T1, rm, pass * 2 + 1);
4197 }
4198
4199 if (gen_neon_shift_im[op][u][size] == NULL)
4200 return 1;
4201 gen_neon_shift_im[op][u][size]();
4202
4203 if (op == 1 || op == 3) {
4204 /* Accumulate. */
4205 if (size == 3) {
4206 gen_neon_movl_scratch_T0(0);
4207 gen_neon_movl_scratch_T1(1);
4208 NEON_GET_REG(T0, rd, pass * 2);
4209 NEON_GET_REG(T1, rd, pass * 2 + 1);
4210 gen_op_neon_addl_u64();
4211 } else {
4212 NEON_GET_REG(T1, rd, pass);
4213 gen_neon_add(size);
99c475ab 4214 }
9ee6e8bb
PB
4215 } else if (op == 4 || (op == 5 && u)) {
4216 /* Insert */
4217 if (size == 3) {
4218 cpu_abort(env, "VS[LR]I.64 not implemented");
4219 }
4220 switch (size) {
4221 case 0:
4222 if (op == 4)
4223 imm = 0xff >> -shift;
4224 else
4225 imm = (uint8_t)(0xff << shift);
4226 imm |= imm << 8;
4227 imm |= imm << 16;
4228 break;
4229 case 1:
4230 if (op == 4)
4231 imm = 0xffff >> -shift;
4232 else
4233 imm = (uint16_t)(0xffff << shift);
4234 imm |= imm << 16;
4235 break;
4236 case 2:
4237 if (op == 4)
4238 imm = 0xffffffffu >> -shift;
4239 else
4240 imm = 0xffffffffu << shift;
4241 break;
4242 default:
4243 abort();
4244 }
4245 NEON_GET_REG(T1, rd, pass);
4246 gen_op_movl_T2_im(imm);
4247 gen_op_neon_bsl();
2c0262af 4248 }
9ee6e8bb
PB
4249 if (size == 3) {
4250 NEON_SET_REG(T0, rd, pass * 2);
4251 NEON_SET_REG(T1, rd, pass * 2 + 1);
4252 } else {
4253 NEON_SET_REG(T0, rd, pass);
4254 }
4255 } /* for pass */
4256 } else if (op < 10) {
4257 /* Shift by immedaiate and narrow:
4258 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4259 shift = shift - (1 << (size + 3));
4260 size++;
4261 if (size == 3) {
4262 count = q + 1;
2c0262af 4263 } else {
9ee6e8bb
PB
4264 count = q ? 4: 2;
4265 }
4266 switch (size) {
4267 case 1:
4268 imm = (uint16_t) shift;
4269 imm |= imm << 16;
4270 break;
4271 case 2:
4272 case 3:
4273 imm = shift;
4274 break;
4275 default:
4276 abort();
4277 }
4278
4279 /* Processing MSB first means we need to do less shuffling at
4280 the end. */
4281 for (pass = count - 1; pass >= 0; pass--) {
4282 /* Avoid clobbering the second operand before it has been
4283 written. */
4284 n = pass;
4285 if (rd == rm)
4286 n ^= (count - 1);
4287 else
4288 n = pass;
4289
4290 if (size < 3) {
4291 /* Operands in T0 and T1. */
4292 gen_op_movl_T1_im(imm);
4293 NEON_GET_REG(T0, rm, n);
2c0262af 4294 } else {
9ee6e8bb
PB
4295 /* Operands in {T0, T1} and env->vfp.scratch. */
4296 gen_op_movl_T0_im(imm);
4297 gen_neon_movl_scratch_T0(0);
4298 gen_op_movl_T0_im((int32_t)imm >> 31);
4299 gen_neon_movl_scratch_T0(1);
4300 NEON_GET_REG(T0, rm, n * 2);
4301 NEON_GET_REG(T0, rm, n * 2 + 1);
4302 }
3b46e624 4303
9ee6e8bb
PB
4304 gen_neon_shift_im_narrow[q][u][size - 1]();
4305
4306 if (size < 3 && (pass & 1) == 0) {
4307 gen_neon_movl_scratch_T0(0);
4308 } else {
4309 uint32_t offset;
4310
4311 if (size < 3)
4312 gen_neon_movl_T1_scratch(0);
4313
4314 if (op == 8 && !u) {
4315 gen_neon_narrow[size - 1]();
99c475ab 4316 } else {
9ee6e8bb
PB
4317 if (op == 8)
4318 gen_neon_narrow_sats[size - 2]();
4319 else
4320 gen_neon_narrow_satu[size - 1]();
99c475ab 4321 }
9ee6e8bb
PB
4322 if (size == 3)
4323 offset = neon_reg_offset(rd, n);
4324 else
4325 offset = neon_reg_offset(rd, n >> 1);
4326 gen_op_neon_setreg_T0(offset);
4327 }
4328 } /* for pass */
4329 } else if (op == 10) {
4330 /* VSHLL */
4331 if (q)
4332 return 1;
4333 for (pass = 0; pass < 2; pass++) {
4334 /* Avoid clobbering the input operand. */
4335 if (rd == rm)
4336 n = 1 - pass;
4337 else
4338 n = pass;
4339
4340 NEON_GET_REG(T0, rm, n);
4341 GEN_NEON_INTEGER_OP(widen);
4342 if (shift != 0) {
4343 /* The shift is less than the width of the source
4344 type, so in some cases we can just
4345 shift the whole register. */
4346 if (size == 1 || (size == 0 && u)) {
4347 gen_op_shll_T0_im(shift);
4348 gen_op_shll_T1_im(shift);
4349 } else {
4350 switch (size) {
4351 case 0: gen_op_neon_shll_u16(shift); break;
4352 case 2: gen_op_neon_shll_u64(shift); break;
4353 default: abort();
4354 }
4355 }
4356 }
4357 NEON_SET_REG(T0, rd, n * 2);
4358 NEON_SET_REG(T1, rd, n * 2 + 1);
4359 }
4360 } else if (op == 15 || op == 16) {
4361 /* VCVT fixed-point. */
4362 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373f3ce 4363 tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
9ee6e8bb
PB
4364 if (op & 1) {
4365 if (u)
4373f3ce 4366 gen_vfp_ulto(0, shift);
9ee6e8bb 4367 else
4373f3ce 4368 gen_vfp_slto(0, shift);
9ee6e8bb
PB
4369 } else {
4370 if (u)
4373f3ce 4371 gen_vfp_toul(0, shift);
9ee6e8bb 4372 else
4373f3ce 4373 gen_vfp_tosl(0, shift);
2c0262af 4374 }
4373f3ce 4375 tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
2c0262af
FB
4376 }
4377 } else {
9ee6e8bb
PB
4378 return 1;
4379 }
4380 } else { /* (insn & 0x00380080) == 0 */
4381 int invert;
4382
4383 op = (insn >> 8) & 0xf;
4384 /* One register and immediate. */
4385 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4386 invert = (insn & (1 << 5)) != 0;
4387 switch (op) {
4388 case 0: case 1:
4389 /* no-op */
4390 break;
4391 case 2: case 3:
4392 imm <<= 8;
4393 break;
4394 case 4: case 5:
4395 imm <<= 16;
4396 break;
4397 case 6: case 7:
4398 imm <<= 24;
4399 break;
4400 case 8: case 9:
4401 imm |= imm << 16;
4402 break;
4403 case 10: case 11:
4404 imm = (imm << 8) | (imm << 24);
4405 break;
4406 case 12:
4407 imm = (imm < 8) | 0xff;
4408 break;
4409 case 13:
4410 imm = (imm << 16) | 0xffff;
4411 break;
4412 case 14:
4413 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4414 if (invert)
4415 imm = ~imm;
4416 break;
4417 case 15:
4418 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4419 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4420 break;
4421 }
4422 if (invert)
4423 imm = ~imm;
4424
4425 if (op != 14 || !invert)
4426 gen_op_movl_T1_im(imm);
4427
4428 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4429 if (op & 1 && op < 12) {
4430 NEON_GET_REG(T0, rd, pass);
4431 if (invert) {
4432 /* The immediate value has already been inverted, so
4433 BIC becomes AND. */
4434 gen_op_andl_T0_T1();
4435 } else {
4436 gen_op_orl_T0_T1();
4437 }
4438 NEON_SET_REG(T0, rd, pass);
4439 } else {
4440 if (op == 14 && invert) {
4441 uint32_t tmp;
4442 tmp = 0;
4443 for (n = 0; n < 4; n++) {
4444 if (imm & (1 << (n + (pass & 1) * 4)))
4445 tmp |= 0xff << (n * 8);
4446 }
4447 gen_op_movl_T1_im(tmp);
4448 }
4449 /* VMOV, VMVN. */
4450 NEON_SET_REG(T1, rd, pass);
4451 }
4452 }
4453 }
4454 } else { /* (insn & 0x00800010 == 0x00800010) */
4455 if (size != 3) {
4456 op = (insn >> 8) & 0xf;
4457 if ((insn & (1 << 6)) == 0) {
4458 /* Three registers of different lengths. */
4459 int src1_wide;
4460 int src2_wide;
4461 int prewiden;
4462 /* prewiden, src1_wide, src2_wide */
4463 static const int neon_3reg_wide[16][3] = {
4464 {1, 0, 0}, /* VADDL */
4465 {1, 1, 0}, /* VADDW */
4466 {1, 0, 0}, /* VSUBL */
4467 {1, 1, 0}, /* VSUBW */
4468 {0, 1, 1}, /* VADDHN */
4469 {0, 0, 0}, /* VABAL */
4470 {0, 1, 1}, /* VSUBHN */
4471 {0, 0, 0}, /* VABDL */
4472 {0, 0, 0}, /* VMLAL */
4473 {0, 0, 0}, /* VQDMLAL */
4474 {0, 0, 0}, /* VMLSL */
4475 {0, 0, 0}, /* VQDMLSL */
4476 {0, 0, 0}, /* Integer VMULL */
4477 {0, 0, 0}, /* VQDMULL */
4478 {0, 0, 0} /* Polynomial VMULL */
4479 };
4480
4481 prewiden = neon_3reg_wide[op][0];
4482 src1_wide = neon_3reg_wide[op][1];
4483 src2_wide = neon_3reg_wide[op][2];
4484
4485 /* Avoid overlapping operands. Wide source operands are
4486 always aligned so will never overlap with wide
4487 destinations in problematic ways. */
4488 if (rd == rm) {
4489 NEON_GET_REG(T2, rm, 1);
4490 } else if (rd == rn) {
4491 NEON_GET_REG(T2, rn, 1);
4492 }
4493 for (pass = 0; pass < 2; pass++) {
4494 /* Load the second operand into env->vfp.scratch.
4495 Also widen narrow operands. */
4496 if (pass == 1 && rd == rm) {
4497 if (prewiden) {
4498 gen_op_movl_T0_T2();
4499 } else {
4500 gen_op_movl_T1_T2();
4501 }
4502 } else {
4503 if (src2_wide) {
4504 NEON_GET_REG(T0, rm, pass * 2);
4505 NEON_GET_REG(T1, rm, pass * 2 + 1);
4506 } else {
4507 if (prewiden) {
4508 NEON_GET_REG(T0, rm, pass);
4509 } else {
4510 NEON_GET_REG(T1, rm, pass);
4511 }
4512 }
4513 }
4514 if (prewiden && !src2_wide) {
4515 GEN_NEON_INTEGER_OP(widen);
4516 }
4517 if (prewiden || src2_wide) {
4518 gen_neon_movl_scratch_T0(0);
4519 gen_neon_movl_scratch_T1(1);
4520 }
4521
4522 /* Load the first operand. */
4523 if (pass == 1 && rd == rn) {
4524 gen_op_movl_T0_T2();
4525 } else {
4526 if (src1_wide) {
4527 NEON_GET_REG(T0, rn, pass * 2);
4528 NEON_GET_REG(T1, rn, pass * 2 + 1);
4529 } else {
4530 NEON_GET_REG(T0, rn, pass);
4531 }
4532 }
4533 if (prewiden && !src1_wide) {
4534 GEN_NEON_INTEGER_OP(widen);
4535 }
4536 switch (op) {
4537 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4538 switch (size) {
4539 case 0: gen_op_neon_addl_u16(); break;
4540 case 1: gen_op_neon_addl_u32(); break;
4541 case 2: gen_op_neon_addl_u64(); break;
4542 default: abort();
4543 }
4544 break;
4545 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
4546 switch (size) {
4547 case 0: gen_op_neon_subl_u16(); break;
4548 case 1: gen_op_neon_subl_u32(); break;
4549 case 2: gen_op_neon_subl_u64(); break;
4550 default: abort();
4551 }
4552 break;
4553 case 5: case 7: /* VABAL, VABDL */
4554 switch ((size << 1) | u) {
4555 case 0: gen_op_neon_abdl_s16(); break;
4556 case 1: gen_op_neon_abdl_u16(); break;
4557 case 2: gen_op_neon_abdl_s32(); break;
4558 case 3: gen_op_neon_abdl_u32(); break;
4559 case 4: gen_op_neon_abdl_s64(); break;
4560 case 5: gen_op_neon_abdl_u64(); break;
4561 default: abort();
4562 }
4563 break;
4564 case 8: case 9: case 10: case 11: case 12: case 13:
4565 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4566 switch ((size << 1) | u) {
4567 case 0: gen_op_neon_mull_s8(); break;
4568 case 1: gen_op_neon_mull_u8(); break;
4569 case 2: gen_op_neon_mull_s16(); break;
4570 case 3: gen_op_neon_mull_u16(); break;
4571 case 4: gen_op_imull_T0_T1(); break;
4572 case 5: gen_op_mull_T0_T1(); break;
4573 default: abort();
4574 }
4575 break;
4576 case 14: /* Polynomial VMULL */
4577 cpu_abort(env, "Polynomial VMULL not implemented");
4578
4579 default: /* 15 is RESERVED. */
4580 return 1;
4581 }
4582 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4583 /* Accumulate. */
4584 if (op == 10 || op == 11) {
4585 switch (size) {
4586 case 0: gen_op_neon_negl_u16(); break;
4587 case 1: gen_op_neon_negl_u32(); break;
4588 case 2: gen_op_neon_negl_u64(); break;
4589 default: abort();
4590 }
4591 }
4592
4593 gen_neon_movl_scratch_T0(0);
4594 gen_neon_movl_scratch_T1(1);
4595
4596 if (op != 13) {
4597 NEON_GET_REG(T0, rd, pass * 2);
4598 NEON_GET_REG(T1, rd, pass * 2 + 1);
4599 }
4600
4601 switch (op) {
4602 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4603 switch (size) {
4604 case 0: gen_op_neon_addl_u16(); break;
4605 case 1: gen_op_neon_addl_u32(); break;
4606 case 2: gen_op_neon_addl_u64(); break;
4607 default: abort();
4608 }
4609 break;
4610 case 9: case 11: /* VQDMLAL, VQDMLSL */
4611 switch (size) {
4612 case 1: gen_op_neon_addl_saturate_s32(); break;
4613 case 2: gen_op_neon_addl_saturate_s64(); break;
4614 default: abort();
4615 }
4616 /* Fall through. */
4617 case 13: /* VQDMULL */
4618 switch (size) {
4619 case 1: gen_op_neon_addl_saturate_s32(); break;
4620 case 2: gen_op_neon_addl_saturate_s64(); break;
4621 default: abort();
4622 }
4623 break;
4624 default:
4625 abort();
4626 }
4627 NEON_SET_REG(T0, rd, pass * 2);
4628 NEON_SET_REG(T1, rd, pass * 2 + 1);
4629 } else if (op == 4 || op == 6) {
4630 /* Narrowing operation. */
4631 if (u) {
4632 switch (size) {
4633 case 0: gen_op_neon_narrow_high_u8(); break;
4634 case 1: gen_op_neon_narrow_high_u16(); break;
4635 case 2: gen_op_movl_T0_T1(); break;
4636 default: abort();
4637 }
4638 } else {
4639 switch (size) {
4640 case 0: gen_op_neon_narrow_high_round_u8(); break;
4641 case 1: gen_op_neon_narrow_high_round_u16(); break;
4642 case 2: gen_op_neon_narrow_high_round_u32(); break;
4643 default: abort();
4644 }
4645 }
4646 NEON_SET_REG(T0, rd, pass);
4647 } else {
4648 /* Write back the result. */
4649 NEON_SET_REG(T0, rd, pass * 2);
4650 NEON_SET_REG(T1, rd, pass * 2 + 1);
4651 }
4652 }
4653 } else {
4654 /* Two registers and a scalar. */
4655 switch (op) {
4656 case 0: /* Integer VMLA scalar */
4657 case 1: /* Float VMLA scalar */
4658 case 4: /* Integer VMLS scalar */
4659 case 5: /* Floating point VMLS scalar */
4660 case 8: /* Integer VMUL scalar */
4661 case 9: /* Floating point VMUL scalar */
4662 case 12: /* VQDMULH scalar */
4663 case 13: /* VQRDMULH scalar */
4664 gen_neon_get_scalar(size, rm);
4665 gen_op_movl_T2_T0();
4666 for (pass = 0; pass < (u ? 4 : 2); pass++) {
4667 if (pass != 0)
4668 gen_op_movl_T0_T2();
4669 NEON_GET_REG(T1, rn, pass);
4670 if (op == 12) {
4671 if (size == 1) {
4672 gen_op_neon_qdmulh_s16();
4673 } else {
4674 gen_op_neon_qdmulh_s32();
4675 }
4676 } else if (op == 13) {
4677 if (size == 1) {
4678 gen_op_neon_qrdmulh_s16();
4679 } else {
4680 gen_op_neon_qrdmulh_s32();
4681 }
4682 } else if (op & 1) {
4683 gen_op_neon_mul_f32();
4684 } else {
4685 switch (size) {
4686 case 0: gen_op_neon_mul_u8(); break;
4687 case 1: gen_op_neon_mul_u16(); break;
4688 case 2: gen_op_mul_T0_T1(); break;
4689 default: return 1;
4690 }
4691 }
4692 if (op < 8) {
4693 /* Accumulate. */
4694 NEON_GET_REG(T1, rd, pass);
4695 switch (op) {
4696 case 0:
4697 gen_neon_add(size);
4698 break;
4699 case 1:
4700 gen_op_neon_add_f32();
4701 break;
4702 case 4:
4703 switch (size) {
4704 case 0: gen_op_neon_rsb_u8(); break;
4705 case 1: gen_op_neon_rsb_u16(); break;
4706 case 2: gen_op_rsbl_T0_T1(); break;
4707 default: return 1;
4708 }
4709 break;
4710 case 5:
4711 gen_op_neon_rsb_f32();
4712 break;
4713 default:
4714 abort();
4715 }
4716 }
4717 NEON_SET_REG(T0, rd, pass);
4718 }
4719 break;
4720 case 2: /* VMLAL sclar */
4721 case 3: /* VQDMLAL scalar */
4722 case 6: /* VMLSL scalar */
4723 case 7: /* VQDMLSL scalar */
4724 case 10: /* VMULL scalar */
4725 case 11: /* VQDMULL scalar */
4726 if (rd == rn) {
4727 /* Save overlapping operands before they are
4728 clobbered. */
4729 NEON_GET_REG(T0, rn, 1);
4730 gen_neon_movl_scratch_T0(2);
4731 }
4732 gen_neon_get_scalar(size, rm);
4733 gen_op_movl_T2_T0();
4734 for (pass = 0; pass < 2; pass++) {
4735 if (pass != 0) {
4736 gen_op_movl_T0_T2();
4737 }
4738 if (pass != 0 && rd == rn) {
4739 gen_neon_movl_T1_scratch(2);
4740 } else {
4741 NEON_GET_REG(T1, rn, pass);
4742 }
4743 switch ((size << 1) | u) {
4744 case 0: gen_op_neon_mull_s8(); break;
4745 case 1: gen_op_neon_mull_u8(); break;
4746 case 2: gen_op_neon_mull_s16(); break;
4747 case 3: gen_op_neon_mull_u16(); break;
4748 case 4: gen_op_imull_T0_T1(); break;
4749 case 5: gen_op_mull_T0_T1(); break;
4750 default: abort();
4751 }
4752 if (op == 6 || op == 7) {
4753 switch (size) {
4754 case 0: gen_op_neon_negl_u16(); break;
4755 case 1: gen_op_neon_negl_u32(); break;
4756 case 2: gen_op_neon_negl_u64(); break;
4757 default: abort();
4758 }
4759 }
4760 gen_neon_movl_scratch_T0(0);
4761 gen_neon_movl_scratch_T1(1);
4762 NEON_GET_REG(T0, rd, pass * 2);
4763 NEON_GET_REG(T1, rd, pass * 2 + 1);
4764 switch (op) {
4765 case 2: case 6:
4766 switch (size) {
4767 case 0: gen_op_neon_addl_u16(); break;
4768 case 1: gen_op_neon_addl_u32(); break;
4769 case 2: gen_op_neon_addl_u64(); break;
4770 default: abort();
4771 }
4772 break;
4773 case 3: case 7:
4774 switch (size) {
4775 case 1:
4776 gen_op_neon_addl_saturate_s32();
4777 gen_op_neon_addl_saturate_s32();
4778 break;
4779 case 2:
4780 gen_op_neon_addl_saturate_s64();
4781 gen_op_neon_addl_saturate_s64();
4782 break;
4783 default: abort();
4784 }
4785 break;
4786 case 10:
4787 /* no-op */
4788 break;
4789 case 11:
4790 switch (size) {
4791 case 1: gen_op_neon_addl_saturate_s32(); break;
4792 case 2: gen_op_neon_addl_saturate_s64(); break;
4793 default: abort();
4794 }
4795 break;
4796 default:
4797 abort();
4798 }
4799 NEON_SET_REG(T0, rd, pass * 2);
4800 NEON_SET_REG(T1, rd, pass * 2 + 1);
4801 }
4802 break;
4803 default: /* 14 and 15 are RESERVED */
4804 return 1;
4805 }
4806 }
4807 } else { /* size == 3 */
4808 if (!u) {
4809 /* Extract. */
4810 int reg;
4811 imm = (insn >> 8) & 0xf;
4812 reg = rn;
4813 count = q ? 4 : 2;
4814 n = imm >> 2;
4815 NEON_GET_REG(T0, reg, n);
4816 for (pass = 0; pass < count; pass++) {
4817 n++;
4818 if (n > count) {
4819 reg = rm;
4820 n -= count;
4821 }
4822 if (imm & 3) {
4823 NEON_GET_REG(T1, reg, n);
4824 gen_op_neon_extract((insn << 3) & 0x1f);
4825 }
4826 /* ??? This is broken if rd and rm overlap */
4827 NEON_SET_REG(T0, rd, pass);
4828 if (imm & 3) {
4829 gen_op_movl_T0_T1();
4830 } else {
4831 NEON_GET_REG(T0, reg, n);
4832 }
4833 }
4834 } else if ((insn & (1 << 11)) == 0) {
4835 /* Two register misc. */
4836 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
4837 size = (insn >> 18) & 3;
4838 switch (op) {
4839 case 0: /* VREV64 */
4840 if (size == 3)
4841 return 1;
4842 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4843 NEON_GET_REG(T0, rm, pass * 2);
4844 NEON_GET_REG(T1, rm, pass * 2 + 1);
4845 switch (size) {
b0109805 4846 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 4847 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
4848 case 2: /* no-op */ break;
4849 default: abort();
4850 }
4851 NEON_SET_REG(T0, rd, pass * 2 + 1);
4852 if (size == 2) {
4853 NEON_SET_REG(T1, rd, pass * 2);
4854 } else {
4855 gen_op_movl_T0_T1();
4856 switch (size) {
b0109805 4857 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 4858 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
4859 default: abort();
4860 }
4861 NEON_SET_REG(T0, rd, pass * 2);
4862 }
4863 }
4864 break;
4865 case 4: case 5: /* VPADDL */
4866 case 12: case 13: /* VPADAL */
4867 if (size < 2)
4868 goto elementwise;
4869 if (size == 3)
4870 return 1;
4871 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4872 NEON_GET_REG(T0, rm, pass * 2);
4873 NEON_GET_REG(T1, rm, pass * 2 + 1);
4874 if (op & 1)
4875 gen_op_neon_paddl_u32();
4876 else
4877 gen_op_neon_paddl_s32();
4878 if (op >= 12) {
4879 /* Accumulate. */
4880 gen_neon_movl_scratch_T0(0);
4881 gen_neon_movl_scratch_T1(1);
4882
4883 NEON_GET_REG(T0, rd, pass * 2);
4884 NEON_GET_REG(T1, rd, pass * 2 + 1);
4885 gen_op_neon_addl_u64();
4886 }
4887 NEON_SET_REG(T0, rd, pass * 2);
4888 NEON_SET_REG(T1, rd, pass * 2 + 1);
4889 }
4890 break;
4891 case 33: /* VTRN */
4892 if (size == 2) {
4893 for (n = 0; n < (q ? 4 : 2); n += 2) {
4894 NEON_GET_REG(T0, rm, n);
4895 NEON_GET_REG(T1, rd, n + 1);
4896 NEON_SET_REG(T1, rm, n);
4897 NEON_SET_REG(T0, rd, n + 1);
4898 }
4899 } else {
4900 goto elementwise;
4901 }
4902 break;
4903 case 34: /* VUZP */
4904 /* Reg Before After
4905 Rd A3 A2 A1 A0 B2 B0 A2 A0
4906 Rm B3 B2 B1 B0 B3 B1 A3 A1
4907 */
4908 if (size == 3)
4909 return 1;
4910 gen_neon_unzip(rd, q, 0, size);
4911 gen_neon_unzip(rm, q, 4, size);
4912 if (q) {
4913 static int unzip_order_q[8] =
4914 {0, 2, 4, 6, 1, 3, 5, 7};
4915 for (n = 0; n < 8; n++) {
4916 int reg = (n < 4) ? rd : rm;
4917 gen_neon_movl_T0_scratch(unzip_order_q[n]);
4918 NEON_SET_REG(T0, reg, n % 4);
4919 }
4920 } else {
4921 static int unzip_order[4] =
4922 {0, 4, 1, 5};
4923 for (n = 0; n < 4; n++) {
4924 int reg = (n < 2) ? rd : rm;
4925 gen_neon_movl_T0_scratch(unzip_order[n]);
4926 NEON_SET_REG(T0, reg, n % 2);
4927 }
4928 }
4929 break;
4930 case 35: /* VZIP */
4931 /* Reg Before After
4932 Rd A3 A2 A1 A0 B1 A1 B0 A0
4933 Rm B3 B2 B1 B0 B3 A3 B2 A2
4934 */
4935 if (size == 3)
4936 return 1;
4937 count = (q ? 4 : 2);
4938 for (n = 0; n < count; n++) {
4939 NEON_GET_REG(T0, rd, n);
4940 NEON_GET_REG(T1, rd, n);
4941 switch (size) {
4942 case 0: gen_op_neon_zip_u8(); break;
4943 case 1: gen_op_neon_zip_u16(); break;
4944 case 2: /* no-op */; break;
4945 default: abort();
4946 }
4947 gen_neon_movl_scratch_T0(n * 2);
4948 gen_neon_movl_scratch_T1(n * 2 + 1);
4949 }
4950 for (n = 0; n < count * 2; n++) {
4951 int reg = (n < count) ? rd : rm;
4952 gen_neon_movl_T0_scratch(n);
4953 NEON_SET_REG(T0, reg, n % count);
4954 }
4955 break;
4956 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4957 for (pass = 0; pass < 2; pass++) {
4958 if (rd == rm + 1) {
4959 n = 1 - pass;
4960 } else {
4961 n = pass;
4962 }
4963 NEON_GET_REG(T0, rm, n * 2);
4964 NEON_GET_REG(T1, rm, n * 2 + 1);
4965 if (op == 36 && q == 0) {
4966 switch (size) {
4967 case 0: gen_op_neon_narrow_u8(); break;
4968 case 1: gen_op_neon_narrow_u16(); break;
4969 case 2: /* no-op */ break;
4970 default: return 1;
4971 }
4972 } else if (q) {
4973 switch (size) {
4974 case 0: gen_op_neon_narrow_sat_u8(); break;
4975 case 1: gen_op_neon_narrow_sat_u16(); break;
4976 case 2: gen_op_neon_narrow_sat_u32(); break;
4977 default: return 1;
4978 }
4979 } else {
4980 switch (size) {
4981 case 0: gen_op_neon_narrow_sat_s8(); break;
4982 case 1: gen_op_neon_narrow_sat_s16(); break;
4983 case 2: gen_op_neon_narrow_sat_s32(); break;
4984 default: return 1;
4985 }
4986 }
4987 NEON_SET_REG(T0, rd, n);
4988 }
4989 break;
4990 case 38: /* VSHLL */
4991 if (q)
4992 return 1;
4993 if (rm == rd) {
4994 NEON_GET_REG(T2, rm, 1);
4995 }
4996 for (pass = 0; pass < 2; pass++) {
4997 if (pass == 1 && rm == rd) {
4998 gen_op_movl_T0_T2();
4999 } else {
5000 NEON_GET_REG(T0, rm, pass);
5001 }
5002 switch (size) {
5003 case 0: gen_op_neon_widen_high_u8(); break;
5004 case 1: gen_op_neon_widen_high_u16(); break;
5005 case 2:
5006 gen_op_movl_T1_T0();
5007 gen_op_movl_T0_im(0);
5008 break;
5009 default: return 1;
5010 }
5011 NEON_SET_REG(T0, rd, pass * 2);
5012 NEON_SET_REG(T1, rd, pass * 2 + 1);
5013 }
5014 break;
5015 default:
5016 elementwise:
5017 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5018 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5019 tcg_gen_ld_f32(cpu_F0s, cpu_env,
5020 neon_reg_offset(rm, pass));
9ee6e8bb
PB
5021 } else {
5022 NEON_GET_REG(T0, rm, pass);
5023 }
5024 switch (op) {
5025 case 1: /* VREV32 */
5026 switch (size) {
b0109805 5027 case 0: tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]); break;
8f01245e 5028 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
5029 default: return 1;
5030 }
5031 break;
5032 case 2: /* VREV16 */
5033 if (size != 0)
5034 return 1;
3670669c 5035 gen_rev16(cpu_T[0]);
9ee6e8bb
PB
5036 break;
5037 case 4: case 5: /* VPADDL */
5038 case 12: case 13: /* VPADAL */
5039 switch ((size << 1) | (op & 1)) {
5040 case 0: gen_op_neon_paddl_s8(); break;
5041 case 1: gen_op_neon_paddl_u8(); break;
5042 case 2: gen_op_neon_paddl_s16(); break;
5043 case 3: gen_op_neon_paddl_u16(); break;
5044 default: abort();
5045 }
5046 if (op >= 12) {
5047 /* Accumulate */
5048 NEON_GET_REG(T1, rd, pass);
5049 switch (size) {
5050 case 0: gen_op_neon_add_u16(); break;
5051 case 1: gen_op_addl_T0_T1(); break;
5052 default: abort();
5053 }
5054 }
5055 break;
5056 case 8: /* CLS */
5057 switch (size) {
5058 case 0: gen_op_neon_cls_s8(); break;
5059 case 1: gen_op_neon_cls_s16(); break;
5060 case 2: gen_op_neon_cls_s32(); break;
5061 default: return 1;
5062 }
5063 break;
5064 case 9: /* CLZ */
5065 switch (size) {
5066 case 0: gen_op_neon_clz_u8(); break;
5067 case 1: gen_op_neon_clz_u16(); break;
1497c961 5068 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
5069 default: return 1;
5070 }
5071 break;
5072 case 10: /* CNT */
5073 if (size != 0)
5074 return 1;
5075 gen_op_neon_cnt_u8();
5076 break;
5077 case 11: /* VNOT */
5078 if (size != 0)
5079 return 1;
5080 gen_op_notl_T0();
5081 break;
5082 case 14: /* VQABS */
5083 switch (size) {
5084 case 0: gen_op_neon_qabs_s8(); break;
5085 case 1: gen_op_neon_qabs_s16(); break;
5086 case 2: gen_op_neon_qabs_s32(); break;
5087 default: return 1;
5088 }
5089 break;
5090 case 15: /* VQNEG */
5091 switch (size) {
5092 case 0: gen_op_neon_qneg_s8(); break;
5093 case 1: gen_op_neon_qneg_s16(); break;
5094 case 2: gen_op_neon_qneg_s32(); break;
5095 default: return 1;
5096 }
5097 break;
5098 case 16: case 19: /* VCGT #0, VCLE #0 */
5099 gen_op_movl_T1_im(0);
5100 switch(size) {
5101 case 0: gen_op_neon_cgt_s8(); break;
5102 case 1: gen_op_neon_cgt_s16(); break;
5103 case 2: gen_op_neon_cgt_s32(); break;
5104 default: return 1;
5105 }
5106 if (op == 19)
5107 gen_op_notl_T0();
5108 break;
5109 case 17: case 20: /* VCGE #0, VCLT #0 */
5110 gen_op_movl_T1_im(0);
5111 switch(size) {
5112 case 0: gen_op_neon_cge_s8(); break;
5113 case 1: gen_op_neon_cge_s16(); break;
5114 case 2: gen_op_neon_cge_s32(); break;
5115 default: return 1;
5116 }
5117 if (op == 20)
5118 gen_op_notl_T0();
5119 break;
5120 case 18: /* VCEQ #0 */
5121 gen_op_movl_T1_im(0);
5122 switch(size) {
5123 case 0: gen_op_neon_ceq_u8(); break;
5124 case 1: gen_op_neon_ceq_u16(); break;
5125 case 2: gen_op_neon_ceq_u32(); break;
5126 default: return 1;
5127 }
5128 break;
5129 case 22: /* VABS */
5130 switch(size) {
5131 case 0: gen_op_neon_abs_s8(); break;
5132 case 1: gen_op_neon_abs_s16(); break;
5133 case 2: gen_op_neon_abs_s32(); break;
5134 default: return 1;
5135 }
5136 break;
5137 case 23: /* VNEG */
5138 gen_op_movl_T1_im(0);
5139 switch(size) {
5140 case 0: gen_op_neon_rsb_u8(); break;
5141 case 1: gen_op_neon_rsb_u16(); break;
5142 case 2: gen_op_rsbl_T0_T1(); break;
5143 default: return 1;
5144 }
5145 break;
5146 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
5147 gen_op_movl_T1_im(0);
5148 gen_op_neon_cgt_f32();
5149 if (op == 27)
5150 gen_op_notl_T0();
5151 break;
5152 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
5153 gen_op_movl_T1_im(0);
5154 gen_op_neon_cge_f32();
5155 if (op == 28)
5156 gen_op_notl_T0();
5157 break;
5158 case 26: /* Float VCEQ #0 */
5159 gen_op_movl_T1_im(0);
5160 gen_op_neon_ceq_f32();
5161 break;
5162 case 30: /* Float VABS */
4373f3ce 5163 gen_vfp_abs(0);
9ee6e8bb
PB
5164 break;
5165 case 31: /* Float VNEG */
4373f3ce 5166 gen_vfp_neg(0);
9ee6e8bb
PB
5167 break;
5168 case 32: /* VSWP */
5169 NEON_GET_REG(T1, rd, pass);
5170 NEON_SET_REG(T1, rm, pass);
5171 break;
5172 case 33: /* VTRN */
5173 NEON_GET_REG(T1, rd, pass);
5174 switch (size) {
5175 case 0: gen_op_neon_trn_u8(); break;
5176 case 1: gen_op_neon_trn_u16(); break;
5177 case 2: abort();
5178 default: return 1;
5179 }
5180 NEON_SET_REG(T1, rm, pass);
5181 break;
5182 case 56: /* Integer VRECPE */
4373f3ce 5183 gen_helper_recpe_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5184 break;
5185 case 57: /* Integer VRSQRTE */
4373f3ce 5186 gen_helper_rsqrte_u32(cpu_T[0], cpu_T[0], cpu_env);
9ee6e8bb
PB
5187 break;
5188 case 58: /* Float VRECPE */
4373f3ce 5189 gen_helper_recpe_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5190 break;
5191 case 59: /* Float VRSQRTE */
4373f3ce 5192 gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
9ee6e8bb
PB
5193 break;
5194 case 60: /* VCVT.F32.S32 */
4373f3ce 5195 gen_vfp_tosiz(0);
9ee6e8bb
PB
5196 break;
5197 case 61: /* VCVT.F32.U32 */
4373f3ce 5198 gen_vfp_touiz(0);
9ee6e8bb
PB
5199 break;
5200 case 62: /* VCVT.S32.F32 */
4373f3ce 5201 gen_vfp_sito(0);
9ee6e8bb
PB
5202 break;
5203 case 63: /* VCVT.U32.F32 */
4373f3ce 5204 gen_vfp_uito(0);
9ee6e8bb
PB
5205 break;
5206 default:
5207 /* Reserved: 21, 29, 39-56 */
5208 return 1;
5209 }
5210 if (op == 30 || op == 31 || op >= 58) {
4373f3ce
PB
5211 tcg_gen_st_f32(cpu_F0s, cpu_env,
5212 neon_reg_offset(rd, pass));
9ee6e8bb
PB
5213 } else {
5214 NEON_SET_REG(T0, rd, pass);
5215 }
5216 }
5217 break;
5218 }
5219 } else if ((insn & (1 << 10)) == 0) {
5220 /* VTBL, VTBX. */
5221 n = (insn >> 5) & 0x18;
5222 NEON_GET_REG(T1, rm, 0);
5223 if (insn & (1 << 6)) {
5224 NEON_GET_REG(T0, rd, 0);
5225 } else {
5226 gen_op_movl_T0_im(0);
5227 }
5228 gen_op_neon_tbl(rn, n);
5229 gen_op_movl_T2_T0();
5230 NEON_GET_REG(T1, rm, 1);
5231 if (insn & (1 << 6)) {
5232 NEON_GET_REG(T0, rd, 0);
5233 } else {
5234 gen_op_movl_T0_im(0);
5235 }
5236 gen_op_neon_tbl(rn, n);
5237 NEON_SET_REG(T2, rd, 0);
5238 NEON_SET_REG(T0, rd, 1);
5239 } else if ((insn & 0x380) == 0) {
5240 /* VDUP */
5241 if (insn & (1 << 19)) {
5242 NEON_SET_REG(T0, rm, 1);
5243 } else {
5244 NEON_SET_REG(T0, rm, 0);
5245 }
5246 if (insn & (1 << 16)) {
5247 gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
5248 } else if (insn & (1 << 17)) {
5249 if ((insn >> 18) & 1)
5250 gen_op_neon_dup_high16();
5251 else
5252 gen_op_neon_dup_low16();
5253 }
5254 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5255 NEON_SET_REG(T0, rd, pass);
5256 }
5257 } else {
5258 return 1;
5259 }
5260 }
5261 }
5262 return 0;
5263}
5264
5265static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5266{
5267 int cpnum;
5268
5269 cpnum = (insn >> 8) & 0xf;
5270 if (arm_feature(env, ARM_FEATURE_XSCALE)
5271 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5272 return 1;
5273
5274 switch (cpnum) {
5275 case 0:
5276 case 1:
5277 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5278 return disas_iwmmxt_insn(env, s, insn);
5279 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5280 return disas_dsp_insn(env, s, insn);
5281 }
5282 return 1;
5283 case 10:
5284 case 11:
5285 return disas_vfp_insn (env, s, insn);
5286 case 15:
5287 return disas_cp15_insn (env, s, insn);
5288 default:
5289 /* Unknown coprocessor. See if the board has hooked it. */
5290 return disas_cp_insn (env, s, insn);
5291 }
5292}
5293
5294static void disas_arm_insn(CPUState * env, DisasContext *s)
5295{
5296 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5297 TCGv tmp;
3670669c 5298 TCGv tmp2;
6ddbc6e4 5299 TCGv tmp3;
b0109805 5300 TCGv addr;
9ee6e8bb
PB
5301
5302 insn = ldl_code(s->pc);
5303 s->pc += 4;
5304
5305 /* M variants do not implement ARM mode. */
5306 if (IS_M(env))
5307 goto illegal_op;
5308 cond = insn >> 28;
5309 if (cond == 0xf){
5310 /* Unconditional instructions. */
5311 if (((insn >> 25) & 7) == 1) {
5312 /* NEON Data processing. */
5313 if (!arm_feature(env, ARM_FEATURE_NEON))
5314 goto illegal_op;
5315
5316 if (disas_neon_data_insn(env, s, insn))
5317 goto illegal_op;
5318 return;
5319 }
5320 if ((insn & 0x0f100000) == 0x04000000) {
5321 /* NEON load/store. */
5322 if (!arm_feature(env, ARM_FEATURE_NEON))
5323 goto illegal_op;
5324
5325 if (disas_neon_ls_insn(env, s, insn))
5326 goto illegal_op;
5327 return;
5328 }
5329 if ((insn & 0x0d70f000) == 0x0550f000)
5330 return; /* PLD */
5331 else if ((insn & 0x0ffffdff) == 0x01010000) {
5332 ARCH(6);
5333 /* setend */
5334 if (insn & (1 << 9)) {
5335 /* BE8 mode not implemented. */
5336 goto illegal_op;
5337 }
5338 return;
5339 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5340 switch ((insn >> 4) & 0xf) {
5341 case 1: /* clrex */
5342 ARCH(6K);
5343 gen_op_clrex();
5344 return;
5345 case 4: /* dsb */
5346 case 5: /* dmb */
5347 case 6: /* isb */
5348 ARCH(7);
5349 /* We don't emulate caches so these are a no-op. */
5350 return;
5351 default:
5352 goto illegal_op;
5353 }
5354 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5355 /* srs */
5356 uint32_t offset;
5357 if (IS_USER(s))
5358 goto illegal_op;
5359 ARCH(6);
5360 op1 = (insn & 0x1f);
5361 if (op1 == (env->uncached_cpsr & CPSR_M)) {
b0109805 5362 addr = load_reg(s, 13);
9ee6e8bb 5363 } else {
b0109805
PB
5364 addr = new_tmp();
5365 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op1));
9ee6e8bb
PB
5366 }
5367 i = (insn >> 23) & 3;
5368 switch (i) {
5369 case 0: offset = -4; break; /* DA */
5370 case 1: offset = -8; break; /* DB */
5371 case 2: offset = 0; break; /* IA */
5372 case 3: offset = 4; break; /* IB */
5373 default: abort();
5374 }
5375 if (offset)
b0109805
PB
5376 tcg_gen_addi_i32(addr, addr, offset);
5377 tmp = load_reg(s, 14);
5378 gen_st32(tmp, addr, 0);
5379 tmp = new_tmp();
5380 gen_helper_cpsr_read(tmp);
5381 tcg_gen_addi_i32(addr, addr, 4);
5382 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
5383 if (insn & (1 << 21)) {
5384 /* Base writeback. */
5385 switch (i) {
5386 case 0: offset = -8; break;
5387 case 1: offset = -4; break;
5388 case 2: offset = 4; break;
5389 case 3: offset = 0; break;
5390 default: abort();
5391 }
5392 if (offset)
b0109805 5393 tcg_gen_addi_i32(addr, tmp, offset);
9ee6e8bb
PB
5394 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5395 gen_movl_reg_T1(s, 13);
5396 } else {
b0109805 5397 gen_helper_set_r13_banked(cpu_env, tcg_const_i32(op1), cpu_T[1]);
9ee6e8bb 5398 }
b0109805
PB
5399 } else {
5400 dead_tmp(addr);
9ee6e8bb
PB
5401 }
5402 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5403 /* rfe */
5404 uint32_t offset;
5405 if (IS_USER(s))
5406 goto illegal_op;
5407 ARCH(6);
5408 rn = (insn >> 16) & 0xf;
b0109805 5409 addr = load_reg(s, rn);
9ee6e8bb
PB
5410 i = (insn >> 23) & 3;
5411 switch (i) {
b0109805
PB
5412 case 0: offset = -4; break; /* DA */
5413 case 1: offset = -8; break; /* DB */
5414 case 2: offset = 0; break; /* IA */
5415 case 3: offset = 4; break; /* IB */
9ee6e8bb
PB
5416 default: abort();
5417 }
5418 if (offset)
b0109805
PB
5419 tcg_gen_addi_i32(addr, addr, offset);
5420 /* Load PC into tmp and CPSR into tmp2. */
5421 tmp = gen_ld32(addr, 0);
5422 tcg_gen_addi_i32(addr, addr, 4);
5423 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
5424 if (insn & (1 << 21)) {
5425 /* Base writeback. */
5426 switch (i) {
b0109805
PB
5427 case 0: offset = -8; break;
5428 case 1: offset = -4; break;
5429 case 2: offset = 4; break;
5430 case 3: offset = 0; break;
9ee6e8bb
PB
5431 default: abort();
5432 }
5433 if (offset)
b0109805
PB
5434 tcg_gen_addi_i32(addr, addr, offset);
5435 store_reg(s, rn, addr);
5436 } else {
5437 dead_tmp(addr);
9ee6e8bb 5438 }
b0109805 5439 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
5440 } else if ((insn & 0x0e000000) == 0x0a000000) {
5441 /* branch link and change to thumb (blx <offset>) */
5442 int32_t offset;
5443
5444 val = (uint32_t)s->pc;
d9ba4830
PB
5445 tmp = new_tmp();
5446 tcg_gen_movi_i32(tmp, val);
5447 store_reg(s, 14, tmp);
9ee6e8bb
PB
5448 /* Sign-extend the 24-bit offset */
5449 offset = (((int32_t)insn) << 8) >> 8;
5450 /* offset * 4 + bit24 * 2 + (thumb bit) */
5451 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5452 /* pipeline offset */
5453 val += 4;
d9ba4830 5454 gen_bx_im(s, val);
9ee6e8bb
PB
5455 return;
5456 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5457 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5458 /* iWMMXt register transfer. */
5459 if (env->cp15.c15_cpar & (1 << 1))
5460 if (!disas_iwmmxt_insn(env, s, insn))
5461 return;
5462 }
5463 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5464 /* Coprocessor double register transfer. */
5465 } else if ((insn & 0x0f000010) == 0x0e000010) {
5466 /* Additional coprocessor register transfer. */
5467 } else if ((insn & 0x0ff10010) == 0x01000000) {
5468 uint32_t mask;
5469 uint32_t val;
5470 /* cps (privileged) */
5471 if (IS_USER(s))
5472 return;
5473 mask = val = 0;
5474 if (insn & (1 << 19)) {
5475 if (insn & (1 << 8))
5476 mask |= CPSR_A;
5477 if (insn & (1 << 7))
5478 mask |= CPSR_I;
5479 if (insn & (1 << 6))
5480 mask |= CPSR_F;
5481 if (insn & (1 << 18))
5482 val |= mask;
5483 }
5484 if (insn & (1 << 14)) {
5485 mask |= CPSR_M;
5486 val |= (insn & 0x1f);
5487 }
5488 if (mask) {
5489 gen_op_movl_T0_im(val);
5490 gen_set_psr_T0(s, mask, 0);
5491 }
5492 return;
5493 }
5494 goto illegal_op;
5495 }
5496 if (cond != 0xe) {
5497 /* if not always execute, we generate a conditional jump to
5498 next instruction */
5499 s->condlabel = gen_new_label();
d9ba4830 5500 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5501 s->condjmp = 1;
5502 }
5503 if ((insn & 0x0f900000) == 0x03000000) {
5504 if ((insn & (1 << 21)) == 0) {
5505 ARCH(6T2);
5506 rd = (insn >> 12) & 0xf;
5507 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5508 if ((insn & (1 << 22)) == 0) {
5509 /* MOVW */
5510 gen_op_movl_T0_im(val);
5511 } else {
5512 /* MOVT */
5513 gen_movl_T0_reg(s, rd);
5514 gen_op_movl_T1_im(0xffff);
5515 gen_op_andl_T0_T1();
5516 gen_op_movl_T1_im(val << 16);
5517 gen_op_orl_T0_T1();
5518 }
5519 gen_movl_reg_T0(s, rd);
5520 } else {
5521 if (((insn >> 12) & 0xf) != 0xf)
5522 goto illegal_op;
5523 if (((insn >> 16) & 0xf) == 0) {
5524 gen_nop_hint(s, insn & 0xff);
5525 } else {
5526 /* CPSR = immediate */
5527 val = insn & 0xff;
5528 shift = ((insn >> 8) & 0xf) * 2;
5529 if (shift)
5530 val = (val >> shift) | (val << (32 - shift));
5531 gen_op_movl_T0_im(val);
5532 i = ((insn & (1 << 22)) != 0);
5533 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5534 goto illegal_op;
5535 }
5536 }
5537 } else if ((insn & 0x0f900000) == 0x01000000
5538 && (insn & 0x00000090) != 0x00000090) {
5539 /* miscellaneous instructions */
5540 op1 = (insn >> 21) & 3;
5541 sh = (insn >> 4) & 0xf;
5542 rm = insn & 0xf;
5543 switch (sh) {
5544 case 0x0: /* move program status register */
5545 if (op1 & 1) {
5546 /* PSR = reg */
5547 gen_movl_T0_reg(s, rm);
5548 i = ((op1 & 2) != 0);
5549 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5550 goto illegal_op;
5551 } else {
5552 /* reg = PSR */
5553 rd = (insn >> 12) & 0xf;
5554 if (op1 & 2) {
5555 if (IS_USER(s))
5556 goto illegal_op;
d9ba4830 5557 tmp = load_cpu_field(spsr);
9ee6e8bb 5558 } else {
d9ba4830
PB
5559 tmp = new_tmp();
5560 gen_helper_cpsr_read(tmp);
9ee6e8bb 5561 }
d9ba4830 5562 store_reg(s, rd, tmp);
9ee6e8bb
PB
5563 }
5564 break;
5565 case 0x1:
5566 if (op1 == 1) {
5567 /* branch/exchange thumb (bx). */
d9ba4830
PB
5568 tmp = load_reg(s, rm);
5569 gen_bx(s, tmp);
9ee6e8bb
PB
5570 } else if (op1 == 3) {
5571 /* clz */
5572 rd = (insn >> 12) & 0xf;
1497c961
PB
5573 tmp = load_reg(s, rm);
5574 gen_helper_clz(tmp, tmp);
5575 store_reg(s, rd, tmp);
9ee6e8bb
PB
5576 } else {
5577 goto illegal_op;
5578 }
5579 break;
5580 case 0x2:
5581 if (op1 == 1) {
5582 ARCH(5J); /* bxj */
5583 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5584 tmp = load_reg(s, rm);
5585 gen_bx(s, tmp);
9ee6e8bb
PB
5586 } else {
5587 goto illegal_op;
5588 }
5589 break;
5590 case 0x3:
5591 if (op1 != 1)
5592 goto illegal_op;
5593
5594 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5595 tmp = load_reg(s, rm);
5596 tmp2 = new_tmp();
5597 tcg_gen_movi_i32(tmp2, s->pc);
5598 store_reg(s, 14, tmp2);
5599 gen_bx(s, tmp);
9ee6e8bb
PB
5600 break;
5601 case 0x5: /* saturating add/subtract */
5602 rd = (insn >> 12) & 0xf;
5603 rn = (insn >> 16) & 0xf;
5604 gen_movl_T0_reg(s, rm);
5605 gen_movl_T1_reg(s, rn);
5606 if (op1 & 2)
1497c961 5607 gen_helper_double_saturate(cpu_T[1], cpu_T[1]);
9ee6e8bb
PB
5608 if (op1 & 1)
5609 gen_op_subl_T0_T1_saturate();
5610 else
5611 gen_op_addl_T0_T1_saturate();
5612 gen_movl_reg_T0(s, rd);
5613 break;
5614 case 7: /* bkpt */
5615 gen_set_condexec(s);
5616 gen_op_movl_T0_im((long)s->pc - 4);
b26eefb6 5617 gen_set_pc_T0();
d9ba4830 5618 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5619 s->is_jmp = DISAS_JUMP;
5620 break;
5621 case 0x8: /* signed multiply */
5622 case 0xa:
5623 case 0xc:
5624 case 0xe:
5625 rs = (insn >> 8) & 0xf;
5626 rn = (insn >> 12) & 0xf;
5627 rd = (insn >> 16) & 0xf;
5628 if (op1 == 1) {
5629 /* (32 * 16) >> 16 */
5630 gen_movl_T0_reg(s, rm);
5631 gen_movl_T1_reg(s, rs);
5632 if (sh & 4)
5633 gen_op_sarl_T1_im(16);
5634 else
b26eefb6 5635 gen_sxth(cpu_T[1]);
d9ba4830 5636 gen_imulw(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
5637 if ((sh & 2) == 0) {
5638 gen_movl_T1_reg(s, rn);
5639 gen_op_addl_T0_T1_setq();
5640 }
5641 gen_movl_reg_T0(s, rd);
5642 } else {
5643 /* 16 * 16 */
5644 gen_movl_T0_reg(s, rm);
5645 gen_movl_T1_reg(s, rs);
d9ba4830 5646 gen_mulxy(cpu_T[0], cpu_T[1], sh & 2, sh & 4);
9ee6e8bb
PB
5647 if (op1 == 2) {
5648 gen_op_signbit_T1_T0();
5649 gen_op_addq_T0_T1(rn, rd);
5650 gen_movl_reg_T0(s, rn);
5651 gen_movl_reg_T1(s, rd);
5652 } else {
5653 if (op1 == 0) {
5654 gen_movl_T1_reg(s, rn);
5655 gen_op_addl_T0_T1_setq();
5656 }
5657 gen_movl_reg_T0(s, rd);
5658 }
5659 }
5660 break;
5661 default:
5662 goto illegal_op;
5663 }
5664 } else if (((insn & 0x0e000000) == 0 &&
5665 (insn & 0x00000090) != 0x90) ||
5666 ((insn & 0x0e000000) == (1 << 25))) {
5667 int set_cc, logic_cc, shiftop;
5668
5669 op1 = (insn >> 21) & 0xf;
5670 set_cc = (insn >> 20) & 1;
5671 logic_cc = table_logic_cc[op1] & set_cc;
5672
5673 /* data processing instruction */
5674 if (insn & (1 << 25)) {
5675 /* immediate operand */
5676 val = insn & 0xff;
5677 shift = ((insn >> 8) & 0xf) * 2;
5678 if (shift)
5679 val = (val >> shift) | (val << (32 - shift));
5680 gen_op_movl_T1_im(val);
5681 if (logic_cc && shift)
b26eefb6 5682 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
5683 } else {
5684 /* register */
5685 rm = (insn) & 0xf;
5686 gen_movl_T1_reg(s, rm);
5687 shiftop = (insn >> 5) & 3;
5688 if (!(insn & (1 << 4))) {
5689 shift = (insn >> 7) & 0x1f;
9a119ff6 5690 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
5691 } else {
5692 rs = (insn >> 8) & 0xf;
5693 gen_movl_T0_reg(s, rs);
5694 if (logic_cc) {
5695 gen_shift_T1_T0_cc[shiftop]();
5696 } else {
5697 gen_shift_T1_T0[shiftop]();
5698 }
5699 }
5700 }
5701 if (op1 != 0x0f && op1 != 0x0d) {
5702 rn = (insn >> 16) & 0xf;
5703 gen_movl_T0_reg(s, rn);
5704 }
5705 rd = (insn >> 12) & 0xf;
5706 switch(op1) {
5707 case 0x00:
5708 gen_op_andl_T0_T1();
5709 gen_movl_reg_T0(s, rd);
5710 if (logic_cc)
5711 gen_op_logic_T0_cc();
5712 break;
5713 case 0x01:
5714 gen_op_xorl_T0_T1();
5715 gen_movl_reg_T0(s, rd);
5716 if (logic_cc)
5717 gen_op_logic_T0_cc();
5718 break;
5719 case 0x02:
5720 if (set_cc && rd == 15) {
5721 /* SUBS r15, ... is used for exception return. */
5722 if (IS_USER(s))
5723 goto illegal_op;
5724 gen_op_subl_T0_T1_cc();
5725 gen_exception_return(s);
5726 } else {
5727 if (set_cc)
5728 gen_op_subl_T0_T1_cc();
5729 else
5730 gen_op_subl_T0_T1();
5731 gen_movl_reg_T0(s, rd);
5732 }
5733 break;
5734 case 0x03:
5735 if (set_cc)
5736 gen_op_rsbl_T0_T1_cc();
5737 else
5738 gen_op_rsbl_T0_T1();
5739 gen_movl_reg_T0(s, rd);
5740 break;
5741 case 0x04:
5742 if (set_cc)
5743 gen_op_addl_T0_T1_cc();
5744 else
5745 gen_op_addl_T0_T1();
5746 gen_movl_reg_T0(s, rd);
5747 break;
5748 case 0x05:
5749 if (set_cc)
5750 gen_op_adcl_T0_T1_cc();
5751 else
b26eefb6 5752 gen_adc_T0_T1();
9ee6e8bb
PB
5753 gen_movl_reg_T0(s, rd);
5754 break;
5755 case 0x06:
5756 if (set_cc)
5757 gen_op_sbcl_T0_T1_cc();
5758 else
3670669c 5759 gen_sbc_T0_T1();
9ee6e8bb
PB
5760 gen_movl_reg_T0(s, rd);
5761 break;
5762 case 0x07:
5763 if (set_cc)
5764 gen_op_rscl_T0_T1_cc();
5765 else
3670669c 5766 gen_rsc_T0_T1();
9ee6e8bb
PB
5767 gen_movl_reg_T0(s, rd);
5768 break;
5769 case 0x08:
5770 if (set_cc) {
5771 gen_op_andl_T0_T1();
5772 gen_op_logic_T0_cc();
5773 }
5774 break;
5775 case 0x09:
5776 if (set_cc) {
5777 gen_op_xorl_T0_T1();
5778 gen_op_logic_T0_cc();
5779 }
5780 break;
5781 case 0x0a:
5782 if (set_cc) {
5783 gen_op_subl_T0_T1_cc();
5784 }
5785 break;
5786 case 0x0b:
5787 if (set_cc) {
5788 gen_op_addl_T0_T1_cc();
5789 }
5790 break;
5791 case 0x0c:
5792 gen_op_orl_T0_T1();
5793 gen_movl_reg_T0(s, rd);
5794 if (logic_cc)
5795 gen_op_logic_T0_cc();
5796 break;
5797 case 0x0d:
5798 if (logic_cc && rd == 15) {
5799 /* MOVS r15, ... is used for exception return. */
5800 if (IS_USER(s))
5801 goto illegal_op;
5802 gen_op_movl_T0_T1();
5803 gen_exception_return(s);
5804 } else {
5805 gen_movl_reg_T1(s, rd);
5806 if (logic_cc)
5807 gen_op_logic_T1_cc();
5808 }
5809 break;
5810 case 0x0e:
5811 gen_op_bicl_T0_T1();
5812 gen_movl_reg_T0(s, rd);
5813 if (logic_cc)
5814 gen_op_logic_T0_cc();
5815 break;
5816 default:
5817 case 0x0f:
5818 gen_op_notl_T1();
5819 gen_movl_reg_T1(s, rd);
5820 if (logic_cc)
5821 gen_op_logic_T1_cc();
5822 break;
5823 }
5824 } else {
5825 /* other instructions */
5826 op1 = (insn >> 24) & 0xf;
5827 switch(op1) {
5828 case 0x0:
5829 case 0x1:
5830 /* multiplies, extra load/stores */
5831 sh = (insn >> 5) & 3;
5832 if (sh == 0) {
5833 if (op1 == 0x0) {
5834 rd = (insn >> 16) & 0xf;
5835 rn = (insn >> 12) & 0xf;
5836 rs = (insn >> 8) & 0xf;
5837 rm = (insn) & 0xf;
5838 op1 = (insn >> 20) & 0xf;
5839 switch (op1) {
5840 case 0: case 1: case 2: case 3: case 6:
5841 /* 32 bit mul */
5842 gen_movl_T0_reg(s, rs);
5843 gen_movl_T1_reg(s, rm);
5844 gen_op_mul_T0_T1();
5845 if (insn & (1 << 22)) {
5846 /* Subtract (mls) */
5847 ARCH(6T2);
5848 gen_movl_T1_reg(s, rn);
5849 gen_op_rsbl_T0_T1();
5850 } else if (insn & (1 << 21)) {
5851 /* Add */
5852 gen_movl_T1_reg(s, rn);
5853 gen_op_addl_T0_T1();
5854 }
5855 if (insn & (1 << 20))
5856 gen_op_logic_T0_cc();
5857 gen_movl_reg_T0(s, rd);
5858 break;
5859 default:
5860 /* 64 bit mul */
5861 gen_movl_T0_reg(s, rs);
5862 gen_movl_T1_reg(s, rm);
5863 if (insn & (1 << 22))
5864 gen_op_imull_T0_T1();
5865 else
5866 gen_op_mull_T0_T1();
5867 if (insn & (1 << 21)) /* mult accumulate */
5868 gen_op_addq_T0_T1(rn, rd);
5869 if (!(insn & (1 << 23))) { /* double accumulate */
5870 ARCH(6);
5871 gen_op_addq_lo_T0_T1(rn);
5872 gen_op_addq_lo_T0_T1(rd);
5873 }
5874 if (insn & (1 << 20))
5875 gen_op_logicq_cc();
5876 gen_movl_reg_T0(s, rn);
5877 gen_movl_reg_T1(s, rd);
5878 break;
5879 }
5880 } else {
5881 rn = (insn >> 16) & 0xf;
5882 rd = (insn >> 12) & 0xf;
5883 if (insn & (1 << 23)) {
5884 /* load/store exclusive */
5885 gen_movl_T1_reg(s, rn);
5886 if (insn & (1 << 20)) {
5887 gen_ldst(ldlex, s);
5888 } else {
5889 rm = insn & 0xf;
5890 gen_movl_T0_reg(s, rm);
5891 gen_ldst(stlex, s);
5892 }
5893 gen_movl_reg_T0(s, rd);
5894 } else {
5895 /* SWP instruction */
5896 rm = (insn) & 0xf;
5897
5898 gen_movl_T0_reg(s, rm);
5899 gen_movl_T1_reg(s, rn);
5900 if (insn & (1 << 22)) {
5901 gen_ldst(swpb, s);
5902 } else {
5903 gen_ldst(swpl, s);
5904 }
5905 gen_movl_reg_T0(s, rd);
5906 }
5907 }
5908 } else {
5909 int address_offset;
5910 int load;
5911 /* Misc load/store */
5912 rn = (insn >> 16) & 0xf;
5913 rd = (insn >> 12) & 0xf;
b0109805 5914 addr = load_reg(s, rn);
9ee6e8bb 5915 if (insn & (1 << 24))
b0109805 5916 gen_add_datah_offset(s, insn, 0, addr);
9ee6e8bb
PB
5917 address_offset = 0;
5918 if (insn & (1 << 20)) {
5919 /* load */
5920 switch(sh) {
5921 case 1:
b0109805 5922 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb
PB
5923 break;
5924 case 2:
b0109805 5925 tmp = gen_ld8s(addr, IS_USER(s));
9ee6e8bb
PB
5926 break;
5927 default:
5928 case 3:
b0109805 5929 tmp = gen_ld16s(addr, IS_USER(s));
9ee6e8bb
PB
5930 break;
5931 }
5932 load = 1;
5933 } else if (sh & 2) {
5934 /* doubleword */
5935 if (sh & 1) {
5936 /* store */
b0109805
PB
5937 tmp = load_reg(s, rd);
5938 gen_st32(tmp, addr, IS_USER(s));
5939 tcg_gen_addi_i32(addr, addr, 4);
5940 tmp = load_reg(s, rd + 1);
5941 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
5942 load = 0;
5943 } else {
5944 /* load */
b0109805
PB
5945 tmp = gen_ld32(addr, IS_USER(s));
5946 store_reg(s, rd, tmp);
5947 tcg_gen_addi_i32(addr, addr, 4);
5948 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb
PB
5949 rd++;
5950 load = 1;
5951 }
5952 address_offset = -4;
5953 } else {
5954 /* store */
b0109805
PB
5955 tmp = load_reg(s, rd);
5956 gen_st16(tmp, addr, IS_USER(s));
9ee6e8bb
PB
5957 load = 0;
5958 }
5959 /* Perform base writeback before the loaded value to
5960 ensure correct behavior with overlapping index registers.
5961 ldrd with base writeback is is undefined if the
5962 destination and index registers overlap. */
5963 if (!(insn & (1 << 24))) {
b0109805
PB
5964 gen_add_datah_offset(s, insn, address_offset, addr);
5965 store_reg(s, rn, addr);
9ee6e8bb
PB
5966 } else if (insn & (1 << 21)) {
5967 if (address_offset)
b0109805
PB
5968 tcg_gen_addi_i32(addr, addr, address_offset);
5969 store_reg(s, rn, addr);
5970 } else {
5971 dead_tmp(addr);
9ee6e8bb
PB
5972 }
5973 if (load) {
5974 /* Complete the load. */
b0109805 5975 store_reg(s, rd, tmp);
9ee6e8bb
PB
5976 }
5977 }
5978 break;
5979 case 0x4:
5980 case 0x5:
5981 goto do_ldst;
5982 case 0x6:
5983 case 0x7:
5984 if (insn & (1 << 4)) {
5985 ARCH(6);
5986 /* Armv6 Media instructions. */
5987 rm = insn & 0xf;
5988 rn = (insn >> 16) & 0xf;
2c0262af 5989 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
5990 rs = (insn >> 8) & 0xf;
5991 switch ((insn >> 23) & 3) {
5992 case 0: /* Parallel add/subtract. */
5993 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
5994 tmp = load_reg(s, rn);
5995 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
5996 sh = (insn >> 5) & 7;
5997 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
5998 goto illegal_op;
6ddbc6e4
PB
5999 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
6000 dead_tmp(tmp2);
6001 store_reg(s, rd, tmp);
9ee6e8bb
PB
6002 break;
6003 case 1:
6004 if ((insn & 0x00700020) == 0) {
6005 /* Hafword pack. */
3670669c
PB
6006 tmp = load_reg(s, rn);
6007 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6008 shift = (insn >> 7) & 0x1f;
6009 if (shift)
3670669c
PB
6010 tcg_gen_shli_i32(tmp2, tmp2, shift);
6011 if (insn & (1 << 6)) {
6012 /* pkhtb */
6013 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
6014 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
6015 } else {
6016 /* pkhbt */
6017 tcg_gen_andi_i32(tmp, tmp, 0xffff);
6018 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
6019 }
6020 tcg_gen_or_i32(tmp, tmp, tmp2);
6021 store_reg(s, rd, tmp);
9ee6e8bb
PB
6022 } else if ((insn & 0x00200020) == 0x00200000) {
6023 /* [us]sat */
6ddbc6e4 6024 tmp = load_reg(s, rm);
9ee6e8bb
PB
6025 shift = (insn >> 7) & 0x1f;
6026 if (insn & (1 << 6)) {
6027 if (shift == 0)
6028 shift = 31;
6ddbc6e4 6029 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 6030 } else {
6ddbc6e4 6031 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
6032 }
6033 sh = (insn >> 16) & 0x1f;
6034 if (sh != 0) {
6035 if (insn & (1 << 22))
6ddbc6e4 6036 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6037 else
6ddbc6e4 6038 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6039 }
6ddbc6e4 6040 store_reg(s, rd, tmp);
9ee6e8bb
PB
6041 } else if ((insn & 0x00300fe0) == 0x00200f20) {
6042 /* [us]sat16 */
6ddbc6e4 6043 tmp = load_reg(s, rm);
9ee6e8bb
PB
6044 sh = (insn >> 16) & 0x1f;
6045 if (sh != 0) {
6046 if (insn & (1 << 22))
6ddbc6e4 6047 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6048 else
6ddbc6e4 6049 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 6050 }
6ddbc6e4 6051 store_reg(s, rd, tmp);
9ee6e8bb
PB
6052 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
6053 /* Select bytes. */
6ddbc6e4
PB
6054 tmp = load_reg(s, rn);
6055 tmp2 = load_reg(s, rm);
6056 tmp3 = new_tmp();
6057 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
6058 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6059 dead_tmp(tmp3);
6060 dead_tmp(tmp2);
6061 store_reg(s, rd, tmp);
9ee6e8bb
PB
6062 } else if ((insn & 0x000003e0) == 0x00000060) {
6063 gen_movl_T1_reg(s, rm);
6064 shift = (insn >> 10) & 3;
6065 /* ??? In many cases it's not neccessary to do a
6066 rotate, a shift is sufficient. */
6067 if (shift != 0)
6068 gen_op_rorl_T1_im(shift * 8);
6069 op1 = (insn >> 20) & 7;
6070 switch (op1) {
b26eefb6
PB
6071 case 0: gen_sxtb16(cpu_T[1]); break;
6072 case 2: gen_sxtb(cpu_T[1]); break;
6073 case 3: gen_sxth(cpu_T[1]); break;
6074 case 4: gen_uxtb16(cpu_T[1]); break;
6075 case 6: gen_uxtb(cpu_T[1]); break;
6076 case 7: gen_uxth(cpu_T[1]); break;
9ee6e8bb
PB
6077 default: goto illegal_op;
6078 }
6079 if (rn != 15) {
b26eefb6 6080 tmp = load_reg(s, rn);
9ee6e8bb 6081 if ((op1 & 3) == 0) {
b26eefb6 6082 gen_add16(cpu_T[1], tmp);
9ee6e8bb 6083 } else {
b26eefb6
PB
6084 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6085 dead_tmp(tmp);
9ee6e8bb
PB
6086 }
6087 }
6088 gen_movl_reg_T1(s, rd);
6089 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
6090 /* rev */
b0109805 6091 tmp = load_reg(s, rm);
9ee6e8bb
PB
6092 if (insn & (1 << 22)) {
6093 if (insn & (1 << 7)) {
b0109805 6094 gen_revsh(tmp);
9ee6e8bb
PB
6095 } else {
6096 ARCH(6T2);
b0109805 6097 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6098 }
6099 } else {
6100 if (insn & (1 << 7))
b0109805 6101 gen_rev16(tmp);
9ee6e8bb 6102 else
b0109805 6103 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb 6104 }
b0109805 6105 store_reg(s, rd, tmp);
9ee6e8bb
PB
6106 } else {
6107 goto illegal_op;
6108 }
6109 break;
6110 case 2: /* Multiplies (Type 3). */
6111 gen_movl_T0_reg(s, rm);
6112 gen_movl_T1_reg(s, rs);
6113 if (insn & (1 << 20)) {
6114 /* Signed multiply most significant [accumulate]. */
6115 gen_op_imull_T0_T1();
6116 if (insn & (1 << 5))
d9ba4830 6117 gen_roundqd(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
6118 else
6119 gen_op_movl_T0_T1();
6120 if (rn != 15) {
6121 gen_movl_T1_reg(s, rn);
6122 if (insn & (1 << 6)) {
6123 gen_op_addl_T0_T1();
6124 } else {
6125 gen_op_rsbl_T0_T1();
6126 }
6127 }
6128 gen_movl_reg_T0(s, rd);
6129 } else {
6130 if (insn & (1 << 5))
8f01245e 6131 gen_swap_half(cpu_T[1]);
3670669c 6132 gen_smul_dual(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
6133 if (insn & (1 << 22)) {
6134 if (insn & (1 << 6)) {
6135 /* smlald */
6136 gen_op_addq_T0_T1_dual(rn, rd);
6137 } else {
6138 /* smlsld */
6139 gen_op_subq_T0_T1_dual(rn, rd);
6140 }
6141 } else {
6142 /* This addition cannot overflow. */
6143 if (insn & (1 << 6)) {
6144 /* sm[ul]sd */
6145 gen_op_subl_T0_T1();
6146 } else {
6147 /* sm[ul]ad */
6148 gen_op_addl_T0_T1();
6149 }
6150 if (rn != 15)
6151 {
6152 gen_movl_T1_reg(s, rn);
6153 gen_op_addl_T0_T1_setq();
6154 }
6155 gen_movl_reg_T0(s, rd);
6156 }
6157 }
6158 break;
6159 case 3:
6160 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
6161 switch (op1) {
6162 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
6163 ARCH(6);
6164 tmp = load_reg(s, rm);
6165 tmp2 = load_reg(s, rs);
6166 gen_helper_usad8(tmp, tmp, tmp2);
6167 dead_tmp(tmp2);
9ee6e8bb 6168 if (rn != 15) {
6ddbc6e4
PB
6169 tmp2 = load_reg(s, rn);
6170 tcg_gen_add_i32(tmp, tmp, tmp2);
6171 dead_tmp(tmp2);
9ee6e8bb 6172 }
6ddbc6e4 6173 store_reg(s, rd, tmp);
9ee6e8bb
PB
6174 break;
6175 case 0x20: case 0x24: case 0x28: case 0x2c:
6176 /* Bitfield insert/clear. */
6177 ARCH(6T2);
6178 shift = (insn >> 7) & 0x1f;
6179 i = (insn >> 16) & 0x1f;
6180 i = i + 1 - shift;
6181 if (rm == 15) {
6182 gen_op_movl_T1_im(0);
6183 } else {
6184 gen_movl_T1_reg(s, rm);
6185 }
6186 if (i != 32) {
6187 gen_movl_T0_reg(s, rd);
3670669c
PB
6188 gen_bfi(cpu_T[1], cpu_T[0], cpu_T[1],
6189 shift, ((1u << i) - 1) << shift);
9ee6e8bb
PB
6190 }
6191 gen_movl_reg_T1(s, rd);
6192 break;
6193 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
6194 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
6195 gen_movl_T1_reg(s, rm);
6196 shift = (insn >> 7) & 0x1f;
6197 i = ((insn >> 16) & 0x1f) + 1;
6198 if (shift + i > 32)
6199 goto illegal_op;
6200 if (i < 32) {
6201 if (op1 & 0x20) {
3670669c 6202 gen_ubfx(cpu_T[1], shift, (1u << i) - 1);
9ee6e8bb 6203 } else {
3670669c 6204 gen_sbfx(cpu_T[1], shift, i);
9ee6e8bb
PB
6205 }
6206 }
6207 gen_movl_reg_T1(s, rd);
6208 break;
6209 default:
6210 goto illegal_op;
6211 }
6212 break;
6213 }
6214 break;
6215 }
6216 do_ldst:
6217 /* Check for undefined extension instructions
6218 * per the ARM Bible IE:
6219 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6220 */
6221 sh = (0xf << 20) | (0xf << 4);
6222 if (op1 == 0x7 && ((insn & sh) == sh))
6223 {
6224 goto illegal_op;
6225 }
6226 /* load/store byte/word */
6227 rn = (insn >> 16) & 0xf;
6228 rd = (insn >> 12) & 0xf;
b0109805 6229 tmp2 = load_reg(s, rn);
9ee6e8bb
PB
6230 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6231 if (insn & (1 << 24))
b0109805 6232 gen_add_data_offset(s, insn, tmp2);
9ee6e8bb
PB
6233 if (insn & (1 << 20)) {
6234 /* load */
6235 s->is_mem = 1;
9ee6e8bb 6236 if (insn & (1 << 22)) {
b0109805 6237 tmp = gen_ld8u(tmp2, i);
9ee6e8bb 6238 } else {
b0109805 6239 tmp = gen_ld32(tmp2, i);
9ee6e8bb 6240 }
9ee6e8bb
PB
6241 } else {
6242 /* store */
b0109805 6243 tmp = load_reg(s, rd);
9ee6e8bb 6244 if (insn & (1 << 22))
b0109805 6245 gen_st8(tmp, tmp2, i);
9ee6e8bb 6246 else
b0109805 6247 gen_st32(tmp, tmp2, i);
9ee6e8bb
PB
6248 }
6249 if (!(insn & (1 << 24))) {
b0109805
PB
6250 gen_add_data_offset(s, insn, tmp2);
6251 store_reg(s, rn, tmp2);
6252 } else if (insn & (1 << 21)) {
6253 store_reg(s, rn, tmp2);
6254 } else {
6255 dead_tmp(tmp2);
9ee6e8bb
PB
6256 }
6257 if (insn & (1 << 20)) {
6258 /* Complete the load. */
6259 if (rd == 15)
b0109805 6260 gen_bx(s, tmp);
9ee6e8bb 6261 else
b0109805 6262 store_reg(s, rd, tmp);
9ee6e8bb
PB
6263 }
6264 break;
6265 case 0x08:
6266 case 0x09:
6267 {
6268 int j, n, user, loaded_base;
b0109805 6269 TCGv loaded_var;
9ee6e8bb
PB
6270 /* load/store multiple words */
6271 /* XXX: store correct base if write back */
6272 user = 0;
6273 if (insn & (1 << 22)) {
6274 if (IS_USER(s))
6275 goto illegal_op; /* only usable in supervisor mode */
6276
6277 if ((insn & (1 << 15)) == 0)
6278 user = 1;
6279 }
6280 rn = (insn >> 16) & 0xf;
b0109805 6281 addr = load_reg(s, rn);
9ee6e8bb
PB
6282
6283 /* compute total size */
6284 loaded_base = 0;
6285 n = 0;
6286 for(i=0;i<16;i++) {
6287 if (insn & (1 << i))
6288 n++;
6289 }
6290 /* XXX: test invalid n == 0 case ? */
6291 if (insn & (1 << 23)) {
6292 if (insn & (1 << 24)) {
6293 /* pre increment */
b0109805 6294 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6295 } else {
6296 /* post increment */
6297 }
6298 } else {
6299 if (insn & (1 << 24)) {
6300 /* pre decrement */
b0109805 6301 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6302 } else {
6303 /* post decrement */
6304 if (n != 1)
b0109805 6305 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6306 }
6307 }
6308 j = 0;
6309 for(i=0;i<16;i++) {
6310 if (insn & (1 << i)) {
6311 if (insn & (1 << 20)) {
6312 /* load */
b0109805 6313 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6314 if (i == 15) {
b0109805 6315 gen_bx(s, tmp);
9ee6e8bb 6316 } else if (user) {
b0109805
PB
6317 gen_helper_set_user_reg(tcg_const_i32(i), tmp);
6318 dead_tmp(tmp);
9ee6e8bb 6319 } else if (i == rn) {
b0109805 6320 loaded_var = tmp;
9ee6e8bb
PB
6321 loaded_base = 1;
6322 } else {
b0109805 6323 store_reg(s, i, tmp);
9ee6e8bb
PB
6324 }
6325 } else {
6326 /* store */
6327 if (i == 15) {
6328 /* special case: r15 = PC + 8 */
6329 val = (long)s->pc + 4;
b0109805
PB
6330 tmp = new_tmp();
6331 tcg_gen_movi_i32(tmp, val);
9ee6e8bb 6332 } else if (user) {
b0109805
PB
6333 tmp = new_tmp();
6334 gen_helper_get_user_reg(tmp, tcg_const_i32(i));
9ee6e8bb 6335 } else {
b0109805 6336 tmp = load_reg(s, i);
9ee6e8bb 6337 }
b0109805 6338 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6339 }
6340 j++;
6341 /* no need to add after the last transfer */
6342 if (j != n)
b0109805 6343 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6344 }
6345 }
6346 if (insn & (1 << 21)) {
6347 /* write back */
6348 if (insn & (1 << 23)) {
6349 if (insn & (1 << 24)) {
6350 /* pre increment */
6351 } else {
6352 /* post increment */
b0109805 6353 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6354 }
6355 } else {
6356 if (insn & (1 << 24)) {
6357 /* pre decrement */
6358 if (n != 1)
b0109805 6359 tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
9ee6e8bb
PB
6360 } else {
6361 /* post decrement */
b0109805 6362 tcg_gen_addi_i32(addr, addr, -(n * 4));
9ee6e8bb
PB
6363 }
6364 }
b0109805
PB
6365 store_reg(s, rn, addr);
6366 } else {
6367 dead_tmp(addr);
9ee6e8bb
PB
6368 }
6369 if (loaded_base) {
b0109805 6370 store_reg(s, rn, loaded_var);
9ee6e8bb
PB
6371 }
6372 if ((insn & (1 << 22)) && !user) {
6373 /* Restore CPSR from SPSR. */
d9ba4830
PB
6374 tmp = load_cpu_field(spsr);
6375 gen_set_cpsr(tmp, 0xffffffff);
6376 dead_tmp(tmp);
9ee6e8bb
PB
6377 s->is_jmp = DISAS_UPDATE;
6378 }
6379 }
6380 break;
6381 case 0xa:
6382 case 0xb:
6383 {
6384 int32_t offset;
6385
6386 /* branch (and link) */
6387 val = (int32_t)s->pc;
6388 if (insn & (1 << 24)) {
6389 gen_op_movl_T0_im(val);
b26eefb6 6390 gen_movl_reg_T0(s, 14);
9ee6e8bb
PB
6391 }
6392 offset = (((int32_t)insn << 8) >> 8);
6393 val += (offset << 2) + 4;
6394 gen_jmp(s, val);
6395 }
6396 break;
6397 case 0xc:
6398 case 0xd:
6399 case 0xe:
6400 /* Coprocessor. */
6401 if (disas_coproc_insn(env, s, insn))
6402 goto illegal_op;
6403 break;
6404 case 0xf:
6405 /* swi */
6406 gen_op_movl_T0_im((long)s->pc);
b26eefb6 6407 gen_set_pc_T0();
9ee6e8bb
PB
6408 s->is_jmp = DISAS_SWI;
6409 break;
6410 default:
6411 illegal_op:
6412 gen_set_condexec(s);
6413 gen_op_movl_T0_im((long)s->pc - 4);
b26eefb6 6414 gen_set_pc_T0();
d9ba4830 6415 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6416 s->is_jmp = DISAS_JUMP;
6417 break;
6418 }
6419 }
6420}
6421
/* Predicate: does the Thumb-2 data-processing opcode OP fall in the
   logical group (opcodes 0..7)?  Logical ops update the carry flag from
   the shifter output rather than from the ALU, so the decoder needs to
   distinguish them.  Returns nonzero for logical ops, zero otherwise.  */
static int
thumb2_logic_op(int op)
{
    if (op >= 8)
        return 0;
    return 1;
}
6428
/* Generate code for a Thumb-2 data processing operation.  If CONDS is nonzero
   then set condition code flags based on the result of the operation.
   If SHIFTER_OUT is nonzero then set the carry flag for logical operations
   to the high bit of T1.
   Operands are taken from the virtual registers T0 (first operand) and
   T1 (second/shifted operand); the result is left in T0.
   Returns zero if the opcode is valid.  */

static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
{
    int logic_cc;

    /* Set for logical ops that must update NZ flags; arithmetic ops set
       their flags via the *_cc op variants instead.  */
    logic_cc = 0;
    switch (op) {
    case 0: /* and */
        gen_op_andl_T0_T1();
        logic_cc = conds;
        break;
    case 1: /* bic */
        gen_op_bicl_T0_T1();
        logic_cc = conds;
        break;
    case 2: /* orr */
        gen_op_orl_T0_T1();
        logic_cc = conds;
        break;
    case 3: /* orn */
        /* No dedicated ORN op: complement T1, then OR.  */
        gen_op_notl_T1();
        gen_op_orl_T0_T1();
        logic_cc = conds;
        break;
    case 4: /* eor */
        gen_op_xorl_T0_T1();
        logic_cc = conds;
        break;
    case 8: /* add */
        if (conds)
            gen_op_addl_T0_T1_cc();
        else
            gen_op_addl_T0_T1();
        break;
    case 10: /* adc */
        if (conds)
            gen_op_adcl_T0_T1_cc();
        else
            gen_adc_T0_T1();
        break;
    case 11: /* sbc */
        if (conds)
            gen_op_sbcl_T0_T1_cc();
        else
            gen_sbc_T0_T1();
        break;
    case 13: /* sub */
        if (conds)
            gen_op_subl_T0_T1_cc();
        else
            gen_op_subl_T0_T1();
        break;
    case 14: /* rsb */
        if (conds)
            gen_op_rsbl_T0_T1_cc();
        else
            gen_op_rsbl_T0_T1();
        break;
    default: /* 5, 6, 7, 9, 12, 15.  Undefined encodings.  */
        return 1;
    }
    if (logic_cc) {
        /* Logical ops: NZ come from the result...  */
        gen_op_logic_T0_cc();
        /* ...and C comes from the shifter output (high bit of T1) when
           the caller's shift produced a carry-out.  */
        if (shifter_out)
            gen_set_CF_bit31(cpu_T[1]);
    }
    return 0;
}
6503
6504/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6505 is not legal. */
6506static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6507{
b0109805 6508 uint32_t insn, imm, shift, offset;
9ee6e8bb 6509 uint32_t rd, rn, rm, rs;
b26eefb6 6510 TCGv tmp;
6ddbc6e4
PB
6511 TCGv tmp2;
6512 TCGv tmp3;
b0109805 6513 TCGv addr;
9ee6e8bb
PB
6514 int op;
6515 int shiftop;
6516 int conds;
6517 int logic_cc;
6518
6519 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6520 || arm_feature (env, ARM_FEATURE_M))) {
6521 /* Thumb-1 cores may need to tread bl and blx as a pair of
6522 16-bit instructions to get correct prefetch abort behavior. */
6523 insn = insn_hw1;
6524 if ((insn & (1 << 12)) == 0) {
6525 /* Second half of blx. */
6526 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6527 tmp = load_reg(s, 14);
6528 tcg_gen_addi_i32(tmp, tmp, offset);
6529 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb 6530
d9ba4830 6531 tmp2 = new_tmp();
b0109805 6532 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6533 store_reg(s, 14, tmp2);
6534 gen_bx(s, tmp);
9ee6e8bb
PB
6535 return 0;
6536 }
6537 if (insn & (1 << 11)) {
6538 /* Second half of bl. */
6539 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830
PB
6540 tmp = load_reg(s, 14);
6541 tcg_gen_addi_i32(tmp, tmp, 14);
9ee6e8bb 6542
d9ba4830 6543 tmp2 = new_tmp();
b0109805 6544 tcg_gen_movi_i32(tmp2, s->pc | 1);
d9ba4830
PB
6545 store_reg(s, 14, tmp2);
6546 gen_bx(s, tmp);
9ee6e8bb
PB
6547 return 0;
6548 }
6549 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6550 /* Instruction spans a page boundary. Implement it as two
6551 16-bit instructions in case the second half causes an
6552 prefetch abort. */
6553 offset = ((int32_t)insn << 21) >> 9;
b0109805 6554 gen_op_movl_T0_im(s->pc + 2 + offset);
9ee6e8bb
PB
6555 gen_movl_reg_T0(s, 14);
6556 return 0;
6557 }
6558 /* Fall through to 32-bit decode. */
6559 }
6560
6561 insn = lduw_code(s->pc);
6562 s->pc += 2;
6563 insn |= (uint32_t)insn_hw1 << 16;
6564
6565 if ((insn & 0xf800e800) != 0xf000e800) {
6566 ARCH(6T2);
6567 }
6568
6569 rn = (insn >> 16) & 0xf;
6570 rs = (insn >> 12) & 0xf;
6571 rd = (insn >> 8) & 0xf;
6572 rm = insn & 0xf;
6573 switch ((insn >> 25) & 0xf) {
6574 case 0: case 1: case 2: case 3:
6575 /* 16-bit instructions. Should never happen. */
6576 abort();
6577 case 4:
6578 if (insn & (1 << 22)) {
6579 /* Other load/store, table branch. */
6580 if (insn & 0x01200000) {
6581 /* Load/store doubleword. */
6582 if (rn == 15) {
b0109805
PB
6583 addr = new_tmp();
6584 tcg_gen_movi_i32(addr, s->pc & ~3);
9ee6e8bb 6585 } else {
b0109805 6586 addr = load_reg(s, rn);
9ee6e8bb
PB
6587 }
6588 offset = (insn & 0xff) * 4;
6589 if ((insn & (1 << 23)) == 0)
6590 offset = -offset;
6591 if (insn & (1 << 24)) {
b0109805 6592 tcg_gen_addi_i32(addr, addr, offset);
9ee6e8bb
PB
6593 offset = 0;
6594 }
6595 if (insn & (1 << 20)) {
6596 /* ldrd */
b0109805
PB
6597 tmp = gen_ld32(addr, IS_USER(s));
6598 store_reg(s, rs, tmp);
6599 tcg_gen_addi_i32(addr, addr, 4);
6600 tmp = gen_ld32(addr, IS_USER(s));
6601 store_reg(s, rd, tmp);
9ee6e8bb
PB
6602 } else {
6603 /* strd */
b0109805
PB
6604 tmp = load_reg(s, rs);
6605 gen_st32(tmp, addr, IS_USER(s));
6606 tcg_gen_addi_i32(addr, addr, 4);
6607 tmp = load_reg(s, rd);
6608 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb
PB
6609 }
6610 if (insn & (1 << 21)) {
6611 /* Base writeback. */
6612 if (rn == 15)
6613 goto illegal_op;
b0109805
PB
6614 tcg_gen_addi_i32(addr, addr, offset - 4);
6615 store_reg(s, rn, addr);
6616 } else {
6617 dead_tmp(addr);
9ee6e8bb
PB
6618 }
6619 } else if ((insn & (1 << 23)) == 0) {
6620 /* Load/store exclusive word. */
6621 gen_movl_T0_reg(s, rd);
2c0262af 6622 gen_movl_T1_reg(s, rn);
2c0262af 6623 if (insn & (1 << 20)) {
9ee6e8bb
PB
6624 gen_ldst(ldlex, s);
6625 } else {
6626 gen_ldst(stlex, s);
6627 }
6628 gen_movl_reg_T0(s, rd);
6629 } else if ((insn & (1 << 6)) == 0) {
6630 /* Table Branch. */
6631 if (rn == 15) {
b0109805
PB
6632 addr = new_tmp();
6633 tcg_gen_movi_i32(addr, s->pc);
9ee6e8bb 6634 } else {
b0109805 6635 addr = load_reg(s, rn);
9ee6e8bb 6636 }
b26eefb6 6637 tmp = load_reg(s, rm);
b0109805 6638 tcg_gen_add_i32(addr, addr, tmp);
9ee6e8bb
PB
6639 if (insn & (1 << 4)) {
6640 /* tbh */
b0109805 6641 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 6642 dead_tmp(tmp);
b0109805 6643 tmp = gen_ld16u(addr, IS_USER(s));
9ee6e8bb 6644 } else { /* tbb */
b26eefb6 6645 dead_tmp(tmp);
b0109805 6646 tmp = gen_ld8u(addr, IS_USER(s));
9ee6e8bb 6647 }
b0109805
PB
6648 dead_tmp(addr);
6649 tcg_gen_shli_i32(tmp, tmp, 1);
6650 tcg_gen_addi_i32(tmp, tmp, s->pc);
6651 store_reg(s, 15, tmp);
9ee6e8bb
PB
6652 } else {
6653 /* Load/store exclusive byte/halfword/doubleword. */
6654 op = (insn >> 4) & 0x3;
6655 gen_movl_T1_reg(s, rn);
6656 if (insn & (1 << 20)) {
6657 switch (op) {
6658 case 0:
6659 gen_ldst(ldbex, s);
6660 break;
2c0262af 6661 case 1:
9ee6e8bb 6662 gen_ldst(ldwex, s);
2c0262af 6663 break;
9ee6e8bb
PB
6664 case 3:
6665 gen_ldst(ldqex, s);
6666 gen_movl_reg_T1(s, rd);
2c0262af
FB
6667 break;
6668 default:
9ee6e8bb
PB
6669 goto illegal_op;
6670 }
6671 gen_movl_reg_T0(s, rs);
6672 } else {
6673 gen_movl_T0_reg(s, rs);
6674 switch (op) {
6675 case 0:
6676 gen_ldst(stbex, s);
6677 break;
6678 case 1:
6679 gen_ldst(stwex, s);
6680 break;
2c0262af 6681 case 3:
9ee6e8bb
PB
6682 gen_movl_T2_reg(s, rd);
6683 gen_ldst(stqex, s);
2c0262af 6684 break;
9ee6e8bb
PB
6685 default:
6686 goto illegal_op;
2c0262af 6687 }
9ee6e8bb
PB
6688 gen_movl_reg_T0(s, rm);
6689 }
6690 }
6691 } else {
6692 /* Load/store multiple, RFE, SRS. */
6693 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6694 /* Not available in user mode. */
b0109805 6695 if (IS_USER(s))
9ee6e8bb
PB
6696 goto illegal_op;
6697 if (insn & (1 << 20)) {
6698 /* rfe */
b0109805
PB
6699 addr = load_reg(s, rn);
6700 if ((insn & (1 << 24)) == 0)
6701 tcg_gen_addi_i32(addr, addr, -8);
6702 /* Load PC into tmp and CPSR into tmp2. */
6703 tmp = gen_ld32(addr, 0);
6704 tcg_gen_addi_i32(addr, addr, 4);
6705 tmp2 = gen_ld32(addr, 0);
9ee6e8bb
PB
6706 if (insn & (1 << 21)) {
6707 /* Base writeback. */
b0109805
PB
6708 if (insn & (1 << 24)) {
6709 tcg_gen_addi_i32(addr, addr, 4);
6710 } else {
6711 tcg_gen_addi_i32(addr, addr, -4);
6712 }
6713 store_reg(s, rn, addr);
6714 } else {
6715 dead_tmp(addr);
9ee6e8bb 6716 }
b0109805 6717 gen_rfe(s, tmp, tmp2);
9ee6e8bb
PB
6718 } else {
6719 /* srs */
6720 op = (insn & 0x1f);
6721 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 6722 addr = load_reg(s, 13);
9ee6e8bb 6723 } else {
b0109805
PB
6724 addr = new_tmp();
6725 gen_helper_get_r13_banked(addr, cpu_env, tcg_const_i32(op));
9ee6e8bb
PB
6726 }
6727 if ((insn & (1 << 24)) == 0) {
b0109805 6728 tcg_gen_addi_i32(addr, addr, -8);
9ee6e8bb 6729 }
b0109805
PB
6730 tmp = load_reg(s, 14);
6731 gen_st32(tmp, addr, 0);
6732 tcg_gen_addi_i32(addr, addr, 4);
6733 tmp = new_tmp();
6734 gen_helper_cpsr_read(tmp);
6735 gen_st32(tmp, addr, 0);
9ee6e8bb
PB
6736 if (insn & (1 << 21)) {
6737 if ((insn & (1 << 24)) == 0) {
b0109805 6738 tcg_gen_addi_i32(addr, addr, -4);
9ee6e8bb 6739 } else {
b0109805 6740 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6741 }
6742 if (op == (env->uncached_cpsr & CPSR_M)) {
b0109805 6743 store_reg(s, 13, addr);
9ee6e8bb 6744 } else {
b0109805
PB
6745 gen_helper_set_r13_banked(cpu_env,
6746 tcg_const_i32(op), addr);
9ee6e8bb 6747 }
b0109805
PB
6748 } else {
6749 dead_tmp(addr);
9ee6e8bb
PB
6750 }
6751 }
6752 } else {
6753 int i;
6754 /* Load/store multiple. */
b0109805 6755 addr = load_reg(s, rn);
9ee6e8bb
PB
6756 offset = 0;
6757 for (i = 0; i < 16; i++) {
6758 if (insn & (1 << i))
6759 offset += 4;
6760 }
6761 if (insn & (1 << 24)) {
b0109805 6762 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
6763 }
6764
6765 for (i = 0; i < 16; i++) {
6766 if ((insn & (1 << i)) == 0)
6767 continue;
6768 if (insn & (1 << 20)) {
6769 /* Load. */
b0109805 6770 tmp = gen_ld32(addr, IS_USER(s));
9ee6e8bb 6771 if (i == 15) {
b0109805 6772 gen_bx(s, tmp);
9ee6e8bb 6773 } else {
b0109805 6774 store_reg(s, i, tmp);
9ee6e8bb
PB
6775 }
6776 } else {
6777 /* Store. */
b0109805
PB
6778 tmp = load_reg(s, i);
6779 gen_st32(tmp, addr, IS_USER(s));
9ee6e8bb 6780 }
b0109805 6781 tcg_gen_addi_i32(addr, addr, 4);
9ee6e8bb
PB
6782 }
6783 if (insn & (1 << 21)) {
6784 /* Base register writeback. */
6785 if (insn & (1 << 24)) {
b0109805 6786 tcg_gen_addi_i32(addr, addr, -offset);
9ee6e8bb
PB
6787 }
6788 /* Fault if writeback register is in register list. */
6789 if (insn & (1 << rn))
6790 goto illegal_op;
b0109805
PB
6791 store_reg(s, rn, addr);
6792 } else {
6793 dead_tmp(addr);
9ee6e8bb
PB
6794 }
6795 }
6796 }
6797 break;
6798 case 5: /* Data processing register constant shift. */
6799 if (rn == 15)
6800 gen_op_movl_T0_im(0);
6801 else
6802 gen_movl_T0_reg(s, rn);
6803 gen_movl_T1_reg(s, rm);
6804 op = (insn >> 21) & 0xf;
6805 shiftop = (insn >> 4) & 3;
6806 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6807 conds = (insn & (1 << 20)) != 0;
6808 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 6809 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6810 if (gen_thumb2_data_op(s, op, conds, 0))
6811 goto illegal_op;
6812 if (rd != 15)
6813 gen_movl_reg_T0(s, rd);
6814 break;
6815 case 13: /* Misc data processing. */
6816 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
6817 if (op < 4 && (insn & 0xf000) != 0xf000)
6818 goto illegal_op;
6819 switch (op) {
6820 case 0: /* Register controlled shift. */
6821 gen_movl_T0_reg(s, rm);
6822 gen_movl_T1_reg(s, rn);
6823 if ((insn & 0x70) != 0)
6824 goto illegal_op;
6825 op = (insn >> 21) & 3;
6826 if (insn & (1 << 20)) {
6827 gen_shift_T1_T0_cc[op]();
6828 gen_op_logic_T1_cc();
6829 } else {
6830 gen_shift_T1_T0[op]();
6831 }
6832 gen_movl_reg_T1(s, rd);
6833 break;
6834 case 1: /* Sign/zero extend. */
6835 gen_movl_T1_reg(s, rm);
6836 shift = (insn >> 4) & 3;
6837 /* ??? In many cases it's not neccessary to do a
6838 rotate, a shift is sufficient. */
6839 if (shift != 0)
6840 gen_op_rorl_T1_im(shift * 8);
6841 op = (insn >> 20) & 7;
6842 switch (op) {
b26eefb6
PB
6843 case 0: gen_sxth(cpu_T[1]); break;
6844 case 1: gen_uxth(cpu_T[1]); break;
6845 case 2: gen_sxtb16(cpu_T[1]); break;
6846 case 3: gen_uxtb16(cpu_T[1]); break;
6847 case 4: gen_sxtb(cpu_T[1]); break;
6848 case 5: gen_uxtb(cpu_T[1]); break;
9ee6e8bb
PB
6849 default: goto illegal_op;
6850 }
6851 if (rn != 15) {
b26eefb6 6852 tmp = load_reg(s, rn);
9ee6e8bb 6853 if ((op >> 1) == 1) {
b26eefb6 6854 gen_add16(cpu_T[1], tmp);
9ee6e8bb 6855 } else {
b26eefb6
PB
6856 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6857 dead_tmp(tmp);
9ee6e8bb
PB
6858 }
6859 }
6860 gen_movl_reg_T1(s, rd);
6861 break;
6862 case 2: /* SIMD add/subtract. */
6863 op = (insn >> 20) & 7;
6864 shift = (insn >> 4) & 7;
6865 if ((op & 3) == 3 || (shift & 3) == 3)
6866 goto illegal_op;
6ddbc6e4
PB
6867 tmp = load_reg(s, rn);
6868 tmp2 = load_reg(s, rm);
6869 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
6870 dead_tmp(tmp2);
6871 store_reg(s, rd, tmp);
9ee6e8bb
PB
6872 break;
6873 case 3: /* Other data processing. */
6874 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
6875 if (op < 4) {
6876 /* Saturating add/subtract. */
d9ba4830
PB
6877 tmp = load_reg(s, rn);
6878 tmp2 = load_reg(s, rm);
9ee6e8bb 6879 if (op & 2)
d9ba4830 6880 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 6881 if (op & 1)
d9ba4830 6882 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 6883 else
d9ba4830
PB
6884 gen_helper_add_saturate(tmp, tmp, tmp2);
6885 dead_tmp(tmp2);
9ee6e8bb 6886 } else {
d9ba4830 6887 tmp = load_reg(s, rn);
9ee6e8bb
PB
6888 switch (op) {
6889 case 0x0a: /* rbit */
d9ba4830 6890 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6891 break;
6892 case 0x08: /* rev */
d9ba4830 6893 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
6894 break;
6895 case 0x09: /* rev16 */
d9ba4830 6896 gen_rev16(tmp);
9ee6e8bb
PB
6897 break;
6898 case 0x0b: /* revsh */
d9ba4830 6899 gen_revsh(tmp);
9ee6e8bb
PB
6900 break;
6901 case 0x10: /* sel */
d9ba4830 6902 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
6903 tmp3 = new_tmp();
6904 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 6905 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 6906 dead_tmp(tmp3);
d9ba4830 6907 dead_tmp(tmp2);
9ee6e8bb
PB
6908 break;
6909 case 0x18: /* clz */
d9ba4830 6910 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
6911 break;
6912 default:
6913 goto illegal_op;
6914 }
6915 }
d9ba4830 6916 store_reg(s, rd, tmp);
9ee6e8bb
PB
6917 break;
6918 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
6919 op = (insn >> 4) & 0xf;
d9ba4830
PB
6920 tmp = load_reg(s, rn);
6921 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6922 switch ((insn >> 20) & 7) {
6923 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
6924 tcg_gen_mul_i32(tmp, tmp, tmp2);
6925 dead_tmp(tmp2);
9ee6e8bb 6926 if (rs != 15) {
d9ba4830 6927 tmp2 = load_reg(s, rs);
9ee6e8bb 6928 if (op)
d9ba4830 6929 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 6930 else
d9ba4830
PB
6931 tcg_gen_add_i32(tmp, tmp, tmp2);
6932 dead_tmp(tmp2);
9ee6e8bb 6933 }
9ee6e8bb
PB
6934 break;
6935 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
6936 gen_mulxy(tmp, tmp2, op & 2, op & 1);
6937 dead_tmp(tmp2);
9ee6e8bb 6938 if (rs != 15) {
d9ba4830
PB
6939 tmp2 = load_reg(s, rs);
6940 gen_helper_add_setq(tmp, tmp, tmp2);
6941 dead_tmp(tmp2);
9ee6e8bb 6942 }
9ee6e8bb
PB
6943 break;
6944 case 2: /* Dual multiply add. */
6945 case 4: /* Dual multiply subtract. */
6946 if (op)
d9ba4830
PB
6947 gen_swap_half(tmp2);
6948 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
6949 /* This addition cannot overflow. */
6950 if (insn & (1 << 22)) {
d9ba4830 6951 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6952 } else {
d9ba4830 6953 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6954 }
d9ba4830 6955 dead_tmp(tmp2);
9ee6e8bb
PB
6956 if (rs != 15)
6957 {
d9ba4830
PB
6958 tmp2 = load_reg(s, rs);
6959 gen_helper_add_setq(tmp, tmp, tmp2);
6960 dead_tmp(tmp2);
9ee6e8bb 6961 }
9ee6e8bb
PB
6962 break;
6963 case 3: /* 32 * 16 -> 32msb */
6964 if (op)
d9ba4830 6965 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6966 else
d9ba4830
PB
6967 gen_sxth(tmp2);
6968 gen_imulw(tmp, tmp2);
6969 dead_tmp(tmp2);
9ee6e8bb
PB
6970 if (rs != 15)
6971 {
d9ba4830
PB
6972 tmp2 = load_reg(s, rs);
6973 gen_helper_add_setq(tmp, tmp, tmp2);
6974 dead_tmp(tmp2);
9ee6e8bb 6975 }
9ee6e8bb
PB
6976 break;
6977 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
6978 gen_imull(tmp, tmp2);
6979 if (insn & (1 << 5)) {
6980 gen_roundqd(tmp, tmp2);
6981 dead_tmp(tmp2);
6982 } else {
6983 dead_tmp(tmp);
6984 tmp = tmp2;
6985 }
9ee6e8bb 6986 if (rs != 15) {
d9ba4830 6987 tmp2 = load_reg(s, rs);
9ee6e8bb 6988 if (insn & (1 << 21)) {
d9ba4830 6989 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 6990 } else {
d9ba4830 6991 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 6992 }
d9ba4830 6993 dead_tmp(tmp2);
2c0262af 6994 }
9ee6e8bb
PB
6995 break;
6996 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
6997 gen_helper_usad8(tmp, tmp, tmp2);
6998 dead_tmp(tmp2);
9ee6e8bb 6999 if (rs != 15) {
d9ba4830
PB
7000 tmp2 = load_reg(s, rs);
7001 tcg_gen_add_i32(tmp, tmp, tmp2);
7002 dead_tmp(tmp2);
5fd46862 7003 }
9ee6e8bb 7004 break;
2c0262af 7005 }
d9ba4830 7006 store_reg(s, rd, tmp);
2c0262af 7007 break;
9ee6e8bb
PB
7008 case 6: case 7: /* 64-bit multiply, Divide. */
7009 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
7010 gen_movl_T0_reg(s, rn);
7011 gen_movl_T1_reg(s, rm);
7012 if ((op & 0x50) == 0x10) {
7013 /* sdiv, udiv */
7014 if (!arm_feature(env, ARM_FEATURE_DIV))
7015 goto illegal_op;
7016 if (op & 0x20)
3670669c 7017 gen_helper_udiv(cpu_T[0], cpu_T[0], cpu_T[1]);
2c0262af 7018 else
3670669c 7019 gen_helper_sdiv(cpu_T[0], cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
7020 gen_movl_reg_T0(s, rd);
7021 } else if ((op & 0xe) == 0xc) {
7022 /* Dual multiply accumulate long. */
7023 if (op & 1)
8f01245e 7024 gen_swap_half(cpu_T[1]);
3670669c 7025 gen_smul_dual(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
7026 if (op & 0x10) {
7027 gen_op_subl_T0_T1();
b5ff1b31 7028 } else {
9ee6e8bb 7029 gen_op_addl_T0_T1();
b5ff1b31 7030 }
9ee6e8bb
PB
7031 gen_op_signbit_T1_T0();
7032 gen_op_addq_T0_T1(rs, rd);
7033 gen_movl_reg_T0(s, rs);
7034 gen_movl_reg_T1(s, rd);
2c0262af 7035 } else {
9ee6e8bb
PB
7036 if (op & 0x20) {
7037 /* Unsigned 64-bit multiply */
7038 gen_op_mull_T0_T1();
b5ff1b31 7039 } else {
9ee6e8bb
PB
7040 if (op & 8) {
7041 /* smlalxy */
d9ba4830 7042 gen_mulxy(cpu_T[0], cpu_T[1], op & 2, op & 1);
9ee6e8bb
PB
7043 gen_op_signbit_T1_T0();
7044 } else {
7045 /* Signed 64-bit multiply */
7046 gen_op_imull_T0_T1();
7047 }
b5ff1b31 7048 }
9ee6e8bb
PB
7049 if (op & 4) {
7050 /* umaal */
7051 gen_op_addq_lo_T0_T1(rs);
7052 gen_op_addq_lo_T0_T1(rd);
7053 } else if (op & 0x40) {
7054 /* 64-bit accumulate. */
7055 gen_op_addq_T0_T1(rs, rd);
7056 }
7057 gen_movl_reg_T0(s, rs);
7058 gen_movl_reg_T1(s, rd);
5fd46862 7059 }
2c0262af 7060 break;
9ee6e8bb
PB
7061 }
7062 break;
7063 case 6: case 7: case 14: case 15:
7064 /* Coprocessor. */
7065 if (((insn >> 24) & 3) == 3) {
7066 /* Translate into the equivalent ARM encoding. */
7067 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
7068 if (disas_neon_data_insn(env, s, insn))
7069 goto illegal_op;
7070 } else {
7071 if (insn & (1 << 28))
7072 goto illegal_op;
7073 if (disas_coproc_insn (env, s, insn))
7074 goto illegal_op;
7075 }
7076 break;
7077 case 8: case 9: case 10: case 11:
7078 if (insn & (1 << 15)) {
7079 /* Branches, misc control. */
7080 if (insn & 0x5000) {
7081 /* Unconditional branch. */
7082 /* signextend(hw1[10:0]) -> offset[:12]. */
7083 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
7084 /* hw1[10:0] -> offset[11:1]. */
7085 offset |= (insn & 0x7ff) << 1;
7086 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
7087 offset[24:22] already have the same value because of the
7088 sign extension above. */
7089 offset ^= ((~insn) & (1 << 13)) << 10;
7090 offset ^= ((~insn) & (1 << 11)) << 11;
7091
9ee6e8bb
PB
7092 if (insn & (1 << 14)) {
7093 /* Branch and link. */
b0109805 7094 gen_op_movl_T1_im(s->pc | 1);
9ee6e8bb 7095 gen_movl_reg_T1(s, 14);
b5ff1b31 7096 }
3b46e624 7097
b0109805 7098 offset += s->pc;
9ee6e8bb
PB
7099 if (insn & (1 << 12)) {
7100 /* b/bl */
b0109805 7101 gen_jmp(s, offset);
9ee6e8bb
PB
7102 } else {
7103 /* blx */
b0109805
PB
7104 offset &= ~(uint32_t)2;
7105 gen_bx_im(s, offset);
2c0262af 7106 }
9ee6e8bb
PB
7107 } else if (((insn >> 23) & 7) == 7) {
7108 /* Misc control */
7109 if (insn & (1 << 13))
7110 goto illegal_op;
7111
7112 if (insn & (1 << 26)) {
7113 /* Secure monitor call (v6Z) */
7114 goto illegal_op; /* not implemented. */
2c0262af 7115 } else {
9ee6e8bb
PB
7116 op = (insn >> 20) & 7;
7117 switch (op) {
7118 case 0: /* msr cpsr. */
7119 if (IS_M(env)) {
7120 gen_op_v7m_msr_T0(insn & 0xff);
7121 gen_movl_reg_T0(s, rn);
7122 gen_lookup_tb(s);
7123 break;
7124 }
7125 /* fall through */
7126 case 1: /* msr spsr. */
7127 if (IS_M(env))
7128 goto illegal_op;
7129 gen_movl_T0_reg(s, rn);
7130 if (gen_set_psr_T0(s,
7131 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
7132 op == 1))
7133 goto illegal_op;
7134 break;
7135 case 2: /* cps, nop-hint. */
7136 if (((insn >> 8) & 7) == 0) {
7137 gen_nop_hint(s, insn & 0xff);
7138 }
7139 /* Implemented as NOP in user mode. */
7140 if (IS_USER(s))
7141 break;
7142 offset = 0;
7143 imm = 0;
7144 if (insn & (1 << 10)) {
7145 if (insn & (1 << 7))
7146 offset |= CPSR_A;
7147 if (insn & (1 << 6))
7148 offset |= CPSR_I;
7149 if (insn & (1 << 5))
7150 offset |= CPSR_F;
7151 if (insn & (1 << 9))
7152 imm = CPSR_A | CPSR_I | CPSR_F;
7153 }
7154 if (insn & (1 << 8)) {
7155 offset |= 0x1f;
7156 imm |= (insn & 0x1f);
7157 }
7158 if (offset) {
7159 gen_op_movl_T0_im(imm);
7160 gen_set_psr_T0(s, offset, 0);
7161 }
7162 break;
7163 case 3: /* Special control operations. */
7164 op = (insn >> 4) & 0xf;
7165 switch (op) {
7166 case 2: /* clrex */
7167 gen_op_clrex();
7168 break;
7169 case 4: /* dsb */
7170 case 5: /* dmb */
7171 case 6: /* isb */
7172 /* These execute as NOPs. */
7173 ARCH(7);
7174 break;
7175 default:
7176 goto illegal_op;
7177 }
7178 break;
7179 case 4: /* bxj */
7180 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
7181 tmp = load_reg(s, rn);
7182 gen_bx(s, tmp);
9ee6e8bb
PB
7183 break;
7184 case 5: /* Exception return. */
7185 /* Unpredictable in user mode. */
7186 goto illegal_op;
7187 case 6: /* mrs cpsr. */
7188 if (IS_M(env)) {
7189 gen_op_v7m_mrs_T0(insn & 0xff);
7190 } else {
d9ba4830 7191 gen_helper_cpsr_read(cpu_T[0]);
9ee6e8bb
PB
7192 }
7193 gen_movl_reg_T0(s, rd);
7194 break;
7195 case 7: /* mrs spsr. */
7196 /* Not accessible in user mode. */
7197 if (IS_USER(s) || IS_M(env))
7198 goto illegal_op;
d9ba4830
PB
7199 tmp = load_cpu_field(spsr);
7200 store_reg(s, rd, tmp);
9ee6e8bb 7201 break;
2c0262af
FB
7202 }
7203 }
9ee6e8bb
PB
7204 } else {
7205 /* Conditional branch. */
7206 op = (insn >> 22) & 0xf;
7207 /* Generate a conditional jump to next instruction. */
7208 s->condlabel = gen_new_label();
d9ba4830 7209 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7210 s->condjmp = 1;
7211
7212 /* offset[11:1] = insn[10:0] */
7213 offset = (insn & 0x7ff) << 1;
7214 /* offset[17:12] = insn[21:16]. */
7215 offset |= (insn & 0x003f0000) >> 4;
7216 /* offset[31:20] = insn[26]. */
7217 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7218 /* offset[18] = insn[13]. */
7219 offset |= (insn & (1 << 13)) << 5;
7220 /* offset[19] = insn[11]. */
7221 offset |= (insn & (1 << 11)) << 8;
7222
7223 /* jump to the offset */
b0109805 7224 gen_jmp(s, s->pc + offset);
9ee6e8bb
PB
7225 }
7226 } else {
7227 /* Data processing immediate. */
7228 if (insn & (1 << 25)) {
7229 if (insn & (1 << 24)) {
7230 if (insn & (1 << 20))
7231 goto illegal_op;
7232 /* Bitfield/Saturate. */
7233 op = (insn >> 21) & 7;
7234 imm = insn & 0x1f;
7235 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7236 if (rn == 15) {
7237 tmp = new_tmp();
7238 tcg_gen_movi_i32(tmp, 0);
7239 } else {
7240 tmp = load_reg(s, rn);
7241 }
9ee6e8bb
PB
7242 switch (op) {
7243 case 2: /* Signed bitfield extract. */
7244 imm++;
7245 if (shift + imm > 32)
7246 goto illegal_op;
7247 if (imm < 32)
6ddbc6e4 7248 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7249 break;
7250 case 6: /* Unsigned bitfield extract. */
7251 imm++;
7252 if (shift + imm > 32)
7253 goto illegal_op;
7254 if (imm < 32)
6ddbc6e4 7255 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7256 break;
7257 case 3: /* Bitfield insert/clear. */
7258 if (imm < shift)
7259 goto illegal_op;
7260 imm = imm + 1 - shift;
7261 if (imm != 32) {
6ddbc6e4
PB
7262 tmp2 = load_reg(s, rd);
7263 gen_bfi(tmp, tmp2, tmp,
3670669c 7264 shift, ((1u << imm) - 1) << shift);
6ddbc6e4 7265 dead_tmp(tmp2);
9ee6e8bb
PB
7266 }
7267 break;
7268 case 7:
7269 goto illegal_op;
7270 default: /* Saturate. */
9ee6e8bb
PB
7271 if (shift) {
7272 if (op & 1)
6ddbc6e4 7273 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7274 else
6ddbc6e4 7275 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7276 }
6ddbc6e4 7277 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7278 if (op & 4) {
7279 /* Unsigned. */
9ee6e8bb 7280 if ((op & 1) && shift == 0)
6ddbc6e4 7281 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7282 else
6ddbc6e4 7283 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7284 } else {
9ee6e8bb 7285 /* Signed. */
9ee6e8bb 7286 if ((op & 1) && shift == 0)
6ddbc6e4 7287 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7288 else
6ddbc6e4 7289 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7290 }
9ee6e8bb 7291 break;
2c0262af 7292 }
6ddbc6e4 7293 store_reg(s, rd, tmp);
9ee6e8bb
PB
7294 } else {
7295 imm = ((insn & 0x04000000) >> 15)
7296 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7297 if (insn & (1 << 22)) {
7298 /* 16-bit immediate. */
7299 imm |= (insn >> 4) & 0xf000;
7300 if (insn & (1 << 23)) {
7301 /* movt */
7302 gen_movl_T0_reg(s, rd);
8f01245e
PB
7303 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
7304 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], imm << 16);
2c0262af 7305 } else {
9ee6e8bb
PB
7306 /* movw */
7307 gen_op_movl_T0_im(imm);
2c0262af
FB
7308 }
7309 } else {
9ee6e8bb
PB
7310 /* Add/sub 12-bit immediate. */
7311 if (rn == 15) {
b0109805 7312 offset = s->pc & ~(uint32_t)3;
9ee6e8bb 7313 if (insn & (1 << 23))
b0109805 7314 offset -= imm;
9ee6e8bb 7315 else
b0109805
PB
7316 offset += imm;
7317 gen_op_movl_T0_im(offset);
2c0262af 7318 } else {
9ee6e8bb
PB
7319 gen_movl_T0_reg(s, rn);
7320 gen_op_movl_T1_im(imm);
7321 if (insn & (1 << 23))
7322 gen_op_subl_T0_T1();
7323 else
7324 gen_op_addl_T0_T1();
2c0262af 7325 }
9ee6e8bb
PB
7326 }
7327 gen_movl_reg_T0(s, rd);
191abaa2 7328 }
9ee6e8bb
PB
7329 } else {
7330 int shifter_out = 0;
7331 /* modified 12-bit immediate. */
7332 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7333 imm = (insn & 0xff);
7334 switch (shift) {
7335 case 0: /* XY */
7336 /* Nothing to do. */
7337 break;
7338 case 1: /* 00XY00XY */
7339 imm |= imm << 16;
7340 break;
7341 case 2: /* XY00XY00 */
7342 imm |= imm << 16;
7343 imm <<= 8;
7344 break;
7345 case 3: /* XYXYXYXY */
7346 imm |= imm << 16;
7347 imm |= imm << 8;
7348 break;
7349 default: /* Rotated constant. */
7350 shift = (shift << 1) | (imm >> 7);
7351 imm |= 0x80;
7352 imm = imm << (32 - shift);
7353 shifter_out = 1;
7354 break;
b5ff1b31 7355 }
9ee6e8bb
PB
7356 gen_op_movl_T1_im(imm);
7357 rn = (insn >> 16) & 0xf;
7358 if (rn == 15)
7359 gen_op_movl_T0_im(0);
7360 else
7361 gen_movl_T0_reg(s, rn);
7362 op = (insn >> 21) & 0xf;
7363 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7364 shifter_out))
7365 goto illegal_op;
7366 rd = (insn >> 8) & 0xf;
7367 if (rd != 15) {
7368 gen_movl_reg_T0(s, rd);
2c0262af 7369 }
2c0262af 7370 }
9ee6e8bb
PB
7371 }
7372 break;
7373 case 12: /* Load/store single data item. */
7374 {
7375 int postinc = 0;
7376 int writeback = 0;
b0109805 7377 int user;
9ee6e8bb
PB
7378 if ((insn & 0x01100000) == 0x01000000) {
7379 if (disas_neon_ls_insn(env, s, insn))
c1713132 7380 goto illegal_op;
9ee6e8bb
PB
7381 break;
7382 }
b0109805 7383 user = IS_USER(s);
9ee6e8bb 7384 if (rn == 15) {
b0109805 7385 addr = new_tmp();
9ee6e8bb
PB
7386 /* PC relative. */
7387 /* s->pc has already been incremented by 4. */
7388 imm = s->pc & 0xfffffffc;
7389 if (insn & (1 << 23))
7390 imm += insn & 0xfff;
7391 else
7392 imm -= insn & 0xfff;
b0109805 7393 tcg_gen_movi_i32(addr, imm);
9ee6e8bb 7394 } else {
b0109805 7395 addr = load_reg(s, rn);
9ee6e8bb
PB
7396 if (insn & (1 << 23)) {
7397 /* Positive offset. */
7398 imm = insn & 0xfff;
b0109805 7399 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7400 } else {
7401 op = (insn >> 8) & 7;
7402 imm = insn & 0xff;
7403 switch (op) {
7404 case 0: case 8: /* Shifted Register. */
7405 shift = (insn >> 4) & 0xf;
7406 if (shift > 3)
18c9b560 7407 goto illegal_op;
b26eefb6 7408 tmp = load_reg(s, rm);
9ee6e8bb 7409 if (shift)
b26eefb6 7410 tcg_gen_shli_i32(tmp, tmp, shift);
b0109805 7411 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7412 dead_tmp(tmp);
9ee6e8bb
PB
7413 break;
7414 case 4: /* Negative offset. */
b0109805 7415 tcg_gen_addi_i32(addr, addr, -imm);
9ee6e8bb
PB
7416 break;
7417 case 6: /* User privilege. */
b0109805
PB
7418 tcg_gen_addi_i32(addr, addr, imm);
7419 user = 1;
9ee6e8bb
PB
7420 break;
7421 case 1: /* Post-decrement. */
7422 imm = -imm;
7423 /* Fall through. */
7424 case 3: /* Post-increment. */
9ee6e8bb
PB
7425 postinc = 1;
7426 writeback = 1;
7427 break;
7428 case 5: /* Pre-decrement. */
7429 imm = -imm;
7430 /* Fall through. */
7431 case 7: /* Pre-increment. */
b0109805 7432 tcg_gen_addi_i32(addr, addr, imm);
9ee6e8bb
PB
7433 writeback = 1;
7434 break;
7435 default:
b7bcbe95 7436 goto illegal_op;
9ee6e8bb
PB
7437 }
7438 }
7439 }
7440 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7441 if (insn & (1 << 20)) {
7442 /* Load. */
7443 if (rs == 15 && op != 2) {
7444 if (op & 2)
b5ff1b31 7445 goto illegal_op;
9ee6e8bb
PB
7446 /* Memory hint. Implemented as NOP. */
7447 } else {
7448 switch (op) {
b0109805
PB
7449 case 0: tmp = gen_ld8u(addr, user); break;
7450 case 4: tmp = gen_ld8s(addr, user); break;
7451 case 1: tmp = gen_ld16u(addr, user); break;
7452 case 5: tmp = gen_ld16s(addr, user); break;
7453 case 2: tmp = gen_ld32(addr, user); break;
9ee6e8bb
PB
7454 default: goto illegal_op;
7455 }
7456 if (rs == 15) {
b0109805 7457 gen_bx(s, tmp);
9ee6e8bb 7458 } else {
b0109805 7459 store_reg(s, rs, tmp);
9ee6e8bb
PB
7460 }
7461 }
7462 } else {
7463 /* Store. */
7464 if (rs == 15)
b7bcbe95 7465 goto illegal_op;
b0109805 7466 tmp = load_reg(s, rs);
9ee6e8bb 7467 switch (op) {
b0109805
PB
7468 case 0: gen_st8(tmp, addr, user); break;
7469 case 1: gen_st16(tmp, addr, user); break;
7470 case 2: gen_st32(tmp, addr, user); break;
9ee6e8bb 7471 default: goto illegal_op;
b7bcbe95 7472 }
2c0262af 7473 }
9ee6e8bb 7474 if (postinc)
b0109805
PB
7475 tcg_gen_addi_i32(addr, addr, imm);
7476 if (writeback) {
7477 store_reg(s, rn, addr);
7478 } else {
7479 dead_tmp(addr);
7480 }
9ee6e8bb
PB
7481 }
7482 break;
7483 default:
7484 goto illegal_op;
2c0262af 7485 }
9ee6e8bb
PB
7486 return 0;
7487illegal_op:
7488 return 1;
2c0262af
FB
7489}
7490
9ee6e8bb 7491static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
7492{
7493 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7494 int32_t offset;
7495 int i;
b26eefb6 7496 TCGv tmp;
d9ba4830 7497 TCGv tmp2;
b0109805 7498 TCGv addr;
99c475ab 7499
9ee6e8bb
PB
7500 if (s->condexec_mask) {
7501 cond = s->condexec_cond;
7502 s->condlabel = gen_new_label();
d9ba4830 7503 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7504 s->condjmp = 1;
7505 }
7506
b5ff1b31 7507 insn = lduw_code(s->pc);
99c475ab 7508 s->pc += 2;
b5ff1b31 7509
99c475ab
FB
7510 switch (insn >> 12) {
7511 case 0: case 1:
7512 rd = insn & 7;
7513 op = (insn >> 11) & 3;
7514 if (op == 3) {
7515 /* add/subtract */
7516 rn = (insn >> 3) & 7;
7517 gen_movl_T0_reg(s, rn);
7518 if (insn & (1 << 10)) {
7519 /* immediate */
7520 gen_op_movl_T1_im((insn >> 6) & 7);
7521 } else {
7522 /* reg */
7523 rm = (insn >> 6) & 7;
7524 gen_movl_T1_reg(s, rm);
7525 }
9ee6e8bb
PB
7526 if (insn & (1 << 9)) {
7527 if (s->condexec_mask)
7528 gen_op_subl_T0_T1();
7529 else
7530 gen_op_subl_T0_T1_cc();
7531 } else {
7532 if (s->condexec_mask)
7533 gen_op_addl_T0_T1();
7534 else
7535 gen_op_addl_T0_T1_cc();
7536 }
99c475ab
FB
7537 gen_movl_reg_T0(s, rd);
7538 } else {
7539 /* shift immediate */
7540 rm = (insn >> 3) & 7;
7541 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
7542 tmp = load_reg(s, rm);
7543 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7544 if (!s->condexec_mask)
7545 gen_logic_CC(tmp);
7546 store_reg(s, rd, tmp);
99c475ab
FB
7547 }
7548 break;
7549 case 2: case 3:
7550 /* arithmetic large immediate */
7551 op = (insn >> 11) & 3;
7552 rd = (insn >> 8) & 0x7;
7553 if (op == 0) {
7554 gen_op_movl_T0_im(insn & 0xff);
7555 } else {
7556 gen_movl_T0_reg(s, rd);
7557 gen_op_movl_T1_im(insn & 0xff);
7558 }
7559 switch (op) {
7560 case 0: /* mov */
9ee6e8bb
PB
7561 if (!s->condexec_mask)
7562 gen_op_logic_T0_cc();
99c475ab
FB
7563 break;
7564 case 1: /* cmp */
7565 gen_op_subl_T0_T1_cc();
7566 break;
7567 case 2: /* add */
9ee6e8bb
PB
7568 if (s->condexec_mask)
7569 gen_op_addl_T0_T1();
7570 else
7571 gen_op_addl_T0_T1_cc();
99c475ab
FB
7572 break;
7573 case 3: /* sub */
9ee6e8bb
PB
7574 if (s->condexec_mask)
7575 gen_op_subl_T0_T1();
7576 else
7577 gen_op_subl_T0_T1_cc();
99c475ab
FB
7578 break;
7579 }
7580 if (op != 1)
7581 gen_movl_reg_T0(s, rd);
7582 break;
7583 case 4:
7584 if (insn & (1 << 11)) {
7585 rd = (insn >> 8) & 7;
5899f386
FB
7586 /* load pc-relative. Bit 1 of PC is ignored. */
7587 val = s->pc + 2 + ((insn & 0xff) * 4);
7588 val &= ~(uint32_t)2;
b0109805
PB
7589 addr = new_tmp();
7590 tcg_gen_movi_i32(addr, val);
7591 tmp = gen_ld32(addr, IS_USER(s));
7592 dead_tmp(addr);
7593 store_reg(s, rd, tmp);
99c475ab
FB
7594 break;
7595 }
7596 if (insn & (1 << 10)) {
7597 /* data processing extended or blx */
7598 rd = (insn & 7) | ((insn >> 4) & 8);
7599 rm = (insn >> 3) & 0xf;
7600 op = (insn >> 8) & 3;
7601 switch (op) {
7602 case 0: /* add */
7603 gen_movl_T0_reg(s, rd);
7604 gen_movl_T1_reg(s, rm);
7605 gen_op_addl_T0_T1();
7606 gen_movl_reg_T0(s, rd);
7607 break;
7608 case 1: /* cmp */
7609 gen_movl_T0_reg(s, rd);
7610 gen_movl_T1_reg(s, rm);
7611 gen_op_subl_T0_T1_cc();
7612 break;
7613 case 2: /* mov/cpy */
7614 gen_movl_T0_reg(s, rm);
7615 gen_movl_reg_T0(s, rd);
7616 break;
7617 case 3:/* branch [and link] exchange thumb register */
b0109805 7618 tmp = load_reg(s, rm);
99c475ab
FB
7619 if (insn & (1 << 7)) {
7620 val = (uint32_t)s->pc | 1;
b0109805
PB
7621 tmp2 = new_tmp();
7622 tcg_gen_movi_i32(tmp2, val);
7623 store_reg(s, 14, tmp2);
99c475ab 7624 }
d9ba4830 7625 gen_bx(s, tmp);
99c475ab
FB
7626 break;
7627 }
7628 break;
7629 }
7630
7631 /* data processing register */
7632 rd = insn & 7;
7633 rm = (insn >> 3) & 7;
7634 op = (insn >> 6) & 0xf;
7635 if (op == 2 || op == 3 || op == 4 || op == 7) {
7636 /* the shift/rotate ops want the operands backwards */
7637 val = rm;
7638 rm = rd;
7639 rd = val;
7640 val = 1;
7641 } else {
7642 val = 0;
7643 }
7644
7645 if (op == 9) /* neg */
7646 gen_op_movl_T0_im(0);
7647 else if (op != 0xf) /* mvn doesn't read its first operand */
7648 gen_movl_T0_reg(s, rd);
7649
7650 gen_movl_T1_reg(s, rm);
5899f386 7651 switch (op) {
99c475ab
FB
7652 case 0x0: /* and */
7653 gen_op_andl_T0_T1();
9ee6e8bb
PB
7654 if (!s->condexec_mask)
7655 gen_op_logic_T0_cc();
99c475ab
FB
7656 break;
7657 case 0x1: /* eor */
7658 gen_op_xorl_T0_T1();
9ee6e8bb
PB
7659 if (!s->condexec_mask)
7660 gen_op_logic_T0_cc();
99c475ab
FB
7661 break;
7662 case 0x2: /* lsl */
9ee6e8bb
PB
7663 if (s->condexec_mask) {
7664 gen_op_shll_T1_T0();
7665 } else {
7666 gen_op_shll_T1_T0_cc();
7667 gen_op_logic_T1_cc();
7668 }
99c475ab
FB
7669 break;
7670 case 0x3: /* lsr */
9ee6e8bb
PB
7671 if (s->condexec_mask) {
7672 gen_op_shrl_T1_T0();
7673 } else {
7674 gen_op_shrl_T1_T0_cc();
7675 gen_op_logic_T1_cc();
7676 }
99c475ab
FB
7677 break;
7678 case 0x4: /* asr */
9ee6e8bb
PB
7679 if (s->condexec_mask) {
7680 gen_op_sarl_T1_T0();
7681 } else {
7682 gen_op_sarl_T1_T0_cc();
7683 gen_op_logic_T1_cc();
7684 }
99c475ab
FB
7685 break;
7686 case 0x5: /* adc */
9ee6e8bb 7687 if (s->condexec_mask)
b26eefb6 7688 gen_adc_T0_T1();
9ee6e8bb
PB
7689 else
7690 gen_op_adcl_T0_T1_cc();
99c475ab
FB
7691 break;
7692 case 0x6: /* sbc */
9ee6e8bb 7693 if (s->condexec_mask)
3670669c 7694 gen_sbc_T0_T1();
9ee6e8bb
PB
7695 else
7696 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
7697 break;
7698 case 0x7: /* ror */
9ee6e8bb
PB
7699 if (s->condexec_mask) {
7700 gen_op_rorl_T1_T0();
7701 } else {
7702 gen_op_rorl_T1_T0_cc();
7703 gen_op_logic_T1_cc();
7704 }
99c475ab
FB
7705 break;
7706 case 0x8: /* tst */
7707 gen_op_andl_T0_T1();
7708 gen_op_logic_T0_cc();
7709 rd = 16;
5899f386 7710 break;
99c475ab 7711 case 0x9: /* neg */
9ee6e8bb
PB
7712 if (s->condexec_mask)
7713 gen_op_subl_T0_T1();
7714 else
7715 gen_op_subl_T0_T1_cc();
99c475ab
FB
7716 break;
7717 case 0xa: /* cmp */
7718 gen_op_subl_T0_T1_cc();
7719 rd = 16;
7720 break;
7721 case 0xb: /* cmn */
7722 gen_op_addl_T0_T1_cc();
7723 rd = 16;
7724 break;
7725 case 0xc: /* orr */
7726 gen_op_orl_T0_T1();
9ee6e8bb
PB
7727 if (!s->condexec_mask)
7728 gen_op_logic_T0_cc();
99c475ab
FB
7729 break;
7730 case 0xd: /* mul */
7731 gen_op_mull_T0_T1();
9ee6e8bb
PB
7732 if (!s->condexec_mask)
7733 gen_op_logic_T0_cc();
99c475ab
FB
7734 break;
7735 case 0xe: /* bic */
7736 gen_op_bicl_T0_T1();
9ee6e8bb
PB
7737 if (!s->condexec_mask)
7738 gen_op_logic_T0_cc();
99c475ab
FB
7739 break;
7740 case 0xf: /* mvn */
7741 gen_op_notl_T1();
9ee6e8bb
PB
7742 if (!s->condexec_mask)
7743 gen_op_logic_T1_cc();
99c475ab 7744 val = 1;
5899f386 7745 rm = rd;
99c475ab
FB
7746 break;
7747 }
7748 if (rd != 16) {
7749 if (val)
5899f386 7750 gen_movl_reg_T1(s, rm);
99c475ab
FB
7751 else
7752 gen_movl_reg_T0(s, rd);
7753 }
7754 break;
7755
7756 case 5:
7757 /* load/store register offset. */
7758 rd = insn & 7;
7759 rn = (insn >> 3) & 7;
7760 rm = (insn >> 6) & 7;
7761 op = (insn >> 9) & 7;
b0109805 7762 addr = load_reg(s, rn);
b26eefb6 7763 tmp = load_reg(s, rm);
b0109805 7764 tcg_gen_add_i32(addr, addr, tmp);
b26eefb6 7765 dead_tmp(tmp);
99c475ab
FB
7766
7767 if (op < 3) /* store */
b0109805 7768 tmp = load_reg(s, rd);
99c475ab
FB
7769
7770 switch (op) {
7771 case 0: /* str */
b0109805 7772 gen_st32(tmp, addr, IS_USER(s));
99c475ab
FB
7773 break;
7774 case 1: /* strh */
b0109805 7775 gen_st16(tmp, addr, IS_USER(s));
99c475ab
FB
7776 break;
7777 case 2: /* strb */
b0109805 7778 gen_st8(tmp, addr, IS_USER(s));
99c475ab
FB
7779 break;
7780 case 3: /* ldrsb */
b0109805 7781 tmp = gen_ld8s(addr, IS_USER(s));
99c475ab
FB
7782 break;
7783 case 4: /* ldr */
b0109805 7784 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
7785 break;
7786 case 5: /* ldrh */
b0109805 7787 tmp = gen_ld16u(addr, IS_USER(s));
99c475ab
FB
7788 break;
7789 case 6: /* ldrb */
b0109805 7790 tmp = gen_ld8u(addr, IS_USER(s));
99c475ab
FB
7791 break;
7792 case 7: /* ldrsh */
b0109805 7793 tmp = gen_ld16s(addr, IS_USER(s));
99c475ab
FB
7794 break;
7795 }
7796 if (op >= 3) /* load */
b0109805
PB
7797 store_reg(s, rd, tmp);
7798 dead_tmp(addr);
99c475ab
FB
7799 break;
7800
7801 case 6:
7802 /* load/store word immediate offset */
7803 rd = insn & 7;
7804 rn = (insn >> 3) & 7;
b0109805 7805 addr = load_reg(s, rn);
99c475ab 7806 val = (insn >> 4) & 0x7c;
b0109805 7807 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
7808
7809 if (insn & (1 << 11)) {
7810 /* load */
b0109805
PB
7811 tmp = gen_ld32(addr, IS_USER(s));
7812 store_reg(s, rd, tmp);
99c475ab
FB
7813 } else {
7814 /* store */
b0109805
PB
7815 tmp = load_reg(s, rd);
7816 gen_st32(tmp, addr, IS_USER(s));
99c475ab 7817 }
b0109805 7818 dead_tmp(addr);
99c475ab
FB
7819 break;
7820
7821 case 7:
7822 /* load/store byte immediate offset */
7823 rd = insn & 7;
7824 rn = (insn >> 3) & 7;
b0109805 7825 addr = load_reg(s, rn);
99c475ab 7826 val = (insn >> 6) & 0x1f;
b0109805 7827 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
7828
7829 if (insn & (1 << 11)) {
7830 /* load */
b0109805
PB
7831 tmp = gen_ld8u(addr, IS_USER(s));
7832 store_reg(s, rd, tmp);
99c475ab
FB
7833 } else {
7834 /* store */
b0109805
PB
7835 tmp = load_reg(s, rd);
7836 gen_st8(tmp, addr, IS_USER(s));
99c475ab 7837 }
b0109805 7838 dead_tmp(addr);
99c475ab
FB
7839 break;
7840
7841 case 8:
7842 /* load/store halfword immediate offset */
7843 rd = insn & 7;
7844 rn = (insn >> 3) & 7;
b0109805 7845 addr = load_reg(s, rn);
99c475ab 7846 val = (insn >> 5) & 0x3e;
b0109805 7847 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
7848
7849 if (insn & (1 << 11)) {
7850 /* load */
b0109805
PB
7851 tmp = gen_ld16u(addr, IS_USER(s));
7852 store_reg(s, rd, tmp);
99c475ab
FB
7853 } else {
7854 /* store */
b0109805
PB
7855 tmp = load_reg(s, rd);
7856 gen_st16(tmp, addr, IS_USER(s));
99c475ab 7857 }
b0109805 7858 dead_tmp(addr);
99c475ab
FB
7859 break;
7860
7861 case 9:
7862 /* load/store from stack */
7863 rd = (insn >> 8) & 7;
b0109805 7864 addr = load_reg(s, 13);
99c475ab 7865 val = (insn & 0xff) * 4;
b0109805 7866 tcg_gen_addi_i32(addr, addr, val);
99c475ab
FB
7867
7868 if (insn & (1 << 11)) {
7869 /* load */
b0109805
PB
7870 tmp = gen_ld32(addr, IS_USER(s));
7871 store_reg(s, rd, tmp);
99c475ab
FB
7872 } else {
7873 /* store */
b0109805
PB
7874 tmp = load_reg(s, rd);
7875 gen_st32(tmp, addr, IS_USER(s));
99c475ab 7876 }
b0109805 7877 dead_tmp(addr);
99c475ab
FB
7878 break;
7879
7880 case 10:
7881 /* add to high reg */
7882 rd = (insn >> 8) & 7;
5899f386
FB
7883 if (insn & (1 << 11)) {
7884 /* SP */
7885 gen_movl_T0_reg(s, 13);
7886 } else {
7887 /* PC. bit 1 is ignored. */
7888 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
7889 }
99c475ab
FB
7890 val = (insn & 0xff) * 4;
7891 gen_op_movl_T1_im(val);
7892 gen_op_addl_T0_T1();
7893 gen_movl_reg_T0(s, rd);
7894 break;
7895
7896 case 11:
7897 /* misc */
7898 op = (insn >> 8) & 0xf;
7899 switch (op) {
7900 case 0:
7901 /* adjust stack pointer */
b26eefb6 7902 tmp = load_reg(s, 13);
99c475ab
FB
7903 val = (insn & 0x7f) * 4;
7904 if (insn & (1 << 7))
7905 val = -(int32_t)val;
b26eefb6
PB
7906 tcg_gen_addi_i32(tmp, tmp, val);
7907 store_reg(s, 13, tmp);
99c475ab
FB
7908 break;
7909
9ee6e8bb
PB
7910 case 2: /* sign/zero extend. */
7911 ARCH(6);
7912 rd = insn & 7;
7913 rm = (insn >> 3) & 7;
b0109805 7914 tmp = load_reg(s, rm);
9ee6e8bb 7915 switch ((insn >> 6) & 3) {
b0109805
PB
7916 case 0: gen_sxth(tmp); break;
7917 case 1: gen_sxtb(tmp); break;
7918 case 2: gen_uxth(tmp); break;
7919 case 3: gen_uxtb(tmp); break;
9ee6e8bb 7920 }
b0109805 7921 store_reg(s, rd, tmp);
9ee6e8bb 7922 break;
99c475ab
FB
7923 case 4: case 5: case 0xc: case 0xd:
7924 /* push/pop */
b0109805 7925 addr = load_reg(s, 13);
5899f386
FB
7926 if (insn & (1 << 8))
7927 offset = 4;
99c475ab 7928 else
5899f386
FB
7929 offset = 0;
7930 for (i = 0; i < 8; i++) {
7931 if (insn & (1 << i))
7932 offset += 4;
7933 }
7934 if ((insn & (1 << 11)) == 0) {
b0109805 7935 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 7936 }
99c475ab
FB
7937 for (i = 0; i < 8; i++) {
7938 if (insn & (1 << i)) {
7939 if (insn & (1 << 11)) {
7940 /* pop */
b0109805
PB
7941 tmp = gen_ld32(addr, IS_USER(s));
7942 store_reg(s, i, tmp);
99c475ab
FB
7943 } else {
7944 /* push */
b0109805
PB
7945 tmp = load_reg(s, i);
7946 gen_st32(tmp, addr, IS_USER(s));
99c475ab 7947 }
5899f386 7948 /* advance to the next address. */
b0109805 7949 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
7950 }
7951 }
7952 if (insn & (1 << 8)) {
7953 if (insn & (1 << 11)) {
7954 /* pop pc */
b0109805 7955 tmp = gen_ld32(addr, IS_USER(s));
99c475ab
FB
7956 /* don't set the pc until the rest of the instruction
7957 has completed */
7958 } else {
7959 /* push lr */
b0109805
PB
7960 tmp = load_reg(s, 14);
7961 gen_st32(tmp, addr, IS_USER(s));
99c475ab 7962 }
b0109805 7963 tcg_gen_addi_i32(addr, addr, 4);
99c475ab 7964 }
5899f386 7965 if ((insn & (1 << 11)) == 0) {
b0109805 7966 tcg_gen_addi_i32(addr, addr, -offset);
5899f386 7967 }
99c475ab 7968 /* write back the new stack pointer */
b0109805 7969 store_reg(s, 13, addr);
99c475ab
FB
7970 /* set the new PC value */
7971 if ((insn & 0x0900) == 0x0900)
b0109805 7972 gen_bx(s, tmp);
99c475ab
FB
7973 break;
7974
9ee6e8bb
PB
7975 case 1: case 3: case 9: case 11: /* czb */
7976 rm = insn & 7;
d9ba4830
PB
7977 tmp = load_reg(s, rm);
7978 tmp2 = tcg_const_i32(0);
9ee6e8bb
PB
7979 s->condlabel = gen_new_label();
7980 s->condjmp = 1;
7981 if (insn & (1 << 11))
d9ba4830 7982 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, s->condlabel);
9ee6e8bb 7983 else
d9ba4830
PB
7984 tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, s->condlabel);
7985 dead_tmp(tmp);
9ee6e8bb
PB
7986 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
7987 val = (uint32_t)s->pc + 2;
7988 val += offset;
7989 gen_jmp(s, val);
7990 break;
7991
7992 case 15: /* IT, nop-hint. */
7993 if ((insn & 0xf) == 0) {
7994 gen_nop_hint(s, (insn >> 4) & 0xf);
7995 break;
7996 }
7997 /* If Then. */
7998 s->condexec_cond = (insn >> 4) & 0xe;
7999 s->condexec_mask = insn & 0x1f;
8000 /* No actual code generated for this insn, just setup state. */
8001 break;
8002
06c949e6 8003 case 0xe: /* bkpt */
9ee6e8bb 8004 gen_set_condexec(s);
06c949e6 8005 gen_op_movl_T0_im((long)s->pc - 2);
b26eefb6 8006 gen_set_pc_T0();
d9ba4830 8007 gen_exception(EXCP_BKPT);
06c949e6
PB
8008 s->is_jmp = DISAS_JUMP;
8009 break;
8010
9ee6e8bb
PB
8011 case 0xa: /* rev */
8012 ARCH(6);
8013 rn = (insn >> 3) & 0x7;
8014 rd = insn & 0x7;
b0109805 8015 tmp = load_reg(s, rn);
9ee6e8bb 8016 switch ((insn >> 6) & 3) {
b0109805
PB
8017 case 0: tcg_gen_bswap_i32(tmp, tmp); break;
8018 case 1: gen_rev16(tmp); break;
8019 case 3: gen_revsh(tmp); break;
9ee6e8bb
PB
8020 default: goto illegal_op;
8021 }
b0109805 8022 store_reg(s, rd, tmp);
9ee6e8bb
PB
8023 break;
8024
8025 case 6: /* cps */
8026 ARCH(6);
8027 if (IS_USER(s))
8028 break;
8029 if (IS_M(env)) {
8030 val = (insn & (1 << 4)) != 0;
8031 gen_op_movl_T0_im(val);
8032 /* PRIMASK */
8033 if (insn & 1)
8034 gen_op_v7m_msr_T0(16);
8035 /* FAULTMASK */
8036 if (insn & 2)
8037 gen_op_v7m_msr_T0(17);
8038
8039 gen_lookup_tb(s);
8040 } else {
8041 if (insn & (1 << 4))
8042 shift = CPSR_A | CPSR_I | CPSR_F;
8043 else
8044 shift = 0;
8045
8046 val = ((insn & 7) << 6) & shift;
8047 gen_op_movl_T0_im(val);
8048 gen_set_psr_T0(s, shift, 0);
8049 }
8050 break;
8051
99c475ab
FB
8052 default:
8053 goto undef;
8054 }
8055 break;
8056
8057 case 12:
8058 /* load/store multiple */
8059 rn = (insn >> 8) & 0x7;
b0109805 8060 addr = load_reg(s, rn);
99c475ab
FB
8061 for (i = 0; i < 8; i++) {
8062 if (insn & (1 << i)) {
99c475ab
FB
8063 if (insn & (1 << 11)) {
8064 /* load */
b0109805
PB
8065 tmp = gen_ld32(addr, IS_USER(s));
8066 store_reg(s, i, tmp);
99c475ab
FB
8067 } else {
8068 /* store */
b0109805
PB
8069 tmp = load_reg(s, i);
8070 gen_st32(tmp, addr, IS_USER(s));
99c475ab 8071 }
5899f386 8072 /* advance to the next address */
b0109805 8073 tcg_gen_addi_i32(addr, addr, 4);
99c475ab
FB
8074 }
8075 }
5899f386 8076 /* Base register writeback. */
b0109805
PB
8077 if ((insn & (1 << rn)) == 0) {
8078 store_reg(s, rn, addr);
8079 } else {
8080 dead_tmp(addr);
8081 }
99c475ab
FB
8082 break;
8083
8084 case 13:
8085 /* conditional branch or swi */
8086 cond = (insn >> 8) & 0xf;
8087 if (cond == 0xe)
8088 goto undef;
8089
8090 if (cond == 0xf) {
8091 /* swi */
9ee6e8bb 8092 gen_set_condexec(s);
99c475ab
FB
8093 gen_op_movl_T0_im((long)s->pc | 1);
8094 /* Don't set r15. */
b26eefb6 8095 gen_set_pc_T0();
9ee6e8bb 8096 s->is_jmp = DISAS_SWI;
99c475ab
FB
8097 break;
8098 }
8099 /* generate a conditional jump to next instruction */
e50e6a20 8100 s->condlabel = gen_new_label();
d9ba4830 8101 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 8102 s->condjmp = 1;
99c475ab
FB
8103 gen_movl_T1_reg(s, 15);
8104
8105 /* jump to the offset */
5899f386 8106 val = (uint32_t)s->pc + 2;
99c475ab 8107 offset = ((int32_t)insn << 24) >> 24;
5899f386 8108 val += offset << 1;
8aaca4c0 8109 gen_jmp(s, val);
99c475ab
FB
8110 break;
8111
8112 case 14:
358bf29e 8113 if (insn & (1 << 11)) {
9ee6e8bb
PB
8114 if (disas_thumb2_insn(env, s, insn))
8115 goto undef32;
358bf29e
PB
8116 break;
8117 }
9ee6e8bb 8118 /* unconditional branch */
99c475ab
FB
8119 val = (uint32_t)s->pc;
8120 offset = ((int32_t)insn << 21) >> 21;
8121 val += (offset << 1) + 2;
8aaca4c0 8122 gen_jmp(s, val);
99c475ab
FB
8123 break;
8124
8125 case 15:
9ee6e8bb
PB
8126 if (disas_thumb2_insn(env, s, insn))
8127 goto undef32;
8128 break;
99c475ab
FB
8129 }
8130 return;
9ee6e8bb
PB
8131undef32:
8132 gen_set_condexec(s);
8133 gen_op_movl_T0_im((long)s->pc - 4);
b26eefb6 8134 gen_set_pc_T0();
d9ba4830 8135 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
8136 s->is_jmp = DISAS_JUMP;
8137 return;
8138illegal_op:
99c475ab 8139undef:
9ee6e8bb 8140 gen_set_condexec(s);
5899f386 8141 gen_op_movl_T0_im((long)s->pc - 2);
b26eefb6 8142 gen_set_pc_T0();
d9ba4830 8143 gen_exception(EXCP_UDEF);
99c475ab
FB
8144 s->is_jmp = DISAS_JUMP;
8145}
8146
2c0262af
FB
8147/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
8148 basic block 'tb'. If search_pc is TRUE, also generate PC
8149 information for each intermediate instruction. */
5fafdf24
TS
8150static inline int gen_intermediate_code_internal(CPUState *env,
8151 TranslationBlock *tb,
2c0262af
FB
8152 int search_pc)
8153{
8154 DisasContext dc1, *dc = &dc1;
8155 uint16_t *gen_opc_end;
8156 int j, lj;
0fa85d43 8157 target_ulong pc_start;
b5ff1b31 8158 uint32_t next_page_start;
3b46e624 8159
2c0262af 8160 /* generate intermediate code */
b26eefb6
PB
8161 num_temps = 0;
8162 memset(temps, 0, sizeof(temps));
8163
0fa85d43 8164 pc_start = tb->pc;
3b46e624 8165
2c0262af
FB
8166 dc->tb = tb;
8167
2c0262af 8168 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
8169
8170 dc->is_jmp = DISAS_NEXT;
8171 dc->pc = pc_start;
8aaca4c0 8172 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 8173 dc->condjmp = 0;
5899f386 8174 dc->thumb = env->thumb;
9ee6e8bb
PB
8175 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
8176 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 8177 dc->is_mem = 0;
b5ff1b31 8178#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
8179 if (IS_M(env)) {
8180 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
8181 } else {
8182 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
8183 }
b5ff1b31 8184#endif
4373f3ce
PB
8185 cpu_F0s = tcg_temp_new(TCG_TYPE_I32);
8186 cpu_F1s = tcg_temp_new(TCG_TYPE_I32);
8187 cpu_F0d = tcg_temp_new(TCG_TYPE_I64);
8188 cpu_F1d = tcg_temp_new(TCG_TYPE_I64);
b5ff1b31 8189 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 8190 lj = -1;
9ee6e8bb
PB
8191 /* Reset the conditional execution bits immediately. This avoids
8192 complications trying to do it at the end of the block. */
8193 if (env->condexec_bits)
8f01245e
PB
8194 {
8195 TCGv tmp = new_tmp();
8196 tcg_gen_movi_i32(tmp, 0);
d9ba4830 8197 store_cpu_field(tmp, condexec_bits);
8f01245e 8198 }
2c0262af 8199 do {
9ee6e8bb
PB
8200#ifndef CONFIG_USER_ONLY
8201 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
8202 /* We always get here via a jump, so know we are not in a
8203 conditional execution block. */
d9ba4830 8204 gen_exception(EXCP_EXCEPTION_EXIT);
9ee6e8bb
PB
8205 }
8206#endif
8207
1fddef4b
FB
8208 if (env->nb_breakpoints > 0) {
8209 for(j = 0; j < env->nb_breakpoints; j++) {
8210 if (env->breakpoints[j] == dc->pc) {
9ee6e8bb 8211 gen_set_condexec(dc);
1fddef4b 8212 gen_op_movl_T0_im((long)dc->pc);
b26eefb6 8213 gen_set_pc_T0();
d9ba4830 8214 gen_exception(EXCP_DEBUG);
1fddef4b 8215 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
8216 /* Advance PC so that clearing the breakpoint will
8217 invalidate this TB. */
8218 dc->pc += 2;
8219 goto done_generating;
1fddef4b
FB
8220 break;
8221 }
8222 }
8223 }
2c0262af
FB
8224 if (search_pc) {
8225 j = gen_opc_ptr - gen_opc_buf;
8226 if (lj < j) {
8227 lj++;
8228 while (lj < j)
8229 gen_opc_instr_start[lj++] = 0;
8230 }
0fa85d43 8231 gen_opc_pc[lj] = dc->pc;
2c0262af
FB
8232 gen_opc_instr_start[lj] = 1;
8233 }
e50e6a20 8234
9ee6e8bb
PB
8235 if (env->thumb) {
8236 disas_thumb_insn(env, dc);
8237 if (dc->condexec_mask) {
8238 dc->condexec_cond = (dc->condexec_cond & 0xe)
8239 | ((dc->condexec_mask >> 4) & 1);
8240 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8241 if (dc->condexec_mask == 0) {
8242 dc->condexec_cond = 0;
8243 }
8244 }
8245 } else {
8246 disas_arm_insn(env, dc);
8247 }
b26eefb6
PB
8248 if (num_temps) {
8249 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8250 num_temps = 0;
8251 }
e50e6a20
FB
8252
8253 if (dc->condjmp && !dc->is_jmp) {
8254 gen_set_label(dc->condlabel);
8255 dc->condjmp = 0;
8256 }
6658ffb8
PB
8257 /* Terminate the TB on memory ops if watchpoints are present. */
8258 /* FIXME: This should be replacd by the deterministic execution
8259 * IRQ raising bits. */
8260 if (dc->is_mem && env->nb_watchpoints)
8261 break;
8262
e50e6a20
FB
8263 /* Translation stops when a conditional branch is enoutered.
8264 * Otherwise the subsequent code could get translated several times.
b5ff1b31
FB
8265 * Also stop translation when a page boundary is reached. This
8266 * ensures prefech aborts occur at the right place. */
1fddef4b
FB
8267 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8268 !env->singlestep_enabled &&
b5ff1b31 8269 dc->pc < next_page_start);
9ee6e8bb 8270
b5ff1b31 8271 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8272 instruction was a conditional branch or trap, and the PC has
8273 already been written. */
8aaca4c0
FB
8274 if (__builtin_expect(env->singlestep_enabled, 0)) {
8275 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8276 if (dc->condjmp) {
9ee6e8bb
PB
8277 gen_set_condexec(dc);
8278 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8279 gen_exception(EXCP_SWI);
9ee6e8bb 8280 } else {
d9ba4830 8281 gen_exception(EXCP_DEBUG);
9ee6e8bb 8282 }
e50e6a20
FB
8283 gen_set_label(dc->condlabel);
8284 }
8285 if (dc->condjmp || !dc->is_jmp) {
8aaca4c0 8286 gen_op_movl_T0_im((long)dc->pc);
b26eefb6 8287 gen_set_pc_T0();
e50e6a20 8288 dc->condjmp = 0;
8aaca4c0 8289 }
9ee6e8bb
PB
8290 gen_set_condexec(dc);
8291 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8292 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8293 } else {
8294 /* FIXME: Single stepping a WFI insn will not halt
8295 the CPU. */
d9ba4830 8296 gen_exception(EXCP_DEBUG);
9ee6e8bb 8297 }
8aaca4c0 8298 } else {
9ee6e8bb
PB
8299 /* While branches must always occur at the end of an IT block,
8300 there are a few other things that can cause us to terminate
8301 the TB in the middel of an IT block:
8302 - Exception generating instructions (bkpt, swi, undefined).
8303 - Page boundaries.
8304 - Hardware watchpoints.
8305 Hardware breakpoints have already been handled and skip this code.
8306 */
8307 gen_set_condexec(dc);
8aaca4c0 8308 switch(dc->is_jmp) {
8aaca4c0 8309 case DISAS_NEXT:
6e256c93 8310 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8311 break;
8312 default:
8313 case DISAS_JUMP:
8314 case DISAS_UPDATE:
8315 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8316 tcg_gen_exit_tb(0);
8aaca4c0
FB
8317 break;
8318 case DISAS_TB_JUMP:
8319 /* nothing more to generate */
8320 break;
9ee6e8bb 8321 case DISAS_WFI:
d9ba4830 8322 gen_helper_wfi();
9ee6e8bb
PB
8323 break;
8324 case DISAS_SWI:
d9ba4830 8325 gen_exception(EXCP_SWI);
9ee6e8bb 8326 break;
8aaca4c0 8327 }
e50e6a20
FB
8328 if (dc->condjmp) {
8329 gen_set_label(dc->condlabel);
9ee6e8bb 8330 gen_set_condexec(dc);
6e256c93 8331 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8332 dc->condjmp = 0;
8333 }
2c0262af 8334 }
9ee6e8bb 8335done_generating:
2c0262af
FB
8336 *gen_opc_ptr = INDEX_op_end;
8337
8338#ifdef DEBUG_DISAS
e19e89a5 8339 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8340 fprintf(logfile, "----------------\n");
8341 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8342 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8343 fprintf(logfile, "\n");
8344 }
8345#endif
b5ff1b31
FB
8346 if (search_pc) {
8347 j = gen_opc_ptr - gen_opc_buf;
8348 lj++;
8349 while (lj <= j)
8350 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8351 } else {
2c0262af 8352 tb->size = dc->pc - pc_start;
b5ff1b31 8353 }
2c0262af
FB
8354 return 0;
8355}
8356
8357int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8358{
8359 return gen_intermediate_code_internal(env, tb, 0);
8360}
8361
8362int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8363{
8364 return gen_intermediate_code_internal(env, tb, 1);
8365}
8366
b5ff1b31
FB
/* Printable names for the 16 possible CPSR.M mode encodings; invalid
   encodings are shown as "???".  Indexed by the low 4 mode bits. */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc",
    "???", "???", "???", "abt",
    "???", "???", "???", "und",
    "???", "???", "???", "sys"
};
9ee6e8bb 8371
5fafdf24 8372void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8373 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8374 int flags)
2c0262af
FB
8375{
8376 int i;
bc380d17 8377 union {
b7bcbe95
FB
8378 uint32_t i;
8379 float s;
8380 } s0, s1;
8381 CPU_DoubleU d;
a94a6abf
PB
8382 /* ??? This assumes float64 and double have the same layout.
8383 Oh well, it's only debug dumps. */
8384 union {
8385 float64 f64;
8386 double d;
8387 } d0;
b5ff1b31 8388 uint32_t psr;
2c0262af
FB
8389
8390 for(i=0;i<16;i++) {
7fe48483 8391 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8392 if ((i % 4) == 3)
7fe48483 8393 cpu_fprintf(f, "\n");
2c0262af 8394 else
7fe48483 8395 cpu_fprintf(f, " ");
2c0262af 8396 }
b5ff1b31 8397 psr = cpsr_read(env);
687fa640
TS
8398 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8399 psr,
b5ff1b31
FB
8400 psr & (1 << 31) ? 'N' : '-',
8401 psr & (1 << 30) ? 'Z' : '-',
8402 psr & (1 << 29) ? 'C' : '-',
8403 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8404 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8405 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95
FB
8406
8407 for (i = 0; i < 16; i++) {
8e96005d
FB
8408 d.d = env->vfp.regs[i];
8409 s0.i = d.l.lower;
8410 s1.i = d.l.upper;
a94a6abf
PB
8411 d0.f64 = d.d;
8412 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8413 i * 2, (int)s0.i, s0.s,
a94a6abf 8414 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8415 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8416 d0.d);
b7bcbe95 8417 }
40f137e1 8418 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
2c0262af 8419}
a6b025d3 8420