]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
ARM TCG conversion 8/16.
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
57fec1fe 31#include "tcg-op.h"
1497c961
PB
32
33#define GEN_HELPER 1
b26eefb6 34#include "helpers.h"
2c0262af 35
9ee6e8bb
PB
36#define ENABLE_ARCH_5J 0
37#define ENABLE_ARCH_6 arm_feature(env, ARM_FEATURE_V6)
38#define ENABLE_ARCH_6K arm_feature(env, ARM_FEATURE_V6K)
39#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
40#define ENABLE_ARCH_7 arm_feature(env, ARM_FEATURE_V7)
b5ff1b31
FB
41
42#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
43
2c0262af
FB
44/* internal defines */
45typedef struct DisasContext {
0fa85d43 46 target_ulong pc;
2c0262af 47 int is_jmp;
e50e6a20
FB
48 /* Nonzero if this instruction has been conditionally skipped. */
49 int condjmp;
50 /* The label that will be jumped to when the instruction is skipped. */
51 int condlabel;
9ee6e8bb
PB
52 /* Thumb-2 condtional execution bits. */
53 int condexec_mask;
54 int condexec_cond;
2c0262af 55 struct TranslationBlock *tb;
8aaca4c0 56 int singlestep_enabled;
5899f386 57 int thumb;
6658ffb8 58 int is_mem;
b5ff1b31
FB
59#if !defined(CONFIG_USER_ONLY)
60 int user;
61#endif
2c0262af
FB
62} DisasContext;
63
b5ff1b31
FB
64#if defined(CONFIG_USER_ONLY)
65#define IS_USER(s) 1
66#else
67#define IS_USER(s) (s->user)
68#endif
69
9ee6e8bb
PB
70/* These instructions trap after executing, so defer them until after the
71 conditional executions state has been updated. */
72#define DISAS_WFI 4
73#define DISAS_SWI 5
2c0262af
FB
74
75/* XXX: move that elsewhere */
2c0262af
FB
76extern FILE *logfile;
77extern int loglevel;
78
b26eefb6
PB
79static TCGv cpu_env;
80/* FIXME: These should be removed. */
81static TCGv cpu_T[3];
82
83/* initialize TCG globals. */
/* Initialize TCG globals: the env pointer and the three fixed
   registers backing the legacy cpu_T[0..2] temporaries used by the
   gen_op_* macros. */
void arm_translate_init(void)
{
    cpu_env = tcg_global_reg_new(TCG_TYPE_PTR, TCG_AREG0, "env");

    cpu_T[0] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG1, "T0");
    cpu_T[1] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG2, "T1");
    cpu_T[2] = tcg_global_reg_new(TCG_TYPE_I32, TCG_AREG3, "T2");
}
92
93/* The code generator doesn't like lots of temporaries, so maintain our own
94 cache for reuse within a function. */
95#define MAX_TEMPS 8
96static int num_temps;
97static TCGv temps[MAX_TEMPS];
98
99/* Allocate a temporary variable. */
/* Allocate a temporary variable, reusing a cached TCG temp when the
   next cache slot already holds one.  Aborts if more than MAX_TEMPS
   temporaries are live at once. */
static TCGv new_tmp(void)
{
    TCGv tmp;
    if (num_temps == MAX_TEMPS)
        abort();

    /* Slot already populated by an earlier allocation: reuse it. */
    if (GET_TCGV(temps[num_temps]))
        return temps[num_temps++];

    tmp = tcg_temp_new(TCG_TYPE_I32);
    temps[num_temps++] = tmp;
    return tmp;
}
113
114/* Release a temporary variable. */
/* Release a temporary variable.  Temps may be freed in any order, so
   the freed temp is shuffled into the first dead slot, keeping live
   temps contiguous in temps[0..num_temps). */
static void dead_tmp(TCGv tmp)
{
    int i;
    num_temps--;
    i = num_temps;
    /* Common case: the most recently allocated temp is freed first. */
    if (GET_TCGV(temps[i]) == GET_TCGV(tmp))
        return;

    /* Shuffle this temp to the last slot. */
    while (GET_TCGV(temps[i]) != GET_TCGV(tmp))
        i--;
    while (i < num_temps) {
        temps[i] = temps[i + 1];
        i++;
    }
    temps[i] = tmp;
}
132
d9ba4830
PB
/* Load the 32-bit CPUState field at the given byte offset into a
   fresh temporary.  Caller owns the result and must dead_tmp() it. */
static inline TCGv load_cpu_offset(int offset)
{
    TCGv tmp = new_tmp();
    tcg_gen_ld_i32(tmp, cpu_env, offset);
    return tmp;
}
139
140#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
141
/* Store var into the 32-bit CPUState field at the given byte offset.
   var is marked as dead. */
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}
147
148#define store_cpu_field(var, name) \
149 store_cpu_offset(var, offsetof(CPUState, name))
150
b26eefb6
PB
/* Set a variable to the value of a CPU register.  Reading r15 (PC)
   yields the architecturally visible value: the address of the
   instruction plus 4 (Thumb) or 8 (ARM); s->pc already points past
   the current instruction, so only one insn width is added here. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
{
    if (reg == 15) {
        uint32_t addr;
        /* normaly, since we updated PC, we need only to add one insn */
        if (s->thumb)
            addr = (long)s->pc + 2;
        else
            addr = (long)s->pc + 4;
        tcg_gen_movi_i32(var, addr);
    } else {
        tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    }
}
166
/* Create a new temporary and set it to the value of a CPU register.
   Caller owns (and must release) the returned temp. */
static inline TCGv load_reg(DisasContext *s, int reg)
{
    TCGv tmp = new_tmp();
    load_reg_var(s, tmp, reg);
    return tmp;
}
174
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  Writes to r15 clear bit 0 (interworking bit) and
   force the translator to end the TB (DISAS_JUMP). */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 15) {
        tcg_gen_andi_i32(var, var, ~1);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, regs[reg]));
    dead_tmp(var);
}
186
187
188/* Basic operations. */
189#define gen_op_movl_T0_T1() tcg_gen_mov_i32(cpu_T[0], cpu_T[1])
190#define gen_op_movl_T0_T2() tcg_gen_mov_i32(cpu_T[0], cpu_T[2])
191#define gen_op_movl_T1_T0() tcg_gen_mov_i32(cpu_T[1], cpu_T[0])
192#define gen_op_movl_T1_T2() tcg_gen_mov_i32(cpu_T[1], cpu_T[2])
193#define gen_op_movl_T2_T0() tcg_gen_mov_i32(cpu_T[2], cpu_T[0])
194#define gen_op_movl_T0_im(im) tcg_gen_movi_i32(cpu_T[0], im)
195#define gen_op_movl_T1_im(im) tcg_gen_movi_i32(cpu_T[1], im)
196#define gen_op_movl_T2_im(im) tcg_gen_movi_i32(cpu_T[2], im)
197
198#define gen_op_addl_T1_im(im) tcg_gen_addi_i32(cpu_T[1], cpu_T[1], im)
199#define gen_op_addl_T0_T1() tcg_gen_add_i32(cpu_T[0], cpu_T[0], cpu_T[1])
200#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
201#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
202
203#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
204#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
205#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
206#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
207#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
208#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
209#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
210
211#define gen_op_shll_T0_im(im) tcg_gen_shli_i32(cpu_T[0], cpu_T[0], im)
212#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
213#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
214#define gen_op_sarl_T1_im(im) tcg_gen_sari_i32(cpu_T[1], cpu_T[1], im)
215#define gen_op_rorl_T1_im(im) tcg_gen_rori_i32(cpu_T[1], cpu_T[1], im)
216
217/* Value extensions. */
218#define gen_uxtb(var) tcg_gen_andi_i32(var, var, 0xff)
219#define gen_uxth(var) tcg_gen_andi_i32(var, var, 0xffff)
220#define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
221#define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
222
1497c961
PB
223#define gen_sxtb16(var) gen_helper_sxtb16(var, var)
224#define gen_uxtb16(var) gen_helper_uxtb16(var, var)
8f01245e
PB
225#define gen_op_rev_T0() tcg_gen_bswap_i32(cpu_T[0], cpu_T[0])
226
227#define gen_op_mul_T0_T1() tcg_gen_mul_i32(cpu_T[0], cpu_T[0], cpu_T[1])
b26eefb6 228
1497c961
PB
229#define gen_op_addl_T0_T1_setq() \
230 gen_helper_add_setq(cpu_T[0], cpu_T[0], cpu_T[1])
231#define gen_op_addl_T0_T1_saturate() \
232 gen_helper_add_saturate(cpu_T[0], cpu_T[0], cpu_T[1])
233#define gen_op_subl_T0_T1_saturate() \
234 gen_helper_sub_saturate(cpu_T[0], cpu_T[0], cpu_T[1])
235#define gen_op_addl_T0_T1_usaturate() \
236 gen_helper_add_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
237#define gen_op_subl_T0_T1_usaturate() \
238 gen_helper_sub_usaturate(cpu_T[0], cpu_T[0], cpu_T[1])
f51bbbfe 239
3670669c
PB
240/* Copy the most significant bit of T0 to all bits of T1. */
241#define gen_op_signbit_T1_T0() tcg_gen_sari_i32(cpu_T[1], cpu_T[0], 31)
242
d9ba4830
PB
243#define gen_set_cpsr(var, mask) gen_helper_cpsr_write(var, tcg_const_i32(mask))
244/* Set NZCV flags from the high 4 bits of var. */
245#define gen_set_nzcv(var) gen_set_cpsr(var, CPSR_NZCV)
246
/* Emit code raising the given exception number via the exception
   helper. */
static void gen_exception(int excp)
{
    TCGv tmp = new_tmp();
    tcg_gen_movi_i32(tmp, excp);
    gen_helper_exception(tmp);
    dead_tmp(tmp);
}
254
3670669c
PB
255static void gen_smul_dual(TCGv a, TCGv b)
256{
257 TCGv tmp1 = new_tmp();
258 TCGv tmp2 = new_tmp();
3670669c
PB
259 tcg_gen_ext8s_i32(tmp1, a);
260 tcg_gen_ext8s_i32(tmp2, b);
261 tcg_gen_mul_i32(tmp1, tmp1, tmp2);
262 dead_tmp(tmp2);
263 tcg_gen_sari_i32(a, a, 16);
264 tcg_gen_sari_i32(b, b, 16);
265 tcg_gen_mul_i32(b, b, a);
266 tcg_gen_mov_i32(a, tmp1);
267 dead_tmp(tmp1);
268}
269
/* Byteswap each halfword independently (REV16). */
static void gen_rev16(TCGv var)
{
    TCGv tmp = new_tmp();
    tcg_gen_shri_i32(tmp, var, 8);
    tcg_gen_andi_i32(tmp, tmp, 0x00ff00ff);  /* high bytes moved down */
    tcg_gen_shli_i32(var, var, 8);
    tcg_gen_andi_i32(var, var, 0xff00ff00);  /* low bytes moved up */
    tcg_gen_or_i32(var, var, tmp);
    dead_tmp(tmp);
}
281
282/* Byteswap low halfword and sign extend. */
283static void gen_revsh(TCGv var)
284{
285 TCGv tmp = new_tmp();
286 tcg_gen_shri_i32(tmp, var, 8);
287 tcg_gen_andi_i32(tmp, tmp, 0x00ff);
288 tcg_gen_shli_i32(var, var, 8);
289 tcg_gen_ext8s_i32(var, var);
290 tcg_gen_or_i32(var, var, tmp);
291 dead_tmp(tmp);
292}
293
294/* Unsigned bitfield extract. */
295static void gen_ubfx(TCGv var, int shift, uint32_t mask)
296{
297 if (shift)
298 tcg_gen_shri_i32(var, var, shift);
299 tcg_gen_andi_i32(var, var, mask);
300}
301
/* Signed bitfield extract: take <width> bits starting at <shift> and
   sign-extend them into var. */
static void gen_sbfx(TCGv var, int shift, int width)
{
    uint32_t signbit;

    if (shift)
        tcg_gen_sari_i32(var, var, shift);
    if (shift + width < 32) {
        /* Sign-extend the low <width> bits with the xor/sub trick.
           When shift + width == 32 the arithmetic shift above already
           produced a fully sign-extended result. */
        signbit = 1u << (width - 1);
        tcg_gen_andi_i32(var, var, (1u << width) - 1);
        tcg_gen_xori_i32(var, var, signbit);
        tcg_gen_subi_i32(var, var, signbit);
    }
}
316
/* Bitfield insertion.  Insert val into base at bit position <shift>;
   mask selects the destination bits (already shifted into place).
   Clobbers base and val; result goes to dest. */
static void gen_bfi(TCGv dest, TCGv base, TCGv val, int shift, uint32_t mask)
{
    tcg_gen_shli_i32(val, val, shift);
    tcg_gen_andi_i32(val, val, mask);
    tcg_gen_andi_i32(base, base, ~mask);
    tcg_gen_or_i32(dest, base, val);
}
325
d9ba4830
PB
326/* Round the top 32 bits of a 64-bit value. */
327static void gen_roundqd(TCGv a, TCGv b)
3670669c 328{
d9ba4830
PB
329 tcg_gen_shri_i32(a, a, 31);
330 tcg_gen_add_i32(a, a, b);
3670669c
PB
331}
332
8f01245e
PB
333/* FIXME: Most targets have native widening multiplication.
334 It would be good to use that instead of a full wide multiply. */
335/* Unsigned 32x32->64 multiply. */
336static void gen_op_mull_T0_T1(void)
337{
338 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
339 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
340
341 tcg_gen_extu_i32_i64(tmp1, cpu_T[0]);
342 tcg_gen_extu_i32_i64(tmp2, cpu_T[1]);
343 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
344 tcg_gen_trunc_i64_i32(cpu_T[0], tmp1);
345 tcg_gen_shri_i64(tmp1, tmp1, 32);
346 tcg_gen_trunc_i64_i32(cpu_T[1], tmp1);
347}
348
349/* Signed 32x32->64 multiply. */
d9ba4830 350static void gen_imull(TCGv a, TCGv b)
8f01245e
PB
351{
352 TCGv tmp1 = tcg_temp_new(TCG_TYPE_I64);
353 TCGv tmp2 = tcg_temp_new(TCG_TYPE_I64);
354
d9ba4830
PB
355 tcg_gen_ext_i32_i64(tmp1, a);
356 tcg_gen_ext_i32_i64(tmp2, b);
8f01245e 357 tcg_gen_mul_i64(tmp1, tmp1, tmp2);
d9ba4830 358 tcg_gen_trunc_i64_i32(a, tmp1);
8f01245e 359 tcg_gen_shri_i64(tmp1, tmp1, 32);
d9ba4830
PB
360 tcg_gen_trunc_i64_i32(b, tmp1);
361}
362#define gen_op_imull_T0_T1() gen_imull(cpu_T[0], cpu_T[1])
363
364/* Signed 32x16 multiply, top 32 bits. */
365static void gen_imulw(TCGv a, TCGv b)
366{
367 gen_imull(a, b);
368 tcg_gen_shri_i32(a, a, 16);
369 tcg_gen_shli_i32(b, b, 16);
370 tcg_gen_or_i32(a, a, b);
8f01245e
PB
371}
372
373/* Swap low and high halfwords. */
374static void gen_swap_half(TCGv var)
375{
376 TCGv tmp = new_tmp();
377 tcg_gen_shri_i32(tmp, var, 16);
378 tcg_gen_shli_i32(var, var, 16);
379 tcg_gen_or_i32(var, var, tmp);
3670669c 380 dead_tmp(tmp);
8f01245e
PB
381}
382
b26eefb6
PB
/* Dual 16-bit add with no carry propagation between halfwords.
   Result placed in t0 and t1 is marked as dead.
     tmp = (t0 ^ t1) & 0x8000;
     t0 &= ~0x8000;
     t1 &= ~0x8000;
     t0 = (t0 + t1) ^ tmp;
   Clearing bit 15 of each operand stops the low-half carry from
   reaching the high half; the xor restores the correct bit 15. */
static void gen_add16(TCGv t0, TCGv t1)
{
    TCGv tmp = new_tmp();
    tcg_gen_xor_i32(tmp, t0, t1);
    tcg_gen_andi_i32(tmp, tmp, 0x8000);
    tcg_gen_andi_i32(t0, t0, ~0x8000);
    tcg_gen_andi_i32(t1, t1, ~0x8000);
    tcg_gen_add_i32(t0, t0, t1);
    tcg_gen_xor_i32(t0, t0, tmp);
    dead_tmp(tmp);
    dead_tmp(t1);
}
402
9a119ff6
PB
403#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
404
b26eefb6
PB
405/* Set CF to the top bit of var. */
406static void gen_set_CF_bit31(TCGv var)
407{
408 TCGv tmp = new_tmp();
409 tcg_gen_shri_i32(tmp, var, 31);
9a119ff6 410 gen_set_CF(var);
b26eefb6
PB
411 dead_tmp(tmp);
412}
413
414/* Set N and Z flags from var. */
415static inline void gen_logic_CC(TCGv var)
416{
417 tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NZF));
418}
419
/* T0 += T1 + CF (add with carry). */
static void gen_adc_T0_T1(void)
{
    TCGv tmp;
    gen_op_addl_T0_T1();
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp);
    dead_tmp(tmp);
}
429
3670669c
PB
/* dest = t0 - t1 + CF - 1 (ARM subtract-with-carry semantics, where
   CF acts as NOT-borrow). */
static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
{
    TCGv tmp;
    tcg_gen_sub_i32(dest, t0, t1);
    tmp = load_cpu_field(CF);
    tcg_gen_add_i32(dest, dest, tmp);
    tcg_gen_subi_i32(dest, dest, 1);
    dead_tmp(tmp);
}
440
441#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
442#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
443
b26eefb6
PB
444/* FIXME: Implement this natively. */
445static inline void tcg_gen_not_i32(TCGv t0, TCGv t1)
446{
447 tcg_gen_xori_i32(t0, t1, ~0);
448}
449
450/* T0 &= ~T1. Clobbers T1. */
451/* FIXME: Implement bic natively. */
452static inline void gen_op_bicl_T0_T1(void)
453{
454 gen_op_notl_T1();
455 gen_op_andl_T0_T1();
456}
457
/* Rotate t1 right by i bits into t0.  FIXME: Implement this natively.
   i == 0 is a no-op (and must be special-cased: a shift by 32 below
   would be out of range). */
static void tcg_gen_rori_i32(TCGv t0, TCGv t1, int i)
{
    TCGv tmp;

    if (i == 0)
        return;

    tmp = new_tmp();
    tcg_gen_shri_i32(tmp, t1, i);
    tcg_gen_shli_i32(t1, t1, 32 - i);
    tcg_gen_or_i32(t0, t1, tmp);
    dead_tmp(tmp);
}
472
9a119ff6 473static void shifter_out_im(TCGv var, int shift)
b26eefb6 474{
9a119ff6
PB
475 TCGv tmp = new_tmp();
476 if (shift == 0) {
477 tcg_gen_andi_i32(tmp, var, 1);
b26eefb6 478 } else {
9a119ff6
PB
479 tcg_gen_shri_i32(tmp, var, shift);
480 if (shift != 31);
481 tcg_gen_andi_i32(tmp, tmp, 1);
482 }
483 gen_set_CF(tmp);
484 dead_tmp(tmp);
485}
b26eefb6 486
9a119ff6
PB
/* Shift by immediate.  Includes special handling for shift == 0,
   which encodes LSR #32, ASR #32 and RRX.  If flags is set, the
   shifter carry-out is written to CF via shifter_out_im(). */
static inline void gen_arm_shift_im(TCGv var, int shiftop, int shift, int flags)
{
    switch (shiftop) {
    case 0: /* LSL */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, 32 - shift);
            tcg_gen_shli_i32(var, var, shift);
        }
        break;
    case 1: /* LSR */
        if (shift == 0) {
            /* Encoded shift 0 means LSR #32: result 0, CF = bit 31. */
            if (flags) {
                tcg_gen_shri_i32(var, var, 31);
                gen_set_CF(var);
            }
            tcg_gen_movi_i32(var, 0);
        } else {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_shri_i32(var, var, shift);
        }
        break;
    case 2: /* ASR */
        if (shift == 0)
            shift = 32;   /* encoded shift 0 means ASR #32 */
        if (flags)
            shifter_out_im(var, shift - 1);
        if (shift == 32)
            shift = 31;   /* sari by 31 replicates the sign bit */
        tcg_gen_sari_i32(var, var, shift);
        break;
    case 3: /* ROR/RRX */
        if (shift != 0) {
            if (flags)
                shifter_out_im(var, shift - 1);
            tcg_gen_rori_i32(var, var, shift); break;
        } else {
            /* RRX: rotate right by one bit through the carry flag. */
            TCGv tmp = load_cpu_field(CF);
            if (flags)
                shifter_out_im(var, 0);
            tcg_gen_shri_i32(var, var, 1);
            tcg_gen_shli_i32(tmp, tmp, 31);
            tcg_gen_or_i32(var, var, tmp);
            dead_tmp(tmp);
        }
    }
};
536
6ddbc6e4
PB
537#define PAS_OP(pfx) \
538 switch (op2) { \
539 case 0: gen_pas_helper(glue(pfx,add16)); break; \
540 case 1: gen_pas_helper(glue(pfx,addsubx)); break; \
541 case 2: gen_pas_helper(glue(pfx,subaddx)); break; \
542 case 3: gen_pas_helper(glue(pfx,sub16)); break; \
543 case 4: gen_pas_helper(glue(pfx,add8)); break; \
544 case 7: gen_pas_helper(glue(pfx,sub8)); break; \
545 }
d9ba4830 546static void gen_arm_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
547{
548 TCGv tmp;
549
550 switch (op1) {
551#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
552 case 1:
553 tmp = tcg_temp_new(TCG_TYPE_PTR);
554 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
555 PAS_OP(s)
556 break;
557 case 5:
558 tmp = tcg_temp_new(TCG_TYPE_PTR);
559 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
560 PAS_OP(u)
561 break;
562#undef gen_pas_helper
563#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
564 case 2:
565 PAS_OP(q);
566 break;
567 case 3:
568 PAS_OP(sh);
569 break;
570 case 6:
571 PAS_OP(uq);
572 break;
573 case 7:
574 PAS_OP(uh);
575 break;
576#undef gen_pas_helper
577 }
578}
9ee6e8bb
PB
579#undef PAS_OP
580
6ddbc6e4
PB
581/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings. */
582#define PAS_OP(pfx) \
583 switch (op2) { \
584 case 0: gen_pas_helper(glue(pfx,add8)); break; \
585 case 1: gen_pas_helper(glue(pfx,add16)); break; \
586 case 2: gen_pas_helper(glue(pfx,addsubx)); break; \
587 case 4: gen_pas_helper(glue(pfx,sub8)); break; \
588 case 5: gen_pas_helper(glue(pfx,sub16)); break; \
589 case 6: gen_pas_helper(glue(pfx,subaddx)); break; \
590 }
d9ba4830 591static void gen_thumb2_parallel_addsub(int op1, int op2, TCGv a, TCGv b)
6ddbc6e4
PB
592{
593 TCGv tmp;
594
595 switch (op1) {
596#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
597 case 0:
598 tmp = tcg_temp_new(TCG_TYPE_PTR);
599 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
600 PAS_OP(s)
601 break;
602 case 4:
603 tmp = tcg_temp_new(TCG_TYPE_PTR);
604 tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
605 PAS_OP(u)
606 break;
607#undef gen_pas_helper
608#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b)
609 case 1:
610 PAS_OP(q);
611 break;
612 case 2:
613 PAS_OP(sh);
614 break;
615 case 5:
616 PAS_OP(uq);
617 break;
618 case 6:
619 PAS_OP(uh);
620 break;
621#undef gen_pas_helper
622 }
623}
9ee6e8bb
PB
624#undef PAS_OP
625
d9ba4830
PB
/* Generate a conditional branch to <label>, taken when ARM condition
   code <cc> (0..13) holds.  Flags are read from CPUState fields:
   NZF holds a value whose sign bit is N and whose zeroness is Z;
   CF and VF hold the carry and overflow flags. */
static void gen_test_cc(int cc, int label)
{
    TCGv tmp;
    TCGv tmp2;
    TCGv zero;
    int inv;

    zero = tcg_const_i32(0);
    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
        break;
    case 8: /* hi: C && !Z */
        /* Skip over the taken-branch when C is clear. */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_NE, tmp, zero, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NZF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NZF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NZF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcond_i32(TCG_COND_GE, tmp, zero, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(NZF);
        tcg_gen_brcond_i32(TCG_COND_EQ, tmp, zero, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NZF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcond_i32(TCG_COND_LT, tmp, zero, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    dead_tmp(tmp);
}
2c0262af
FB
725
726const uint8_t table_logic_cc[16] = {
727 1, /* and */
728 1, /* xor */
729 0, /* sub */
730 0, /* rsb */
731 0, /* add */
732 0, /* adc */
733 0, /* sbc */
734 0, /* rsc */
735 1, /* andl */
736 1, /* xorl */
737 0, /* cmp */
738 0, /* cmn */
739 1, /* orr */
740 1, /* mov */
741 1, /* bic */
742 1, /* mvn */
743};
3b46e624 744
2c0262af
FB
745static GenOpFunc *gen_shift_T1_T0[4] = {
746 gen_op_shll_T1_T0,
747 gen_op_shrl_T1_T0,
748 gen_op_sarl_T1_T0,
749 gen_op_rorl_T1_T0,
750};
751
752static GenOpFunc *gen_shift_T1_T0_cc[4] = {
753 gen_op_shll_T1_T0_cc,
754 gen_op_shrl_T1_T0_cc,
755 gen_op_sarl_T1_T0_cc,
756 gen_op_rorl_T1_T0_cc,
757};
758
d9ba4830
PB
759/* Set PC and Thumb state from an immediate address. */
760static inline void gen_bx_im(DisasContext *s, uint32_t addr)
99c475ab 761{
b26eefb6 762 TCGv tmp;
99c475ab 763
b26eefb6
PB
764 s->is_jmp = DISAS_UPDATE;
765 tmp = new_tmp();
d9ba4830
PB
766 if (s->thumb != (addr & 1)) {
767 tcg_gen_movi_i32(tmp, addr & 1);
768 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
769 }
770 tcg_gen_movi_i32(tmp, addr & ~1);
771 tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[15]));
b26eefb6 772 dead_tmp(tmp);
d9ba4830
PB
773}
774
/* Set PC and Thumb state from var (BX semantics: bit 0 selects the
   Thumb state).  var is marked as dead and the TB is ended
   (DISAS_UPDATE). */
static inline void gen_bx(DisasContext *s, TCGv var)
{
    TCGv tmp;

    s->is_jmp = DISAS_UPDATE;
    tmp = new_tmp();
    tcg_gen_andi_i32(tmp, var, 1);
    store_cpu_field(tmp, thumb);
    tcg_gen_andi_i32(var, var, ~1);
    store_cpu_field(var, regs[15]);
}
787
788/* TODO: This should be removed. Use gen_bx instead. */
789static inline void gen_bx_T0(DisasContext *s)
790{
791 TCGv tmp = new_tmp();
792 tcg_gen_mov_i32(tmp, cpu_T[0]);
793 gen_bx(s, tmp);
b26eefb6 794}
b5ff1b31
FB
795
796#if defined(CONFIG_USER_ONLY)
797#define gen_ldst(name, s) gen_op_##name##_raw()
798#else
799#define gen_ldst(name, s) do { \
6658ffb8 800 s->is_mem = 1; \
b5ff1b31
FB
801 if (IS_USER(s)) \
802 gen_op_##name##_user(); \
803 else \
804 gen_op_##name##_kernel(); \
805 } while (0)
806#endif
807
2c0262af
FB
808static inline void gen_movl_T0_reg(DisasContext *s, int reg)
809{
b26eefb6 810 load_reg_var(s, cpu_T[0], reg);
2c0262af
FB
811}
812
813static inline void gen_movl_T1_reg(DisasContext *s, int reg)
814{
b26eefb6 815 load_reg_var(s, cpu_T[1], reg);
2c0262af
FB
816}
817
818static inline void gen_movl_T2_reg(DisasContext *s, int reg)
819{
b26eefb6
PB
820 load_reg_var(s, cpu_T[2], reg);
821}
822
/* Store cpu_T[0] into regs[15] (PC) without ending the TB. */
static inline void gen_set_pc_T0(void)
{
    tcg_gen_st_i32(cpu_T[0], cpu_env, offsetof(CPUState, regs[15]));
}
827
/* Copy cpu_T[t] into CPU register <reg>.  Writes to r15 clear bit 0
   (interworking bit) and end the TB (DISAS_JUMP); cpu_T[t] itself is
   left unmodified, so a scratch temp is used for that case. */
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    TCGv tmp;
    if (reg == 15) {
        tmp = new_tmp();
        tcg_gen_andi_i32(tmp, cpu_T[t], ~1);
    } else {
        tmp = cpu_T[t];
    }
    tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, regs[reg]));
    if (reg == 15) {
        dead_tmp(tmp);
        s->is_jmp = DISAS_JUMP;
    }
}
843
844static inline void gen_movl_reg_T0(DisasContext *s, int reg)
845{
846 gen_movl_reg_TN(s, reg, 0);
847}
848
849static inline void gen_movl_reg_T1(DisasContext *s, int reg)
850{
851 gen_movl_reg_TN(s, reg, 1);
852}
853
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state:
   store the next PC into r15 and end the TB with DISAS_UPDATE. */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
861
2c0262af
FB
862static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
863{
1e8d4eec 864 int val, rm, shift, shiftop;
b26eefb6 865 TCGv offset;
2c0262af
FB
866
867 if (!(insn & (1 << 25))) {
868 /* immediate */
869 val = insn & 0xfff;
870 if (!(insn & (1 << 23)))
871 val = -val;
537730b9
FB
872 if (val != 0)
873 gen_op_addl_T1_im(val);
2c0262af
FB
874 } else {
875 /* shift/register */
876 rm = (insn) & 0xf;
877 shift = (insn >> 7) & 0x1f;
1e8d4eec 878 shiftop = (insn >> 5) & 3;
b26eefb6 879 offset = load_reg(s, rm);
9a119ff6 880 gen_arm_shift_im(offset, shiftop, shift, 0);
2c0262af 881 if (!(insn & (1 << 23)))
b26eefb6 882 tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
2c0262af 883 else
b26eefb6
PB
884 tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
885 dead_tmp(offset);
2c0262af
FB
886 }
887}
888
191f9a93
PB
889static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
890 int extra)
2c0262af
FB
891{
892 int val, rm;
b26eefb6 893 TCGv offset;
3b46e624 894
2c0262af
FB
895 if (insn & (1 << 22)) {
896 /* immediate */
897 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
898 if (!(insn & (1 << 23)))
899 val = -val;
18acad92 900 val += extra;
537730b9
FB
901 if (val != 0)
902 gen_op_addl_T1_im(val);
2c0262af
FB
903 } else {
904 /* register */
191f9a93
PB
905 if (extra)
906 gen_op_addl_T1_im(extra);
2c0262af 907 rm = (insn) & 0xf;
b26eefb6 908 offset = load_reg(s, rm);
2c0262af 909 if (!(insn & (1 << 23)))
b26eefb6 910 tcg_gen_sub_i32(cpu_T[1], cpu_T[1], offset);
2c0262af 911 else
b26eefb6
PB
912 tcg_gen_add_i32(cpu_T[1], cpu_T[1], offset);
913 dead_tmp(offset);
2c0262af
FB
914 }
915}
916
b7bcbe95
FB
917#define VFP_OP(name) \
918static inline void gen_vfp_##name(int dp) \
919{ \
920 if (dp) \
921 gen_op_vfp_##name##d(); \
922 else \
923 gen_op_vfp_##name##s(); \
924}
925
9ee6e8bb
PB
926#define VFP_OP1(name) \
927static inline void gen_vfp_##name(int dp, int arg) \
928{ \
929 if (dp) \
930 gen_op_vfp_##name##d(arg); \
931 else \
932 gen_op_vfp_##name##s(arg); \
933}
934
b7bcbe95
FB
935VFP_OP(add)
936VFP_OP(sub)
937VFP_OP(mul)
938VFP_OP(div)
939VFP_OP(neg)
940VFP_OP(abs)
941VFP_OP(sqrt)
942VFP_OP(cmp)
943VFP_OP(cmpe)
944VFP_OP(F1_ld0)
945VFP_OP(uito)
946VFP_OP(sito)
947VFP_OP(toui)
948VFP_OP(touiz)
949VFP_OP(tosi)
950VFP_OP(tosiz)
9ee6e8bb
PB
951VFP_OP1(tosh)
952VFP_OP1(tosl)
953VFP_OP1(touh)
954VFP_OP1(toul)
955VFP_OP1(shto)
956VFP_OP1(slto)
957VFP_OP1(uhto)
958VFP_OP1(ulto)
b7bcbe95
FB
959
960#undef VFP_OP
961
9ee6e8bb
PB
962static inline void gen_vfp_fconst(int dp, uint32_t val)
963{
964 if (dp)
965 gen_op_vfp_fconstd(val);
966 else
967 gen_op_vfp_fconsts(val);
968}
969
b5ff1b31
FB
970static inline void gen_vfp_ld(DisasContext *s, int dp)
971{
972 if (dp)
973 gen_ldst(vfp_ldd, s);
974 else
975 gen_ldst(vfp_lds, s);
976}
977
978static inline void gen_vfp_st(DisasContext *s, int dp)
979{
980 if (dp)
981 gen_ldst(vfp_std, s);
982 else
983 gen_ldst(vfp_sts, s);
984}
985
8e96005d
FB
986static inline long
987vfp_reg_offset (int dp, int reg)
988{
989 if (dp)
990 return offsetof(CPUARMState, vfp.regs[reg]);
991 else if (reg & 1) {
992 return offsetof(CPUARMState, vfp.regs[reg >> 1])
993 + offsetof(CPU_DoubleU, l.upper);
994 } else {
995 return offsetof(CPUARMState, vfp.regs[reg >> 1])
996 + offsetof(CPU_DoubleU, l.lower);
997 }
998}
9ee6e8bb
PB
999
1000/* Return the offset of a 32-bit piece of a NEON register.
1001 zero is the least significant end of the register. */
1002static inline long
1003neon_reg_offset (int reg, int n)
1004{
1005 int sreg;
1006 sreg = reg * 2 + n;
1007 return vfp_reg_offset(0, sreg);
1008}
1009
1010#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
1011#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
1012
b7bcbe95
FB
1013static inline void gen_mov_F0_vreg(int dp, int reg)
1014{
1015 if (dp)
8e96005d 1016 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp, reg));
b7bcbe95 1017 else
8e96005d 1018 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp, reg));
b7bcbe95
FB
1019}
1020
1021static inline void gen_mov_F1_vreg(int dp, int reg)
1022{
1023 if (dp)
8e96005d 1024 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp, reg));
b7bcbe95 1025 else
8e96005d 1026 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp, reg));
b7bcbe95
FB
1027}
1028
1029static inline void gen_mov_vreg_F0(int dp, int reg)
1030{
1031 if (dp)
8e96005d 1032 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp, reg));
b7bcbe95 1033 else
8e96005d 1034 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp, reg));
b7bcbe95
FB
1035}
1036
18c9b560
AZ
1037#define ARM_CP_RW_BIT (1 << 20)
1038
/* Compute the address for an iwMMXt load/store into T1, applying the
   pre/post-indexed addressing and base-register writeback encoded in
   insn.  Returns nonzero for an invalid addressing mode. */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;        /* base register */
    gen_movl_T1_reg(s, rd);

    /* 8-bit immediate, scaled by 4 when bit 8 is set. */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        if (insn & (1 << 21))       /* writeback */
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        return 1;
    return 0;
}
1069
1070static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
1071{
1072 int rd = (insn >> 0) & 0xf;
1073
1074 if (insn & (1 << 8))
1075 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
1076 return 1;
1077 else
1078 gen_op_iwmmxt_movl_T0_wCx(rd);
1079 else
1080 gen_op_iwmmxt_movl_T0_T1_wRn(rd);
1081
1082 gen_op_movl_T1_im(mask);
1083 gen_op_andl_T0_T1();
1084 return 0;
1085}
1086
1087/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
1088 (ie. an undefined instruction). */
1089static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
1090{
1091 int rd, wrd;
1092 int rdhi, rdlo, rd0, rd1, i;
1093
1094 if ((insn & 0x0e000e00) == 0x0c000000) {
1095 if ((insn & 0x0fe00ff0) == 0x0c400000) {
1096 wrd = insn & 0xf;
1097 rdlo = (insn >> 12) & 0xf;
1098 rdhi = (insn >> 16) & 0xf;
1099 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
1100 gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
1101 gen_movl_reg_T0(s, rdlo);
1102 gen_movl_reg_T1(s, rdhi);
1103 } else { /* TMCRR */
1104 gen_movl_T0_reg(s, rdlo);
1105 gen_movl_T1_reg(s, rdhi);
1106 gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
1107 gen_op_iwmmxt_set_mup();
1108 }
1109 return 0;
1110 }
1111
1112 wrd = (insn >> 12) & 0xf;
1113 if (gen_iwmmxt_address(s, insn))
1114 return 1;
1115 if (insn & ARM_CP_RW_BIT) {
1116 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
1117 gen_ldst(ldl, s);
1118 gen_op_iwmmxt_movl_wCx_T0(wrd);
1119 } else {
1120 if (insn & (1 << 8))
1121 if (insn & (1 << 22)) /* WLDRD */
1122 gen_ldst(iwmmxt_ldq, s);
1123 else /* WLDRW wRd */
1124 gen_ldst(iwmmxt_ldl, s);
1125 else
1126 if (insn & (1 << 22)) /* WLDRH */
1127 gen_ldst(iwmmxt_ldw, s);
1128 else /* WLDRB */
1129 gen_ldst(iwmmxt_ldb, s);
1130 gen_op_iwmmxt_movq_wRn_M0(wrd);
1131 }
1132 } else {
1133 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
1134 gen_op_iwmmxt_movl_T0_wCx(wrd);
1135 gen_ldst(stl, s);
1136 } else {
1137 gen_op_iwmmxt_movq_M0_wRn(wrd);
1138 if (insn & (1 << 8))
1139 if (insn & (1 << 22)) /* WSTRD */
1140 gen_ldst(iwmmxt_stq, s);
1141 else /* WSTRW wRd */
1142 gen_ldst(iwmmxt_stl, s);
1143 else
1144 if (insn & (1 << 22)) /* WSTRH */
1145 gen_ldst(iwmmxt_ldw, s);
1146 else /* WSTRB */
1147 gen_ldst(iwmmxt_stb, s);
1148 }
1149 }
1150 return 0;
1151 }
1152
1153 if ((insn & 0x0f000000) != 0x0e000000)
1154 return 1;
1155
1156 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
1157 case 0x000: /* WOR */
1158 wrd = (insn >> 12) & 0xf;
1159 rd0 = (insn >> 0) & 0xf;
1160 rd1 = (insn >> 16) & 0xf;
1161 gen_op_iwmmxt_movq_M0_wRn(rd0);
1162 gen_op_iwmmxt_orq_M0_wRn(rd1);
1163 gen_op_iwmmxt_setpsr_nz();
1164 gen_op_iwmmxt_movq_wRn_M0(wrd);
1165 gen_op_iwmmxt_set_mup();
1166 gen_op_iwmmxt_set_cup();
1167 break;
1168 case 0x011: /* TMCR */
1169 if (insn & 0xf)
1170 return 1;
1171 rd = (insn >> 12) & 0xf;
1172 wrd = (insn >> 16) & 0xf;
1173 switch (wrd) {
1174 case ARM_IWMMXT_wCID:
1175 case ARM_IWMMXT_wCASF:
1176 break;
1177 case ARM_IWMMXT_wCon:
1178 gen_op_iwmmxt_set_cup();
1179 /* Fall through. */
1180 case ARM_IWMMXT_wCSSF:
1181 gen_op_iwmmxt_movl_T0_wCx(wrd);
1182 gen_movl_T1_reg(s, rd);
1183 gen_op_bicl_T0_T1();
1184 gen_op_iwmmxt_movl_wCx_T0(wrd);
1185 break;
1186 case ARM_IWMMXT_wCGR0:
1187 case ARM_IWMMXT_wCGR1:
1188 case ARM_IWMMXT_wCGR2:
1189 case ARM_IWMMXT_wCGR3:
1190 gen_op_iwmmxt_set_cup();
1191 gen_movl_reg_T0(s, rd);
1192 gen_op_iwmmxt_movl_wCx_T0(wrd);
1193 break;
1194 default:
1195 return 1;
1196 }
1197 break;
1198 case 0x100: /* WXOR */
1199 wrd = (insn >> 12) & 0xf;
1200 rd0 = (insn >> 0) & 0xf;
1201 rd1 = (insn >> 16) & 0xf;
1202 gen_op_iwmmxt_movq_M0_wRn(rd0);
1203 gen_op_iwmmxt_xorq_M0_wRn(rd1);
1204 gen_op_iwmmxt_setpsr_nz();
1205 gen_op_iwmmxt_movq_wRn_M0(wrd);
1206 gen_op_iwmmxt_set_mup();
1207 gen_op_iwmmxt_set_cup();
1208 break;
1209 case 0x111: /* TMRC */
1210 if (insn & 0xf)
1211 return 1;
1212 rd = (insn >> 12) & 0xf;
1213 wrd = (insn >> 16) & 0xf;
1214 gen_op_iwmmxt_movl_T0_wCx(wrd);
1215 gen_movl_reg_T0(s, rd);
1216 break;
1217 case 0x300: /* WANDN */
1218 wrd = (insn >> 12) & 0xf;
1219 rd0 = (insn >> 0) & 0xf;
1220 rd1 = (insn >> 16) & 0xf;
1221 gen_op_iwmmxt_movq_M0_wRn(rd0);
1222 gen_op_iwmmxt_negq_M0();
1223 gen_op_iwmmxt_andq_M0_wRn(rd1);
1224 gen_op_iwmmxt_setpsr_nz();
1225 gen_op_iwmmxt_movq_wRn_M0(wrd);
1226 gen_op_iwmmxt_set_mup();
1227 gen_op_iwmmxt_set_cup();
1228 break;
1229 case 0x200: /* WAND */
1230 wrd = (insn >> 12) & 0xf;
1231 rd0 = (insn >> 0) & 0xf;
1232 rd1 = (insn >> 16) & 0xf;
1233 gen_op_iwmmxt_movq_M0_wRn(rd0);
1234 gen_op_iwmmxt_andq_M0_wRn(rd1);
1235 gen_op_iwmmxt_setpsr_nz();
1236 gen_op_iwmmxt_movq_wRn_M0(wrd);
1237 gen_op_iwmmxt_set_mup();
1238 gen_op_iwmmxt_set_cup();
1239 break;
1240 case 0x810: case 0xa10: /* WMADD */
1241 wrd = (insn >> 12) & 0xf;
1242 rd0 = (insn >> 0) & 0xf;
1243 rd1 = (insn >> 16) & 0xf;
1244 gen_op_iwmmxt_movq_M0_wRn(rd0);
1245 if (insn & (1 << 21))
1246 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
1247 else
1248 gen_op_iwmmxt_madduq_M0_wRn(rd1);
1249 gen_op_iwmmxt_movq_wRn_M0(wrd);
1250 gen_op_iwmmxt_set_mup();
1251 break;
1252 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
1253 wrd = (insn >> 12) & 0xf;
1254 rd0 = (insn >> 16) & 0xf;
1255 rd1 = (insn >> 0) & 0xf;
1256 gen_op_iwmmxt_movq_M0_wRn(rd0);
1257 switch ((insn >> 22) & 3) {
1258 case 0:
1259 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
1260 break;
1261 case 1:
1262 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
1263 break;
1264 case 2:
1265 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
1266 break;
1267 case 3:
1268 return 1;
1269 }
1270 gen_op_iwmmxt_movq_wRn_M0(wrd);
1271 gen_op_iwmmxt_set_mup();
1272 gen_op_iwmmxt_set_cup();
1273 break;
1274 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
1275 wrd = (insn >> 12) & 0xf;
1276 rd0 = (insn >> 16) & 0xf;
1277 rd1 = (insn >> 0) & 0xf;
1278 gen_op_iwmmxt_movq_M0_wRn(rd0);
1279 switch ((insn >> 22) & 3) {
1280 case 0:
1281 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
1282 break;
1283 case 1:
1284 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
1285 break;
1286 case 2:
1287 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
1288 break;
1289 case 3:
1290 return 1;
1291 }
1292 gen_op_iwmmxt_movq_wRn_M0(wrd);
1293 gen_op_iwmmxt_set_mup();
1294 gen_op_iwmmxt_set_cup();
1295 break;
1296 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
1297 wrd = (insn >> 12) & 0xf;
1298 rd0 = (insn >> 16) & 0xf;
1299 rd1 = (insn >> 0) & 0xf;
1300 gen_op_iwmmxt_movq_M0_wRn(rd0);
1301 if (insn & (1 << 22))
1302 gen_op_iwmmxt_sadw_M0_wRn(rd1);
1303 else
1304 gen_op_iwmmxt_sadb_M0_wRn(rd1);
1305 if (!(insn & (1 << 20)))
1306 gen_op_iwmmxt_addl_M0_wRn(wrd);
1307 gen_op_iwmmxt_movq_wRn_M0(wrd);
1308 gen_op_iwmmxt_set_mup();
1309 break;
1310 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
1311 wrd = (insn >> 12) & 0xf;
1312 rd0 = (insn >> 16) & 0xf;
1313 rd1 = (insn >> 0) & 0xf;
1314 gen_op_iwmmxt_movq_M0_wRn(rd0);
1315 if (insn & (1 << 21))
1316 gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
1317 else
1318 gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
1319 gen_op_iwmmxt_movq_wRn_M0(wrd);
1320 gen_op_iwmmxt_set_mup();
1321 break;
1322 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
1323 wrd = (insn >> 12) & 0xf;
1324 rd0 = (insn >> 16) & 0xf;
1325 rd1 = (insn >> 0) & 0xf;
1326 gen_op_iwmmxt_movq_M0_wRn(rd0);
1327 if (insn & (1 << 21))
1328 gen_op_iwmmxt_macsw_M0_wRn(rd1);
1329 else
1330 gen_op_iwmmxt_macuw_M0_wRn(rd1);
1331 if (!(insn & (1 << 20))) {
1332 if (insn & (1 << 21))
1333 gen_op_iwmmxt_addsq_M0_wRn(wrd);
1334 else
1335 gen_op_iwmmxt_adduq_M0_wRn(wrd);
1336 }
1337 gen_op_iwmmxt_movq_wRn_M0(wrd);
1338 gen_op_iwmmxt_set_mup();
1339 break;
1340 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
1341 wrd = (insn >> 12) & 0xf;
1342 rd0 = (insn >> 16) & 0xf;
1343 rd1 = (insn >> 0) & 0xf;
1344 gen_op_iwmmxt_movq_M0_wRn(rd0);
1345 switch ((insn >> 22) & 3) {
1346 case 0:
1347 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
1348 break;
1349 case 1:
1350 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
1351 break;
1352 case 2:
1353 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
1354 break;
1355 case 3:
1356 return 1;
1357 }
1358 gen_op_iwmmxt_movq_wRn_M0(wrd);
1359 gen_op_iwmmxt_set_mup();
1360 gen_op_iwmmxt_set_cup();
1361 break;
1362 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
1363 wrd = (insn >> 12) & 0xf;
1364 rd0 = (insn >> 16) & 0xf;
1365 rd1 = (insn >> 0) & 0xf;
1366 gen_op_iwmmxt_movq_M0_wRn(rd0);
1367 if (insn & (1 << 22))
1368 gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
1369 else
1370 gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
1371 gen_op_iwmmxt_movq_wRn_M0(wrd);
1372 gen_op_iwmmxt_set_mup();
1373 gen_op_iwmmxt_set_cup();
1374 break;
1375 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
1376 wrd = (insn >> 12) & 0xf;
1377 rd0 = (insn >> 16) & 0xf;
1378 rd1 = (insn >> 0) & 0xf;
1379 gen_op_iwmmxt_movq_M0_wRn(rd0);
1380 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
1381 gen_op_movl_T1_im(7);
1382 gen_op_andl_T0_T1();
1383 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1384 gen_op_iwmmxt_movq_wRn_M0(wrd);
1385 gen_op_iwmmxt_set_mup();
1386 break;
1387 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
1388 rd = (insn >> 12) & 0xf;
1389 wrd = (insn >> 16) & 0xf;
1390 gen_movl_T0_reg(s, rd);
1391 gen_op_iwmmxt_movq_M0_wRn(wrd);
1392 switch ((insn >> 6) & 3) {
1393 case 0:
1394 gen_op_movl_T1_im(0xff);
1395 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
1396 break;
1397 case 1:
1398 gen_op_movl_T1_im(0xffff);
1399 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
1400 break;
1401 case 2:
1402 gen_op_movl_T1_im(0xffffffff);
1403 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
1404 break;
1405 case 3:
1406 return 1;
1407 }
1408 gen_op_iwmmxt_movq_wRn_M0(wrd);
1409 gen_op_iwmmxt_set_mup();
1410 break;
1411 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
1412 rd = (insn >> 12) & 0xf;
1413 wrd = (insn >> 16) & 0xf;
1414 if (rd == 15)
1415 return 1;
1416 gen_op_iwmmxt_movq_M0_wRn(wrd);
1417 switch ((insn >> 22) & 3) {
1418 case 0:
1419 if (insn & 8)
1420 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
1421 else {
1422 gen_op_movl_T1_im(0xff);
1423 gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
1424 }
1425 break;
1426 case 1:
1427 if (insn & 8)
1428 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
1429 else {
1430 gen_op_movl_T1_im(0xffff);
1431 gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
1432 }
1433 break;
1434 case 2:
1435 gen_op_movl_T1_im(0xffffffff);
1436 gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
1437 break;
1438 case 3:
1439 return 1;
1440 }
b26eefb6 1441 gen_movl_reg_T0(s, rd);
18c9b560
AZ
1442 break;
1443 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1444 if ((insn & 0x000ff008) != 0x0003f000)
1445 return 1;
1446 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1447 switch ((insn >> 22) & 3) {
1448 case 0:
1449 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1450 break;
1451 case 1:
1452 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1453 break;
1454 case 2:
1455 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1456 break;
1457 case 3:
1458 return 1;
1459 }
1460 gen_op_shll_T1_im(28);
d9ba4830 1461 gen_set_nzcv(cpu_T[1]);
18c9b560
AZ
1462 break;
1463 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1464 rd = (insn >> 12) & 0xf;
1465 wrd = (insn >> 16) & 0xf;
1466 gen_movl_T0_reg(s, rd);
1467 switch ((insn >> 6) & 3) {
1468 case 0:
1469 gen_op_iwmmxt_bcstb_M0_T0();
1470 break;
1471 case 1:
1472 gen_op_iwmmxt_bcstw_M0_T0();
1473 break;
1474 case 2:
1475 gen_op_iwmmxt_bcstl_M0_T0();
1476 break;
1477 case 3:
1478 return 1;
1479 }
1480 gen_op_iwmmxt_movq_wRn_M0(wrd);
1481 gen_op_iwmmxt_set_mup();
1482 break;
1483 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1484 if ((insn & 0x000ff00f) != 0x0003f000)
1485 return 1;
1486 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1487 switch ((insn >> 22) & 3) {
1488 case 0:
1489 for (i = 0; i < 7; i ++) {
1490 gen_op_shll_T1_im(4);
1491 gen_op_andl_T0_T1();
1492 }
1493 break;
1494 case 1:
1495 for (i = 0; i < 3; i ++) {
1496 gen_op_shll_T1_im(8);
1497 gen_op_andl_T0_T1();
1498 }
1499 break;
1500 case 2:
1501 gen_op_shll_T1_im(16);
1502 gen_op_andl_T0_T1();
1503 break;
1504 case 3:
1505 return 1;
1506 }
d9ba4830 1507 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1508 break;
1509 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1510 wrd = (insn >> 12) & 0xf;
1511 rd0 = (insn >> 16) & 0xf;
1512 gen_op_iwmmxt_movq_M0_wRn(rd0);
1513 switch ((insn >> 22) & 3) {
1514 case 0:
1515 gen_op_iwmmxt_addcb_M0();
1516 break;
1517 case 1:
1518 gen_op_iwmmxt_addcw_M0();
1519 break;
1520 case 2:
1521 gen_op_iwmmxt_addcl_M0();
1522 break;
1523 case 3:
1524 return 1;
1525 }
1526 gen_op_iwmmxt_movq_wRn_M0(wrd);
1527 gen_op_iwmmxt_set_mup();
1528 break;
1529 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1530 if ((insn & 0x000ff00f) != 0x0003f000)
1531 return 1;
1532 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1533 switch ((insn >> 22) & 3) {
1534 case 0:
1535 for (i = 0; i < 7; i ++) {
1536 gen_op_shll_T1_im(4);
1537 gen_op_orl_T0_T1();
1538 }
1539 break;
1540 case 1:
1541 for (i = 0; i < 3; i ++) {
1542 gen_op_shll_T1_im(8);
1543 gen_op_orl_T0_T1();
1544 }
1545 break;
1546 case 2:
1547 gen_op_shll_T1_im(16);
1548 gen_op_orl_T0_T1();
1549 break;
1550 case 3:
1551 return 1;
1552 }
d9ba4830 1553 gen_set_nzcv(cpu_T[0]);
18c9b560
AZ
1554 break;
1555 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1556 rd = (insn >> 12) & 0xf;
1557 rd0 = (insn >> 16) & 0xf;
1558 if ((insn & 0xf) != 0)
1559 return 1;
1560 gen_op_iwmmxt_movq_M0_wRn(rd0);
1561 switch ((insn >> 22) & 3) {
1562 case 0:
1563 gen_op_iwmmxt_msbb_T0_M0();
1564 break;
1565 case 1:
1566 gen_op_iwmmxt_msbw_T0_M0();
1567 break;
1568 case 2:
1569 gen_op_iwmmxt_msbl_T0_M0();
1570 break;
1571 case 3:
1572 return 1;
1573 }
1574 gen_movl_reg_T0(s, rd);
1575 break;
1576 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1577 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1578 wrd = (insn >> 12) & 0xf;
1579 rd0 = (insn >> 16) & 0xf;
1580 rd1 = (insn >> 0) & 0xf;
1581 gen_op_iwmmxt_movq_M0_wRn(rd0);
1582 switch ((insn >> 22) & 3) {
1583 case 0:
1584 if (insn & (1 << 21))
1585 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1586 else
1587 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1588 break;
1589 case 1:
1590 if (insn & (1 << 21))
1591 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1592 else
1593 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1594 break;
1595 case 2:
1596 if (insn & (1 << 21))
1597 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1598 else
1599 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1600 break;
1601 case 3:
1602 return 1;
1603 }
1604 gen_op_iwmmxt_movq_wRn_M0(wrd);
1605 gen_op_iwmmxt_set_mup();
1606 gen_op_iwmmxt_set_cup();
1607 break;
1608 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1609 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1610 wrd = (insn >> 12) & 0xf;
1611 rd0 = (insn >> 16) & 0xf;
1612 gen_op_iwmmxt_movq_M0_wRn(rd0);
1613 switch ((insn >> 22) & 3) {
1614 case 0:
1615 if (insn & (1 << 21))
1616 gen_op_iwmmxt_unpacklsb_M0();
1617 else
1618 gen_op_iwmmxt_unpacklub_M0();
1619 break;
1620 case 1:
1621 if (insn & (1 << 21))
1622 gen_op_iwmmxt_unpacklsw_M0();
1623 else
1624 gen_op_iwmmxt_unpackluw_M0();
1625 break;
1626 case 2:
1627 if (insn & (1 << 21))
1628 gen_op_iwmmxt_unpacklsl_M0();
1629 else
1630 gen_op_iwmmxt_unpacklul_M0();
1631 break;
1632 case 3:
1633 return 1;
1634 }
1635 gen_op_iwmmxt_movq_wRn_M0(wrd);
1636 gen_op_iwmmxt_set_mup();
1637 gen_op_iwmmxt_set_cup();
1638 break;
1639 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1640 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1641 wrd = (insn >> 12) & 0xf;
1642 rd0 = (insn >> 16) & 0xf;
1643 gen_op_iwmmxt_movq_M0_wRn(rd0);
1644 switch ((insn >> 22) & 3) {
1645 case 0:
1646 if (insn & (1 << 21))
1647 gen_op_iwmmxt_unpackhsb_M0();
1648 else
1649 gen_op_iwmmxt_unpackhub_M0();
1650 break;
1651 case 1:
1652 if (insn & (1 << 21))
1653 gen_op_iwmmxt_unpackhsw_M0();
1654 else
1655 gen_op_iwmmxt_unpackhuw_M0();
1656 break;
1657 case 2:
1658 if (insn & (1 << 21))
1659 gen_op_iwmmxt_unpackhsl_M0();
1660 else
1661 gen_op_iwmmxt_unpackhul_M0();
1662 break;
1663 case 3:
1664 return 1;
1665 }
1666 gen_op_iwmmxt_movq_wRn_M0(wrd);
1667 gen_op_iwmmxt_set_mup();
1668 gen_op_iwmmxt_set_cup();
1669 break;
1670 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1671 case 0x214: case 0x614: case 0xa14: case 0xe14:
1672 wrd = (insn >> 12) & 0xf;
1673 rd0 = (insn >> 16) & 0xf;
1674 gen_op_iwmmxt_movq_M0_wRn(rd0);
1675 if (gen_iwmmxt_shift(insn, 0xff))
1676 return 1;
1677 switch ((insn >> 22) & 3) {
1678 case 0:
1679 return 1;
1680 case 1:
1681 gen_op_iwmmxt_srlw_M0_T0();
1682 break;
1683 case 2:
1684 gen_op_iwmmxt_srll_M0_T0();
1685 break;
1686 case 3:
1687 gen_op_iwmmxt_srlq_M0_T0();
1688 break;
1689 }
1690 gen_op_iwmmxt_movq_wRn_M0(wrd);
1691 gen_op_iwmmxt_set_mup();
1692 gen_op_iwmmxt_set_cup();
1693 break;
1694 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1695 case 0x014: case 0x414: case 0x814: case 0xc14:
1696 wrd = (insn >> 12) & 0xf;
1697 rd0 = (insn >> 16) & 0xf;
1698 gen_op_iwmmxt_movq_M0_wRn(rd0);
1699 if (gen_iwmmxt_shift(insn, 0xff))
1700 return 1;
1701 switch ((insn >> 22) & 3) {
1702 case 0:
1703 return 1;
1704 case 1:
1705 gen_op_iwmmxt_sraw_M0_T0();
1706 break;
1707 case 2:
1708 gen_op_iwmmxt_sral_M0_T0();
1709 break;
1710 case 3:
1711 gen_op_iwmmxt_sraq_M0_T0();
1712 break;
1713 }
1714 gen_op_iwmmxt_movq_wRn_M0(wrd);
1715 gen_op_iwmmxt_set_mup();
1716 gen_op_iwmmxt_set_cup();
1717 break;
1718 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1719 case 0x114: case 0x514: case 0x914: case 0xd14:
1720 wrd = (insn >> 12) & 0xf;
1721 rd0 = (insn >> 16) & 0xf;
1722 gen_op_iwmmxt_movq_M0_wRn(rd0);
1723 if (gen_iwmmxt_shift(insn, 0xff))
1724 return 1;
1725 switch ((insn >> 22) & 3) {
1726 case 0:
1727 return 1;
1728 case 1:
1729 gen_op_iwmmxt_sllw_M0_T0();
1730 break;
1731 case 2:
1732 gen_op_iwmmxt_slll_M0_T0();
1733 break;
1734 case 3:
1735 gen_op_iwmmxt_sllq_M0_T0();
1736 break;
1737 }
1738 gen_op_iwmmxt_movq_wRn_M0(wrd);
1739 gen_op_iwmmxt_set_mup();
1740 gen_op_iwmmxt_set_cup();
1741 break;
1742 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1743 case 0x314: case 0x714: case 0xb14: case 0xf14:
1744 wrd = (insn >> 12) & 0xf;
1745 rd0 = (insn >> 16) & 0xf;
1746 gen_op_iwmmxt_movq_M0_wRn(rd0);
1747 switch ((insn >> 22) & 3) {
1748 case 0:
1749 return 1;
1750 case 1:
1751 if (gen_iwmmxt_shift(insn, 0xf))
1752 return 1;
1753 gen_op_iwmmxt_rorw_M0_T0();
1754 break;
1755 case 2:
1756 if (gen_iwmmxt_shift(insn, 0x1f))
1757 return 1;
1758 gen_op_iwmmxt_rorl_M0_T0();
1759 break;
1760 case 3:
1761 if (gen_iwmmxt_shift(insn, 0x3f))
1762 return 1;
1763 gen_op_iwmmxt_rorq_M0_T0();
1764 break;
1765 }
1766 gen_op_iwmmxt_movq_wRn_M0(wrd);
1767 gen_op_iwmmxt_set_mup();
1768 gen_op_iwmmxt_set_cup();
1769 break;
1770 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1771 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1772 wrd = (insn >> 12) & 0xf;
1773 rd0 = (insn >> 16) & 0xf;
1774 rd1 = (insn >> 0) & 0xf;
1775 gen_op_iwmmxt_movq_M0_wRn(rd0);
1776 switch ((insn >> 22) & 3) {
1777 case 0:
1778 if (insn & (1 << 21))
1779 gen_op_iwmmxt_minsb_M0_wRn(rd1);
1780 else
1781 gen_op_iwmmxt_minub_M0_wRn(rd1);
1782 break;
1783 case 1:
1784 if (insn & (1 << 21))
1785 gen_op_iwmmxt_minsw_M0_wRn(rd1);
1786 else
1787 gen_op_iwmmxt_minuw_M0_wRn(rd1);
1788 break;
1789 case 2:
1790 if (insn & (1 << 21))
1791 gen_op_iwmmxt_minsl_M0_wRn(rd1);
1792 else
1793 gen_op_iwmmxt_minul_M0_wRn(rd1);
1794 break;
1795 case 3:
1796 return 1;
1797 }
1798 gen_op_iwmmxt_movq_wRn_M0(wrd);
1799 gen_op_iwmmxt_set_mup();
1800 break;
1801 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1802 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1803 wrd = (insn >> 12) & 0xf;
1804 rd0 = (insn >> 16) & 0xf;
1805 rd1 = (insn >> 0) & 0xf;
1806 gen_op_iwmmxt_movq_M0_wRn(rd0);
1807 switch ((insn >> 22) & 3) {
1808 case 0:
1809 if (insn & (1 << 21))
1810 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1811 else
1812 gen_op_iwmmxt_maxub_M0_wRn(rd1);
1813 break;
1814 case 1:
1815 if (insn & (1 << 21))
1816 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1817 else
1818 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1819 break;
1820 case 2:
1821 if (insn & (1 << 21))
1822 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1823 else
1824 gen_op_iwmmxt_maxul_M0_wRn(rd1);
1825 break;
1826 case 3:
1827 return 1;
1828 }
1829 gen_op_iwmmxt_movq_wRn_M0(wrd);
1830 gen_op_iwmmxt_set_mup();
1831 break;
1832 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1833 case 0x402: case 0x502: case 0x602: case 0x702:
1834 wrd = (insn >> 12) & 0xf;
1835 rd0 = (insn >> 16) & 0xf;
1836 rd1 = (insn >> 0) & 0xf;
1837 gen_op_iwmmxt_movq_M0_wRn(rd0);
1838 gen_op_movl_T0_im((insn >> 20) & 3);
1839 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1840 gen_op_iwmmxt_movq_wRn_M0(wrd);
1841 gen_op_iwmmxt_set_mup();
1842 break;
1843 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1844 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1845 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1846 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1847 wrd = (insn >> 12) & 0xf;
1848 rd0 = (insn >> 16) & 0xf;
1849 rd1 = (insn >> 0) & 0xf;
1850 gen_op_iwmmxt_movq_M0_wRn(rd0);
1851 switch ((insn >> 20) & 0xf) {
1852 case 0x0:
1853 gen_op_iwmmxt_subnb_M0_wRn(rd1);
1854 break;
1855 case 0x1:
1856 gen_op_iwmmxt_subub_M0_wRn(rd1);
1857 break;
1858 case 0x3:
1859 gen_op_iwmmxt_subsb_M0_wRn(rd1);
1860 break;
1861 case 0x4:
1862 gen_op_iwmmxt_subnw_M0_wRn(rd1);
1863 break;
1864 case 0x5:
1865 gen_op_iwmmxt_subuw_M0_wRn(rd1);
1866 break;
1867 case 0x7:
1868 gen_op_iwmmxt_subsw_M0_wRn(rd1);
1869 break;
1870 case 0x8:
1871 gen_op_iwmmxt_subnl_M0_wRn(rd1);
1872 break;
1873 case 0x9:
1874 gen_op_iwmmxt_subul_M0_wRn(rd1);
1875 break;
1876 case 0xb:
1877 gen_op_iwmmxt_subsl_M0_wRn(rd1);
1878 break;
1879 default:
1880 return 1;
1881 }
1882 gen_op_iwmmxt_movq_wRn_M0(wrd);
1883 gen_op_iwmmxt_set_mup();
1884 gen_op_iwmmxt_set_cup();
1885 break;
1886 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1887 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1888 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1889 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1890 wrd = (insn >> 12) & 0xf;
1891 rd0 = (insn >> 16) & 0xf;
1892 gen_op_iwmmxt_movq_M0_wRn(rd0);
1893 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
1894 gen_op_iwmmxt_shufh_M0_T0();
1895 gen_op_iwmmxt_movq_wRn_M0(wrd);
1896 gen_op_iwmmxt_set_mup();
1897 gen_op_iwmmxt_set_cup();
1898 break;
1899 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1900 case 0x418: case 0x518: case 0x618: case 0x718:
1901 case 0x818: case 0x918: case 0xa18: case 0xb18:
1902 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1903 wrd = (insn >> 12) & 0xf;
1904 rd0 = (insn >> 16) & 0xf;
1905 rd1 = (insn >> 0) & 0xf;
1906 gen_op_iwmmxt_movq_M0_wRn(rd0);
1907 switch ((insn >> 20) & 0xf) {
1908 case 0x0:
1909 gen_op_iwmmxt_addnb_M0_wRn(rd1);
1910 break;
1911 case 0x1:
1912 gen_op_iwmmxt_addub_M0_wRn(rd1);
1913 break;
1914 case 0x3:
1915 gen_op_iwmmxt_addsb_M0_wRn(rd1);
1916 break;
1917 case 0x4:
1918 gen_op_iwmmxt_addnw_M0_wRn(rd1);
1919 break;
1920 case 0x5:
1921 gen_op_iwmmxt_adduw_M0_wRn(rd1);
1922 break;
1923 case 0x7:
1924 gen_op_iwmmxt_addsw_M0_wRn(rd1);
1925 break;
1926 case 0x8:
1927 gen_op_iwmmxt_addnl_M0_wRn(rd1);
1928 break;
1929 case 0x9:
1930 gen_op_iwmmxt_addul_M0_wRn(rd1);
1931 break;
1932 case 0xb:
1933 gen_op_iwmmxt_addsl_M0_wRn(rd1);
1934 break;
1935 default:
1936 return 1;
1937 }
1938 gen_op_iwmmxt_movq_wRn_M0(wrd);
1939 gen_op_iwmmxt_set_mup();
1940 gen_op_iwmmxt_set_cup();
1941 break;
1942 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1943 case 0x408: case 0x508: case 0x608: case 0x708:
1944 case 0x808: case 0x908: case 0xa08: case 0xb08:
1945 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1946 wrd = (insn >> 12) & 0xf;
1947 rd0 = (insn >> 16) & 0xf;
1948 rd1 = (insn >> 0) & 0xf;
1949 gen_op_iwmmxt_movq_M0_wRn(rd0);
1950 if (!(insn & (1 << 20)))
1951 return 1;
1952 switch ((insn >> 22) & 3) {
1953 case 0:
1954 return 1;
1955 case 1:
1956 if (insn & (1 << 21))
1957 gen_op_iwmmxt_packsw_M0_wRn(rd1);
1958 else
1959 gen_op_iwmmxt_packuw_M0_wRn(rd1);
1960 break;
1961 case 2:
1962 if (insn & (1 << 21))
1963 gen_op_iwmmxt_packsl_M0_wRn(rd1);
1964 else
1965 gen_op_iwmmxt_packul_M0_wRn(rd1);
1966 break;
1967 case 3:
1968 if (insn & (1 << 21))
1969 gen_op_iwmmxt_packsq_M0_wRn(rd1);
1970 else
1971 gen_op_iwmmxt_packuq_M0_wRn(rd1);
1972 break;
1973 }
1974 gen_op_iwmmxt_movq_wRn_M0(wrd);
1975 gen_op_iwmmxt_set_mup();
1976 gen_op_iwmmxt_set_cup();
1977 break;
1978 case 0x201: case 0x203: case 0x205: case 0x207:
1979 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1980 case 0x211: case 0x213: case 0x215: case 0x217:
1981 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1982 wrd = (insn >> 5) & 0xf;
1983 rd0 = (insn >> 12) & 0xf;
1984 rd1 = (insn >> 0) & 0xf;
1985 if (rd0 == 0xf || rd1 == 0xf)
1986 return 1;
1987 gen_op_iwmmxt_movq_M0_wRn(wrd);
1988 switch ((insn >> 16) & 0xf) {
1989 case 0x0: /* TMIA */
b26eefb6
PB
1990 gen_movl_T0_reg(s, rd0);
1991 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
1992 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1993 break;
1994 case 0x8: /* TMIAPH */
b26eefb6
PB
1995 gen_movl_T0_reg(s, rd0);
1996 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
1997 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1998 break;
1999 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
b26eefb6 2000 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2001 if (insn & (1 << 16))
2002 gen_op_shrl_T1_im(16);
2003 gen_op_movl_T0_T1();
b26eefb6 2004 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2005 if (insn & (1 << 17))
2006 gen_op_shrl_T1_im(16);
2007 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2008 break;
2009 default:
2010 return 1;
2011 }
2012 gen_op_iwmmxt_movq_wRn_M0(wrd);
2013 gen_op_iwmmxt_set_mup();
2014 break;
2015 default:
2016 return 1;
2017 }
2018
2019 return 0;
2020}
2021
2022/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
2023 (ie. an undefined instruction). */
2024static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2025{
2026 int acc, rd0, rd1, rdhi, rdlo;
2027
2028 if ((insn & 0x0ff00f10) == 0x0e200010) {
2029 /* Multiply with Internal Accumulate Format */
2030 rd0 = (insn >> 12) & 0xf;
2031 rd1 = insn & 0xf;
2032 acc = (insn >> 5) & 7;
2033
2034 if (acc != 0)
2035 return 1;
2036
2037 switch ((insn >> 16) & 0xf) {
2038 case 0x0: /* MIA */
b26eefb6
PB
2039 gen_movl_T0_reg(s, rd0);
2040 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2041 gen_op_iwmmxt_muladdsl_M0_T0_T1();
2042 break;
2043 case 0x8: /* MIAPH */
b26eefb6
PB
2044 gen_movl_T0_reg(s, rd0);
2045 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2046 gen_op_iwmmxt_muladdsw_M0_T0_T1();
2047 break;
2048 case 0xc: /* MIABB */
2049 case 0xd: /* MIABT */
2050 case 0xe: /* MIATB */
2051 case 0xf: /* MIATT */
b26eefb6 2052 gen_movl_T1_reg(s, rd0);
18c9b560
AZ
2053 if (insn & (1 << 16))
2054 gen_op_shrl_T1_im(16);
2055 gen_op_movl_T0_T1();
b26eefb6 2056 gen_movl_T1_reg(s, rd1);
18c9b560
AZ
2057 if (insn & (1 << 17))
2058 gen_op_shrl_T1_im(16);
2059 gen_op_iwmmxt_muladdswl_M0_T0_T1();
2060 break;
2061 default:
2062 return 1;
2063 }
2064
2065 gen_op_iwmmxt_movq_wRn_M0(acc);
2066 return 0;
2067 }
2068
2069 if ((insn & 0x0fe00ff8) == 0x0c400000) {
2070 /* Internal Accumulator Access Format */
2071 rdhi = (insn >> 16) & 0xf;
2072 rdlo = (insn >> 12) & 0xf;
2073 acc = insn & 7;
2074
2075 if (acc != 0)
2076 return 1;
2077
2078 if (insn & ARM_CP_RW_BIT) { /* MRA */
2079 gen_op_iwmmxt_movl_T0_T1_wRn(acc);
b26eefb6 2080 gen_movl_reg_T0(s, rdlo);
18c9b560
AZ
2081 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
2082 gen_op_andl_T0_T1();
b26eefb6 2083 gen_movl_reg_T0(s, rdhi);
18c9b560 2084 } else { /* MAR */
b26eefb6
PB
2085 gen_movl_T0_reg(s, rdlo);
2086 gen_movl_T1_reg(s, rdhi);
18c9b560
AZ
2087 gen_op_iwmmxt_movl_wRn_T0_T1(acc);
2088 }
2089 return 0;
2090 }
2091
2092 return 1;
2093}
2094
c1713132
AZ
2095/* Disassemble system coprocessor instruction. Return nonzero if
2096 instruction is not defined. */
2097static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
2098{
2099 uint32_t rd = (insn >> 12) & 0xf;
2100 uint32_t cp = (insn >> 8) & 0xf;
2101 if (IS_USER(s)) {
2102 return 1;
2103 }
2104
18c9b560 2105 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
2106 if (!env->cp[cp].cp_read)
2107 return 1;
2108 gen_op_movl_T0_im((uint32_t) s->pc);
b26eefb6 2109 gen_set_pc_T0();
c1713132
AZ
2110 gen_op_movl_T0_cp(insn);
2111 gen_movl_reg_T0(s, rd);
2112 } else {
2113 if (!env->cp[cp].cp_write)
2114 return 1;
2115 gen_op_movl_T0_im((uint32_t) s->pc);
b26eefb6 2116 gen_set_pc_T0();
c1713132
AZ
2117 gen_movl_T0_reg(s, rd);
2118 gen_op_movl_cp_T0(insn);
2119 }
2120 return 0;
2121}
2122
9ee6e8bb
PB
2123static int cp15_user_ok(uint32_t insn)
2124{
2125 int cpn = (insn >> 16) & 0xf;
2126 int cpm = insn & 0xf;
2127 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
2128
2129 if (cpn == 13 && cpm == 0) {
2130 /* TLS register. */
2131 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
2132 return 1;
2133 }
2134 if (cpn == 7) {
2135 /* ISB, DSB, DMB. */
2136 if ((cpm == 5 && op == 4)
2137 || (cpm == 10 && (op == 4 || op == 5)))
2138 return 1;
2139 }
2140 return 0;
2141}
2142
b5ff1b31
FB
2143/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
2144 instruction is not defined. */
a90b7318 2145static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
2146{
2147 uint32_t rd;
2148
9ee6e8bb
PB
2149 /* M profile cores use memory mapped registers instead of cp15. */
2150 if (arm_feature(env, ARM_FEATURE_M))
2151 return 1;
2152
2153 if ((insn & (1 << 25)) == 0) {
2154 if (insn & (1 << 20)) {
2155 /* mrrc */
2156 return 1;
2157 }
2158 /* mcrr. Used for block cache operations, so implement as no-op. */
2159 return 0;
2160 }
2161 if ((insn & (1 << 4)) == 0) {
2162 /* cdp */
2163 return 1;
2164 }
2165 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
2166 return 1;
2167 }
9332f9da
FB
2168 if ((insn & 0x0fff0fff) == 0x0e070f90
2169 || (insn & 0x0fff0fff) == 0x0e070f58) {
2170 /* Wait for interrupt. */
2171 gen_op_movl_T0_im((long)s->pc);
b26eefb6 2172 gen_set_pc_T0();
9ee6e8bb 2173 s->is_jmp = DISAS_WFI;
9332f9da
FB
2174 return 0;
2175 }
b5ff1b31 2176 rd = (insn >> 12) & 0xf;
18c9b560 2177 if (insn & ARM_CP_RW_BIT) {
b5ff1b31
FB
2178 gen_op_movl_T0_cp15(insn);
2179 /* If the destination register is r15 then sets condition codes. */
2180 if (rd != 15)
2181 gen_movl_reg_T0(s, rd);
2182 } else {
2183 gen_movl_T0_reg(s, rd);
2184 gen_op_movl_cp15_T0(insn);
a90b7318
AZ
2185 /* Normally we would always end the TB here, but Linux
2186 * arch/arm/mach-pxa/sleep.S expects two instructions following
2187 * an MMU enable to execute from cache. Imitate this behaviour. */
2188 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
2189 (insn & 0x0fff0fff) != 0x0e010f10)
2190 gen_lookup_tb(s);
b5ff1b31 2191 }
b5ff1b31
FB
2192 return 0;
2193}
2194
9ee6e8bb
PB
/* Helpers for extracting VFP register numbers from an instruction word.
   A single-precision register number is 5 bits: four from the main field
   plus one low bit taken from `smallbit'.  For double precision the extra
   bit is the HIGH bit and is only valid with VFP3 (otherwise only 16
   D-registers exist and the bit must be zero).
   NOTE: VFP_DREG contains a hidden `return 1' on an invalid encoding, so
   it may only be used inside functions following the disas_* convention
   of returning nonzero for an undefined instruction. */
2195#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
2196#define VFP_SREG(insn, bigbit, smallbit) \
2197 ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
2198#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
2199 if (arm_feature(env, ARM_FEATURE_VFP3)) { \
2200 reg = (((insn) >> (bigbit)) & 0x0f) \
2201 | (((insn) >> ((smallbit) - 4)) & 0x10); \
2202 } else { \
2203 if (insn & (1 << (smallbit))) \
2204 return 1; \
2205 reg = ((insn) >> (bigbit)) & 0x0f; \
2206 }} while (0)
2207
/* Operand positions: D = destination, N = first source, M = second source. */
2208#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
2209#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
2210#define VFP_SREG_N(insn) VFP_SREG(insn, 16, 7)
2211#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
2212#define VFP_SREG_M(insn) VFP_SREG(insn, 0, 5)
2213#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
2214
2215static inline int
2216vfp_enabled(CPUState * env)
2217{
2218 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
2219}
2220
b7bcbe95
FB
/* Disassemble a VFP instruction.  Returns nonzero if an error occured
   (ie. an undefined instruction).  */
static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
    int dp, veclen;

    /* No VFP at all -> undefined instruction.  */
    if (!arm_feature(env, ARM_FEATURE_VFP))
        return 1;

    if (!vfp_enabled(env)) {
        /* VFP disabled.  Only allow fmxr/fmrx to/from some control regs.  */
        if ((insn & 0x0fe00fff) != 0x0ee00a10)
            return 1;
        rn = (insn >> 16) & 0xf;
        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
            return 1;
    }
    /* Coprocessor 11 encodings (0xb00) are the double-precision forms.  */
    dp = ((insn & 0xf00) == 0xb00);
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                /* Transfers to/from a lane of a double register; the
                   sub-word forms are NEON-only.  */
                int size;
                int pass;

                VFP_DREG_N(rn, insn);
                if (insn & 0xf)
                    return 1;
                if (insn & 0x00c00060
                    && !arm_feature(env, ARM_FEATURE_NEON))
                    return 1;

                /* pass selects the 32-bit half of the D register;
                   size/offset select a byte/halfword/word lane.  */
                pass = (insn >> 21) & 1;
                if (insn & (1 << 22)) {
                    size = 0;
                    offset = ((insn >> 5) & 3) * 8;
                } else if (insn & (1 << 5)) {
                    size = 1;
                    offset = (insn & (1 << 6)) ? 16 : 0;
                } else {
                    size = 2;
                    offset = 0;
                }
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    switch (size) {
                    case 0:
                        /* Byte lane: shift down, then zero/sign extend
                           depending on bit 23 (unsigned flag).  */
                        NEON_GET_REG(T1, rn, pass);
                        if (offset)
                            gen_op_shrl_T1_im(offset);
                        if (insn & (1 << 23))
                            gen_uxtb(cpu_T[1]);
                        else
                            gen_sxtb(cpu_T[1]);
                        break;
                    case 1:
                        NEON_GET_REG(T1, rn, pass);
                        if (insn & (1 << 23)) {
                            if (offset) {
                                gen_op_shrl_T1_im(16);
                            } else {
                                gen_uxth(cpu_T[1]);
                            }
                        } else {
                            if (offset) {
                                gen_op_sarl_T1_im(16);
                            } else {
                                gen_sxth(cpu_T[1]);
                            }
                        }
                        break;
                    case 2:
                        NEON_GET_REG(T1, rn, pass);
                        break;
                    }
                    gen_movl_reg_T1(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 23)) {
                        /* VDUP: replicate the scalar into both halves.  */
                        if (size == 0) {
                            gen_op_neon_dup_u8(0);
                        } else if (size == 1) {
                            gen_op_neon_dup_low16();
                        }
                        NEON_SET_REG(T0, rn, 0);
                        NEON_SET_REG(T0, rn, 1);
                    } else {
                        /* VMOV: insert into the selected lane, keeping
                           the other lanes of the register intact.  */
                        switch (size) {
                        case 0:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xff);
                            gen_op_andl_T0_T1();
                            gen_op_neon_insert_elt(offset, ~(0xff << offset));
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 1:
                            NEON_GET_REG(T2, rn, pass);
                            gen_op_movl_T1_im(0xffff);
                            gen_op_andl_T0_T1();
                            bank_mask = offset ? 0xffff : 0xffff0000;
                            gen_op_neon_insert_elt(offset, bank_mask);
                            NEON_SET_REG(T2, rn, pass);
                            break;
                        case 2:
                            NEON_SET_REG(T0, rn, pass);
                            break;
                        }
                    }
                }
            } else { /* !dp */
                /* Single-precision transfers (VMOV/FMRX/FMXR forms).  */
                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);
                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (insn & (1 << 21)) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access for FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses.  */
                            if (IS_USER(s)
                                && arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3.  */
                            if (IS_USER(s)
                                || arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        case ARM_VFP_FPSCR:
                            /* rd == 15 is the FMSTAT form: only the flag
                               bits are read.  */
                            if (rd == 15)
                                gen_op_vfp_movl_T0_fpscr_flags();
                            else
                                gen_op_vfp_movl_T0_fpscr();
                            break;
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_feature(env, ARM_FEATURE_VFP3))
                                return 1;
                            gen_op_vfp_movl_T0_xreg(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        gen_op_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR.  */
                        gen_set_nzcv(cpu_T[0]);
                    } else
                        gen_movl_reg_T0(s, rd);
                } else {
                    /* arm->vfp */
                    gen_movl_T0_reg(s, rd);
                    if (insn & (1 << 21)) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored.  */
                            break;
                        case ARM_VFP_FPSCR:
                            /* FPSCR affects decode state (vector len /
                               stride), so force a TB lookup.  */
                            gen_op_vfp_movl_fpscr_T0();
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            gen_op_vfp_movl_xreg_T0(rn);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            gen_op_vfp_movl_xreg_T0(rn);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_op_vfp_msr();
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
        } else {
            /* data processing */
            /* The opcode is in bits 23, 21, 20 and 6.  */
            op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
            if (dp) {
                if (op == 15) {
                    /* rn is opcode */
                    rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
                } else {
                    /* rn is register number */
                    VFP_DREG_N(rn, insn);
                }

                if (op == 15 && (rn == 15 || rn > 17)) {
                    /* Integer or single precision destination.  */
                    rd = VFP_SREG_D(insn);
                } else {
                    VFP_DREG_D(rd, insn);
                }

                if (op == 15 && (rn == 16 || rn == 17)) {
                    /* Integer source.  */
                    rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
                } else {
                    VFP_DREG_M(rm, insn);
                }
            } else {
                rn = VFP_SREG_N(insn);
                if (op == 15 && rn == 15) {
                    /* Double precision destination.  */
                    VFP_DREG_D(rd, insn);
                } else {
                    rd = VFP_SREG_D(insn);
                }
                rm = VFP_SREG_M(insn);
            }

            /* Short-vector (FPSCR LEN/STRIDE) handling: compare and
               convert ops (rn > 3 in extension space) are always scalar.  */
            veclen = env->vfp.vec_len;
            if (op == 15 && rn > 3)
                veclen = 0;

            /* Shut up compiler warnings.  */
            delta_m = 0;
            delta_d = 0;
            bank_mask = 0;

            if (veclen > 0) {
                if (dp)
                    bank_mask = 0xc;
                else
                    bank_mask = 0x18;

                /* Figure out what type of vector operation this is.  */
                if ((rd & bank_mask) == 0) {
                    /* scalar */
                    veclen = 0;
                } else {
                    if (dp)
                        delta_d = (env->vfp.vec_stride >> 1) + 1;
                    else
                        delta_d = env->vfp.vec_stride + 1;

                    if ((rm & bank_mask) == 0) {
                        /* mixed scalar/vector */
                        delta_m = 0;
                    } else {
                        /* vector */
                        delta_m = delta_d;
                    }
                }
            }

            /* Load the initial operands.  */
            if (op == 15) {
                switch (rn) {
                case 16:
                case 17:
                    /* Integer source */
                    gen_mov_F0_vreg(0, rm);
                    break;
                case 8:
                case 9:
                    /* Compare */
                    gen_mov_F0_vreg(dp, rd);
                    gen_mov_F1_vreg(dp, rm);
                    break;
                case 10:
                case 11:
                    /* Compare with zero */
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_F1_ld0(dp);
                    break;
                case 20:
                case 21:
                case 22:
                case 23:
                    /* Source and destination the same.  */
                    gen_mov_F0_vreg(dp, rd);
                    break;
                default:
                    /* One source operand.  */
                    gen_mov_F0_vreg(dp, rm);
                    break;
                }
            } else {
                /* Two source operands.  */
                gen_mov_F0_vreg(dp, rn);
                gen_mov_F1_vreg(dp, rm);
            }

            /* Loop over the vector elements (one iteration for scalars).  */
            for (;;) {
                /* Perform the calculation.  */
                switch (op) {
                case 0: /* mac: fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 1: /* nmac: fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    break;
                case 2: /* msc: -fd + (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_sub(dp);
                    break;
                case 3: /* nmsc: -fd - (fn * fm) */
                    gen_vfp_mul(dp);
                    gen_mov_F1_vreg(dp, rd);
                    gen_vfp_add(dp);
                    gen_vfp_neg(dp);
                    break;
                case 4: /* mul: fn * fm */
                    gen_vfp_mul(dp);
                    break;
                case 5: /* nmul: -(fn * fm) */
                    gen_vfp_mul(dp);
                    gen_vfp_neg(dp);
                    break;
                case 6: /* add: fn + fm */
                    gen_vfp_add(dp);
                    break;
                case 7: /* sub: fn - fm */
                    gen_vfp_sub(dp);
                    break;
                case 8: /* div: fn / fm */
                    gen_vfp_div(dp);
                    break;
                case 14: /* fconst */
                    /* VFP3 immediate constant: expand the 8-bit encoded
                       immediate into a float/double bit pattern.  */
                    if (!arm_feature(env, ARM_FEATURE_VFP3))
                        return 1;

                    n = (insn << 12) & 0x80000000;
                    i = ((insn >> 12) & 0x70) | (insn & 0xf);
                    if (dp) {
                        if (i & 0x40)
                            i |= 0x3f80;
                        else
                            i |= 0x4000;
                        n |= i << 16;
                    } else {
                        if (i & 0x40)
                            i |= 0x780;
                        else
                            i |= 0x800;
                        n |= i << 19;
                    }
                    gen_vfp_fconst(dp, n);
                    break;
                case 15: /* extension space */
                    switch (rn) {
                    case 0: /* cpy */
                        /* no-op */
                        break;
                    case 1: /* abs */
                        gen_vfp_abs(dp);
                        break;
                    case 2: /* neg */
                        gen_vfp_neg(dp);
                        break;
                    case 3: /* sqrt */
                        gen_vfp_sqrt(dp);
                        break;
                    case 8: /* cmp */
                        gen_vfp_cmp(dp);
                        break;
                    case 9: /* cmpe */
                        gen_vfp_cmpe(dp);
                        break;
                    case 10: /* cmpz */
                        gen_vfp_cmp(dp);
                        break;
                    case 11: /* cmpez */
                        gen_vfp_F1_ld0(dp);
                        gen_vfp_cmpe(dp);
                        break;
                    case 15: /* single<->double conversion */
                        if (dp)
                            gen_op_vfp_fcvtsd();
                        else
                            gen_op_vfp_fcvtds();
                        break;
                    case 16: /* fuito */
                        gen_vfp_uito(dp);
                        break;
                    case 17: /* fsito */
                        gen_vfp_sito(dp);
                        break;
                    case 20: /* fshto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_shto(dp, rm);
                        break;
                    case 21: /* fslto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_slto(dp, rm);
                        break;
                    case 22: /* fuhto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_uhto(dp, rm);
                        break;
                    case 23: /* fulto */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_ulto(dp, rm);
                        break;
                    case 24: /* ftoui */
                        gen_vfp_toui(dp);
                        break;
                    case 25: /* ftouiz */
                        gen_vfp_touiz(dp);
                        break;
                    case 26: /* ftosi */
                        gen_vfp_tosi(dp);
                        break;
                    case 27: /* ftosiz */
                        gen_vfp_tosiz(dp);
                        break;
                    case 28: /* ftosh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosh(dp, rm);
                        break;
                    case 29: /* ftosl */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_tosl(dp, rm);
                        break;
                    case 30: /* ftouh */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_touh(dp, rm);
                        break;
                    case 31: /* ftoul */
                        if (!arm_feature(env, ARM_FEATURE_VFP3))
                            return 1;
                        gen_vfp_toul(dp, rm);
                        break;
                    default: /* undefined */
                        printf ("rn:%d\n", rn);
                        return 1;
                    }
                    break;
                default: /* undefined */
                    printf ("op:%d\n", op);
                    return 1;
                }

                /* Write back the result.  */
                if (op == 15 && (rn >= 8 && rn <= 11))
                    ; /* Comparison, do nothing.  */
                else if (op == 15 && rn > 17)
                    /* Integer result.  */
                    gen_mov_vreg_F0(0, rd);
                else if (op == 15 && rn == 15)
                    /* conversion */
                    gen_mov_vreg_F0(!dp, rd);
                else
                    gen_mov_vreg_F0(dp, rd);

                /* break out of the loop if we have finished  */
                if (veclen == 0)
                    break;

                if (op == 15 && delta_m == 0) {
                    /* single source one-many */
                    while (veclen--) {
                        rd = ((rd + delta_d) & (bank_mask - 1))
                             | (rd & bank_mask);
                        gen_mov_vreg_F0(dp, rd);
                    }
                    break;
                }
                /* Setup the next operands.  */
                veclen--;
                rd = ((rd + delta_d) & (bank_mask - 1))
                     | (rd & bank_mask);

                if (op == 15) {
                    /* One source operand.  */
                    rm = ((rm + delta_m) & (bank_mask - 1))
                         | (rm & bank_mask);
                    gen_mov_F0_vreg(dp, rm);
                } else {
                    /* Two source operands.  */
                    rn = ((rn + delta_d) & (bank_mask - 1))
                         | (rn & bank_mask);
                    gen_mov_F0_vreg(dp, rn);
                    if (delta_m) {
                        rm = ((rm + delta_m) & (bank_mask - 1))
                             | (rm & bank_mask);
                        gen_mov_F1_vreg(dp, rm);
                    }
                }
            }
        }
        break;
    case 0xc:
    case 0xd:
        if (dp && (insn & 0x03e00000) == 0x00400000) {
            /* two-register transfer */
            rn = (insn >> 16) & 0xf;
            rd = (insn >> 12) & 0xf;
            if (dp) {
                VFP_DREG_M(rm, insn);
            } else {
                rm = VFP_SREG_M(insn);
            }

            if (insn & ARM_CP_RW_BIT) {
                /* vfp->arm */
                if (dp) {
                    gen_mov_F0_vreg(1, rm);
                    gen_op_vfp_mrrd();
                    gen_movl_reg_T0(s, rd);
                    gen_movl_reg_T1(s, rn);
                } else {
                    gen_mov_F0_vreg(0, rm);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rn);
                    gen_mov_F0_vreg(0, rm + 1);
                    gen_op_vfp_mrs();
                    gen_movl_reg_T0(s, rd);
                }
            } else {
                /* arm->vfp */
                if (dp) {
                    gen_movl_T0_reg(s, rd);
                    gen_movl_T1_reg(s, rn);
                    gen_op_vfp_mdrr();
                    gen_mov_vreg_F0(1, rm);
                } else {
                    gen_movl_T0_reg(s, rn);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm);
                    gen_movl_T0_reg(s, rd);
                    gen_op_vfp_msr();
                    gen_mov_vreg_F0(0, rm + 1);
                }
            }
        } else {
            /* Load/store */
            rn = (insn >> 16) & 0xf;
            if (dp)
                VFP_DREG_D(rd, insn);
            else
                rd = VFP_SREG_D(insn);
            if (s->thumb && rn == 15) {
                /* PC-relative in Thumb: base is the word-aligned PC.  */
                gen_op_movl_T1_im(s->pc & ~2);
            } else {
                gen_movl_T1_reg(s, rn);
            }
            if ((insn & 0x01200000) == 0x01000000) {
                /* Single load/store */
                offset = (insn & 0xff) << 2;
                if ((insn & (1 << 23)) == 0)
                    offset = -offset;
                gen_op_addl_T1_im(offset);
                if (insn & (1 << 20)) {
                    gen_vfp_ld(s, dp);
                    gen_mov_vreg_F0(dp, rd);
                } else {
                    gen_mov_F0_vreg(dp, rd);
                    gen_vfp_st(s, dp);
                }
            } else {
                /* load/store multiple */
                if (dp)
                    n = (insn >> 1) & 0x7f;
                else
                    n = insn & 0xff;

                if (insn & (1 << 24)) /* pre-decrement */
                    gen_op_addl_T1_im(-((insn & 0xff) << 2));

                if (dp)
                    offset = 8;
                else
                    offset = 4;
                for (i = 0; i < n; i++) {
                    if (insn & ARM_CP_RW_BIT) {
                        /* load */
                        gen_vfp_ld(s, dp);
                        gen_mov_vreg_F0(dp, rd + i);
                    } else {
                        /* store */
                        gen_mov_F0_vreg(dp, rd + i);
                        gen_vfp_st(s, dp);
                    }
                    gen_op_addl_T1_im(offset);
                }
                if (insn & (1 << 21)) {
                    /* writeback */
                    if (insn & (1 << 24))
                        offset = -offset * n;
                    else if (dp && (insn & 1))
                        offset = 4;
                    else
                        offset = 0;

                    if (offset != 0)
                        gen_op_addl_T1_im(offset);
                    gen_movl_reg_T1(s, rn);
                }
            }
        }
        break;
    default:
        /* Should never happen.  */
        return 1;
    }
    return 0;
}
2868
6e256c93 2869static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 2870{
6e256c93
FB
2871 TranslationBlock *tb;
2872
2873 tb = s->tb;
2874 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
57fec1fe 2875 tcg_gen_goto_tb(n);
6e256c93 2876 gen_op_movl_T0_im(dest);
b26eefb6 2877 gen_set_pc_T0();
57fec1fe 2878 tcg_gen_exit_tb((long)tb + n);
6e256c93
FB
2879 } else {
2880 gen_op_movl_T0_im(dest);
b26eefb6 2881 gen_set_pc_T0();
57fec1fe 2882 tcg_gen_exit_tb(0);
6e256c93 2883 }
c53be334
FB
2884}
2885
8aaca4c0
FB
2886static inline void gen_jmp (DisasContext *s, uint32_t dest)
2887{
2888 if (__builtin_expect(s->singlestep_enabled, 0)) {
2889 /* An indirect jump so that we still trigger the debug exception. */
5899f386 2890 if (s->thumb)
d9ba4830
PB
2891 dest |= 1;
2892 gen_bx_im(s, dest);
8aaca4c0 2893 } else {
6e256c93 2894 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
2895 s->is_jmp = DISAS_TB_JUMP;
2896 }
2897}
2898
/* Signed 16x16->32 multiply for the SMUL<x><y> family: x/y select the
   top (1) or bottom (0) halfword of t0/t1 respectively; the product is
   left in t0.  Both operands are clobbered.  */
static inline void gen_mulxy(TCGv t0, TCGv t1, int x, int y)
{
    if (x)
        tcg_gen_sari_i32(t0, t0, 16);   /* top half, sign-extended */
    else
        gen_sxth(t0);                   /* bottom half, sign-extended */
    if (y)
        tcg_gen_sari_i32(t1, t1, 16);
    else
        gen_sxth(t1);
    tcg_gen_mul_i32(t0, t0, t1);
}
2911
2912/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 2913static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
2914 uint32_t mask;
2915
2916 mask = 0;
2917 if (flags & (1 << 0))
2918 mask |= 0xff;
2919 if (flags & (1 << 1))
2920 mask |= 0xff00;
2921 if (flags & (1 << 2))
2922 mask |= 0xff0000;
2923 if (flags & (1 << 3))
2924 mask |= 0xff000000;
9ee6e8bb 2925
2ae23e75 2926 /* Mask out undefined bits. */
9ee6e8bb
PB
2927 mask &= ~CPSR_RESERVED;
2928 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 2929 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 2930 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 2931 mask &= ~CPSR_IT;
9ee6e8bb 2932 /* Mask out execution state bits. */
2ae23e75 2933 if (!spsr)
e160c51c 2934 mask &= ~CPSR_EXEC;
b5ff1b31
FB
2935 /* Mask out privileged bits. */
2936 if (IS_USER(s))
9ee6e8bb 2937 mask &= CPSR_USER;
b5ff1b31
FB
2938 return mask;
2939}
2940
/* Returns nonzero if access to the PSR is not permitted.  Merges the
   bits of T0 selected by `mask' into the CPSR or (if `spsr') the
   current mode's SPSR, then forces a TB lookup since decode state may
   have changed.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    TCGv tmp;
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;

        /* Read-modify-write the SPSR under the mask.  */
        tmp = load_cpu_field(spsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(cpu_T[0], cpu_T[0], mask);
        tcg_gen_or_i32(tmp, tmp, cpu_T[0]);
        store_cpu_field(tmp, spsr);
    } else {
        /* CPSR writes go through the helper (may switch mode etc).  */
        gen_set_cpsr(cpu_T[0], mask);
    }
    gen_lookup_tb(s);
    return 0;
}
2961
/* Generate an old-style exception return (e.g. MOVS pc, lr): the new PC
   is already in T0, and the SPSR is copied wholesale into the CPSR.  */
static void gen_exception_return(DisasContext *s)
{
    TCGv tmp;
    gen_set_pc_T0();
    tmp = load_cpu_field(spsr);
    gen_set_cpsr(tmp, 0xffffffff);
    dead_tmp(tmp);
    /* CPU state changed: end the TB and re-evaluate.  */
    s->is_jmp = DISAS_UPDATE;
}
2972
9ee6e8bb
PB
/* Generate a v6 exception return.  Expects the saved CPSR value in T0
   and the return address in T2.  */
static void gen_rfe(DisasContext *s)
{
    gen_set_cpsr(cpu_T[0], 0xffffffff);
    gen_op_movl_T0_T2();
    gen_set_pc_T0();
    /* CPU state changed: end the TB and re-evaluate.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 2981
9ee6e8bb
PB
/* Store the current Thumb-2 IT (if-then) state back into the CPU so a
   mid-block exception resumes with correct condexec bits.  No-op when
   not inside an IT block.  */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack cond/mask into the architectural encoding.  */
        uint32_t val = (s->condexec_cond << 4) | (s->condexec_mask >> 1);
        TCGv tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_cpu_field(tmp, condexec_bits);
    }
}
3b46e624 2992
9ee6e8bb
PB
2993static void gen_nop_hint(DisasContext *s, int val)
2994{
2995 switch (val) {
2996 case 3: /* wfi */
2997 gen_op_movl_T0_im((long)s->pc);
b26eefb6 2998 gen_set_pc_T0();
9ee6e8bb
PB
2999 s->is_jmp = DISAS_WFI;
3000 break;
3001 case 2: /* wfe */
3002 case 4: /* sev */
3003 /* TODO: Implement SEV and WFE. May help SMP performance. */
3004 default: /* nop */
3005 break;
3006 }
3007}
99c475ab 3008
9ee6e8bb
PB
/* Neon shift by constant.  The actual ops are the same as used for
   variable shifts.  [OP][U][SIZE].  OP is the immediate-shift opcode,
   U the unsigned flag, SIZE the element size (8/16/32/64).  Entries
   that cannot occur (VSRI with U == 0) are NULL.  */
static GenOpFunc *gen_neon_shift_im[8][2][4] = {
    { /* 0 */ /* VSHR */
      {
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64
      }, {
        gen_op_neon_shl_s8,
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
        gen_op_neon_shl_s64
      }
    }, { /* 1 */ /* VSRA */
      {
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64
      }, {
        gen_op_neon_shl_s8,
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
        gen_op_neon_shl_s64
      }
    }, { /* 2 */ /* VRSHR */
      {
        gen_op_neon_rshl_u8,
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s8,
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }, { /* 3 */ /* VRSRA */
      {
        gen_op_neon_rshl_u8,
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s8,
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }, { /* 4 */
      {
        NULL, NULL, NULL, NULL
      }, { /* VSRI */
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }
    }, { /* 5 */
      { /* VSHL */
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }, { /* VSLI */
        gen_op_neon_shl_u8,
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64,
      }
    }, { /* 6 */ /* VQSHL */
      {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }, {
        gen_op_neon_qshl_s8,
        gen_op_neon_qshl_s16,
        gen_op_neon_qshl_s32,
        gen_op_neon_qshl_s64
      }
    }, { /* 7 */ /* VQSHLU */
      {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }, {
        gen_op_neon_qshl_u8,
        gen_op_neon_qshl_u16,
        gen_op_neon_qshl_u32,
        gen_op_neon_qshl_u64
      }
    }
};
3107
/* Shift ops used by the narrowing shift-immediate instructions.
   Indexed [R][U][size - 1]: R selects rounding, U unsigned, and the
   element size starts at 16 bits (size 1).  */
static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
    {
      {
        gen_op_neon_shl_u16,
        gen_op_neon_shl_u32,
        gen_op_neon_shl_u64
      }, {
        gen_op_neon_shl_s16,
        gen_op_neon_shl_s32,
        gen_op_neon_shl_s64
      }
    }, {
      {
        gen_op_neon_rshl_u16,
        gen_op_neon_rshl_u32,
        gen_op_neon_rshl_u64
      }, {
        gen_op_neon_rshl_s16,
        gen_op_neon_rshl_s32,
        gen_op_neon_rshl_s64
      }
    }
};
99c475ab 3132
9ee6e8bb
PB
/* Narrowing a 32-bit element to 32 bits is the identity: the value is
   already in place, so no code is generated.  Exists so the
   gen_neon_narrow[] table has an entry for every size.
   Fix: use a (void) prototype instead of K&R empty parens, matching
   the sibling helpers (e.g. gen_op_neon_widen_u32).  */
static inline void
gen_op_neon_narrow_u32 (void)
{
    /* No-op.  */
}
3138
/* Narrowing ops indexed by source element size (16/32/64 -> 8/16/32):
   plain truncation, unsigned saturating, and signed saturating.  */
static GenOpFunc *gen_neon_narrow[3] = {
    gen_op_neon_narrow_u8,
    gen_op_neon_narrow_u16,
    gen_op_neon_narrow_u32
};

static GenOpFunc *gen_neon_narrow_satu[3] = {
    gen_op_neon_narrow_sat_u8,
    gen_op_neon_narrow_sat_u16,
    gen_op_neon_narrow_sat_u32
};

static GenOpFunc *gen_neon_narrow_sats[3] = {
    gen_op_neon_narrow_sat_s8,
    gen_op_neon_narrow_sat_s16,
    gen_op_neon_narrow_sat_s32
};
3156
/* Emit a per-element add of T0 and T1 for the given element size.
   Returns nonzero (undefined instruction) for 64-bit elements.  */
static inline int gen_neon_add(int size)
{
    if (size == 0)
        gen_op_neon_add_u8();
    else if (size == 1)
        gen_op_neon_add_u16();
    else if (size == 2)
        gen_op_addl_T0_T1();
    else
        return 1;
    return 0;
}
3167
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_op_neon_pmax_s32 gen_op_neon_max_s32
#define gen_op_neon_pmax_u32 gen_op_neon_max_u32
#define gen_op_neon_pmin_s32 gen_op_neon_min_s32
#define gen_op_neon_pmin_u32 gen_op_neon_min_u32

/* Dispatch the integer op `name' on the (size, u) pair from the
   enclosing decoder scope: signed/unsigned x 8/16/32-bit elements.
   Expands to `return 1' (undefined) for 64-bit element sizes.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_op_neon_##name##_s8(); break; \
    case 1: gen_op_neon_##name##_u8(); break; \
    case 2: gen_op_neon_##name##_s16(); break; \
    case 3: gen_op_neon_##name##_u16(); break; \
    case 4: gen_op_neon_##name##_s32(); break; \
    case 5: gen_op_neon_##name##_u32(); break; \
    default: return 1; \
    }} while (0)
3184
3185static inline void
3186gen_neon_movl_scratch_T0(int scratch)
3187{
3188 uint32_t offset;
3189
3190 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3191 gen_op_neon_setreg_T0(offset);
3192}
3193
3194static inline void
3195gen_neon_movl_scratch_T1(int scratch)
3196{
3197 uint32_t offset;
3198
3199 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3200 gen_op_neon_setreg_T1(offset);
3201}
3202
3203static inline void
3204gen_neon_movl_T0_scratch(int scratch)
3205{
3206 uint32_t offset;
3207
3208 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3209 gen_op_neon_getreg_T0(offset);
3210}
3211
3212static inline void
3213gen_neon_movl_T1_scratch(int scratch)
3214{
3215 uint32_t offset;
3216
3217 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
3218 gen_op_neon_getreg_T1(offset);
3219}
3220
/* Widen an unsigned 32-bit value in T0 to 64 bits in the T0:T1 pair by
   zeroing the high word (T1).  */
static inline void gen_op_neon_widen_u32(void)
{
    gen_op_movl_T1_im(0);
}
3225
/* Load a NEON scalar operand into T0.  `reg' indexes 16-bit (size == 1)
   or 32-bit scalars; for halfwords the selected half is duplicated into
   both halves of T0 so 32-bit ops can be reused.  */
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1)
            gen_op_neon_dup_low16();
        else
            gen_op_neon_dup_high16();
    }
}
3238
3239static void gen_neon_unzip(int reg, int q, int tmp, int size)
3240{
3241 int n;
3242
3243 for (n = 0; n < q + 1; n += 2) {
3244 NEON_GET_REG(T0, reg, n);
3245 NEON_GET_REG(T0, reg, n + n);
3246 switch (size) {
3247 case 0: gen_op_neon_unzip_u8(); break;
3248 case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same. */
3249 case 2: /* no-op */; break;
3250 default: abort();
3251 }
3252 gen_neon_movl_scratch_T0(tmp + n);
3253 gen_neon_movl_scratch_T1(tmp + n + 1);
3254 }
3255}
3256
/* Register-list layout for the "load/store all elements" NEON
   element/structure instructions, indexed by the op field.
   nregs: D registers in the list; interleave: element interleaving
   factor; spacing: register spacing increment.
   NOTE(review): rows presumably correspond to the VLD/VST 1-4 register
   variants in encoding order -- confirm against the ARM ARM tables.  */
static struct {
    int nregs;
    int interleave;
    int spacing;
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
3274
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    uint32_t mask;
    int n;

    if (!vfp_enabled(env))
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;    /* base address register */
    rm = insn & 0xf;            /* index register / writeback selector */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10 || size == 3)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        gen_movl_T1_reg(s, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* Re-seed the address for interleaved layouts.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im((1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im(1 << size);
            }
            /* Each D register is handled as two 32-bit passes.  */
            for (pass = 0; pass < 2; pass++) {
                if (size == 2) {
                    if (load) {
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T0, rd, pass);
                        gen_ldst(stl, s);
                    }
                    gen_op_addl_T1_im(stride);
                } else if (size == 1) {
                    /* Two halfword accesses assembled in T2.  */
                    if (load) {
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_movl_T2_T0();
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_insert_elt(16, 0xffff);
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_movl_T0_T2();
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_extract_elt(16, 0xffff0000);
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    /* Four byte accesses assembled in T2.  */
                    if (load) {
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            gen_ldst(ldub, s);
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
                                gen_op_movl_T2_T0();
                            } else {
                                gen_op_neon_insert_elt(n * 8, ~mask);
                            }
                            mask <<= 8;
                        }
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            if (n == 0) {
                                gen_op_movl_T0_T2();
                            } else {
                                gen_op_neon_extract_elt(n * 8, mask);
                            }
                            gen_ldst(stb, s);
                            gen_op_addl_T1_im(stride);
                            mask <<= 8;
                        }
                    }
                }
            }
            rd += neon_ls_element_type[op].spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    gen_ldst(ldub, s);
                    gen_op_neon_dup_u8(0);
                    break;
                case 1:
                    gen_ldst(lduw, s);
                    gen_op_neon_dup_low16();
                    break;
                case 2:
                    gen_ldst(ldl, s);
                    break;
                case 3:
                    return 1;
                }
                gen_op_addl_T1_im(1 << size);
                /* Replicate into both halves of the D register.  */
                NEON_SET_REG(T0, rd, 0);
                NEON_SET_REG(T0, rd, 1);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                mask = 0xff << shift;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                mask = shift ? 0xffff0000 : 0xffff;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                mask = 0xffffffff;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    if (size != 2) {
                        NEON_GET_REG(T2, rd, pass);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(ldub, s);
                        break;
                    case 1:
                        gen_ldst(lduw, s);
                        break;
                    case 2:
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                        break;
                    }
                    if (size != 2) {
                        /* Merge the loaded value into the target lane.  */
                        gen_op_neon_insert_elt(shift, ~mask);
                        NEON_SET_REG(T0, rd, pass);
                    }
                } else { /* Store */
                    if (size == 2) {
                        NEON_GET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_neon_extract_elt(shift, mask);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(stb, s);
                        break;
                    case 1:
                        gen_ldst(stw, s);
                        break;
                    case 2:
                        gen_ldst(stl, s);
                        break;
                    }
                }
                rd += stride;
                gen_op_addl_T1_im(1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    /* Base register writeback: rm == 15 -> none; rm == 13 -> advance by
       the transfer size; otherwise add the index register rm.  */
    if (rm != 15) {
        TCGv base;

        base = load_reg(s, rn);
        if (rm == 13) {
            tcg_gen_addi_i32(base, base, stride);
        } else {
            TCGv index;
            index = load_reg(s, rm);
            tcg_gen_add_i32(base, base, index);
            dead_tmp(index);
        }
        store_reg(s, rn, base);
    }
    return 0;
}
3b46e624 3495
9ee6e8bb
PB
3496/* Translate a NEON data processing instruction. Return nonzero if the
3497 instruction is invalid.
3498 In general we process vectors in 32-bit chunks. This means we can reuse
3499 some of the scalar ops, and hopefully the code generated for 32-bit
3500 hosts won't be too awful. The downside is that the few 64-bit operations
3501 (mainly shifts) get complicated. */
2c0262af 3502
9ee6e8bb
PB
3503static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
3504{
3505 int op;
3506 int q;
3507 int rd, rn, rm;
3508 int size;
3509 int shift;
3510 int pass;
3511 int count;
3512 int pairwise;
3513 int u;
3514 int n;
3515 uint32_t imm;
3516
3517 if (!vfp_enabled(env))
3518 return 1;
3519 q = (insn & (1 << 6)) != 0;
3520 u = (insn >> 24) & 1;
3521 VFP_DREG_D(rd, insn);
3522 VFP_DREG_N(rn, insn);
3523 VFP_DREG_M(rm, insn);
3524 size = (insn >> 20) & 3;
3525 if ((insn & (1 << 23)) == 0) {
3526 /* Three register same length. */
3527 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
3528 if (size == 3 && (op == 1 || op == 5 || op == 16)) {
3529 for (pass = 0; pass < (q ? 2 : 1); pass++) {
3530 NEON_GET_REG(T0, rm, pass * 2);
3531 NEON_GET_REG(T1, rm, pass * 2 + 1);
3532 gen_neon_movl_scratch_T0(0);
3533 gen_neon_movl_scratch_T1(1);
3534 NEON_GET_REG(T0, rn, pass * 2);
3535 NEON_GET_REG(T1, rn, pass * 2 + 1);
3536 switch (op) {
3537 case 1: /* VQADD */
3538 if (u) {
3539 gen_op_neon_addl_saturate_u64();
2c0262af 3540 } else {
9ee6e8bb 3541 gen_op_neon_addl_saturate_s64();
2c0262af 3542 }
9ee6e8bb
PB
3543 break;
3544 case 5: /* VQSUB */
3545 if (u) {
3546 gen_op_neon_subl_saturate_u64();
1e8d4eec 3547 } else {
9ee6e8bb 3548 gen_op_neon_subl_saturate_s64();
1e8d4eec 3549 }
9ee6e8bb
PB
3550 break;
3551 case 16:
3552 if (u) {
3553 gen_op_neon_subl_u64();
3554 } else {
3555 gen_op_neon_addl_u64();
3556 }
3557 break;
3558 default:
3559 abort();
2c0262af 3560 }
9ee6e8bb
PB
3561 NEON_SET_REG(T0, rd, pass * 2);
3562 NEON_SET_REG(T1, rd, pass * 2 + 1);
2c0262af 3563 }
9ee6e8bb 3564 return 0;
2c0262af 3565 }
9ee6e8bb
PB
3566 switch (op) {
3567 case 8: /* VSHL */
3568 case 9: /* VQSHL */
3569 case 10: /* VRSHL */
3570 case 11: /* VQSHL */
3571 /* Shift operations have Rn and Rm reversed. */
3572 {
3573 int tmp;
3574 tmp = rn;
3575 rn = rm;
3576 rm = tmp;
3577 pairwise = 0;
3578 }
2c0262af 3579 break;
9ee6e8bb
PB
3580 case 20: /* VPMAX */
3581 case 21: /* VPMIN */
3582 case 23: /* VPADD */
3583 pairwise = 1;
2c0262af 3584 break;
9ee6e8bb
PB
3585 case 26: /* VPADD (float) */
3586 pairwise = (u && size < 2);
2c0262af 3587 break;
9ee6e8bb
PB
3588 case 30: /* VPMIN/VPMAX (float) */
3589 pairwise = u;
2c0262af 3590 break;
9ee6e8bb
PB
3591 default:
3592 pairwise = 0;
2c0262af 3593 break;
9ee6e8bb
PB
3594 }
3595 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3596
3597 if (pairwise) {
3598 /* Pairwise. */
3599 if (q)
3600 n = (pass & 1) * 2;
2c0262af 3601 else
9ee6e8bb
PB
3602 n = 0;
3603 if (pass < q + 1) {
3604 NEON_GET_REG(T0, rn, n);
3605 NEON_GET_REG(T1, rn, n + 1);
3606 } else {
3607 NEON_GET_REG(T0, rm, n);
3608 NEON_GET_REG(T1, rm, n + 1);
3609 }
3610 } else {
3611 /* Elementwise. */
3612 NEON_GET_REG(T0, rn, pass);
3613 NEON_GET_REG(T1, rm, pass);
3614 }
3615 switch (op) {
3616 case 0: /* VHADD */
3617 GEN_NEON_INTEGER_OP(hadd);
3618 break;
3619 case 1: /* VQADD */
3620 switch (size << 1| u) {
3621 case 0: gen_op_neon_qadd_s8(); break;
3622 case 1: gen_op_neon_qadd_u8(); break;
3623 case 2: gen_op_neon_qadd_s16(); break;
3624 case 3: gen_op_neon_qadd_u16(); break;
3625 case 4: gen_op_addl_T0_T1_saturate(); break;
3626 case 5: gen_op_addl_T0_T1_usaturate(); break;
3627 default: abort();
3628 }
2c0262af 3629 break;
9ee6e8bb
PB
3630 case 2: /* VRHADD */
3631 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 3632 break;
9ee6e8bb
PB
3633 case 3: /* Logic ops. */
3634 switch ((u << 2) | size) {
3635 case 0: /* VAND */
2c0262af 3636 gen_op_andl_T0_T1();
9ee6e8bb
PB
3637 break;
3638 case 1: /* BIC */
3639 gen_op_bicl_T0_T1();
3640 break;
3641 case 2: /* VORR */
3642 gen_op_orl_T0_T1();
3643 break;
3644 case 3: /* VORN */
3645 gen_op_notl_T1();
3646 gen_op_orl_T0_T1();
3647 break;
3648 case 4: /* VEOR */
3649 gen_op_xorl_T0_T1();
3650 break;
3651 case 5: /* VBSL */
3652 NEON_GET_REG(T2, rd, pass);
3653 gen_op_neon_bsl();
3654 break;
3655 case 6: /* VBIT */
3656 NEON_GET_REG(T2, rd, pass);
3657 gen_op_neon_bit();
3658 break;
3659 case 7: /* VBIF */
3660 NEON_GET_REG(T2, rd, pass);
3661 gen_op_neon_bif();
3662 break;
2c0262af
FB
3663 }
3664 break;
9ee6e8bb
PB
3665 case 4: /* VHSUB */
3666 GEN_NEON_INTEGER_OP(hsub);
3667 break;
3668 case 5: /* VQSUB */
3669 switch ((size << 1) | u) {
3670 case 0: gen_op_neon_qsub_s8(); break;
3671 case 1: gen_op_neon_qsub_u8(); break;
3672 case 2: gen_op_neon_qsub_s16(); break;
3673 case 3: gen_op_neon_qsub_u16(); break;
3674 case 4: gen_op_subl_T0_T1_saturate(); break;
3675 case 5: gen_op_subl_T0_T1_usaturate(); break;
3676 default: abort();
2c0262af
FB
3677 }
3678 break;
9ee6e8bb
PB
3679 case 6: /* VCGT */
3680 GEN_NEON_INTEGER_OP(cgt);
3681 break;
3682 case 7: /* VCGE */
3683 GEN_NEON_INTEGER_OP(cge);
3684 break;
3685 case 8: /* VSHL */
3686 switch ((size << 1) | u) {
3687 case 0: gen_op_neon_shl_s8(); break;
3688 case 1: gen_op_neon_shl_u8(); break;
3689 case 2: gen_op_neon_shl_s16(); break;
3690 case 3: gen_op_neon_shl_u16(); break;
3691 case 4: gen_op_neon_shl_s32(); break;
3692 case 5: gen_op_neon_shl_u32(); break;
3693#if 0
3694 /* ??? Implementing these is tricky because the vector ops work
3695 on 32-bit pieces. */
3696 case 6: gen_op_neon_shl_s64(); break;
3697 case 7: gen_op_neon_shl_u64(); break;
3698#else
3699 case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
3700#endif
2c0262af
FB
3701 }
3702 break;
9ee6e8bb
PB
3703 case 9: /* VQSHL */
3704 switch ((size << 1) | u) {
3705 case 0: gen_op_neon_qshl_s8(); break;
3706 case 1: gen_op_neon_qshl_u8(); break;
3707 case 2: gen_op_neon_qshl_s16(); break;
3708 case 3: gen_op_neon_qshl_u16(); break;
3709 case 4: gen_op_neon_qshl_s32(); break;
3710 case 5: gen_op_neon_qshl_u32(); break;
3711#if 0
3712 /* ??? Implementing these is tricky because the vector ops work
3713 on 32-bit pieces. */
3714 case 6: gen_op_neon_qshl_s64(); break;
3715 case 7: gen_op_neon_qshl_u64(); break;
3716#else
3717 case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
3718#endif
2c0262af
FB
3719 }
3720 break;
9ee6e8bb
PB
3721 case 10: /* VRSHL */
3722 switch ((size << 1) | u) {
3723 case 0: gen_op_neon_rshl_s8(); break;
3724 case 1: gen_op_neon_rshl_u8(); break;
3725 case 2: gen_op_neon_rshl_s16(); break;
3726 case 3: gen_op_neon_rshl_u16(); break;
3727 case 4: gen_op_neon_rshl_s32(); break;
3728 case 5: gen_op_neon_rshl_u32(); break;
3729#if 0
3730 /* ??? Implementing these is tricky because the vector ops work
3731 on 32-bit pieces. */
3732 case 6: gen_op_neon_rshl_s64(); break;
3733 case 7: gen_op_neon_rshl_u64(); break;
3734#else
3735 case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
3736#endif
3737 }
2c0262af 3738 break;
9ee6e8bb
PB
3739 case 11: /* VQRSHL */
3740 switch ((size << 1) | u) {
3741 case 0: gen_op_neon_qrshl_s8(); break;
3742 case 1: gen_op_neon_qrshl_u8(); break;
3743 case 2: gen_op_neon_qrshl_s16(); break;
3744 case 3: gen_op_neon_qrshl_u16(); break;
3745 case 4: gen_op_neon_qrshl_s32(); break;
3746 case 5: gen_op_neon_qrshl_u32(); break;
3747#if 0
3748 /* ??? Implementing these is tricky because the vector ops work
3749 on 32-bit pieces. */
3750 case 6: gen_op_neon_qrshl_s64(); break;
3751 case 7: gen_op_neon_qrshl_u64(); break;
3752#else
3753 case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
3754#endif
3755 }
3756 break;
3757 case 12: /* VMAX */
3758 GEN_NEON_INTEGER_OP(max);
3759 break;
3760 case 13: /* VMIN */
3761 GEN_NEON_INTEGER_OP(min);
3762 break;
3763 case 14: /* VABD */
3764 GEN_NEON_INTEGER_OP(abd);
3765 break;
3766 case 15: /* VABA */
3767 GEN_NEON_INTEGER_OP(abd);
3768 NEON_GET_REG(T1, rd, pass);
3769 gen_neon_add(size);
3770 break;
3771 case 16:
3772 if (!u) { /* VADD */
3773 if (gen_neon_add(size))
3774 return 1;
3775 } else { /* VSUB */
3776 switch (size) {
3777 case 0: gen_op_neon_sub_u8(); break;
3778 case 1: gen_op_neon_sub_u16(); break;
3779 case 2: gen_op_subl_T0_T1(); break;
3780 default: return 1;
3781 }
3782 }
3783 break;
3784 case 17:
3785 if (!u) { /* VTST */
3786 switch (size) {
3787 case 0: gen_op_neon_tst_u8(); break;
3788 case 1: gen_op_neon_tst_u16(); break;
3789 case 2: gen_op_neon_tst_u32(); break;
3790 default: return 1;
3791 }
3792 } else { /* VCEQ */
3793 switch (size) {
3794 case 0: gen_op_neon_ceq_u8(); break;
3795 case 1: gen_op_neon_ceq_u16(); break;
3796 case 2: gen_op_neon_ceq_u32(); break;
3797 default: return 1;
3798 }
3799 }
3800 break;
3801 case 18: /* Multiply. */
3802 switch (size) {
3803 case 0: gen_op_neon_mul_u8(); break;
3804 case 1: gen_op_neon_mul_u16(); break;
3805 case 2: gen_op_mul_T0_T1(); break;
3806 default: return 1;
3807 }
3808 NEON_GET_REG(T1, rd, pass);
3809 if (u) { /* VMLS */
3810 switch (size) {
3811 case 0: gen_op_neon_rsb_u8(); break;
3812 case 1: gen_op_neon_rsb_u16(); break;
3813 case 2: gen_op_rsbl_T0_T1(); break;
3814 default: return 1;
3815 }
3816 } else { /* VMLA */
3817 gen_neon_add(size);
3818 }
3819 break;
3820 case 19: /* VMUL */
3821 if (u) { /* polynomial */
3822 gen_op_neon_mul_p8();
3823 } else { /* Integer */
3824 switch (size) {
3825 case 0: gen_op_neon_mul_u8(); break;
3826 case 1: gen_op_neon_mul_u16(); break;
3827 case 2: gen_op_mul_T0_T1(); break;
3828 default: return 1;
3829 }
3830 }
3831 break;
3832 case 20: /* VPMAX */
3833 GEN_NEON_INTEGER_OP(pmax);
3834 break;
3835 case 21: /* VPMIN */
3836 GEN_NEON_INTEGER_OP(pmin);
3837 break;
3838            case 22: /* Multiply high.  */
3839 if (!u) { /* VQDMULH */
3840 switch (size) {
3841 case 1: gen_op_neon_qdmulh_s16(); break;
3842 case 2: gen_op_neon_qdmulh_s32(); break;
3843 default: return 1;
3844 }
3845            } else { /* VQRDMULH */
3846 switch (size) {
3847 case 1: gen_op_neon_qrdmulh_s16(); break;
3848 case 2: gen_op_neon_qrdmulh_s32(); break;
3849 default: return 1;
3850 }
3851 }
3852 break;
3853 case 23: /* VPADD */
3854 if (u)
3855 return 1;
3856 switch (size) {
3857 case 0: gen_op_neon_padd_u8(); break;
3858 case 1: gen_op_neon_padd_u16(); break;
3859 case 2: gen_op_addl_T0_T1(); break;
3860 default: return 1;
3861 }
3862 break;
3863            case 26: /* Floating point arithmetic. */
3864 switch ((u << 2) | size) {
3865 case 0: /* VADD */
3866 gen_op_neon_add_f32();
3867 break;
3868 case 2: /* VSUB */
3869 gen_op_neon_sub_f32();
3870 break;
3871 case 4: /* VPADD */
3872 gen_op_neon_add_f32();
3873 break;
3874 case 6: /* VABD */
3875 gen_op_neon_abd_f32();
3876 break;
3877 default:
3878 return 1;
3879 }
3880 break;
3881 case 27: /* Float multiply. */
3882 gen_op_neon_mul_f32();
3883 if (!u) {
3884 NEON_GET_REG(T1, rd, pass);
3885 if (size == 0) {
3886 gen_op_neon_add_f32();
3887 } else {
3888 gen_op_neon_rsb_f32();
3889 }
3890 }
3891 break;
3892 case 28: /* Float compare. */
3893 if (!u) {
3894 gen_op_neon_ceq_f32();
b5ff1b31 3895 } else {
9ee6e8bb
PB
3896 if (size == 0)
3897 gen_op_neon_cge_f32();
3898 else
3899 gen_op_neon_cgt_f32();
b5ff1b31 3900 }
2c0262af 3901 break;
9ee6e8bb
PB
3902 case 29: /* Float compare absolute. */
3903 if (!u)
3904 return 1;
3905 if (size == 0)
3906 gen_op_neon_acge_f32();
3907 else
3908 gen_op_neon_acgt_f32();
2c0262af 3909 break;
9ee6e8bb
PB
3910 case 30: /* Float min/max. */
3911 if (size == 0)
3912 gen_op_neon_max_f32();
3913 else
3914 gen_op_neon_min_f32();
3915 break;
3916 case 31:
3917 if (size == 0)
3918 gen_op_neon_recps_f32();
3919 else
3920 gen_op_neon_rsqrts_f32();
2c0262af 3921 break;
9ee6e8bb
PB
3922 default:
3923 abort();
2c0262af 3924 }
9ee6e8bb
PB
3925 /* Save the result. For elementwise operations we can put it
3926 straight into the destination register. For pairwise operations
3927 we have to be careful to avoid clobbering the source operands. */
3928 if (pairwise && rd == rm) {
3929 gen_neon_movl_scratch_T0(pass);
3930 } else {
3931 NEON_SET_REG(T0, rd, pass);
3932 }
3933
3934 } /* for pass */
3935 if (pairwise && rd == rm) {
3936 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3937 gen_neon_movl_T0_scratch(pass);
3938 NEON_SET_REG(T0, rd, pass);
3939 }
3940 }
3941 } else if (insn & (1 << 4)) {
3942 if ((insn & 0x00380080) != 0) {
3943 /* Two registers and shift. */
3944 op = (insn >> 8) & 0xf;
3945 if (insn & (1 << 7)) {
3946 /* 64-bit shift. */
3947 size = 3;
3948 } else {
3949 size = 2;
3950 while ((insn & (1 << (size + 19))) == 0)
3951 size--;
3952 }
3953 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
3954            /* To avoid excessive duplication of ops we implement shift
3955 by immediate using the variable shift operations. */
3956 if (op < 8) {
3957 /* Shift by immediate:
3958 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
3959 /* Right shifts are encoded as N - shift, where N is the
3960 element size in bits. */
3961 if (op <= 4)
3962 shift = shift - (1 << (size + 3));
3963 else
3964 shift++;
3965 if (size == 3) {
3966 count = q + 1;
3967 } else {
3968 count = q ? 4: 2;
3969 }
3970 switch (size) {
3971 case 0:
3972 imm = (uint8_t) shift;
3973 imm |= imm << 8;
3974 imm |= imm << 16;
3975 break;
3976 case 1:
3977 imm = (uint16_t) shift;
3978 imm |= imm << 16;
3979 break;
3980 case 2:
3981 case 3:
3982 imm = shift;
3983 break;
3984 default:
3985 abort();
3986 }
3987
3988 for (pass = 0; pass < count; pass++) {
3989 if (size < 3) {
3990 /* Operands in T0 and T1. */
3991 gen_op_movl_T1_im(imm);
3992 NEON_GET_REG(T0, rm, pass);
2c0262af 3993 } else {
9ee6e8bb
PB
3994 /* Operands in {T0, T1} and env->vfp.scratch. */
3995 gen_op_movl_T0_im(imm);
3996 gen_neon_movl_scratch_T0(0);
3997 gen_op_movl_T0_im((int32_t)imm >> 31);
3998 gen_neon_movl_scratch_T0(1);
3999 NEON_GET_REG(T0, rm, pass * 2);
4000 NEON_GET_REG(T1, rm, pass * 2 + 1);
4001 }
4002
4003 if (gen_neon_shift_im[op][u][size] == NULL)
4004 return 1;
4005 gen_neon_shift_im[op][u][size]();
4006
4007 if (op == 1 || op == 3) {
4008 /* Accumulate. */
4009 if (size == 3) {
4010 gen_neon_movl_scratch_T0(0);
4011 gen_neon_movl_scratch_T1(1);
4012 NEON_GET_REG(T0, rd, pass * 2);
4013 NEON_GET_REG(T1, rd, pass * 2 + 1);
4014 gen_op_neon_addl_u64();
4015 } else {
4016 NEON_GET_REG(T1, rd, pass);
4017 gen_neon_add(size);
99c475ab 4018 }
9ee6e8bb
PB
4019 } else if (op == 4 || (op == 5 && u)) {
4020 /* Insert */
4021 if (size == 3) {
4022 cpu_abort(env, "VS[LR]I.64 not implemented");
4023 }
4024 switch (size) {
4025 case 0:
4026 if (op == 4)
4027 imm = 0xff >> -shift;
4028 else
4029 imm = (uint8_t)(0xff << shift);
4030 imm |= imm << 8;
4031 imm |= imm << 16;
4032 break;
4033 case 1:
4034 if (op == 4)
4035 imm = 0xffff >> -shift;
4036 else
4037 imm = (uint16_t)(0xffff << shift);
4038 imm |= imm << 16;
4039 break;
4040 case 2:
4041 if (op == 4)
4042 imm = 0xffffffffu >> -shift;
4043 else
4044 imm = 0xffffffffu << shift;
4045 break;
4046 default:
4047 abort();
4048 }
4049 NEON_GET_REG(T1, rd, pass);
4050 gen_op_movl_T2_im(imm);
4051 gen_op_neon_bsl();
2c0262af 4052 }
9ee6e8bb
PB
4053 if (size == 3) {
4054 NEON_SET_REG(T0, rd, pass * 2);
4055 NEON_SET_REG(T1, rd, pass * 2 + 1);
4056 } else {
4057 NEON_SET_REG(T0, rd, pass);
4058 }
4059 } /* for pass */
4060 } else if (op < 10) {
4061                /* Shift by immediate and narrow:
4062 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
4063 shift = shift - (1 << (size + 3));
4064 size++;
4065 if (size == 3) {
4066 count = q + 1;
2c0262af 4067 } else {
9ee6e8bb
PB
4068 count = q ? 4: 2;
4069 }
4070 switch (size) {
4071 case 1:
4072 imm = (uint16_t) shift;
4073 imm |= imm << 16;
4074 break;
4075 case 2:
4076 case 3:
4077 imm = shift;
4078 break;
4079 default:
4080 abort();
4081 }
4082
4083 /* Processing MSB first means we need to do less shuffling at
4084 the end. */
4085 for (pass = count - 1; pass >= 0; pass--) {
4086 /* Avoid clobbering the second operand before it has been
4087 written. */
4088 n = pass;
4089 if (rd == rm)
4090 n ^= (count - 1);
4091 else
4092 n = pass;
4093
4094 if (size < 3) {
4095 /* Operands in T0 and T1. */
4096 gen_op_movl_T1_im(imm);
4097 NEON_GET_REG(T0, rm, n);
2c0262af 4098 } else {
9ee6e8bb
PB
4099 /* Operands in {T0, T1} and env->vfp.scratch. */
4100 gen_op_movl_T0_im(imm);
4101 gen_neon_movl_scratch_T0(0);
4102 gen_op_movl_T0_im((int32_t)imm >> 31);
4103 gen_neon_movl_scratch_T0(1);
4104 NEON_GET_REG(T0, rm, n * 2);
4105 NEON_GET_REG(T0, rm, n * 2 + 1);
4106 }
3b46e624 4107
9ee6e8bb
PB
4108 gen_neon_shift_im_narrow[q][u][size - 1]();
4109
4110 if (size < 3 && (pass & 1) == 0) {
4111 gen_neon_movl_scratch_T0(0);
4112 } else {
4113 uint32_t offset;
4114
4115 if (size < 3)
4116 gen_neon_movl_T1_scratch(0);
4117
4118 if (op == 8 && !u) {
4119 gen_neon_narrow[size - 1]();
99c475ab 4120 } else {
9ee6e8bb
PB
4121 if (op == 8)
4122 gen_neon_narrow_sats[size - 2]();
4123 else
4124 gen_neon_narrow_satu[size - 1]();
99c475ab 4125 }
9ee6e8bb
PB
4126 if (size == 3)
4127 offset = neon_reg_offset(rd, n);
4128 else
4129 offset = neon_reg_offset(rd, n >> 1);
4130 gen_op_neon_setreg_T0(offset);
4131 }
4132 } /* for pass */
4133 } else if (op == 10) {
4134 /* VSHLL */
4135 if (q)
4136 return 1;
4137 for (pass = 0; pass < 2; pass++) {
4138 /* Avoid clobbering the input operand. */
4139 if (rd == rm)
4140 n = 1 - pass;
4141 else
4142 n = pass;
4143
4144 NEON_GET_REG(T0, rm, n);
4145 GEN_NEON_INTEGER_OP(widen);
4146 if (shift != 0) {
4147 /* The shift is less than the width of the source
4148 type, so in some cases we can just
4149 shift the whole register. */
4150 if (size == 1 || (size == 0 && u)) {
4151 gen_op_shll_T0_im(shift);
4152 gen_op_shll_T1_im(shift);
4153 } else {
4154 switch (size) {
4155 case 0: gen_op_neon_shll_u16(shift); break;
4156 case 2: gen_op_neon_shll_u64(shift); break;
4157 default: abort();
4158 }
4159 }
4160 }
4161 NEON_SET_REG(T0, rd, n * 2);
4162 NEON_SET_REG(T1, rd, n * 2 + 1);
4163 }
4164 } else if (op == 15 || op == 16) {
4165 /* VCVT fixed-point. */
4166 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4167 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
4168 if (op & 1) {
4169 if (u)
4170 gen_op_vfp_ultos(shift);
4171 else
4172 gen_op_vfp_sltos(shift);
4173 } else {
4174 if (u)
4175 gen_op_vfp_touls(shift);
4176 else
4177 gen_op_vfp_tosls(shift);
2c0262af 4178 }
9ee6e8bb 4179 gen_op_vfp_setreg_F0s(neon_reg_offset(rd, pass));
2c0262af
FB
4180 }
4181 } else {
9ee6e8bb
PB
4182 return 1;
4183 }
4184 } else { /* (insn & 0x00380080) == 0 */
4185 int invert;
4186
4187 op = (insn >> 8) & 0xf;
4188 /* One register and immediate. */
4189 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
4190 invert = (insn & (1 << 5)) != 0;
4191 switch (op) {
4192 case 0: case 1:
4193 /* no-op */
4194 break;
4195 case 2: case 3:
4196 imm <<= 8;
4197 break;
4198 case 4: case 5:
4199 imm <<= 16;
4200 break;
4201 case 6: case 7:
4202 imm <<= 24;
4203 break;
4204 case 8: case 9:
4205 imm |= imm << 16;
4206 break;
4207 case 10: case 11:
4208 imm = (imm << 8) | (imm << 24);
4209 break;
4210 case 12:
4211 imm = (imm < 8) | 0xff;
4212 break;
4213 case 13:
4214 imm = (imm << 16) | 0xffff;
4215 break;
4216 case 14:
4217 imm |= (imm << 8) | (imm << 16) | (imm << 24);
4218 if (invert)
4219 imm = ~imm;
4220 break;
4221 case 15:
4222 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
4223 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
4224 break;
4225 }
4226 if (invert)
4227 imm = ~imm;
4228
4229 if (op != 14 || !invert)
4230 gen_op_movl_T1_im(imm);
4231
4232 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4233 if (op & 1 && op < 12) {
4234 NEON_GET_REG(T0, rd, pass);
4235 if (invert) {
4236 /* The immediate value has already been inverted, so
4237 BIC becomes AND. */
4238 gen_op_andl_T0_T1();
4239 } else {
4240 gen_op_orl_T0_T1();
4241 }
4242 NEON_SET_REG(T0, rd, pass);
4243 } else {
4244 if (op == 14 && invert) {
4245 uint32_t tmp;
4246 tmp = 0;
4247 for (n = 0; n < 4; n++) {
4248 if (imm & (1 << (n + (pass & 1) * 4)))
4249 tmp |= 0xff << (n * 8);
4250 }
4251 gen_op_movl_T1_im(tmp);
4252 }
4253 /* VMOV, VMVN. */
4254 NEON_SET_REG(T1, rd, pass);
4255 }
4256 }
4257 }
4258 } else { /* (insn & 0x00800010 == 0x00800010) */
4259 if (size != 3) {
4260 op = (insn >> 8) & 0xf;
4261 if ((insn & (1 << 6)) == 0) {
4262 /* Three registers of different lengths. */
4263 int src1_wide;
4264 int src2_wide;
4265 int prewiden;
4266 /* prewiden, src1_wide, src2_wide */
4267 static const int neon_3reg_wide[16][3] = {
4268 {1, 0, 0}, /* VADDL */
4269 {1, 1, 0}, /* VADDW */
4270 {1, 0, 0}, /* VSUBL */
4271 {1, 1, 0}, /* VSUBW */
4272 {0, 1, 1}, /* VADDHN */
4273 {0, 0, 0}, /* VABAL */
4274 {0, 1, 1}, /* VSUBHN */
4275 {0, 0, 0}, /* VABDL */
4276 {0, 0, 0}, /* VMLAL */
4277 {0, 0, 0}, /* VQDMLAL */
4278 {0, 0, 0}, /* VMLSL */
4279 {0, 0, 0}, /* VQDMLSL */
4280 {0, 0, 0}, /* Integer VMULL */
4281 {0, 0, 0}, /* VQDMULL */
4282 {0, 0, 0} /* Polynomial VMULL */
4283 };
4284
4285 prewiden = neon_3reg_wide[op][0];
4286 src1_wide = neon_3reg_wide[op][1];
4287 src2_wide = neon_3reg_wide[op][2];
4288
4289 /* Avoid overlapping operands. Wide source operands are
4290 always aligned so will never overlap with wide
4291 destinations in problematic ways. */
4292 if (rd == rm) {
4293 NEON_GET_REG(T2, rm, 1);
4294 } else if (rd == rn) {
4295 NEON_GET_REG(T2, rn, 1);
4296 }
4297 for (pass = 0; pass < 2; pass++) {
4298 /* Load the second operand into env->vfp.scratch.
4299 Also widen narrow operands. */
4300 if (pass == 1 && rd == rm) {
4301 if (prewiden) {
4302 gen_op_movl_T0_T2();
4303 } else {
4304 gen_op_movl_T1_T2();
4305 }
4306 } else {
4307 if (src2_wide) {
4308 NEON_GET_REG(T0, rm, pass * 2);
4309 NEON_GET_REG(T1, rm, pass * 2 + 1);
4310 } else {
4311 if (prewiden) {
4312 NEON_GET_REG(T0, rm, pass);
4313 } else {
4314 NEON_GET_REG(T1, rm, pass);
4315 }
4316 }
4317 }
4318 if (prewiden && !src2_wide) {
4319 GEN_NEON_INTEGER_OP(widen);
4320 }
4321 if (prewiden || src2_wide) {
4322 gen_neon_movl_scratch_T0(0);
4323 gen_neon_movl_scratch_T1(1);
4324 }
4325
4326 /* Load the first operand. */
4327 if (pass == 1 && rd == rn) {
4328 gen_op_movl_T0_T2();
4329 } else {
4330 if (src1_wide) {
4331 NEON_GET_REG(T0, rn, pass * 2);
4332 NEON_GET_REG(T1, rn, pass * 2 + 1);
4333 } else {
4334 NEON_GET_REG(T0, rn, pass);
4335 }
4336 }
4337 if (prewiden && !src1_wide) {
4338 GEN_NEON_INTEGER_OP(widen);
4339 }
4340 switch (op) {
4341 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
4342 switch (size) {
4343 case 0: gen_op_neon_addl_u16(); break;
4344 case 1: gen_op_neon_addl_u32(); break;
4345 case 2: gen_op_neon_addl_u64(); break;
4346 default: abort();
4347 }
4348 break;
4349                    case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHN, VRSUBHN */
4350 switch (size) {
4351 case 0: gen_op_neon_subl_u16(); break;
4352 case 1: gen_op_neon_subl_u32(); break;
4353 case 2: gen_op_neon_subl_u64(); break;
4354 default: abort();
4355 }
4356 break;
4357 case 5: case 7: /* VABAL, VABDL */
4358 switch ((size << 1) | u) {
4359 case 0: gen_op_neon_abdl_s16(); break;
4360 case 1: gen_op_neon_abdl_u16(); break;
4361 case 2: gen_op_neon_abdl_s32(); break;
4362 case 3: gen_op_neon_abdl_u32(); break;
4363 case 4: gen_op_neon_abdl_s64(); break;
4364 case 5: gen_op_neon_abdl_u64(); break;
4365 default: abort();
4366 }
4367 break;
4368 case 8: case 9: case 10: case 11: case 12: case 13:
4369 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
4370 switch ((size << 1) | u) {
4371 case 0: gen_op_neon_mull_s8(); break;
4372 case 1: gen_op_neon_mull_u8(); break;
4373 case 2: gen_op_neon_mull_s16(); break;
4374 case 3: gen_op_neon_mull_u16(); break;
4375 case 4: gen_op_imull_T0_T1(); break;
4376 case 5: gen_op_mull_T0_T1(); break;
4377 default: abort();
4378 }
4379 break;
4380 case 14: /* Polynomial VMULL */
4381 cpu_abort(env, "Polynomial VMULL not implemented");
4382
4383 default: /* 15 is RESERVED. */
4384 return 1;
4385 }
4386 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
4387 /* Accumulate. */
4388 if (op == 10 || op == 11) {
4389 switch (size) {
4390 case 0: gen_op_neon_negl_u16(); break;
4391 case 1: gen_op_neon_negl_u32(); break;
4392 case 2: gen_op_neon_negl_u64(); break;
4393 default: abort();
4394 }
4395 }
4396
4397 gen_neon_movl_scratch_T0(0);
4398 gen_neon_movl_scratch_T1(1);
4399
4400 if (op != 13) {
4401 NEON_GET_REG(T0, rd, pass * 2);
4402 NEON_GET_REG(T1, rd, pass * 2 + 1);
4403 }
4404
4405 switch (op) {
4406 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
4407 switch (size) {
4408 case 0: gen_op_neon_addl_u16(); break;
4409 case 1: gen_op_neon_addl_u32(); break;
4410 case 2: gen_op_neon_addl_u64(); break;
4411 default: abort();
4412 }
4413 break;
4414 case 9: case 11: /* VQDMLAL, VQDMLSL */
4415 switch (size) {
4416 case 1: gen_op_neon_addl_saturate_s32(); break;
4417 case 2: gen_op_neon_addl_saturate_s64(); break;
4418 default: abort();
4419 }
4420 /* Fall through. */
4421 case 13: /* VQDMULL */
4422 switch (size) {
4423 case 1: gen_op_neon_addl_saturate_s32(); break;
4424 case 2: gen_op_neon_addl_saturate_s64(); break;
4425 default: abort();
4426 }
4427 break;
4428 default:
4429 abort();
4430 }
4431 NEON_SET_REG(T0, rd, pass * 2);
4432 NEON_SET_REG(T1, rd, pass * 2 + 1);
4433 } else if (op == 4 || op == 6) {
4434 /* Narrowing operation. */
4435 if (u) {
4436 switch (size) {
4437 case 0: gen_op_neon_narrow_high_u8(); break;
4438 case 1: gen_op_neon_narrow_high_u16(); break;
4439 case 2: gen_op_movl_T0_T1(); break;
4440 default: abort();
4441 }
4442 } else {
4443 switch (size) {
4444 case 0: gen_op_neon_narrow_high_round_u8(); break;
4445 case 1: gen_op_neon_narrow_high_round_u16(); break;
4446 case 2: gen_op_neon_narrow_high_round_u32(); break;
4447 default: abort();
4448 }
4449 }
4450 NEON_SET_REG(T0, rd, pass);
4451 } else {
4452 /* Write back the result. */
4453 NEON_SET_REG(T0, rd, pass * 2);
4454 NEON_SET_REG(T1, rd, pass * 2 + 1);
4455 }
4456 }
4457 } else {
4458 /* Two registers and a scalar. */
4459 switch (op) {
4460 case 0: /* Integer VMLA scalar */
4461 case 1: /* Float VMLA scalar */
4462 case 4: /* Integer VMLS scalar */
4463 case 5: /* Floating point VMLS scalar */
4464 case 8: /* Integer VMUL scalar */
4465 case 9: /* Floating point VMUL scalar */
4466 case 12: /* VQDMULH scalar */
4467 case 13: /* VQRDMULH scalar */
4468 gen_neon_get_scalar(size, rm);
4469 gen_op_movl_T2_T0();
4470 for (pass = 0; pass < (u ? 4 : 2); pass++) {
4471 if (pass != 0)
4472 gen_op_movl_T0_T2();
4473 NEON_GET_REG(T1, rn, pass);
4474 if (op == 12) {
4475 if (size == 1) {
4476 gen_op_neon_qdmulh_s16();
4477 } else {
4478 gen_op_neon_qdmulh_s32();
4479 }
4480 } else if (op == 13) {
4481 if (size == 1) {
4482 gen_op_neon_qrdmulh_s16();
4483 } else {
4484 gen_op_neon_qrdmulh_s32();
4485 }
4486 } else if (op & 1) {
4487 gen_op_neon_mul_f32();
4488 } else {
4489 switch (size) {
4490 case 0: gen_op_neon_mul_u8(); break;
4491 case 1: gen_op_neon_mul_u16(); break;
4492 case 2: gen_op_mul_T0_T1(); break;
4493 default: return 1;
4494 }
4495 }
4496 if (op < 8) {
4497 /* Accumulate. */
4498 NEON_GET_REG(T1, rd, pass);
4499 switch (op) {
4500 case 0:
4501 gen_neon_add(size);
4502 break;
4503 case 1:
4504 gen_op_neon_add_f32();
4505 break;
4506 case 4:
4507 switch (size) {
4508 case 0: gen_op_neon_rsb_u8(); break;
4509 case 1: gen_op_neon_rsb_u16(); break;
4510 case 2: gen_op_rsbl_T0_T1(); break;
4511 default: return 1;
4512 }
4513 break;
4514 case 5:
4515 gen_op_neon_rsb_f32();
4516 break;
4517 default:
4518 abort();
4519 }
4520 }
4521 NEON_SET_REG(T0, rd, pass);
4522 }
4523 break;
4524                case 2: /* VMLAL scalar */
4525 case 3: /* VQDMLAL scalar */
4526 case 6: /* VMLSL scalar */
4527 case 7: /* VQDMLSL scalar */
4528 case 10: /* VMULL scalar */
4529 case 11: /* VQDMULL scalar */
4530 if (rd == rn) {
4531 /* Save overlapping operands before they are
4532 clobbered. */
4533 NEON_GET_REG(T0, rn, 1);
4534 gen_neon_movl_scratch_T0(2);
4535 }
4536 gen_neon_get_scalar(size, rm);
4537 gen_op_movl_T2_T0();
4538 for (pass = 0; pass < 2; pass++) {
4539 if (pass != 0) {
4540 gen_op_movl_T0_T2();
4541 }
4542 if (pass != 0 && rd == rn) {
4543 gen_neon_movl_T1_scratch(2);
4544 } else {
4545 NEON_GET_REG(T1, rn, pass);
4546 }
4547 switch ((size << 1) | u) {
4548 case 0: gen_op_neon_mull_s8(); break;
4549 case 1: gen_op_neon_mull_u8(); break;
4550 case 2: gen_op_neon_mull_s16(); break;
4551 case 3: gen_op_neon_mull_u16(); break;
4552 case 4: gen_op_imull_T0_T1(); break;
4553 case 5: gen_op_mull_T0_T1(); break;
4554 default: abort();
4555 }
4556 if (op == 6 || op == 7) {
4557 switch (size) {
4558 case 0: gen_op_neon_negl_u16(); break;
4559 case 1: gen_op_neon_negl_u32(); break;
4560 case 2: gen_op_neon_negl_u64(); break;
4561 default: abort();
4562 }
4563 }
4564 gen_neon_movl_scratch_T0(0);
4565 gen_neon_movl_scratch_T1(1);
4566 NEON_GET_REG(T0, rd, pass * 2);
4567 NEON_GET_REG(T1, rd, pass * 2 + 1);
4568 switch (op) {
4569 case 2: case 6:
4570 switch (size) {
4571 case 0: gen_op_neon_addl_u16(); break;
4572 case 1: gen_op_neon_addl_u32(); break;
4573 case 2: gen_op_neon_addl_u64(); break;
4574 default: abort();
4575 }
4576 break;
4577 case 3: case 7:
4578 switch (size) {
4579 case 1:
4580 gen_op_neon_addl_saturate_s32();
4581 gen_op_neon_addl_saturate_s32();
4582 break;
4583 case 2:
4584 gen_op_neon_addl_saturate_s64();
4585 gen_op_neon_addl_saturate_s64();
4586 break;
4587 default: abort();
4588 }
4589 break;
4590 case 10:
4591 /* no-op */
4592 break;
4593 case 11:
4594 switch (size) {
4595 case 1: gen_op_neon_addl_saturate_s32(); break;
4596 case 2: gen_op_neon_addl_saturate_s64(); break;
4597 default: abort();
4598 }
4599 break;
4600 default:
4601 abort();
4602 }
4603 NEON_SET_REG(T0, rd, pass * 2);
4604 NEON_SET_REG(T1, rd, pass * 2 + 1);
4605 }
4606 break;
4607 default: /* 14 and 15 are RESERVED */
4608 return 1;
4609 }
4610 }
4611 } else { /* size == 3 */
4612 if (!u) {
4613 /* Extract. */
4614 int reg;
4615 imm = (insn >> 8) & 0xf;
4616 reg = rn;
4617 count = q ? 4 : 2;
4618 n = imm >> 2;
4619 NEON_GET_REG(T0, reg, n);
4620 for (pass = 0; pass < count; pass++) {
4621 n++;
4622 if (n > count) {
4623 reg = rm;
4624 n -= count;
4625 }
4626 if (imm & 3) {
4627 NEON_GET_REG(T1, reg, n);
4628 gen_op_neon_extract((insn << 3) & 0x1f);
4629 }
4630 /* ??? This is broken if rd and rm overlap */
4631 NEON_SET_REG(T0, rd, pass);
4632 if (imm & 3) {
4633 gen_op_movl_T0_T1();
4634 } else {
4635 NEON_GET_REG(T0, reg, n);
4636 }
4637 }
4638 } else if ((insn & (1 << 11)) == 0) {
4639 /* Two register misc. */
4640 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
4641 size = (insn >> 18) & 3;
4642 switch (op) {
4643 case 0: /* VREV64 */
4644 if (size == 3)
4645 return 1;
4646 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4647 NEON_GET_REG(T0, rm, pass * 2);
4648 NEON_GET_REG(T1, rm, pass * 2 + 1);
4649 switch (size) {
4650 case 0: gen_op_rev_T0(); break;
8f01245e 4651 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
4652 case 2: /* no-op */ break;
4653 default: abort();
4654 }
4655 NEON_SET_REG(T0, rd, pass * 2 + 1);
4656 if (size == 2) {
4657 NEON_SET_REG(T1, rd, pass * 2);
4658 } else {
4659 gen_op_movl_T0_T1();
4660 switch (size) {
4661 case 0: gen_op_rev_T0(); break;
8f01245e 4662 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
4663 default: abort();
4664 }
4665 NEON_SET_REG(T0, rd, pass * 2);
4666 }
4667 }
4668 break;
4669 case 4: case 5: /* VPADDL */
4670 case 12: case 13: /* VPADAL */
4671 if (size < 2)
4672 goto elementwise;
4673 if (size == 3)
4674 return 1;
4675 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4676 NEON_GET_REG(T0, rm, pass * 2);
4677 NEON_GET_REG(T1, rm, pass * 2 + 1);
4678 if (op & 1)
4679 gen_op_neon_paddl_u32();
4680 else
4681 gen_op_neon_paddl_s32();
4682 if (op >= 12) {
4683 /* Accumulate. */
4684 gen_neon_movl_scratch_T0(0);
4685 gen_neon_movl_scratch_T1(1);
4686
4687 NEON_GET_REG(T0, rd, pass * 2);
4688 NEON_GET_REG(T1, rd, pass * 2 + 1);
4689 gen_op_neon_addl_u64();
4690 }
4691 NEON_SET_REG(T0, rd, pass * 2);
4692 NEON_SET_REG(T1, rd, pass * 2 + 1);
4693 }
4694 break;
4695 case 33: /* VTRN */
4696 if (size == 2) {
4697 for (n = 0; n < (q ? 4 : 2); n += 2) {
4698 NEON_GET_REG(T0, rm, n);
4699 NEON_GET_REG(T1, rd, n + 1);
4700 NEON_SET_REG(T1, rm, n);
4701 NEON_SET_REG(T0, rd, n + 1);
4702 }
4703 } else {
4704 goto elementwise;
4705 }
4706 break;
4707 case 34: /* VUZP */
4708 /* Reg Before After
4709 Rd A3 A2 A1 A0 B2 B0 A2 A0
4710 Rm B3 B2 B1 B0 B3 B1 A3 A1
4711 */
4712 if (size == 3)
4713 return 1;
4714 gen_neon_unzip(rd, q, 0, size);
4715 gen_neon_unzip(rm, q, 4, size);
4716 if (q) {
4717 static int unzip_order_q[8] =
4718 {0, 2, 4, 6, 1, 3, 5, 7};
4719 for (n = 0; n < 8; n++) {
4720 int reg = (n < 4) ? rd : rm;
4721 gen_neon_movl_T0_scratch(unzip_order_q[n]);
4722 NEON_SET_REG(T0, reg, n % 4);
4723 }
4724 } else {
4725 static int unzip_order[4] =
4726 {0, 4, 1, 5};
4727 for (n = 0; n < 4; n++) {
4728 int reg = (n < 2) ? rd : rm;
4729 gen_neon_movl_T0_scratch(unzip_order[n]);
4730 NEON_SET_REG(T0, reg, n % 2);
4731 }
4732 }
4733 break;
4734 case 35: /* VZIP */
4735 /* Reg Before After
4736 Rd A3 A2 A1 A0 B1 A1 B0 A0
4737 Rm B3 B2 B1 B0 B3 A3 B2 A2
4738 */
4739 if (size == 3)
4740 return 1;
4741 count = (q ? 4 : 2);
4742 for (n = 0; n < count; n++) {
4743 NEON_GET_REG(T0, rd, n);
4744 NEON_GET_REG(T1, rd, n);
4745 switch (size) {
4746 case 0: gen_op_neon_zip_u8(); break;
4747 case 1: gen_op_neon_zip_u16(); break;
4748 case 2: /* no-op */; break;
4749 default: abort();
4750 }
4751 gen_neon_movl_scratch_T0(n * 2);
4752 gen_neon_movl_scratch_T1(n * 2 + 1);
4753 }
4754 for (n = 0; n < count * 2; n++) {
4755 int reg = (n < count) ? rd : rm;
4756 gen_neon_movl_T0_scratch(n);
4757 NEON_SET_REG(T0, reg, n % count);
4758 }
4759 break;
4760 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4761 for (pass = 0; pass < 2; pass++) {
4762 if (rd == rm + 1) {
4763 n = 1 - pass;
4764 } else {
4765 n = pass;
4766 }
4767 NEON_GET_REG(T0, rm, n * 2);
4768 NEON_GET_REG(T1, rm, n * 2 + 1);
4769 if (op == 36 && q == 0) {
4770 switch (size) {
4771 case 0: gen_op_neon_narrow_u8(); break;
4772 case 1: gen_op_neon_narrow_u16(); break;
4773 case 2: /* no-op */ break;
4774 default: return 1;
4775 }
4776 } else if (q) {
4777 switch (size) {
4778 case 0: gen_op_neon_narrow_sat_u8(); break;
4779 case 1: gen_op_neon_narrow_sat_u16(); break;
4780 case 2: gen_op_neon_narrow_sat_u32(); break;
4781 default: return 1;
4782 }
4783 } else {
4784 switch (size) {
4785 case 0: gen_op_neon_narrow_sat_s8(); break;
4786 case 1: gen_op_neon_narrow_sat_s16(); break;
4787 case 2: gen_op_neon_narrow_sat_s32(); break;
4788 default: return 1;
4789 }
4790 }
4791 NEON_SET_REG(T0, rd, n);
4792 }
4793 break;
4794 case 38: /* VSHLL */
4795 if (q)
4796 return 1;
4797 if (rm == rd) {
4798 NEON_GET_REG(T2, rm, 1);
4799 }
4800 for (pass = 0; pass < 2; pass++) {
4801 if (pass == 1 && rm == rd) {
4802 gen_op_movl_T0_T2();
4803 } else {
4804 NEON_GET_REG(T0, rm, pass);
4805 }
4806 switch (size) {
4807 case 0: gen_op_neon_widen_high_u8(); break;
4808 case 1: gen_op_neon_widen_high_u16(); break;
4809 case 2:
4810 gen_op_movl_T1_T0();
4811 gen_op_movl_T0_im(0);
4812 break;
4813 default: return 1;
4814 }
4815 NEON_SET_REG(T0, rd, pass * 2);
4816 NEON_SET_REG(T1, rd, pass * 2 + 1);
4817 }
4818 break;
4819 default:
4820 elementwise:
4821 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4822 if (op == 30 || op == 31 || op >= 58) {
4823 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
4824 } else {
4825 NEON_GET_REG(T0, rm, pass);
4826 }
4827 switch (op) {
4828 case 1: /* VREV32 */
4829 switch (size) {
4830 case 0: gen_op_rev_T0(); break;
8f01245e 4831 case 1: gen_swap_half(cpu_T[0]); break;
9ee6e8bb
PB
4832 default: return 1;
4833 }
4834 break;
4835 case 2: /* VREV16 */
4836 if (size != 0)
4837 return 1;
3670669c 4838 gen_rev16(cpu_T[0]);
9ee6e8bb
PB
4839 break;
4840 case 4: case 5: /* VPADDL */
4841 case 12: case 13: /* VPADAL */
4842 switch ((size << 1) | (op & 1)) {
4843 case 0: gen_op_neon_paddl_s8(); break;
4844 case 1: gen_op_neon_paddl_u8(); break;
4845 case 2: gen_op_neon_paddl_s16(); break;
4846 case 3: gen_op_neon_paddl_u16(); break;
4847 default: abort();
4848 }
4849 if (op >= 12) {
4850 /* Accumulate */
4851 NEON_GET_REG(T1, rd, pass);
4852 switch (size) {
4853 case 0: gen_op_neon_add_u16(); break;
4854 case 1: gen_op_addl_T0_T1(); break;
4855 default: abort();
4856 }
4857 }
4858 break;
4859 case 8: /* CLS */
4860 switch (size) {
4861 case 0: gen_op_neon_cls_s8(); break;
4862 case 1: gen_op_neon_cls_s16(); break;
4863 case 2: gen_op_neon_cls_s32(); break;
4864 default: return 1;
4865 }
4866 break;
4867 case 9: /* CLZ */
4868 switch (size) {
4869 case 0: gen_op_neon_clz_u8(); break;
4870 case 1: gen_op_neon_clz_u16(); break;
1497c961 4871 case 2: gen_helper_clz(cpu_T[0], cpu_T[0]); break;
9ee6e8bb
PB
4872 default: return 1;
4873 }
4874 break;
4875 case 10: /* CNT */
4876 if (size != 0)
4877 return 1;
4878 gen_op_neon_cnt_u8();
4879 break;
4880 case 11: /* VNOT */
4881 if (size != 0)
4882 return 1;
4883 gen_op_notl_T0();
4884 break;
4885 case 14: /* VQABS */
4886 switch (size) {
4887 case 0: gen_op_neon_qabs_s8(); break;
4888 case 1: gen_op_neon_qabs_s16(); break;
4889 case 2: gen_op_neon_qabs_s32(); break;
4890 default: return 1;
4891 }
4892 break;
4893 case 15: /* VQNEG */
4894 switch (size) {
4895 case 0: gen_op_neon_qneg_s8(); break;
4896 case 1: gen_op_neon_qneg_s16(); break;
4897 case 2: gen_op_neon_qneg_s32(); break;
4898 default: return 1;
4899 }
4900 break;
4901 case 16: case 19: /* VCGT #0, VCLE #0 */
4902 gen_op_movl_T1_im(0);
4903 switch(size) {
4904 case 0: gen_op_neon_cgt_s8(); break;
4905 case 1: gen_op_neon_cgt_s16(); break;
4906 case 2: gen_op_neon_cgt_s32(); break;
4907 default: return 1;
4908 }
4909 if (op == 19)
4910 gen_op_notl_T0();
4911 break;
4912 case 17: case 20: /* VCGE #0, VCLT #0 */
4913 gen_op_movl_T1_im(0);
4914 switch(size) {
4915 case 0: gen_op_neon_cge_s8(); break;
4916 case 1: gen_op_neon_cge_s16(); break;
4917 case 2: gen_op_neon_cge_s32(); break;
4918 default: return 1;
4919 }
4920 if (op == 20)
4921 gen_op_notl_T0();
4922 break;
4923 case 18: /* VCEQ #0 */
4924 gen_op_movl_T1_im(0);
4925 switch(size) {
4926 case 0: gen_op_neon_ceq_u8(); break;
4927 case 1: gen_op_neon_ceq_u16(); break;
4928 case 2: gen_op_neon_ceq_u32(); break;
4929 default: return 1;
4930 }
4931 break;
4932 case 22: /* VABS */
4933 switch(size) {
4934 case 0: gen_op_neon_abs_s8(); break;
4935 case 1: gen_op_neon_abs_s16(); break;
4936 case 2: gen_op_neon_abs_s32(); break;
4937 default: return 1;
4938 }
4939 break;
4940 case 23: /* VNEG */
4941 gen_op_movl_T1_im(0);
4942 switch(size) {
4943 case 0: gen_op_neon_rsb_u8(); break;
4944 case 1: gen_op_neon_rsb_u16(); break;
4945 case 2: gen_op_rsbl_T0_T1(); break;
4946 default: return 1;
4947 }
4948 break;
4949 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
4950 gen_op_movl_T1_im(0);
4951 gen_op_neon_cgt_f32();
4952 if (op == 27)
4953 gen_op_notl_T0();
4954 break;
4955 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
4956 gen_op_movl_T1_im(0);
4957 gen_op_neon_cge_f32();
4958 if (op == 28)
4959 gen_op_notl_T0();
4960 break;
4961 case 26: /* Float VCEQ #0 */
4962 gen_op_movl_T1_im(0);
4963 gen_op_neon_ceq_f32();
4964 break;
4965 case 30: /* Float VABS */
4966 gen_op_vfp_abss();
4967 break;
4968 case 31: /* Float VNEG */
4969 gen_op_vfp_negs();
4970 break;
4971 case 32: /* VSWP */
4972 NEON_GET_REG(T1, rd, pass);
4973 NEON_SET_REG(T1, rm, pass);
4974 break;
4975 case 33: /* VTRN */
4976 NEON_GET_REG(T1, rd, pass);
4977 switch (size) {
4978 case 0: gen_op_neon_trn_u8(); break;
4979 case 1: gen_op_neon_trn_u16(); break;
4980 case 2: abort();
4981 default: return 1;
4982 }
4983 NEON_SET_REG(T1, rm, pass);
4984 break;
4985 case 56: /* Integer VRECPE */
4986 gen_op_neon_recpe_u32();
4987 break;
4988 case 57: /* Integer VRSQRTE */
4989 gen_op_neon_rsqrte_u32();
4990 break;
4991 case 58: /* Float VRECPE */
4992 gen_op_neon_recpe_f32();
4993 break;
4994 case 59: /* Float VRSQRTE */
4995 gen_op_neon_rsqrte_f32();
4996 break;
4997 case 60: /* VCVT.F32.S32 */
4998 gen_op_vfp_tosizs();
4999 break;
5000 case 61: /* VCVT.F32.U32 */
5001 gen_op_vfp_touizs();
5002 break;
5003 case 62: /* VCVT.S32.F32 */
5004 gen_op_vfp_sitos();
5005 break;
5006 case 63: /* VCVT.U32.F32 */
5007 gen_op_vfp_uitos();
5008 break;
5009 default:
5010 /* Reserved: 21, 29, 39-56 */
5011 return 1;
5012 }
5013 if (op == 30 || op == 31 || op >= 58) {
5014 gen_op_vfp_setreg_F0s(neon_reg_offset(rm, pass));
5015 } else {
5016 NEON_SET_REG(T0, rd, pass);
5017 }
5018 }
5019 break;
5020 }
5021 } else if ((insn & (1 << 10)) == 0) {
5022 /* VTBL, VTBX. */
5023 n = (insn >> 5) & 0x18;
5024 NEON_GET_REG(T1, rm, 0);
5025 if (insn & (1 << 6)) {
5026 NEON_GET_REG(T0, rd, 0);
5027 } else {
5028 gen_op_movl_T0_im(0);
5029 }
5030 gen_op_neon_tbl(rn, n);
5031 gen_op_movl_T2_T0();
5032 NEON_GET_REG(T1, rm, 1);
5033 if (insn & (1 << 6)) {
5034 NEON_GET_REG(T0, rd, 0);
5035 } else {
5036 gen_op_movl_T0_im(0);
5037 }
5038 gen_op_neon_tbl(rn, n);
5039 NEON_SET_REG(T2, rd, 0);
5040 NEON_SET_REG(T0, rd, 1);
5041 } else if ((insn & 0x380) == 0) {
5042 /* VDUP */
5043 if (insn & (1 << 19)) {
5044 NEON_SET_REG(T0, rm, 1);
5045 } else {
5046 NEON_SET_REG(T0, rm, 0);
5047 }
5048 if (insn & (1 << 16)) {
5049 gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
5050 } else if (insn & (1 << 17)) {
5051 if ((insn >> 18) & 1)
5052 gen_op_neon_dup_high16();
5053 else
5054 gen_op_neon_dup_low16();
5055 }
5056 for (pass = 0; pass < (q ? 4 : 2); pass++) {
5057 NEON_SET_REG(T0, rd, pass);
5058 }
5059 } else {
5060 return 1;
5061 }
5062 }
5063 }
5064 return 0;
5065}
5066
5067static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
5068{
5069 int cpnum;
5070
5071 cpnum = (insn >> 8) & 0xf;
5072 if (arm_feature(env, ARM_FEATURE_XSCALE)
5073 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
5074 return 1;
5075
5076 switch (cpnum) {
5077 case 0:
5078 case 1:
5079 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5080 return disas_iwmmxt_insn(env, s, insn);
5081 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
5082 return disas_dsp_insn(env, s, insn);
5083 }
5084 return 1;
5085 case 10:
5086 case 11:
5087 return disas_vfp_insn (env, s, insn);
5088 case 15:
5089 return disas_cp15_insn (env, s, insn);
5090 default:
5091 /* Unknown coprocessor. See if the board has hooked it. */
5092 return disas_cp_insn (env, s, insn);
5093 }
5094}
5095
5096static void disas_arm_insn(CPUState * env, DisasContext *s)
5097{
5098 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
b26eefb6 5099 TCGv tmp;
3670669c 5100 TCGv tmp2;
6ddbc6e4 5101 TCGv tmp3;
9ee6e8bb
PB
5102
5103 insn = ldl_code(s->pc);
5104 s->pc += 4;
5105
5106 /* M variants do not implement ARM mode. */
5107 if (IS_M(env))
5108 goto illegal_op;
5109 cond = insn >> 28;
5110 if (cond == 0xf){
5111 /* Unconditional instructions. */
5112 if (((insn >> 25) & 7) == 1) {
5113 /* NEON Data processing. */
5114 if (!arm_feature(env, ARM_FEATURE_NEON))
5115 goto illegal_op;
5116
5117 if (disas_neon_data_insn(env, s, insn))
5118 goto illegal_op;
5119 return;
5120 }
5121 if ((insn & 0x0f100000) == 0x04000000) {
5122 /* NEON load/store. */
5123 if (!arm_feature(env, ARM_FEATURE_NEON))
5124 goto illegal_op;
5125
5126 if (disas_neon_ls_insn(env, s, insn))
5127 goto illegal_op;
5128 return;
5129 }
5130 if ((insn & 0x0d70f000) == 0x0550f000)
5131 return; /* PLD */
5132 else if ((insn & 0x0ffffdff) == 0x01010000) {
5133 ARCH(6);
5134 /* setend */
5135 if (insn & (1 << 9)) {
5136 /* BE8 mode not implemented. */
5137 goto illegal_op;
5138 }
5139 return;
5140 } else if ((insn & 0x0fffff00) == 0x057ff000) {
5141 switch ((insn >> 4) & 0xf) {
5142 case 1: /* clrex */
5143 ARCH(6K);
5144 gen_op_clrex();
5145 return;
5146 case 4: /* dsb */
5147 case 5: /* dmb */
5148 case 6: /* isb */
5149 ARCH(7);
5150 /* We don't emulate caches so these are a no-op. */
5151 return;
5152 default:
5153 goto illegal_op;
5154 }
5155 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
5156 /* srs */
5157 uint32_t offset;
5158 if (IS_USER(s))
5159 goto illegal_op;
5160 ARCH(6);
5161 op1 = (insn & 0x1f);
5162 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5163 gen_movl_T1_reg(s, 13);
5164 } else {
5165 gen_op_movl_T1_r13_banked(op1);
5166 }
5167 i = (insn >> 23) & 3;
5168 switch (i) {
5169 case 0: offset = -4; break; /* DA */
5170 case 1: offset = -8; break; /* DB */
5171 case 2: offset = 0; break; /* IA */
5172 case 3: offset = 4; break; /* IB */
5173 default: abort();
5174 }
5175 if (offset)
5176 gen_op_addl_T1_im(offset);
5177 gen_movl_T0_reg(s, 14);
5178 gen_ldst(stl, s);
d9ba4830 5179 gen_helper_cpsr_read(cpu_T[0]);
9ee6e8bb
PB
5180 gen_op_addl_T1_im(4);
5181 gen_ldst(stl, s);
5182 if (insn & (1 << 21)) {
5183 /* Base writeback. */
5184 switch (i) {
5185 case 0: offset = -8; break;
5186 case 1: offset = -4; break;
5187 case 2: offset = 4; break;
5188 case 3: offset = 0; break;
5189 default: abort();
5190 }
5191 if (offset)
5192 gen_op_addl_T1_im(offset);
5193 if (op1 == (env->uncached_cpsr & CPSR_M)) {
5194 gen_movl_reg_T1(s, 13);
5195 } else {
5196 gen_op_movl_r13_T1_banked(op1);
5197 }
5198 }
5199 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
5200 /* rfe */
5201 uint32_t offset;
5202 if (IS_USER(s))
5203 goto illegal_op;
5204 ARCH(6);
5205 rn = (insn >> 16) & 0xf;
5206 gen_movl_T1_reg(s, rn);
5207 i = (insn >> 23) & 3;
5208 switch (i) {
5209 case 0: offset = 0; break; /* DA */
5210 case 1: offset = -4; break; /* DB */
5211 case 2: offset = 4; break; /* IA */
5212 case 3: offset = 8; break; /* IB */
5213 default: abort();
5214 }
5215 if (offset)
5216 gen_op_addl_T1_im(offset);
5217 /* Load CPSR into T2 and PC into T0. */
5218 gen_ldst(ldl, s);
5219 gen_op_movl_T2_T0();
5220 gen_op_addl_T1_im(-4);
5221 gen_ldst(ldl, s);
5222 if (insn & (1 << 21)) {
5223 /* Base writeback. */
5224 switch (i) {
5225 case 0: offset = -4; break;
5226 case 1: offset = 0; break;
5227 case 2: offset = 8; break;
5228 case 3: offset = 4; break;
5229 default: abort();
5230 }
5231 if (offset)
5232 gen_op_addl_T1_im(offset);
5233 gen_movl_reg_T1(s, rn);
5234 }
5235 gen_rfe(s);
5236 } else if ((insn & 0x0e000000) == 0x0a000000) {
5237 /* branch link and change to thumb (blx <offset>) */
5238 int32_t offset;
5239
5240 val = (uint32_t)s->pc;
d9ba4830
PB
5241 tmp = new_tmp();
5242 tcg_gen_movi_i32(tmp, val);
5243 store_reg(s, 14, tmp);
9ee6e8bb
PB
5244 /* Sign-extend the 24-bit offset */
5245 offset = (((int32_t)insn) << 8) >> 8;
5246 /* offset * 4 + bit24 * 2 + (thumb bit) */
5247 val += (offset << 2) | ((insn >> 23) & 2) | 1;
5248 /* pipeline offset */
5249 val += 4;
d9ba4830 5250 gen_bx_im(s, val);
9ee6e8bb
PB
5251 return;
5252 } else if ((insn & 0x0e000f00) == 0x0c000100) {
5253 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
5254 /* iWMMXt register transfer. */
5255 if (env->cp15.c15_cpar & (1 << 1))
5256 if (!disas_iwmmxt_insn(env, s, insn))
5257 return;
5258 }
5259 } else if ((insn & 0x0fe00000) == 0x0c400000) {
5260 /* Coprocessor double register transfer. */
5261 } else if ((insn & 0x0f000010) == 0x0e000010) {
5262 /* Additional coprocessor register transfer. */
5263 } else if ((insn & 0x0ff10010) == 0x01000000) {
5264 uint32_t mask;
5265 uint32_t val;
5266 /* cps (privileged) */
5267 if (IS_USER(s))
5268 return;
5269 mask = val = 0;
5270 if (insn & (1 << 19)) {
5271 if (insn & (1 << 8))
5272 mask |= CPSR_A;
5273 if (insn & (1 << 7))
5274 mask |= CPSR_I;
5275 if (insn & (1 << 6))
5276 mask |= CPSR_F;
5277 if (insn & (1 << 18))
5278 val |= mask;
5279 }
5280 if (insn & (1 << 14)) {
5281 mask |= CPSR_M;
5282 val |= (insn & 0x1f);
5283 }
5284 if (mask) {
5285 gen_op_movl_T0_im(val);
5286 gen_set_psr_T0(s, mask, 0);
5287 }
5288 return;
5289 }
5290 goto illegal_op;
5291 }
5292 if (cond != 0xe) {
5293 /* if not always execute, we generate a conditional jump to
5294 next instruction */
5295 s->condlabel = gen_new_label();
d9ba4830 5296 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
5297 s->condjmp = 1;
5298 }
5299 if ((insn & 0x0f900000) == 0x03000000) {
5300 if ((insn & (1 << 21)) == 0) {
5301 ARCH(6T2);
5302 rd = (insn >> 12) & 0xf;
5303 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
5304 if ((insn & (1 << 22)) == 0) {
5305 /* MOVW */
5306 gen_op_movl_T0_im(val);
5307 } else {
5308 /* MOVT */
5309 gen_movl_T0_reg(s, rd);
5310 gen_op_movl_T1_im(0xffff);
5311 gen_op_andl_T0_T1();
5312 gen_op_movl_T1_im(val << 16);
5313 gen_op_orl_T0_T1();
5314 }
5315 gen_movl_reg_T0(s, rd);
5316 } else {
5317 if (((insn >> 12) & 0xf) != 0xf)
5318 goto illegal_op;
5319 if (((insn >> 16) & 0xf) == 0) {
5320 gen_nop_hint(s, insn & 0xff);
5321 } else {
5322 /* CPSR = immediate */
5323 val = insn & 0xff;
5324 shift = ((insn >> 8) & 0xf) * 2;
5325 if (shift)
5326 val = (val >> shift) | (val << (32 - shift));
5327 gen_op_movl_T0_im(val);
5328 i = ((insn & (1 << 22)) != 0);
5329 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5330 goto illegal_op;
5331 }
5332 }
5333 } else if ((insn & 0x0f900000) == 0x01000000
5334 && (insn & 0x00000090) != 0x00000090) {
5335 /* miscellaneous instructions */
5336 op1 = (insn >> 21) & 3;
5337 sh = (insn >> 4) & 0xf;
5338 rm = insn & 0xf;
5339 switch (sh) {
5340 case 0x0: /* move program status register */
5341 if (op1 & 1) {
5342 /* PSR = reg */
5343 gen_movl_T0_reg(s, rm);
5344 i = ((op1 & 2) != 0);
5345 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
5346 goto illegal_op;
5347 } else {
5348 /* reg = PSR */
5349 rd = (insn >> 12) & 0xf;
5350 if (op1 & 2) {
5351 if (IS_USER(s))
5352 goto illegal_op;
d9ba4830 5353 tmp = load_cpu_field(spsr);
9ee6e8bb 5354 } else {
d9ba4830
PB
5355 tmp = new_tmp();
5356 gen_helper_cpsr_read(tmp);
9ee6e8bb 5357 }
d9ba4830 5358 store_reg(s, rd, tmp);
9ee6e8bb
PB
5359 }
5360 break;
5361 case 0x1:
5362 if (op1 == 1) {
5363 /* branch/exchange thumb (bx). */
d9ba4830
PB
5364 tmp = load_reg(s, rm);
5365 gen_bx(s, tmp);
9ee6e8bb
PB
5366 } else if (op1 == 3) {
5367 /* clz */
5368 rd = (insn >> 12) & 0xf;
1497c961
PB
5369 tmp = load_reg(s, rm);
5370 gen_helper_clz(tmp, tmp);
5371 store_reg(s, rd, tmp);
9ee6e8bb
PB
5372 } else {
5373 goto illegal_op;
5374 }
5375 break;
5376 case 0x2:
5377 if (op1 == 1) {
5378 ARCH(5J); /* bxj */
5379 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
5380 tmp = load_reg(s, rm);
5381 gen_bx(s, tmp);
9ee6e8bb
PB
5382 } else {
5383 goto illegal_op;
5384 }
5385 break;
5386 case 0x3:
5387 if (op1 != 1)
5388 goto illegal_op;
5389
5390 /* branch link/exchange thumb (blx) */
d9ba4830
PB
5391 tmp = load_reg(s, rm);
5392 tmp2 = new_tmp();
5393 tcg_gen_movi_i32(tmp2, s->pc);
5394 store_reg(s, 14, tmp2);
5395 gen_bx(s, tmp);
9ee6e8bb
PB
5396 break;
5397 case 0x5: /* saturating add/subtract */
5398 rd = (insn >> 12) & 0xf;
5399 rn = (insn >> 16) & 0xf;
5400 gen_movl_T0_reg(s, rm);
5401 gen_movl_T1_reg(s, rn);
5402 if (op1 & 2)
1497c961 5403 gen_helper_double_saturate(cpu_T[1], cpu_T[1]);
9ee6e8bb
PB
5404 if (op1 & 1)
5405 gen_op_subl_T0_T1_saturate();
5406 else
5407 gen_op_addl_T0_T1_saturate();
5408 gen_movl_reg_T0(s, rd);
5409 break;
5410 case 7: /* bkpt */
5411 gen_set_condexec(s);
5412 gen_op_movl_T0_im((long)s->pc - 4);
b26eefb6 5413 gen_set_pc_T0();
d9ba4830 5414 gen_exception(EXCP_BKPT);
9ee6e8bb
PB
5415 s->is_jmp = DISAS_JUMP;
5416 break;
5417 case 0x8: /* signed multiply */
5418 case 0xa:
5419 case 0xc:
5420 case 0xe:
5421 rs = (insn >> 8) & 0xf;
5422 rn = (insn >> 12) & 0xf;
5423 rd = (insn >> 16) & 0xf;
5424 if (op1 == 1) {
5425 /* (32 * 16) >> 16 */
5426 gen_movl_T0_reg(s, rm);
5427 gen_movl_T1_reg(s, rs);
5428 if (sh & 4)
5429 gen_op_sarl_T1_im(16);
5430 else
b26eefb6 5431 gen_sxth(cpu_T[1]);
d9ba4830 5432 gen_imulw(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
5433 if ((sh & 2) == 0) {
5434 gen_movl_T1_reg(s, rn);
5435 gen_op_addl_T0_T1_setq();
5436 }
5437 gen_movl_reg_T0(s, rd);
5438 } else {
5439 /* 16 * 16 */
5440 gen_movl_T0_reg(s, rm);
5441 gen_movl_T1_reg(s, rs);
d9ba4830 5442 gen_mulxy(cpu_T[0], cpu_T[1], sh & 2, sh & 4);
9ee6e8bb
PB
5443 if (op1 == 2) {
5444 gen_op_signbit_T1_T0();
5445 gen_op_addq_T0_T1(rn, rd);
5446 gen_movl_reg_T0(s, rn);
5447 gen_movl_reg_T1(s, rd);
5448 } else {
5449 if (op1 == 0) {
5450 gen_movl_T1_reg(s, rn);
5451 gen_op_addl_T0_T1_setq();
5452 }
5453 gen_movl_reg_T0(s, rd);
5454 }
5455 }
5456 break;
5457 default:
5458 goto illegal_op;
5459 }
5460 } else if (((insn & 0x0e000000) == 0 &&
5461 (insn & 0x00000090) != 0x90) ||
5462 ((insn & 0x0e000000) == (1 << 25))) {
5463 int set_cc, logic_cc, shiftop;
5464
5465 op1 = (insn >> 21) & 0xf;
5466 set_cc = (insn >> 20) & 1;
5467 logic_cc = table_logic_cc[op1] & set_cc;
5468
5469 /* data processing instruction */
5470 if (insn & (1 << 25)) {
5471 /* immediate operand */
5472 val = insn & 0xff;
5473 shift = ((insn >> 8) & 0xf) * 2;
5474 if (shift)
5475 val = (val >> shift) | (val << (32 - shift));
5476 gen_op_movl_T1_im(val);
5477 if (logic_cc && shift)
b26eefb6 5478 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
5479 } else {
5480 /* register */
5481 rm = (insn) & 0xf;
5482 gen_movl_T1_reg(s, rm);
5483 shiftop = (insn >> 5) & 3;
5484 if (!(insn & (1 << 4))) {
5485 shift = (insn >> 7) & 0x1f;
9a119ff6 5486 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
5487 } else {
5488 rs = (insn >> 8) & 0xf;
5489 gen_movl_T0_reg(s, rs);
5490 if (logic_cc) {
5491 gen_shift_T1_T0_cc[shiftop]();
5492 } else {
5493 gen_shift_T1_T0[shiftop]();
5494 }
5495 }
5496 }
5497 if (op1 != 0x0f && op1 != 0x0d) {
5498 rn = (insn >> 16) & 0xf;
5499 gen_movl_T0_reg(s, rn);
5500 }
5501 rd = (insn >> 12) & 0xf;
5502 switch(op1) {
5503 case 0x00:
5504 gen_op_andl_T0_T1();
5505 gen_movl_reg_T0(s, rd);
5506 if (logic_cc)
5507 gen_op_logic_T0_cc();
5508 break;
5509 case 0x01:
5510 gen_op_xorl_T0_T1();
5511 gen_movl_reg_T0(s, rd);
5512 if (logic_cc)
5513 gen_op_logic_T0_cc();
5514 break;
5515 case 0x02:
5516 if (set_cc && rd == 15) {
5517 /* SUBS r15, ... is used for exception return. */
5518 if (IS_USER(s))
5519 goto illegal_op;
5520 gen_op_subl_T0_T1_cc();
5521 gen_exception_return(s);
5522 } else {
5523 if (set_cc)
5524 gen_op_subl_T0_T1_cc();
5525 else
5526 gen_op_subl_T0_T1();
5527 gen_movl_reg_T0(s, rd);
5528 }
5529 break;
5530 case 0x03:
5531 if (set_cc)
5532 gen_op_rsbl_T0_T1_cc();
5533 else
5534 gen_op_rsbl_T0_T1();
5535 gen_movl_reg_T0(s, rd);
5536 break;
5537 case 0x04:
5538 if (set_cc)
5539 gen_op_addl_T0_T1_cc();
5540 else
5541 gen_op_addl_T0_T1();
5542 gen_movl_reg_T0(s, rd);
5543 break;
5544 case 0x05:
5545 if (set_cc)
5546 gen_op_adcl_T0_T1_cc();
5547 else
b26eefb6 5548 gen_adc_T0_T1();
9ee6e8bb
PB
5549 gen_movl_reg_T0(s, rd);
5550 break;
5551 case 0x06:
5552 if (set_cc)
5553 gen_op_sbcl_T0_T1_cc();
5554 else
3670669c 5555 gen_sbc_T0_T1();
9ee6e8bb
PB
5556 gen_movl_reg_T0(s, rd);
5557 break;
5558 case 0x07:
5559 if (set_cc)
5560 gen_op_rscl_T0_T1_cc();
5561 else
3670669c 5562 gen_rsc_T0_T1();
9ee6e8bb
PB
5563 gen_movl_reg_T0(s, rd);
5564 break;
5565 case 0x08:
5566 if (set_cc) {
5567 gen_op_andl_T0_T1();
5568 gen_op_logic_T0_cc();
5569 }
5570 break;
5571 case 0x09:
5572 if (set_cc) {
5573 gen_op_xorl_T0_T1();
5574 gen_op_logic_T0_cc();
5575 }
5576 break;
5577 case 0x0a:
5578 if (set_cc) {
5579 gen_op_subl_T0_T1_cc();
5580 }
5581 break;
5582 case 0x0b:
5583 if (set_cc) {
5584 gen_op_addl_T0_T1_cc();
5585 }
5586 break;
5587 case 0x0c:
5588 gen_op_orl_T0_T1();
5589 gen_movl_reg_T0(s, rd);
5590 if (logic_cc)
5591 gen_op_logic_T0_cc();
5592 break;
5593 case 0x0d:
5594 if (logic_cc && rd == 15) {
5595 /* MOVS r15, ... is used for exception return. */
5596 if (IS_USER(s))
5597 goto illegal_op;
5598 gen_op_movl_T0_T1();
5599 gen_exception_return(s);
5600 } else {
5601 gen_movl_reg_T1(s, rd);
5602 if (logic_cc)
5603 gen_op_logic_T1_cc();
5604 }
5605 break;
5606 case 0x0e:
5607 gen_op_bicl_T0_T1();
5608 gen_movl_reg_T0(s, rd);
5609 if (logic_cc)
5610 gen_op_logic_T0_cc();
5611 break;
5612 default:
5613 case 0x0f:
5614 gen_op_notl_T1();
5615 gen_movl_reg_T1(s, rd);
5616 if (logic_cc)
5617 gen_op_logic_T1_cc();
5618 break;
5619 }
5620 } else {
5621 /* other instructions */
5622 op1 = (insn >> 24) & 0xf;
5623 switch(op1) {
5624 case 0x0:
5625 case 0x1:
5626 /* multiplies, extra load/stores */
5627 sh = (insn >> 5) & 3;
5628 if (sh == 0) {
5629 if (op1 == 0x0) {
5630 rd = (insn >> 16) & 0xf;
5631 rn = (insn >> 12) & 0xf;
5632 rs = (insn >> 8) & 0xf;
5633 rm = (insn) & 0xf;
5634 op1 = (insn >> 20) & 0xf;
5635 switch (op1) {
5636 case 0: case 1: case 2: case 3: case 6:
5637 /* 32 bit mul */
5638 gen_movl_T0_reg(s, rs);
5639 gen_movl_T1_reg(s, rm);
5640 gen_op_mul_T0_T1();
5641 if (insn & (1 << 22)) {
5642 /* Subtract (mls) */
5643 ARCH(6T2);
5644 gen_movl_T1_reg(s, rn);
5645 gen_op_rsbl_T0_T1();
5646 } else if (insn & (1 << 21)) {
5647 /* Add */
5648 gen_movl_T1_reg(s, rn);
5649 gen_op_addl_T0_T1();
5650 }
5651 if (insn & (1 << 20))
5652 gen_op_logic_T0_cc();
5653 gen_movl_reg_T0(s, rd);
5654 break;
5655 default:
5656 /* 64 bit mul */
5657 gen_movl_T0_reg(s, rs);
5658 gen_movl_T1_reg(s, rm);
5659 if (insn & (1 << 22))
5660 gen_op_imull_T0_T1();
5661 else
5662 gen_op_mull_T0_T1();
5663 if (insn & (1 << 21)) /* mult accumulate */
5664 gen_op_addq_T0_T1(rn, rd);
5665 if (!(insn & (1 << 23))) { /* double accumulate */
5666 ARCH(6);
5667 gen_op_addq_lo_T0_T1(rn);
5668 gen_op_addq_lo_T0_T1(rd);
5669 }
5670 if (insn & (1 << 20))
5671 gen_op_logicq_cc();
5672 gen_movl_reg_T0(s, rn);
5673 gen_movl_reg_T1(s, rd);
5674 break;
5675 }
5676 } else {
5677 rn = (insn >> 16) & 0xf;
5678 rd = (insn >> 12) & 0xf;
5679 if (insn & (1 << 23)) {
5680 /* load/store exclusive */
5681 gen_movl_T1_reg(s, rn);
5682 if (insn & (1 << 20)) {
5683 gen_ldst(ldlex, s);
5684 } else {
5685 rm = insn & 0xf;
5686 gen_movl_T0_reg(s, rm);
5687 gen_ldst(stlex, s);
5688 }
5689 gen_movl_reg_T0(s, rd);
5690 } else {
5691 /* SWP instruction */
5692 rm = (insn) & 0xf;
5693
5694 gen_movl_T0_reg(s, rm);
5695 gen_movl_T1_reg(s, rn);
5696 if (insn & (1 << 22)) {
5697 gen_ldst(swpb, s);
5698 } else {
5699 gen_ldst(swpl, s);
5700 }
5701 gen_movl_reg_T0(s, rd);
5702 }
5703 }
5704 } else {
5705 int address_offset;
5706 int load;
5707 /* Misc load/store */
5708 rn = (insn >> 16) & 0xf;
5709 rd = (insn >> 12) & 0xf;
5710 gen_movl_T1_reg(s, rn);
5711 if (insn & (1 << 24))
5712 gen_add_datah_offset(s, insn, 0);
5713 address_offset = 0;
5714 if (insn & (1 << 20)) {
5715 /* load */
5716 switch(sh) {
5717 case 1:
5718 gen_ldst(lduw, s);
5719 break;
5720 case 2:
5721 gen_ldst(ldsb, s);
5722 break;
5723 default:
5724 case 3:
5725 gen_ldst(ldsw, s);
5726 break;
5727 }
5728 load = 1;
5729 } else if (sh & 2) {
5730 /* doubleword */
5731 if (sh & 1) {
5732 /* store */
5733 gen_movl_T0_reg(s, rd);
5734 gen_ldst(stl, s);
5735 gen_op_addl_T1_im(4);
5736 gen_movl_T0_reg(s, rd + 1);
5737 gen_ldst(stl, s);
5738 load = 0;
5739 } else {
5740 /* load */
5741 gen_ldst(ldl, s);
5742 gen_movl_reg_T0(s, rd);
5743 gen_op_addl_T1_im(4);
5744 gen_ldst(ldl, s);
5745 rd++;
5746 load = 1;
5747 }
5748 address_offset = -4;
5749 } else {
5750 /* store */
5751 gen_movl_T0_reg(s, rd);
5752 gen_ldst(stw, s);
5753 load = 0;
5754 }
5755 /* Perform base writeback before the loaded value to
5756 ensure correct behavior with overlapping index registers.
5757 ldrd with base writeback is is undefined if the
5758 destination and index registers overlap. */
5759 if (!(insn & (1 << 24))) {
5760 gen_add_datah_offset(s, insn, address_offset);
5761 gen_movl_reg_T1(s, rn);
5762 } else if (insn & (1 << 21)) {
5763 if (address_offset)
5764 gen_op_addl_T1_im(address_offset);
5765 gen_movl_reg_T1(s, rn);
5766 }
5767 if (load) {
5768 /* Complete the load. */
5769 gen_movl_reg_T0(s, rd);
5770 }
5771 }
5772 break;
5773 case 0x4:
5774 case 0x5:
5775 goto do_ldst;
5776 case 0x6:
5777 case 0x7:
5778 if (insn & (1 << 4)) {
5779 ARCH(6);
5780 /* Armv6 Media instructions. */
5781 rm = insn & 0xf;
5782 rn = (insn >> 16) & 0xf;
2c0262af 5783 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
5784 rs = (insn >> 8) & 0xf;
5785 switch ((insn >> 23) & 3) {
5786 case 0: /* Parallel add/subtract. */
5787 op1 = (insn >> 20) & 7;
6ddbc6e4
PB
5788 tmp = load_reg(s, rn);
5789 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
5790 sh = (insn >> 5) & 7;
5791 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
5792 goto illegal_op;
6ddbc6e4
PB
5793 gen_arm_parallel_addsub(op1, sh, tmp, tmp2);
5794 dead_tmp(tmp2);
5795 store_reg(s, rd, tmp);
9ee6e8bb
PB
5796 break;
5797 case 1:
5798 if ((insn & 0x00700020) == 0) {
5799 /* Hafword pack. */
3670669c
PB
5800 tmp = load_reg(s, rn);
5801 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
5802 shift = (insn >> 7) & 0x1f;
5803 if (shift)
3670669c
PB
5804 tcg_gen_shli_i32(tmp2, tmp2, shift);
5805 if (insn & (1 << 6)) {
5806 /* pkhtb */
5807 tcg_gen_andi_i32(tmp, tmp, 0xffff0000);
5808 tcg_gen_andi_i32(tmp2, tmp2, 0xffff);
5809 } else {
5810 /* pkhbt */
5811 tcg_gen_andi_i32(tmp, tmp, 0xffff);
5812 tcg_gen_andi_i32(tmp2, tmp2, 0xffff0000);
5813 }
5814 tcg_gen_or_i32(tmp, tmp, tmp2);
5815 store_reg(s, rd, tmp);
9ee6e8bb
PB
5816 } else if ((insn & 0x00200020) == 0x00200000) {
5817 /* [us]sat */
6ddbc6e4 5818 tmp = load_reg(s, rm);
9ee6e8bb
PB
5819 shift = (insn >> 7) & 0x1f;
5820 if (insn & (1 << 6)) {
5821 if (shift == 0)
5822 shift = 31;
6ddbc6e4 5823 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 5824 } else {
6ddbc6e4 5825 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb
PB
5826 }
5827 sh = (insn >> 16) & 0x1f;
5828 if (sh != 0) {
5829 if (insn & (1 << 22))
6ddbc6e4 5830 gen_helper_usat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 5831 else
6ddbc6e4 5832 gen_helper_ssat(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 5833 }
6ddbc6e4 5834 store_reg(s, rd, tmp);
9ee6e8bb
PB
5835 } else if ((insn & 0x00300fe0) == 0x00200f20) {
5836 /* [us]sat16 */
6ddbc6e4 5837 tmp = load_reg(s, rm);
9ee6e8bb
PB
5838 sh = (insn >> 16) & 0x1f;
5839 if (sh != 0) {
5840 if (insn & (1 << 22))
6ddbc6e4 5841 gen_helper_usat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 5842 else
6ddbc6e4 5843 gen_helper_ssat16(tmp, tmp, tcg_const_i32(sh));
9ee6e8bb 5844 }
6ddbc6e4 5845 store_reg(s, rd, tmp);
9ee6e8bb
PB
5846 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
5847 /* Select bytes. */
6ddbc6e4
PB
5848 tmp = load_reg(s, rn);
5849 tmp2 = load_reg(s, rm);
5850 tmp3 = new_tmp();
5851 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
5852 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
5853 dead_tmp(tmp3);
5854 dead_tmp(tmp2);
5855 store_reg(s, rd, tmp);
9ee6e8bb
PB
5856 } else if ((insn & 0x000003e0) == 0x00000060) {
5857 gen_movl_T1_reg(s, rm);
5858 shift = (insn >> 10) & 3;
5859 /* ??? In many cases it's not neccessary to do a
5860 rotate, a shift is sufficient. */
5861 if (shift != 0)
5862 gen_op_rorl_T1_im(shift * 8);
5863 op1 = (insn >> 20) & 7;
5864 switch (op1) {
b26eefb6
PB
5865 case 0: gen_sxtb16(cpu_T[1]); break;
5866 case 2: gen_sxtb(cpu_T[1]); break;
5867 case 3: gen_sxth(cpu_T[1]); break;
5868 case 4: gen_uxtb16(cpu_T[1]); break;
5869 case 6: gen_uxtb(cpu_T[1]); break;
5870 case 7: gen_uxth(cpu_T[1]); break;
9ee6e8bb
PB
5871 default: goto illegal_op;
5872 }
5873 if (rn != 15) {
b26eefb6 5874 tmp = load_reg(s, rn);
9ee6e8bb 5875 if ((op1 & 3) == 0) {
b26eefb6 5876 gen_add16(cpu_T[1], tmp);
9ee6e8bb 5877 } else {
b26eefb6
PB
5878 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
5879 dead_tmp(tmp);
9ee6e8bb
PB
5880 }
5881 }
5882 gen_movl_reg_T1(s, rd);
5883 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
5884 /* rev */
5885 gen_movl_T0_reg(s, rm);
5886 if (insn & (1 << 22)) {
5887 if (insn & (1 << 7)) {
3670669c 5888 gen_revsh(cpu_T[0]);
9ee6e8bb
PB
5889 } else {
5890 ARCH(6T2);
3670669c 5891 gen_helper_rbit(cpu_T[0], cpu_T[0]);
9ee6e8bb
PB
5892 }
5893 } else {
5894 if (insn & (1 << 7))
3670669c 5895 gen_rev16(cpu_T[0]);
9ee6e8bb
PB
5896 else
5897 gen_op_rev_T0();
5898 }
5899 gen_movl_reg_T0(s, rd);
5900 } else {
5901 goto illegal_op;
5902 }
5903 break;
5904 case 2: /* Multiplies (Type 3). */
5905 gen_movl_T0_reg(s, rm);
5906 gen_movl_T1_reg(s, rs);
5907 if (insn & (1 << 20)) {
5908 /* Signed multiply most significant [accumulate]. */
5909 gen_op_imull_T0_T1();
5910 if (insn & (1 << 5))
d9ba4830 5911 gen_roundqd(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
5912 else
5913 gen_op_movl_T0_T1();
5914 if (rn != 15) {
5915 gen_movl_T1_reg(s, rn);
5916 if (insn & (1 << 6)) {
5917 gen_op_addl_T0_T1();
5918 } else {
5919 gen_op_rsbl_T0_T1();
5920 }
5921 }
5922 gen_movl_reg_T0(s, rd);
5923 } else {
5924 if (insn & (1 << 5))
8f01245e 5925 gen_swap_half(cpu_T[1]);
3670669c 5926 gen_smul_dual(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
5927 if (insn & (1 << 22)) {
5928 if (insn & (1 << 6)) {
5929 /* smlald */
5930 gen_op_addq_T0_T1_dual(rn, rd);
5931 } else {
5932 /* smlsld */
5933 gen_op_subq_T0_T1_dual(rn, rd);
5934 }
5935 } else {
5936 /* This addition cannot overflow. */
5937 if (insn & (1 << 6)) {
5938 /* sm[ul]sd */
5939 gen_op_subl_T0_T1();
5940 } else {
5941 /* sm[ul]ad */
5942 gen_op_addl_T0_T1();
5943 }
5944 if (rn != 15)
5945 {
5946 gen_movl_T1_reg(s, rn);
5947 gen_op_addl_T0_T1_setq();
5948 }
5949 gen_movl_reg_T0(s, rd);
5950 }
5951 }
5952 break;
5953 case 3:
5954 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
5955 switch (op1) {
5956 case 0: /* Unsigned sum of absolute differences. */
6ddbc6e4
PB
5957 ARCH(6);
5958 tmp = load_reg(s, rm);
5959 tmp2 = load_reg(s, rs);
5960 gen_helper_usad8(tmp, tmp, tmp2);
5961 dead_tmp(tmp2);
9ee6e8bb 5962 if (rn != 15) {
6ddbc6e4
PB
5963 tmp2 = load_reg(s, rn);
5964 tcg_gen_add_i32(tmp, tmp, tmp2);
5965 dead_tmp(tmp2);
9ee6e8bb 5966 }
6ddbc6e4 5967 store_reg(s, rd, tmp);
9ee6e8bb
PB
5968 break;
5969 case 0x20: case 0x24: case 0x28: case 0x2c:
5970 /* Bitfield insert/clear. */
5971 ARCH(6T2);
5972 shift = (insn >> 7) & 0x1f;
5973 i = (insn >> 16) & 0x1f;
5974 i = i + 1 - shift;
5975 if (rm == 15) {
5976 gen_op_movl_T1_im(0);
5977 } else {
5978 gen_movl_T1_reg(s, rm);
5979 }
5980 if (i != 32) {
5981 gen_movl_T0_reg(s, rd);
3670669c
PB
5982 gen_bfi(cpu_T[1], cpu_T[0], cpu_T[1],
5983 shift, ((1u << i) - 1) << shift);
9ee6e8bb
PB
5984 }
5985 gen_movl_reg_T1(s, rd);
5986 break;
5987 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
5988 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5989 gen_movl_T1_reg(s, rm);
5990 shift = (insn >> 7) & 0x1f;
5991 i = ((insn >> 16) & 0x1f) + 1;
5992 if (shift + i > 32)
5993 goto illegal_op;
5994 if (i < 32) {
5995 if (op1 & 0x20) {
3670669c 5996 gen_ubfx(cpu_T[1], shift, (1u << i) - 1);
9ee6e8bb 5997 } else {
3670669c 5998 gen_sbfx(cpu_T[1], shift, i);
9ee6e8bb
PB
5999 }
6000 }
6001 gen_movl_reg_T1(s, rd);
6002 break;
6003 default:
6004 goto illegal_op;
6005 }
6006 break;
6007 }
6008 break;
6009 }
6010 do_ldst:
6011 /* Check for undefined extension instructions
6012 * per the ARM Bible IE:
6013 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
6014 */
6015 sh = (0xf << 20) | (0xf << 4);
6016 if (op1 == 0x7 && ((insn & sh) == sh))
6017 {
6018 goto illegal_op;
6019 }
6020 /* load/store byte/word */
6021 rn = (insn >> 16) & 0xf;
6022 rd = (insn >> 12) & 0xf;
6023 gen_movl_T1_reg(s, rn);
6024 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
6025 if (insn & (1 << 24))
6026 gen_add_data_offset(s, insn);
6027 if (insn & (1 << 20)) {
6028 /* load */
6029 s->is_mem = 1;
6030#if defined(CONFIG_USER_ONLY)
6031 if (insn & (1 << 22))
6032 gen_op_ldub_raw();
6033 else
6034 gen_op_ldl_raw();
6035#else
6036 if (insn & (1 << 22)) {
6037 if (i)
6038 gen_op_ldub_user();
6039 else
6040 gen_op_ldub_kernel();
6041 } else {
6042 if (i)
6043 gen_op_ldl_user();
6044 else
6045 gen_op_ldl_kernel();
6046 }
6047#endif
6048 } else {
6049 /* store */
6050 gen_movl_T0_reg(s, rd);
6051#if defined(CONFIG_USER_ONLY)
6052 if (insn & (1 << 22))
6053 gen_op_stb_raw();
6054 else
6055 gen_op_stl_raw();
6056#else
6057 if (insn & (1 << 22)) {
6058 if (i)
6059 gen_op_stb_user();
6060 else
6061 gen_op_stb_kernel();
6062 } else {
6063 if (i)
6064 gen_op_stl_user();
6065 else
6066 gen_op_stl_kernel();
6067 }
6068#endif
6069 }
6070 if (!(insn & (1 << 24))) {
6071 gen_add_data_offset(s, insn);
6072 gen_movl_reg_T1(s, rn);
6073 } else if (insn & (1 << 21))
6074 gen_movl_reg_T1(s, rn); {
6075 }
6076 if (insn & (1 << 20)) {
6077 /* Complete the load. */
6078 if (rd == 15)
d9ba4830 6079 gen_bx_T0(s);
9ee6e8bb
PB
6080 else
6081 gen_movl_reg_T0(s, rd);
6082 }
6083 break;
6084 case 0x08:
6085 case 0x09:
6086 {
6087 int j, n, user, loaded_base;
6088 /* load/store multiple words */
6089 /* XXX: store correct base if write back */
6090 user = 0;
6091 if (insn & (1 << 22)) {
6092 if (IS_USER(s))
6093 goto illegal_op; /* only usable in supervisor mode */
6094
6095 if ((insn & (1 << 15)) == 0)
6096 user = 1;
6097 }
6098 rn = (insn >> 16) & 0xf;
6099 gen_movl_T1_reg(s, rn);
6100
6101 /* compute total size */
6102 loaded_base = 0;
6103 n = 0;
6104 for(i=0;i<16;i++) {
6105 if (insn & (1 << i))
6106 n++;
6107 }
6108 /* XXX: test invalid n == 0 case ? */
6109 if (insn & (1 << 23)) {
6110 if (insn & (1 << 24)) {
6111 /* pre increment */
6112 gen_op_addl_T1_im(4);
6113 } else {
6114 /* post increment */
6115 }
6116 } else {
6117 if (insn & (1 << 24)) {
6118 /* pre decrement */
6119 gen_op_addl_T1_im(-(n * 4));
6120 } else {
6121 /* post decrement */
6122 if (n != 1)
6123 gen_op_addl_T1_im(-((n - 1) * 4));
6124 }
6125 }
6126 j = 0;
6127 for(i=0;i<16;i++) {
6128 if (insn & (1 << i)) {
6129 if (insn & (1 << 20)) {
6130 /* load */
6131 gen_ldst(ldl, s);
6132 if (i == 15) {
d9ba4830 6133 gen_bx_T0(s);
9ee6e8bb
PB
6134 } else if (user) {
6135 gen_op_movl_user_T0(i);
6136 } else if (i == rn) {
6137 gen_op_movl_T2_T0();
6138 loaded_base = 1;
6139 } else {
6140 gen_movl_reg_T0(s, i);
6141 }
6142 } else {
6143 /* store */
6144 if (i == 15) {
6145 /* special case: r15 = PC + 8 */
6146 val = (long)s->pc + 4;
b26eefb6 6147 gen_op_movl_T0_im(val);
9ee6e8bb
PB
6148 } else if (user) {
6149 gen_op_movl_T0_user(i);
6150 } else {
6151 gen_movl_T0_reg(s, i);
6152 }
6153 gen_ldst(stl, s);
6154 }
6155 j++;
6156 /* no need to add after the last transfer */
6157 if (j != n)
6158 gen_op_addl_T1_im(4);
6159 }
6160 }
6161 if (insn & (1 << 21)) {
6162 /* write back */
6163 if (insn & (1 << 23)) {
6164 if (insn & (1 << 24)) {
6165 /* pre increment */
6166 } else {
6167 /* post increment */
6168 gen_op_addl_T1_im(4);
6169 }
6170 } else {
6171 if (insn & (1 << 24)) {
6172 /* pre decrement */
6173 if (n != 1)
6174 gen_op_addl_T1_im(-((n - 1) * 4));
6175 } else {
6176 /* post decrement */
6177 gen_op_addl_T1_im(-(n * 4));
6178 }
6179 }
6180 gen_movl_reg_T1(s, rn);
6181 }
6182 if (loaded_base) {
6183 gen_op_movl_T0_T2();
6184 gen_movl_reg_T0(s, rn);
6185 }
6186 if ((insn & (1 << 22)) && !user) {
6187 /* Restore CPSR from SPSR. */
d9ba4830
PB
6188 tmp = load_cpu_field(spsr);
6189 gen_set_cpsr(tmp, 0xffffffff);
6190 dead_tmp(tmp);
9ee6e8bb
PB
6191 s->is_jmp = DISAS_UPDATE;
6192 }
6193 }
6194 break;
6195 case 0xa:
6196 case 0xb:
6197 {
6198 int32_t offset;
6199
6200 /* branch (and link) */
6201 val = (int32_t)s->pc;
6202 if (insn & (1 << 24)) {
6203 gen_op_movl_T0_im(val);
b26eefb6 6204 gen_movl_reg_T0(s, 14);
9ee6e8bb
PB
6205 }
6206 offset = (((int32_t)insn << 8) >> 8);
6207 val += (offset << 2) + 4;
6208 gen_jmp(s, val);
6209 }
6210 break;
6211 case 0xc:
6212 case 0xd:
6213 case 0xe:
6214 /* Coprocessor. */
6215 if (disas_coproc_insn(env, s, insn))
6216 goto illegal_op;
6217 break;
6218 case 0xf:
6219 /* swi */
6220 gen_op_movl_T0_im((long)s->pc);
b26eefb6 6221 gen_set_pc_T0();
9ee6e8bb
PB
6222 s->is_jmp = DISAS_SWI;
6223 break;
6224 default:
6225 illegal_op:
6226 gen_set_condexec(s);
6227 gen_op_movl_T0_im((long)s->pc - 4);
b26eefb6 6228 gen_set_pc_T0();
d9ba4830 6229 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
6230 s->is_jmp = DISAS_JUMP;
6231 break;
6232 }
6233 }
6234}
6235
/* Return true if this is a Thumb-2 logical op. */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0-7 form the logical group (and, bic, orr, orn, eor, ...);
       8 and above are the arithmetic ops.  */
    return op <= 7;
}
6242
6243/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
6244 then set condition code flags based on the result of the operation.
6245 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
6246 to the high bit of T1.
6247 Returns zero if the opcode is valid. */
6248
6249static int
6250gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
6251{
6252 int logic_cc;
6253
6254 logic_cc = 0;
6255 switch (op) {
6256 case 0: /* and */
6257 gen_op_andl_T0_T1();
6258 logic_cc = conds;
6259 break;
6260 case 1: /* bic */
6261 gen_op_bicl_T0_T1();
6262 logic_cc = conds;
6263 break;
6264 case 2: /* orr */
6265 gen_op_orl_T0_T1();
6266 logic_cc = conds;
6267 break;
6268 case 3: /* orn */
6269 gen_op_notl_T1();
6270 gen_op_orl_T0_T1();
6271 logic_cc = conds;
6272 break;
6273 case 4: /* eor */
6274 gen_op_xorl_T0_T1();
6275 logic_cc = conds;
6276 break;
6277 case 8: /* add */
6278 if (conds)
6279 gen_op_addl_T0_T1_cc();
6280 else
6281 gen_op_addl_T0_T1();
6282 break;
6283 case 10: /* adc */
6284 if (conds)
6285 gen_op_adcl_T0_T1_cc();
6286 else
b26eefb6 6287 gen_adc_T0_T1();
9ee6e8bb
PB
6288 break;
6289 case 11: /* sbc */
6290 if (conds)
6291 gen_op_sbcl_T0_T1_cc();
6292 else
3670669c 6293 gen_sbc_T0_T1();
9ee6e8bb
PB
6294 break;
6295 case 13: /* sub */
6296 if (conds)
6297 gen_op_subl_T0_T1_cc();
6298 else
6299 gen_op_subl_T0_T1();
6300 break;
6301 case 14: /* rsb */
6302 if (conds)
6303 gen_op_rsbl_T0_T1_cc();
6304 else
6305 gen_op_rsbl_T0_T1();
6306 break;
6307 default: /* 5, 6, 7, 9, 12, 15. */
6308 return 1;
6309 }
6310 if (logic_cc) {
6311 gen_op_logic_T0_cc();
6312 if (shifter_out)
b26eefb6 6313 gen_set_CF_bit31(cpu_T[1]);
9ee6e8bb
PB
6314 }
6315 return 0;
6316}
6317
6318/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
6319 is not legal. */
6320static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
6321{
6322 uint32_t insn, imm, shift, offset, addr;
6323 uint32_t rd, rn, rm, rs;
b26eefb6 6324 TCGv tmp;
6ddbc6e4
PB
6325 TCGv tmp2;
6326 TCGv tmp3;
9ee6e8bb
PB
6327 int op;
6328 int shiftop;
6329 int conds;
6330 int logic_cc;
6331
6332 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
6333 || arm_feature (env, ARM_FEATURE_M))) {
 6334 /* Thumb-1 cores may need to treat bl and blx as a pair of
6335 16-bit instructions to get correct prefetch abort behavior. */
6336 insn = insn_hw1;
6337 if ((insn & (1 << 12)) == 0) {
6338 /* Second half of blx. */
6339 offset = ((insn & 0x7ff) << 1);
d9ba4830
PB
6340 tmp = load_reg(s, 14);
6341 tcg_gen_addi_i32(tmp, tmp, offset);
6342 tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
9ee6e8bb
PB
6343
6344 addr = (uint32_t)s->pc;
d9ba4830
PB
6345 tmp2 = new_tmp();
6346 tcg_gen_movi_i32(tmp2, addr | 1);
6347 store_reg(s, 14, tmp2);
6348 gen_bx(s, tmp);
9ee6e8bb
PB
6349 return 0;
6350 }
6351 if (insn & (1 << 11)) {
6352 /* Second half of bl. */
6353 offset = ((insn & 0x7ff) << 1) | 1;
d9ba4830
PB
6354 tmp = load_reg(s, 14);
6355 tcg_gen_addi_i32(tmp, tmp, 14);
9ee6e8bb
PB
6356
6357 addr = (uint32_t)s->pc;
d9ba4830
PB
6358 tmp2 = new_tmp();
6359 tcg_gen_movi_i32(tmp2, addr | 1);
6360 store_reg(s, 14, tmp2);
6361 gen_bx(s, tmp);
9ee6e8bb
PB
6362 return 0;
6363 }
6364 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
6365 /* Instruction spans a page boundary. Implement it as two
6366 16-bit instructions in case the second half causes an
6367 prefetch abort. */
6368 offset = ((int32_t)insn << 21) >> 9;
6369 addr = s->pc + 2 + offset;
6370 gen_op_movl_T0_im(addr);
6371 gen_movl_reg_T0(s, 14);
6372 return 0;
6373 }
6374 /* Fall through to 32-bit decode. */
6375 }
6376
6377 insn = lduw_code(s->pc);
6378 s->pc += 2;
6379 insn |= (uint32_t)insn_hw1 << 16;
6380
6381 if ((insn & 0xf800e800) != 0xf000e800) {
6382 ARCH(6T2);
6383 }
6384
6385 rn = (insn >> 16) & 0xf;
6386 rs = (insn >> 12) & 0xf;
6387 rd = (insn >> 8) & 0xf;
6388 rm = insn & 0xf;
6389 switch ((insn >> 25) & 0xf) {
6390 case 0: case 1: case 2: case 3:
6391 /* 16-bit instructions. Should never happen. */
6392 abort();
6393 case 4:
6394 if (insn & (1 << 22)) {
6395 /* Other load/store, table branch. */
6396 if (insn & 0x01200000) {
6397 /* Load/store doubleword. */
6398 if (rn == 15) {
6399 gen_op_movl_T1_im(s->pc & ~3);
6400 } else {
6401 gen_movl_T1_reg(s, rn);
6402 }
6403 offset = (insn & 0xff) * 4;
6404 if ((insn & (1 << 23)) == 0)
6405 offset = -offset;
6406 if (insn & (1 << 24)) {
6407 gen_op_addl_T1_im(offset);
6408 offset = 0;
6409 }
6410 if (insn & (1 << 20)) {
6411 /* ldrd */
6412 gen_ldst(ldl, s);
6413 gen_movl_reg_T0(s, rs);
6414 gen_op_addl_T1_im(4);
6415 gen_ldst(ldl, s);
6416 gen_movl_reg_T0(s, rd);
6417 } else {
6418 /* strd */
6419 gen_movl_T0_reg(s, rs);
6420 gen_ldst(stl, s);
6421 gen_op_addl_T1_im(4);
6422 gen_movl_T0_reg(s, rd);
6423 gen_ldst(stl, s);
6424 }
6425 if (insn & (1 << 21)) {
6426 /* Base writeback. */
6427 if (rn == 15)
6428 goto illegal_op;
6429 gen_op_addl_T1_im(offset - 4);
6430 gen_movl_reg_T1(s, rn);
6431 }
6432 } else if ((insn & (1 << 23)) == 0) {
6433 /* Load/store exclusive word. */
6434 gen_movl_T0_reg(s, rd);
2c0262af 6435 gen_movl_T1_reg(s, rn);
2c0262af 6436 if (insn & (1 << 20)) {
9ee6e8bb
PB
6437 gen_ldst(ldlex, s);
6438 } else {
6439 gen_ldst(stlex, s);
6440 }
6441 gen_movl_reg_T0(s, rd);
6442 } else if ((insn & (1 << 6)) == 0) {
6443 /* Table Branch. */
6444 if (rn == 15) {
6445 gen_op_movl_T1_im(s->pc);
6446 } else {
6447 gen_movl_T1_reg(s, rn);
6448 }
b26eefb6
PB
6449 tmp = load_reg(s, rm);
6450 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
9ee6e8bb
PB
6451 if (insn & (1 << 4)) {
6452 /* tbh */
b26eefb6
PB
6453 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6454 dead_tmp(tmp);
9ee6e8bb
PB
6455 gen_ldst(lduw, s);
6456 } else { /* tbb */
b26eefb6 6457 dead_tmp(tmp);
9ee6e8bb
PB
6458 gen_ldst(ldub, s);
6459 }
8f01245e
PB
6460 tcg_gen_shli_i32(cpu_T[0], cpu_T[0], 1);
6461 tcg_gen_addi_i32(cpu_T[0], cpu_T[0], s->pc);
6462 gen_movl_reg_T0(s, 15);
9ee6e8bb
PB
6463 } else {
6464 /* Load/store exclusive byte/halfword/doubleword. */
6465 op = (insn >> 4) & 0x3;
6466 gen_movl_T1_reg(s, rn);
6467 if (insn & (1 << 20)) {
6468 switch (op) {
6469 case 0:
6470 gen_ldst(ldbex, s);
6471 break;
2c0262af 6472 case 1:
9ee6e8bb 6473 gen_ldst(ldwex, s);
2c0262af 6474 break;
9ee6e8bb
PB
6475 case 3:
6476 gen_ldst(ldqex, s);
6477 gen_movl_reg_T1(s, rd);
2c0262af
FB
6478 break;
6479 default:
9ee6e8bb
PB
6480 goto illegal_op;
6481 }
6482 gen_movl_reg_T0(s, rs);
6483 } else {
6484 gen_movl_T0_reg(s, rs);
6485 switch (op) {
6486 case 0:
6487 gen_ldst(stbex, s);
6488 break;
6489 case 1:
6490 gen_ldst(stwex, s);
6491 break;
2c0262af 6492 case 3:
9ee6e8bb
PB
6493 gen_movl_T2_reg(s, rd);
6494 gen_ldst(stqex, s);
2c0262af 6495 break;
9ee6e8bb
PB
6496 default:
6497 goto illegal_op;
2c0262af 6498 }
9ee6e8bb
PB
6499 gen_movl_reg_T0(s, rm);
6500 }
6501 }
6502 } else {
6503 /* Load/store multiple, RFE, SRS. */
6504 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6505 /* Not available in user mode. */
6506 if (!IS_USER(s))
6507 goto illegal_op;
6508 if (insn & (1 << 20)) {
6509 /* rfe */
6510 gen_movl_T1_reg(s, rn);
6511 if (insn & (1 << 24)) {
99c475ab 6512 gen_op_addl_T1_im(4);
9ee6e8bb
PB
6513 } else {
6514 gen_op_addl_T1_im(-4);
6515 }
6516 /* Load CPSR into T2 and PC into T0. */
6517 gen_ldst(ldl, s);
6518 gen_op_movl_T2_T0();
6519 gen_op_addl_T1_im(-4);
6520 gen_ldst(ldl, s);
6521 if (insn & (1 << 21)) {
6522 /* Base writeback. */
6523 if (insn & (1 << 24))
6524 gen_op_addl_T1_im(8);
6525 gen_movl_reg_T1(s, rn);
6526 }
6527 gen_rfe(s);
6528 } else {
6529 /* srs */
6530 op = (insn & 0x1f);
6531 if (op == (env->uncached_cpsr & CPSR_M)) {
6532 gen_movl_T1_reg(s, 13);
6533 } else {
6534 gen_op_movl_T1_r13_banked(op);
6535 }
6536 if ((insn & (1 << 24)) == 0) {
6537 gen_op_addl_T1_im(-8);
6538 }
6539 gen_movl_T0_reg(s, 14);
6540 gen_ldst(stl, s);
d9ba4830 6541 gen_helper_cpsr_read(cpu_T[0]);
9ee6e8bb
PB
6542 gen_op_addl_T1_im(4);
6543 gen_ldst(stl, s);
6544 if (insn & (1 << 21)) {
6545 if ((insn & (1 << 24)) == 0) {
6546 gen_op_addl_T1_im(-4);
6547 } else {
6548 gen_op_addl_T1_im(4);
6549 }
6550 if (op == (env->uncached_cpsr & CPSR_M)) {
6551 gen_movl_reg_T1(s, 13);
6552 } else {
6553 gen_op_movl_r13_T1_banked(op);
6554 }
6555 }
6556 }
6557 } else {
6558 int i;
6559 /* Load/store multiple. */
6560 gen_movl_T1_reg(s, rn);
6561 offset = 0;
6562 for (i = 0; i < 16; i++) {
6563 if (insn & (1 << i))
6564 offset += 4;
6565 }
6566 if (insn & (1 << 24)) {
6567 gen_op_addl_T1_im(-offset);
6568 }
6569
6570 for (i = 0; i < 16; i++) {
6571 if ((insn & (1 << i)) == 0)
6572 continue;
6573 if (insn & (1 << 20)) {
6574 /* Load. */
6575 gen_ldst(ldl, s);
6576 if (i == 15) {
d9ba4830 6577 gen_bx_T0(s);
9ee6e8bb
PB
6578 } else {
6579 gen_movl_reg_T0(s, i);
6580 }
6581 } else {
6582 /* Store. */
6583 gen_movl_T0_reg(s, i);
b5ff1b31 6584 gen_ldst(stl, s);
9ee6e8bb
PB
6585 }
6586 gen_op_addl_T1_im(4);
6587 }
6588 if (insn & (1 << 21)) {
6589 /* Base register writeback. */
6590 if (insn & (1 << 24)) {
6591 gen_op_addl_T1_im(-offset);
6592 }
6593 /* Fault if writeback register is in register list. */
6594 if (insn & (1 << rn))
6595 goto illegal_op;
6596 gen_movl_reg_T1(s, rn);
6597 }
6598 }
6599 }
6600 break;
6601 case 5: /* Data processing register constant shift. */
6602 if (rn == 15)
6603 gen_op_movl_T0_im(0);
6604 else
6605 gen_movl_T0_reg(s, rn);
6606 gen_movl_T1_reg(s, rm);
6607 op = (insn >> 21) & 0xf;
6608 shiftop = (insn >> 4) & 3;
6609 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6610 conds = (insn & (1 << 20)) != 0;
6611 logic_cc = (conds && thumb2_logic_op(op));
9a119ff6 6612 gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
9ee6e8bb
PB
6613 if (gen_thumb2_data_op(s, op, conds, 0))
6614 goto illegal_op;
6615 if (rd != 15)
6616 gen_movl_reg_T0(s, rd);
6617 break;
6618 case 13: /* Misc data processing. */
6619 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
6620 if (op < 4 && (insn & 0xf000) != 0xf000)
6621 goto illegal_op;
6622 switch (op) {
6623 case 0: /* Register controlled shift. */
6624 gen_movl_T0_reg(s, rm);
6625 gen_movl_T1_reg(s, rn);
6626 if ((insn & 0x70) != 0)
6627 goto illegal_op;
6628 op = (insn >> 21) & 3;
6629 if (insn & (1 << 20)) {
6630 gen_shift_T1_T0_cc[op]();
6631 gen_op_logic_T1_cc();
6632 } else {
6633 gen_shift_T1_T0[op]();
6634 }
6635 gen_movl_reg_T1(s, rd);
6636 break;
6637 case 1: /* Sign/zero extend. */
6638 gen_movl_T1_reg(s, rm);
6639 shift = (insn >> 4) & 3;
 6640 /* ??? In many cases it's not necessary to do a
6641 rotate, a shift is sufficient. */
6642 if (shift != 0)
6643 gen_op_rorl_T1_im(shift * 8);
6644 op = (insn >> 20) & 7;
6645 switch (op) {
b26eefb6
PB
6646 case 0: gen_sxth(cpu_T[1]); break;
6647 case 1: gen_uxth(cpu_T[1]); break;
6648 case 2: gen_sxtb16(cpu_T[1]); break;
6649 case 3: gen_uxtb16(cpu_T[1]); break;
6650 case 4: gen_sxtb(cpu_T[1]); break;
6651 case 5: gen_uxtb(cpu_T[1]); break;
9ee6e8bb
PB
6652 default: goto illegal_op;
6653 }
6654 if (rn != 15) {
b26eefb6 6655 tmp = load_reg(s, rn);
9ee6e8bb 6656 if ((op >> 1) == 1) {
b26eefb6 6657 gen_add16(cpu_T[1], tmp);
9ee6e8bb 6658 } else {
b26eefb6
PB
6659 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
6660 dead_tmp(tmp);
9ee6e8bb
PB
6661 }
6662 }
6663 gen_movl_reg_T1(s, rd);
6664 break;
6665 case 2: /* SIMD add/subtract. */
6666 op = (insn >> 20) & 7;
6667 shift = (insn >> 4) & 7;
6668 if ((op & 3) == 3 || (shift & 3) == 3)
6669 goto illegal_op;
6ddbc6e4
PB
6670 tmp = load_reg(s, rn);
6671 tmp2 = load_reg(s, rm);
6672 gen_thumb2_parallel_addsub(op, shift, tmp, tmp2);
6673 dead_tmp(tmp2);
6674 store_reg(s, rd, tmp);
9ee6e8bb
PB
6675 break;
6676 case 3: /* Other data processing. */
6677 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
6678 if (op < 4) {
6679 /* Saturating add/subtract. */
d9ba4830
PB
6680 tmp = load_reg(s, rn);
6681 tmp2 = load_reg(s, rm);
9ee6e8bb 6682 if (op & 2)
d9ba4830 6683 gen_helper_double_saturate(tmp, tmp);
9ee6e8bb 6684 if (op & 1)
d9ba4830 6685 gen_helper_sub_saturate(tmp, tmp2, tmp);
9ee6e8bb 6686 else
d9ba4830
PB
6687 gen_helper_add_saturate(tmp, tmp, tmp2);
6688 dead_tmp(tmp2);
9ee6e8bb 6689 } else {
d9ba4830 6690 tmp = load_reg(s, rn);
9ee6e8bb
PB
6691 switch (op) {
6692 case 0x0a: /* rbit */
d9ba4830 6693 gen_helper_rbit(tmp, tmp);
9ee6e8bb
PB
6694 break;
6695 case 0x08: /* rev */
d9ba4830 6696 tcg_gen_bswap_i32(tmp, tmp);
9ee6e8bb
PB
6697 break;
6698 case 0x09: /* rev16 */
d9ba4830 6699 gen_rev16(tmp);
9ee6e8bb
PB
6700 break;
6701 case 0x0b: /* revsh */
d9ba4830 6702 gen_revsh(tmp);
9ee6e8bb
PB
6703 break;
6704 case 0x10: /* sel */
d9ba4830 6705 tmp2 = load_reg(s, rm);
6ddbc6e4
PB
6706 tmp3 = new_tmp();
6707 tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
d9ba4830 6708 gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
6ddbc6e4 6709 dead_tmp(tmp3);
d9ba4830 6710 dead_tmp(tmp2);
9ee6e8bb
PB
6711 break;
6712 case 0x18: /* clz */
d9ba4830 6713 gen_helper_clz(tmp, tmp);
9ee6e8bb
PB
6714 break;
6715 default:
6716 goto illegal_op;
6717 }
6718 }
d9ba4830 6719 store_reg(s, rd, tmp);
9ee6e8bb
PB
6720 break;
6721 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
6722 op = (insn >> 4) & 0xf;
d9ba4830
PB
6723 tmp = load_reg(s, rn);
6724 tmp2 = load_reg(s, rm);
9ee6e8bb
PB
6725 switch ((insn >> 20) & 7) {
6726 case 0: /* 32 x 32 -> 32 */
d9ba4830
PB
6727 tcg_gen_mul_i32(tmp, tmp, tmp2);
6728 dead_tmp(tmp2);
9ee6e8bb 6729 if (rs != 15) {
d9ba4830 6730 tmp2 = load_reg(s, rs);
9ee6e8bb 6731 if (op)
d9ba4830 6732 tcg_gen_sub_i32(tmp, tmp2, tmp);
9ee6e8bb 6733 else
d9ba4830
PB
6734 tcg_gen_add_i32(tmp, tmp, tmp2);
6735 dead_tmp(tmp2);
9ee6e8bb 6736 }
9ee6e8bb
PB
6737 break;
6738 case 1: /* 16 x 16 -> 32 */
d9ba4830
PB
6739 gen_mulxy(tmp, tmp2, op & 2, op & 1);
6740 dead_tmp(tmp2);
9ee6e8bb 6741 if (rs != 15) {
d9ba4830
PB
6742 tmp2 = load_reg(s, rs);
6743 gen_helper_add_setq(tmp, tmp, tmp2);
6744 dead_tmp(tmp2);
9ee6e8bb 6745 }
9ee6e8bb
PB
6746 break;
6747 case 2: /* Dual multiply add. */
6748 case 4: /* Dual multiply subtract. */
6749 if (op)
d9ba4830
PB
6750 gen_swap_half(tmp2);
6751 gen_smul_dual(tmp, tmp2);
9ee6e8bb
PB
6752 /* This addition cannot overflow. */
6753 if (insn & (1 << 22)) {
d9ba4830 6754 tcg_gen_sub_i32(tmp, tmp, tmp2);
9ee6e8bb 6755 } else {
d9ba4830 6756 tcg_gen_add_i32(tmp, tmp, tmp2);
9ee6e8bb 6757 }
d9ba4830 6758 dead_tmp(tmp2);
9ee6e8bb
PB
6759 if (rs != 15)
6760 {
d9ba4830
PB
6761 tmp2 = load_reg(s, rs);
6762 gen_helper_add_setq(tmp, tmp, tmp2);
6763 dead_tmp(tmp2);
9ee6e8bb 6764 }
9ee6e8bb
PB
6765 break;
6766 case 3: /* 32 * 16 -> 32msb */
6767 if (op)
d9ba4830 6768 tcg_gen_sari_i32(tmp2, tmp2, 16);
9ee6e8bb 6769 else
d9ba4830
PB
6770 gen_sxth(tmp2);
6771 gen_imulw(tmp, tmp2);
6772 dead_tmp(tmp2);
9ee6e8bb
PB
6773 if (rs != 15)
6774 {
d9ba4830
PB
6775 tmp2 = load_reg(s, rs);
6776 gen_helper_add_setq(tmp, tmp, tmp2);
6777 dead_tmp(tmp2);
9ee6e8bb 6778 }
9ee6e8bb
PB
6779 break;
6780 case 5: case 6: /* 32 * 32 -> 32msb */
d9ba4830
PB
6781 gen_imull(tmp, tmp2);
6782 if (insn & (1 << 5)) {
6783 gen_roundqd(tmp, tmp2);
6784 dead_tmp(tmp2);
6785 } else {
6786 dead_tmp(tmp);
6787 tmp = tmp2;
6788 }
9ee6e8bb 6789 if (rs != 15) {
d9ba4830 6790 tmp2 = load_reg(s, rs);
9ee6e8bb 6791 if (insn & (1 << 21)) {
d9ba4830 6792 tcg_gen_add_i32(tmp, tmp, tmp2);
99c475ab 6793 } else {
d9ba4830 6794 tcg_gen_sub_i32(tmp, tmp2, tmp);
99c475ab 6795 }
d9ba4830 6796 dead_tmp(tmp2);
2c0262af 6797 }
9ee6e8bb
PB
6798 break;
6799 case 7: /* Unsigned sum of absolute differences. */
d9ba4830
PB
6800 gen_helper_usad8(tmp, tmp, tmp2);
6801 dead_tmp(tmp2);
9ee6e8bb 6802 if (rs != 15) {
d9ba4830
PB
6803 tmp2 = load_reg(s, rs);
6804 tcg_gen_add_i32(tmp, tmp, tmp2);
6805 dead_tmp(tmp2);
5fd46862 6806 }
9ee6e8bb 6807 break;
2c0262af 6808 }
d9ba4830 6809 store_reg(s, rd, tmp);
2c0262af 6810 break;
9ee6e8bb
PB
6811 case 6: case 7: /* 64-bit multiply, Divide. */
6812 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
6813 gen_movl_T0_reg(s, rn);
6814 gen_movl_T1_reg(s, rm);
6815 if ((op & 0x50) == 0x10) {
6816 /* sdiv, udiv */
6817 if (!arm_feature(env, ARM_FEATURE_DIV))
6818 goto illegal_op;
6819 if (op & 0x20)
3670669c 6820 gen_helper_udiv(cpu_T[0], cpu_T[0], cpu_T[1]);
2c0262af 6821 else
3670669c 6822 gen_helper_sdiv(cpu_T[0], cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
6823 gen_movl_reg_T0(s, rd);
6824 } else if ((op & 0xe) == 0xc) {
6825 /* Dual multiply accumulate long. */
6826 if (op & 1)
8f01245e 6827 gen_swap_half(cpu_T[1]);
3670669c 6828 gen_smul_dual(cpu_T[0], cpu_T[1]);
9ee6e8bb
PB
6829 if (op & 0x10) {
6830 gen_op_subl_T0_T1();
b5ff1b31 6831 } else {
9ee6e8bb 6832 gen_op_addl_T0_T1();
b5ff1b31 6833 }
9ee6e8bb
PB
6834 gen_op_signbit_T1_T0();
6835 gen_op_addq_T0_T1(rs, rd);
6836 gen_movl_reg_T0(s, rs);
6837 gen_movl_reg_T1(s, rd);
2c0262af 6838 } else {
9ee6e8bb
PB
6839 if (op & 0x20) {
6840 /* Unsigned 64-bit multiply */
6841 gen_op_mull_T0_T1();
b5ff1b31 6842 } else {
9ee6e8bb
PB
6843 if (op & 8) {
6844 /* smlalxy */
d9ba4830 6845 gen_mulxy(cpu_T[0], cpu_T[1], op & 2, op & 1);
9ee6e8bb
PB
6846 gen_op_signbit_T1_T0();
6847 } else {
6848 /* Signed 64-bit multiply */
6849 gen_op_imull_T0_T1();
6850 }
b5ff1b31 6851 }
9ee6e8bb
PB
6852 if (op & 4) {
6853 /* umaal */
6854 gen_op_addq_lo_T0_T1(rs);
6855 gen_op_addq_lo_T0_T1(rd);
6856 } else if (op & 0x40) {
6857 /* 64-bit accumulate. */
6858 gen_op_addq_T0_T1(rs, rd);
6859 }
6860 gen_movl_reg_T0(s, rs);
6861 gen_movl_reg_T1(s, rd);
5fd46862 6862 }
2c0262af 6863 break;
9ee6e8bb
PB
6864 }
6865 break;
6866 case 6: case 7: case 14: case 15:
6867 /* Coprocessor. */
6868 if (((insn >> 24) & 3) == 3) {
6869 /* Translate into the equivalent ARM encoding. */
6870 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
6871 if (disas_neon_data_insn(env, s, insn))
6872 goto illegal_op;
6873 } else {
6874 if (insn & (1 << 28))
6875 goto illegal_op;
6876 if (disas_coproc_insn (env, s, insn))
6877 goto illegal_op;
6878 }
6879 break;
6880 case 8: case 9: case 10: case 11:
6881 if (insn & (1 << 15)) {
6882 /* Branches, misc control. */
6883 if (insn & 0x5000) {
6884 /* Unconditional branch. */
6885 /* signextend(hw1[10:0]) -> offset[:12]. */
6886 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
6887 /* hw1[10:0] -> offset[11:1]. */
6888 offset |= (insn & 0x7ff) << 1;
6889 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
6890 offset[24:22] already have the same value because of the
6891 sign extension above. */
6892 offset ^= ((~insn) & (1 << 13)) << 10;
6893 offset ^= ((~insn) & (1 << 11)) << 11;
6894
6895 addr = s->pc;
6896 if (insn & (1 << 14)) {
6897 /* Branch and link. */
6898 gen_op_movl_T1_im(addr | 1);
6899 gen_movl_reg_T1(s, 14);
b5ff1b31 6900 }
3b46e624 6901
9ee6e8bb
PB
6902 addr += offset;
6903 if (insn & (1 << 12)) {
6904 /* b/bl */
6905 gen_jmp(s, addr);
6906 } else {
6907 /* blx */
6908 addr &= ~(uint32_t)2;
d9ba4830 6909 gen_bx_im(s, addr);
2c0262af 6910 }
9ee6e8bb
PB
6911 } else if (((insn >> 23) & 7) == 7) {
6912 /* Misc control */
6913 if (insn & (1 << 13))
6914 goto illegal_op;
6915
6916 if (insn & (1 << 26)) {
6917 /* Secure monitor call (v6Z) */
6918 goto illegal_op; /* not implemented. */
2c0262af 6919 } else {
9ee6e8bb
PB
6920 op = (insn >> 20) & 7;
6921 switch (op) {
6922 case 0: /* msr cpsr. */
6923 if (IS_M(env)) {
6924 gen_op_v7m_msr_T0(insn & 0xff);
6925 gen_movl_reg_T0(s, rn);
6926 gen_lookup_tb(s);
6927 break;
6928 }
6929 /* fall through */
6930 case 1: /* msr spsr. */
6931 if (IS_M(env))
6932 goto illegal_op;
6933 gen_movl_T0_reg(s, rn);
6934 if (gen_set_psr_T0(s,
6935 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
6936 op == 1))
6937 goto illegal_op;
6938 break;
6939 case 2: /* cps, nop-hint. */
6940 if (((insn >> 8) & 7) == 0) {
6941 gen_nop_hint(s, insn & 0xff);
6942 }
6943 /* Implemented as NOP in user mode. */
6944 if (IS_USER(s))
6945 break;
6946 offset = 0;
6947 imm = 0;
6948 if (insn & (1 << 10)) {
6949 if (insn & (1 << 7))
6950 offset |= CPSR_A;
6951 if (insn & (1 << 6))
6952 offset |= CPSR_I;
6953 if (insn & (1 << 5))
6954 offset |= CPSR_F;
6955 if (insn & (1 << 9))
6956 imm = CPSR_A | CPSR_I | CPSR_F;
6957 }
6958 if (insn & (1 << 8)) {
6959 offset |= 0x1f;
6960 imm |= (insn & 0x1f);
6961 }
6962 if (offset) {
6963 gen_op_movl_T0_im(imm);
6964 gen_set_psr_T0(s, offset, 0);
6965 }
6966 break;
6967 case 3: /* Special control operations. */
6968 op = (insn >> 4) & 0xf;
6969 switch (op) {
6970 case 2: /* clrex */
6971 gen_op_clrex();
6972 break;
6973 case 4: /* dsb */
6974 case 5: /* dmb */
6975 case 6: /* isb */
6976 /* These execute as NOPs. */
6977 ARCH(7);
6978 break;
6979 default:
6980 goto illegal_op;
6981 }
6982 break;
6983 case 4: /* bxj */
6984 /* Trivial implementation equivalent to bx. */
d9ba4830
PB
6985 tmp = load_reg(s, rn);
6986 gen_bx(s, tmp);
9ee6e8bb
PB
6987 break;
6988 case 5: /* Exception return. */
6989 /* Unpredictable in user mode. */
6990 goto illegal_op;
6991 case 6: /* mrs cpsr. */
6992 if (IS_M(env)) {
6993 gen_op_v7m_mrs_T0(insn & 0xff);
6994 } else {
d9ba4830 6995 gen_helper_cpsr_read(cpu_T[0]);
9ee6e8bb
PB
6996 }
6997 gen_movl_reg_T0(s, rd);
6998 break;
6999 case 7: /* mrs spsr. */
7000 /* Not accessible in user mode. */
7001 if (IS_USER(s) || IS_M(env))
7002 goto illegal_op;
d9ba4830
PB
7003 tmp = load_cpu_field(spsr);
7004 store_reg(s, rd, tmp);
9ee6e8bb 7005 break;
2c0262af
FB
7006 }
7007 }
9ee6e8bb
PB
7008 } else {
7009 /* Conditional branch. */
7010 op = (insn >> 22) & 0xf;
7011 /* Generate a conditional jump to next instruction. */
7012 s->condlabel = gen_new_label();
d9ba4830 7013 gen_test_cc(op ^ 1, s->condlabel);
9ee6e8bb
PB
7014 s->condjmp = 1;
7015
7016 /* offset[11:1] = insn[10:0] */
7017 offset = (insn & 0x7ff) << 1;
7018 /* offset[17:12] = insn[21:16]. */
7019 offset |= (insn & 0x003f0000) >> 4;
7020 /* offset[31:20] = insn[26]. */
7021 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
7022 /* offset[18] = insn[13]. */
7023 offset |= (insn & (1 << 13)) << 5;
7024 /* offset[19] = insn[11]. */
7025 offset |= (insn & (1 << 11)) << 8;
7026
7027 /* jump to the offset */
7028 addr = s->pc + offset;
7029 gen_jmp(s, addr);
7030 }
7031 } else {
7032 /* Data processing immediate. */
7033 if (insn & (1 << 25)) {
7034 if (insn & (1 << 24)) {
7035 if (insn & (1 << 20))
7036 goto illegal_op;
7037 /* Bitfield/Saturate. */
7038 op = (insn >> 21) & 7;
7039 imm = insn & 0x1f;
7040 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6ddbc6e4
PB
7041 if (rn == 15) {
7042 tmp = new_tmp();
7043 tcg_gen_movi_i32(tmp, 0);
7044 } else {
7045 tmp = load_reg(s, rn);
7046 }
9ee6e8bb
PB
7047 switch (op) {
7048 case 2: /* Signed bitfield extract. */
7049 imm++;
7050 if (shift + imm > 32)
7051 goto illegal_op;
7052 if (imm < 32)
6ddbc6e4 7053 gen_sbfx(tmp, shift, imm);
9ee6e8bb
PB
7054 break;
7055 case 6: /* Unsigned bitfield extract. */
7056 imm++;
7057 if (shift + imm > 32)
7058 goto illegal_op;
7059 if (imm < 32)
6ddbc6e4 7060 gen_ubfx(tmp, shift, (1u << imm) - 1);
9ee6e8bb
PB
7061 break;
7062 case 3: /* Bitfield insert/clear. */
7063 if (imm < shift)
7064 goto illegal_op;
7065 imm = imm + 1 - shift;
7066 if (imm != 32) {
6ddbc6e4
PB
7067 tmp2 = load_reg(s, rd);
7068 gen_bfi(tmp, tmp2, tmp,
3670669c 7069 shift, ((1u << imm) - 1) << shift);
6ddbc6e4 7070 dead_tmp(tmp2);
9ee6e8bb
PB
7071 }
7072 break;
7073 case 7:
7074 goto illegal_op;
7075 default: /* Saturate. */
9ee6e8bb
PB
7076 if (shift) {
7077 if (op & 1)
6ddbc6e4 7078 tcg_gen_sari_i32(tmp, tmp, shift);
9ee6e8bb 7079 else
6ddbc6e4 7080 tcg_gen_shli_i32(tmp, tmp, shift);
9ee6e8bb 7081 }
6ddbc6e4 7082 tmp2 = tcg_const_i32(imm);
9ee6e8bb
PB
7083 if (op & 4) {
7084 /* Unsigned. */
9ee6e8bb 7085 if ((op & 1) && shift == 0)
6ddbc6e4 7086 gen_helper_usat16(tmp, tmp, tmp2);
9ee6e8bb 7087 else
6ddbc6e4 7088 gen_helper_usat(tmp, tmp, tmp2);
2c0262af 7089 } else {
9ee6e8bb 7090 /* Signed. */
9ee6e8bb 7091 if ((op & 1) && shift == 0)
6ddbc6e4 7092 gen_helper_ssat16(tmp, tmp, tmp2);
9ee6e8bb 7093 else
6ddbc6e4 7094 gen_helper_ssat(tmp, tmp, tmp2);
2c0262af 7095 }
9ee6e8bb 7096 break;
2c0262af 7097 }
6ddbc6e4 7098 store_reg(s, rd, tmp);
9ee6e8bb
PB
7099 } else {
7100 imm = ((insn & 0x04000000) >> 15)
7101 | ((insn & 0x7000) >> 4) | (insn & 0xff);
7102 if (insn & (1 << 22)) {
7103 /* 16-bit immediate. */
7104 imm |= (insn >> 4) & 0xf000;
7105 if (insn & (1 << 23)) {
7106 /* movt */
7107 gen_movl_T0_reg(s, rd);
8f01245e
PB
7108 tcg_gen_andi_i32(cpu_T[0], cpu_T[0], 0xffff);
7109 tcg_gen_ori_i32(cpu_T[0], cpu_T[0], imm << 16);
2c0262af 7110 } else {
9ee6e8bb
PB
7111 /* movw */
7112 gen_op_movl_T0_im(imm);
2c0262af
FB
7113 }
7114 } else {
9ee6e8bb
PB
7115 /* Add/sub 12-bit immediate. */
7116 if (rn == 15) {
7117 addr = s->pc & ~(uint32_t)3;
7118 if (insn & (1 << 23))
7119 addr -= imm;
7120 else
7121 addr += imm;
7122 gen_op_movl_T0_im(addr);
2c0262af 7123 } else {
9ee6e8bb
PB
7124 gen_movl_T0_reg(s, rn);
7125 gen_op_movl_T1_im(imm);
7126 if (insn & (1 << 23))
7127 gen_op_subl_T0_T1();
7128 else
7129 gen_op_addl_T0_T1();
2c0262af 7130 }
9ee6e8bb
PB
7131 }
7132 gen_movl_reg_T0(s, rd);
191abaa2 7133 }
9ee6e8bb
PB
7134 } else {
7135 int shifter_out = 0;
7136 /* modified 12-bit immediate. */
7137 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
7138 imm = (insn & 0xff);
7139 switch (shift) {
7140 case 0: /* XY */
7141 /* Nothing to do. */
7142 break;
7143 case 1: /* 00XY00XY */
7144 imm |= imm << 16;
7145 break;
7146 case 2: /* XY00XY00 */
7147 imm |= imm << 16;
7148 imm <<= 8;
7149 break;
7150 case 3: /* XYXYXYXY */
7151 imm |= imm << 16;
7152 imm |= imm << 8;
7153 break;
7154 default: /* Rotated constant. */
7155 shift = (shift << 1) | (imm >> 7);
7156 imm |= 0x80;
7157 imm = imm << (32 - shift);
7158 shifter_out = 1;
7159 break;
b5ff1b31 7160 }
9ee6e8bb
PB
7161 gen_op_movl_T1_im(imm);
7162 rn = (insn >> 16) & 0xf;
7163 if (rn == 15)
7164 gen_op_movl_T0_im(0);
7165 else
7166 gen_movl_T0_reg(s, rn);
7167 op = (insn >> 21) & 0xf;
7168 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
7169 shifter_out))
7170 goto illegal_op;
7171 rd = (insn >> 8) & 0xf;
7172 if (rd != 15) {
7173 gen_movl_reg_T0(s, rd);
2c0262af 7174 }
2c0262af 7175 }
9ee6e8bb
PB
7176 }
7177 break;
7178 case 12: /* Load/store single data item. */
7179 {
7180 int postinc = 0;
7181 int writeback = 0;
7182 if ((insn & 0x01100000) == 0x01000000) {
7183 if (disas_neon_ls_insn(env, s, insn))
c1713132 7184 goto illegal_op;
9ee6e8bb
PB
7185 break;
7186 }
7187 if (rn == 15) {
7188 /* PC relative. */
7189 /* s->pc has already been incremented by 4. */
7190 imm = s->pc & 0xfffffffc;
7191 if (insn & (1 << 23))
7192 imm += insn & 0xfff;
7193 else
7194 imm -= insn & 0xfff;
7195 gen_op_movl_T1_im(imm);
7196 } else {
7197 gen_movl_T1_reg(s, rn);
7198 if (insn & (1 << 23)) {
7199 /* Positive offset. */
7200 imm = insn & 0xfff;
7201 gen_op_addl_T1_im(imm);
7202 } else {
7203 op = (insn >> 8) & 7;
7204 imm = insn & 0xff;
7205 switch (op) {
7206 case 0: case 8: /* Shifted Register. */
7207 shift = (insn >> 4) & 0xf;
7208 if (shift > 3)
18c9b560 7209 goto illegal_op;
b26eefb6 7210 tmp = load_reg(s, rm);
9ee6e8bb 7211 if (shift)
b26eefb6
PB
7212 tcg_gen_shli_i32(tmp, tmp, shift);
7213 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
7214 dead_tmp(tmp);
9ee6e8bb
PB
7215 break;
7216 case 4: /* Negative offset. */
7217 gen_op_addl_T1_im(-imm);
7218 break;
7219 case 6: /* User privilege. */
7220 gen_op_addl_T1_im(imm);
7221 break;
7222 case 1: /* Post-decrement. */
7223 imm = -imm;
7224 /* Fall through. */
7225 case 3: /* Post-increment. */
9ee6e8bb
PB
7226 postinc = 1;
7227 writeback = 1;
7228 break;
7229 case 5: /* Pre-decrement. */
7230 imm = -imm;
7231 /* Fall through. */
7232 case 7: /* Pre-increment. */
7233 gen_op_addl_T1_im(imm);
7234 writeback = 1;
7235 break;
7236 default:
b7bcbe95 7237 goto illegal_op;
9ee6e8bb
PB
7238 }
7239 }
7240 }
7241 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
7242 if (insn & (1 << 20)) {
7243 /* Load. */
7244 if (rs == 15 && op != 2) {
7245 if (op & 2)
b5ff1b31 7246 goto illegal_op;
9ee6e8bb
PB
7247 /* Memory hint. Implemented as NOP. */
7248 } else {
7249 switch (op) {
7250 case 0: gen_ldst(ldub, s); break;
7251 case 4: gen_ldst(ldsb, s); break;
7252 case 1: gen_ldst(lduw, s); break;
7253 case 5: gen_ldst(ldsw, s); break;
7254 case 2: gen_ldst(ldl, s); break;
7255 default: goto illegal_op;
7256 }
7257 if (rs == 15) {
d9ba4830 7258 gen_bx_T0(s);
9ee6e8bb
PB
7259 } else {
7260 gen_movl_reg_T0(s, rs);
7261 }
7262 }
7263 } else {
7264 /* Store. */
7265 if (rs == 15)
b7bcbe95 7266 goto illegal_op;
9ee6e8bb
PB
7267 gen_movl_T0_reg(s, rs);
7268 switch (op) {
7269 case 0: gen_ldst(stb, s); break;
7270 case 1: gen_ldst(stw, s); break;
7271 case 2: gen_ldst(stl, s); break;
7272 default: goto illegal_op;
b7bcbe95 7273 }
2c0262af 7274 }
9ee6e8bb
PB
7275 if (postinc)
7276 gen_op_addl_T1_im(imm);
7277 if (writeback)
7278 gen_movl_reg_T1(s, rn);
7279 }
7280 break;
7281 default:
7282 goto illegal_op;
2c0262af 7283 }
9ee6e8bb
PB
7284 return 0;
7285illegal_op:
7286 return 1;
2c0262af
FB
7287}
7288
9ee6e8bb 7289static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
7290{
7291 uint32_t val, insn, op, rm, rn, rd, shift, cond;
7292 int32_t offset;
7293 int i;
b26eefb6 7294 TCGv tmp;
d9ba4830 7295 TCGv tmp2;
99c475ab 7296
9ee6e8bb
PB
7297 if (s->condexec_mask) {
7298 cond = s->condexec_cond;
7299 s->condlabel = gen_new_label();
d9ba4830 7300 gen_test_cc(cond ^ 1, s->condlabel);
9ee6e8bb
PB
7301 s->condjmp = 1;
7302 }
7303
b5ff1b31 7304 insn = lduw_code(s->pc);
99c475ab 7305 s->pc += 2;
b5ff1b31 7306
99c475ab
FB
7307 switch (insn >> 12) {
7308 case 0: case 1:
7309 rd = insn & 7;
7310 op = (insn >> 11) & 3;
7311 if (op == 3) {
7312 /* add/subtract */
7313 rn = (insn >> 3) & 7;
7314 gen_movl_T0_reg(s, rn);
7315 if (insn & (1 << 10)) {
7316 /* immediate */
7317 gen_op_movl_T1_im((insn >> 6) & 7);
7318 } else {
7319 /* reg */
7320 rm = (insn >> 6) & 7;
7321 gen_movl_T1_reg(s, rm);
7322 }
9ee6e8bb
PB
7323 if (insn & (1 << 9)) {
7324 if (s->condexec_mask)
7325 gen_op_subl_T0_T1();
7326 else
7327 gen_op_subl_T0_T1_cc();
7328 } else {
7329 if (s->condexec_mask)
7330 gen_op_addl_T0_T1();
7331 else
7332 gen_op_addl_T0_T1_cc();
7333 }
99c475ab
FB
7334 gen_movl_reg_T0(s, rd);
7335 } else {
7336 /* shift immediate */
7337 rm = (insn >> 3) & 7;
7338 shift = (insn >> 6) & 0x1f;
9a119ff6
PB
7339 tmp = load_reg(s, rm);
7340 gen_arm_shift_im(tmp, op, shift, s->condexec_mask == 0);
7341 if (!s->condexec_mask)
7342 gen_logic_CC(tmp);
7343 store_reg(s, rd, tmp);
99c475ab
FB
7344 }
7345 break;
7346 case 2: case 3:
7347 /* arithmetic large immediate */
7348 op = (insn >> 11) & 3;
7349 rd = (insn >> 8) & 0x7;
7350 if (op == 0) {
7351 gen_op_movl_T0_im(insn & 0xff);
7352 } else {
7353 gen_movl_T0_reg(s, rd);
7354 gen_op_movl_T1_im(insn & 0xff);
7355 }
7356 switch (op) {
7357 case 0: /* mov */
9ee6e8bb
PB
7358 if (!s->condexec_mask)
7359 gen_op_logic_T0_cc();
99c475ab
FB
7360 break;
7361 case 1: /* cmp */
7362 gen_op_subl_T0_T1_cc();
7363 break;
7364 case 2: /* add */
9ee6e8bb
PB
7365 if (s->condexec_mask)
7366 gen_op_addl_T0_T1();
7367 else
7368 gen_op_addl_T0_T1_cc();
99c475ab
FB
7369 break;
7370 case 3: /* sub */
9ee6e8bb
PB
7371 if (s->condexec_mask)
7372 gen_op_subl_T0_T1();
7373 else
7374 gen_op_subl_T0_T1_cc();
99c475ab
FB
7375 break;
7376 }
7377 if (op != 1)
7378 gen_movl_reg_T0(s, rd);
7379 break;
7380 case 4:
7381 if (insn & (1 << 11)) {
7382 rd = (insn >> 8) & 7;
5899f386
FB
7383 /* load pc-relative. Bit 1 of PC is ignored. */
7384 val = s->pc + 2 + ((insn & 0xff) * 4);
7385 val &= ~(uint32_t)2;
99c475ab 7386 gen_op_movl_T1_im(val);
b5ff1b31 7387 gen_ldst(ldl, s);
99c475ab
FB
7388 gen_movl_reg_T0(s, rd);
7389 break;
7390 }
7391 if (insn & (1 << 10)) {
7392 /* data processing extended or blx */
7393 rd = (insn & 7) | ((insn >> 4) & 8);
7394 rm = (insn >> 3) & 0xf;
7395 op = (insn >> 8) & 3;
7396 switch (op) {
7397 case 0: /* add */
7398 gen_movl_T0_reg(s, rd);
7399 gen_movl_T1_reg(s, rm);
7400 gen_op_addl_T0_T1();
7401 gen_movl_reg_T0(s, rd);
7402 break;
7403 case 1: /* cmp */
7404 gen_movl_T0_reg(s, rd);
7405 gen_movl_T1_reg(s, rm);
7406 gen_op_subl_T0_T1_cc();
7407 break;
7408 case 2: /* mov/cpy */
7409 gen_movl_T0_reg(s, rm);
7410 gen_movl_reg_T0(s, rd);
7411 break;
7412 case 3:/* branch [and link] exchange thumb register */
7413 if (insn & (1 << 7)) {
7414 val = (uint32_t)s->pc | 1;
7415 gen_op_movl_T1_im(val);
7416 gen_movl_reg_T1(s, 14);
7417 }
d9ba4830
PB
7418 tmp = load_reg(s, rm);
7419 gen_bx(s, tmp);
99c475ab
FB
7420 break;
7421 }
7422 break;
7423 }
7424
7425 /* data processing register */
7426 rd = insn & 7;
7427 rm = (insn >> 3) & 7;
7428 op = (insn >> 6) & 0xf;
7429 if (op == 2 || op == 3 || op == 4 || op == 7) {
7430 /* the shift/rotate ops want the operands backwards */
7431 val = rm;
7432 rm = rd;
7433 rd = val;
7434 val = 1;
7435 } else {
7436 val = 0;
7437 }
7438
7439 if (op == 9) /* neg */
7440 gen_op_movl_T0_im(0);
7441 else if (op != 0xf) /* mvn doesn't read its first operand */
7442 gen_movl_T0_reg(s, rd);
7443
7444 gen_movl_T1_reg(s, rm);
5899f386 7445 switch (op) {
99c475ab
FB
7446 case 0x0: /* and */
7447 gen_op_andl_T0_T1();
9ee6e8bb
PB
7448 if (!s->condexec_mask)
7449 gen_op_logic_T0_cc();
99c475ab
FB
7450 break;
7451 case 0x1: /* eor */
7452 gen_op_xorl_T0_T1();
9ee6e8bb
PB
7453 if (!s->condexec_mask)
7454 gen_op_logic_T0_cc();
99c475ab
FB
7455 break;
7456 case 0x2: /* lsl */
9ee6e8bb
PB
7457 if (s->condexec_mask) {
7458 gen_op_shll_T1_T0();
7459 } else {
7460 gen_op_shll_T1_T0_cc();
7461 gen_op_logic_T1_cc();
7462 }
99c475ab
FB
7463 break;
7464 case 0x3: /* lsr */
9ee6e8bb
PB
7465 if (s->condexec_mask) {
7466 gen_op_shrl_T1_T0();
7467 } else {
7468 gen_op_shrl_T1_T0_cc();
7469 gen_op_logic_T1_cc();
7470 }
99c475ab
FB
7471 break;
7472 case 0x4: /* asr */
9ee6e8bb
PB
7473 if (s->condexec_mask) {
7474 gen_op_sarl_T1_T0();
7475 } else {
7476 gen_op_sarl_T1_T0_cc();
7477 gen_op_logic_T1_cc();
7478 }
99c475ab
FB
7479 break;
7480 case 0x5: /* adc */
9ee6e8bb 7481 if (s->condexec_mask)
b26eefb6 7482 gen_adc_T0_T1();
9ee6e8bb
PB
7483 else
7484 gen_op_adcl_T0_T1_cc();
99c475ab
FB
7485 break;
7486 case 0x6: /* sbc */
9ee6e8bb 7487 if (s->condexec_mask)
3670669c 7488 gen_sbc_T0_T1();
9ee6e8bb
PB
7489 else
7490 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
7491 break;
7492 case 0x7: /* ror */
9ee6e8bb
PB
7493 if (s->condexec_mask) {
7494 gen_op_rorl_T1_T0();
7495 } else {
7496 gen_op_rorl_T1_T0_cc();
7497 gen_op_logic_T1_cc();
7498 }
99c475ab
FB
7499 break;
7500 case 0x8: /* tst */
7501 gen_op_andl_T0_T1();
7502 gen_op_logic_T0_cc();
7503 rd = 16;
5899f386 7504 break;
99c475ab 7505 case 0x9: /* neg */
9ee6e8bb
PB
7506 if (s->condexec_mask)
7507 gen_op_subl_T0_T1();
7508 else
7509 gen_op_subl_T0_T1_cc();
99c475ab
FB
7510 break;
7511 case 0xa: /* cmp */
7512 gen_op_subl_T0_T1_cc();
7513 rd = 16;
7514 break;
7515 case 0xb: /* cmn */
7516 gen_op_addl_T0_T1_cc();
7517 rd = 16;
7518 break;
7519 case 0xc: /* orr */
7520 gen_op_orl_T0_T1();
9ee6e8bb
PB
7521 if (!s->condexec_mask)
7522 gen_op_logic_T0_cc();
99c475ab
FB
7523 break;
7524 case 0xd: /* mul */
7525 gen_op_mull_T0_T1();
9ee6e8bb
PB
7526 if (!s->condexec_mask)
7527 gen_op_logic_T0_cc();
99c475ab
FB
7528 break;
7529 case 0xe: /* bic */
7530 gen_op_bicl_T0_T1();
9ee6e8bb
PB
7531 if (!s->condexec_mask)
7532 gen_op_logic_T0_cc();
99c475ab
FB
7533 break;
7534 case 0xf: /* mvn */
7535 gen_op_notl_T1();
9ee6e8bb
PB
7536 if (!s->condexec_mask)
7537 gen_op_logic_T1_cc();
99c475ab 7538 val = 1;
5899f386 7539 rm = rd;
99c475ab
FB
7540 break;
7541 }
7542 if (rd != 16) {
7543 if (val)
5899f386 7544 gen_movl_reg_T1(s, rm);
99c475ab
FB
7545 else
7546 gen_movl_reg_T0(s, rd);
7547 }
7548 break;
7549
7550 case 5:
7551 /* load/store register offset. */
7552 rd = insn & 7;
7553 rn = (insn >> 3) & 7;
7554 rm = (insn >> 6) & 7;
7555 op = (insn >> 9) & 7;
7556 gen_movl_T1_reg(s, rn);
b26eefb6
PB
7557 tmp = load_reg(s, rm);
7558 tcg_gen_add_i32(cpu_T[1], cpu_T[1], tmp);
7559 dead_tmp(tmp);
99c475ab
FB
7560
7561 if (op < 3) /* store */
7562 gen_movl_T0_reg(s, rd);
7563
7564 switch (op) {
7565 case 0: /* str */
b5ff1b31 7566 gen_ldst(stl, s);
99c475ab
FB
7567 break;
7568 case 1: /* strh */
b5ff1b31 7569 gen_ldst(stw, s);
99c475ab
FB
7570 break;
7571 case 2: /* strb */
b5ff1b31 7572 gen_ldst(stb, s);
99c475ab
FB
7573 break;
7574 case 3: /* ldrsb */
b5ff1b31 7575 gen_ldst(ldsb, s);
99c475ab
FB
7576 break;
7577 case 4: /* ldr */
b5ff1b31 7578 gen_ldst(ldl, s);
99c475ab
FB
7579 break;
7580 case 5: /* ldrh */
b5ff1b31 7581 gen_ldst(lduw, s);
99c475ab
FB
7582 break;
7583 case 6: /* ldrb */
b5ff1b31 7584 gen_ldst(ldub, s);
99c475ab
FB
7585 break;
7586 case 7: /* ldrsh */
b5ff1b31 7587 gen_ldst(ldsw, s);
99c475ab
FB
7588 break;
7589 }
7590 if (op >= 3) /* load */
7591 gen_movl_reg_T0(s, rd);
7592 break;
7593
7594 case 6:
7595 /* load/store word immediate offset */
7596 rd = insn & 7;
7597 rn = (insn >> 3) & 7;
7598 gen_movl_T1_reg(s, rn);
7599 val = (insn >> 4) & 0x7c;
b26eefb6 7600 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
99c475ab
FB
7601
7602 if (insn & (1 << 11)) {
7603 /* load */
b5ff1b31 7604 gen_ldst(ldl, s);
99c475ab
FB
7605 gen_movl_reg_T0(s, rd);
7606 } else {
7607 /* store */
7608 gen_movl_T0_reg(s, rd);
b5ff1b31 7609 gen_ldst(stl, s);
99c475ab
FB
7610 }
7611 break;
7612
7613 case 7:
7614 /* load/store byte immediate offset */
7615 rd = insn & 7;
7616 rn = (insn >> 3) & 7;
7617 gen_movl_T1_reg(s, rn);
7618 val = (insn >> 6) & 0x1f;
b26eefb6 7619 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
99c475ab
FB
7620
7621 if (insn & (1 << 11)) {
7622 /* load */
b5ff1b31 7623 gen_ldst(ldub, s);
99c475ab
FB
7624 gen_movl_reg_T0(s, rd);
7625 } else {
7626 /* store */
7627 gen_movl_T0_reg(s, rd);
b5ff1b31 7628 gen_ldst(stb, s);
99c475ab
FB
7629 }
7630 break;
7631
7632 case 8:
7633 /* load/store halfword immediate offset */
7634 rd = insn & 7;
7635 rn = (insn >> 3) & 7;
7636 gen_movl_T1_reg(s, rn);
7637 val = (insn >> 5) & 0x3e;
b26eefb6 7638 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
99c475ab
FB
7639
7640 if (insn & (1 << 11)) {
7641 /* load */
b5ff1b31 7642 gen_ldst(lduw, s);
99c475ab
FB
7643 gen_movl_reg_T0(s, rd);
7644 } else {
7645 /* store */
7646 gen_movl_T0_reg(s, rd);
b5ff1b31 7647 gen_ldst(stw, s);
99c475ab
FB
7648 }
7649 break;
7650
7651 case 9:
7652 /* load/store from stack */
7653 rd = (insn >> 8) & 7;
7654 gen_movl_T1_reg(s, 13);
7655 val = (insn & 0xff) * 4;
b26eefb6 7656 tcg_gen_addi_i32(cpu_T[1], cpu_T[1], val);
99c475ab
FB
7657
7658 if (insn & (1 << 11)) {
7659 /* load */
b5ff1b31 7660 gen_ldst(ldl, s);
99c475ab
FB
7661 gen_movl_reg_T0(s, rd);
7662 } else {
7663 /* store */
7664 gen_movl_T0_reg(s, rd);
b5ff1b31 7665 gen_ldst(stl, s);
99c475ab
FB
7666 }
7667 break;
7668
7669 case 10:
7670 /* add to high reg */
7671 rd = (insn >> 8) & 7;
5899f386
FB
7672 if (insn & (1 << 11)) {
7673 /* SP */
7674 gen_movl_T0_reg(s, 13);
7675 } else {
7676 /* PC. bit 1 is ignored. */
7677 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
7678 }
99c475ab
FB
7679 val = (insn & 0xff) * 4;
7680 gen_op_movl_T1_im(val);
7681 gen_op_addl_T0_T1();
7682 gen_movl_reg_T0(s, rd);
7683 break;
7684
7685 case 11:
7686 /* misc */
7687 op = (insn >> 8) & 0xf;
7688 switch (op) {
7689 case 0:
7690 /* adjust stack pointer */
b26eefb6 7691 tmp = load_reg(s, 13);
99c475ab
FB
7692 val = (insn & 0x7f) * 4;
7693 if (insn & (1 << 7))
7694 val = -(int32_t)val;
b26eefb6
PB
7695 tcg_gen_addi_i32(tmp, tmp, val);
7696 store_reg(s, 13, tmp);
99c475ab
FB
7697 break;
7698
9ee6e8bb
PB
7699 case 2: /* sign/zero extend. */
7700 ARCH(6);
7701 rd = insn & 7;
7702 rm = (insn >> 3) & 7;
7703 gen_movl_T1_reg(s, rm);
7704 switch ((insn >> 6) & 3) {
b26eefb6
PB
7705 case 0: gen_sxth(cpu_T[1]); break;
7706 case 1: gen_sxtb(cpu_T[1]); break;
7707 case 2: gen_uxth(cpu_T[1]); break;
7708 case 3: gen_uxtb(cpu_T[1]); break;
9ee6e8bb
PB
7709 }
7710 gen_movl_reg_T1(s, rd);
7711 break;
99c475ab
FB
7712 case 4: case 5: case 0xc: case 0xd:
7713 /* push/pop */
7714 gen_movl_T1_reg(s, 13);
5899f386
FB
7715 if (insn & (1 << 8))
7716 offset = 4;
99c475ab 7717 else
5899f386
FB
7718 offset = 0;
7719 for (i = 0; i < 8; i++) {
7720 if (insn & (1 << i))
7721 offset += 4;
7722 }
7723 if ((insn & (1 << 11)) == 0) {
b26eefb6 7724 gen_op_addl_T1_im(-offset);
5899f386 7725 }
99c475ab
FB
7726 for (i = 0; i < 8; i++) {
7727 if (insn & (1 << i)) {
7728 if (insn & (1 << 11)) {
7729 /* pop */
b5ff1b31 7730 gen_ldst(ldl, s);
99c475ab
FB
7731 gen_movl_reg_T0(s, i);
7732 } else {
7733 /* push */
7734 gen_movl_T0_reg(s, i);
b5ff1b31 7735 gen_ldst(stl, s);
99c475ab 7736 }
5899f386 7737 /* advance to the next address. */
b26eefb6 7738 gen_op_addl_T1_im(4);
99c475ab
FB
7739 }
7740 }
7741 if (insn & (1 << 8)) {
7742 if (insn & (1 << 11)) {
7743 /* pop pc */
b5ff1b31 7744 gen_ldst(ldl, s);
99c475ab
FB
7745 /* don't set the pc until the rest of the instruction
7746 has completed */
7747 } else {
7748 /* push lr */
7749 gen_movl_T0_reg(s, 14);
b5ff1b31 7750 gen_ldst(stl, s);
99c475ab 7751 }
b26eefb6 7752 gen_op_addl_T1_im(4);
99c475ab 7753 }
5899f386 7754 if ((insn & (1 << 11)) == 0) {
b26eefb6 7755 gen_op_addl_T1_im(-offset);
5899f386 7756 }
99c475ab
FB
7757 /* write back the new stack pointer */
7758 gen_movl_reg_T1(s, 13);
7759 /* set the new PC value */
7760 if ((insn & 0x0900) == 0x0900)
d9ba4830 7761 gen_bx_T0(s);
99c475ab
FB
7762 break;
7763
9ee6e8bb
PB
7764 case 1: case 3: case 9: case 11: /* czb */
7765 rm = insn & 7;
d9ba4830
PB
7766 tmp = load_reg(s, rm);
7767 tmp2 = tcg_const_i32(0);
9ee6e8bb
PB
7768 s->condlabel = gen_new_label();
7769 s->condjmp = 1;
7770 if (insn & (1 << 11))
d9ba4830 7771 tcg_gen_brcond_i32(TCG_COND_EQ, tmp, tmp2, s->condlabel);
9ee6e8bb 7772 else
d9ba4830
PB
7773 tcg_gen_brcond_i32(TCG_COND_NE, tmp, tmp2, s->condlabel);
7774 dead_tmp(tmp);
9ee6e8bb
PB
7775 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
7776 val = (uint32_t)s->pc + 2;
7777 val += offset;
7778 gen_jmp(s, val);
7779 break;
7780
7781 case 15: /* IT, nop-hint. */
7782 if ((insn & 0xf) == 0) {
7783 gen_nop_hint(s, (insn >> 4) & 0xf);
7784 break;
7785 }
7786 /* If Then. */
7787 s->condexec_cond = (insn >> 4) & 0xe;
7788 s->condexec_mask = insn & 0x1f;
7789 /* No actual code generated for this insn, just setup state. */
7790 break;
7791
06c949e6 7792 case 0xe: /* bkpt */
9ee6e8bb 7793 gen_set_condexec(s);
06c949e6 7794 gen_op_movl_T0_im((long)s->pc - 2);
b26eefb6 7795 gen_set_pc_T0();
d9ba4830 7796 gen_exception(EXCP_BKPT);
06c949e6
PB
7797 s->is_jmp = DISAS_JUMP;
7798 break;
7799
9ee6e8bb
PB
7800 case 0xa: /* rev */
7801 ARCH(6);
7802 rn = (insn >> 3) & 0x7;
7803 rd = insn & 0x7;
7804 gen_movl_T0_reg(s, rn);
7805 switch ((insn >> 6) & 3) {
7806 case 0: gen_op_rev_T0(); break;
3670669c
PB
7807 case 1: gen_rev16(cpu_T[0]); break;
7808 case 3: gen_revsh(cpu_T[0]); break;
9ee6e8bb
PB
7809 default: goto illegal_op;
7810 }
7811 gen_movl_reg_T0(s, rd);
7812 break;
7813
7814 case 6: /* cps */
7815 ARCH(6);
7816 if (IS_USER(s))
7817 break;
7818 if (IS_M(env)) {
7819 val = (insn & (1 << 4)) != 0;
7820 gen_op_movl_T0_im(val);
7821 /* PRIMASK */
7822 if (insn & 1)
7823 gen_op_v7m_msr_T0(16);
7824 /* FAULTMASK */
7825 if (insn & 2)
7826 gen_op_v7m_msr_T0(17);
7827
7828 gen_lookup_tb(s);
7829 } else {
7830 if (insn & (1 << 4))
7831 shift = CPSR_A | CPSR_I | CPSR_F;
7832 else
7833 shift = 0;
7834
7835 val = ((insn & 7) << 6) & shift;
7836 gen_op_movl_T0_im(val);
7837 gen_set_psr_T0(s, shift, 0);
7838 }
7839 break;
7840
99c475ab
FB
7841 default:
7842 goto undef;
7843 }
7844 break;
7845
7846 case 12:
7847 /* load/store multiple */
7848 rn = (insn >> 8) & 0x7;
7849 gen_movl_T1_reg(s, rn);
99c475ab
FB
7850 for (i = 0; i < 8; i++) {
7851 if (insn & (1 << i)) {
99c475ab
FB
7852 if (insn & (1 << 11)) {
7853 /* load */
b5ff1b31 7854 gen_ldst(ldl, s);
99c475ab
FB
7855 gen_movl_reg_T0(s, i);
7856 } else {
7857 /* store */
7858 gen_movl_T0_reg(s, i);
b5ff1b31 7859 gen_ldst(stl, s);
99c475ab 7860 }
5899f386 7861 /* advance to the next address */
b26eefb6 7862 gen_op_addl_T1_im(4);
99c475ab
FB
7863 }
7864 }
5899f386 7865 /* Base register writeback. */
b5ff1b31
FB
7866 if ((insn & (1 << rn)) == 0)
7867 gen_movl_reg_T1(s, rn);
99c475ab
FB
7868 break;
7869
7870 case 13:
7871 /* conditional branch or swi */
7872 cond = (insn >> 8) & 0xf;
7873 if (cond == 0xe)
7874 goto undef;
7875
7876 if (cond == 0xf) {
7877 /* swi */
9ee6e8bb 7878 gen_set_condexec(s);
99c475ab
FB
7879 gen_op_movl_T0_im((long)s->pc | 1);
7880 /* Don't set r15. */
b26eefb6 7881 gen_set_pc_T0();
9ee6e8bb 7882 s->is_jmp = DISAS_SWI;
99c475ab
FB
7883 break;
7884 }
7885 /* generate a conditional jump to next instruction */
e50e6a20 7886 s->condlabel = gen_new_label();
d9ba4830 7887 gen_test_cc(cond ^ 1, s->condlabel);
e50e6a20 7888 s->condjmp = 1;
99c475ab
FB
7889 gen_movl_T1_reg(s, 15);
7890
7891 /* jump to the offset */
5899f386 7892 val = (uint32_t)s->pc + 2;
99c475ab 7893 offset = ((int32_t)insn << 24) >> 24;
5899f386 7894 val += offset << 1;
8aaca4c0 7895 gen_jmp(s, val);
99c475ab
FB
7896 break;
7897
7898 case 14:
358bf29e 7899 if (insn & (1 << 11)) {
9ee6e8bb
PB
7900 if (disas_thumb2_insn(env, s, insn))
7901 goto undef32;
358bf29e
PB
7902 break;
7903 }
9ee6e8bb 7904 /* unconditional branch */
99c475ab
FB
7905 val = (uint32_t)s->pc;
7906 offset = ((int32_t)insn << 21) >> 21;
7907 val += (offset << 1) + 2;
8aaca4c0 7908 gen_jmp(s, val);
99c475ab
FB
7909 break;
7910
7911 case 15:
9ee6e8bb
PB
7912 if (disas_thumb2_insn(env, s, insn))
7913 goto undef32;
7914 break;
99c475ab
FB
7915 }
7916 return;
9ee6e8bb
PB
7917undef32:
7918 gen_set_condexec(s);
7919 gen_op_movl_T0_im((long)s->pc - 4);
b26eefb6 7920 gen_set_pc_T0();
d9ba4830 7921 gen_exception(EXCP_UDEF);
9ee6e8bb
PB
7922 s->is_jmp = DISAS_JUMP;
7923 return;
7924illegal_op:
99c475ab 7925undef:
9ee6e8bb 7926 gen_set_condexec(s);
5899f386 7927 gen_op_movl_T0_im((long)s->pc - 2);
b26eefb6 7928 gen_set_pc_T0();
d9ba4830 7929 gen_exception(EXCP_UDEF);
99c475ab
FB
7930 s->is_jmp = DISAS_JUMP;
7931}
7932
2c0262af
FB
7933/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7934 basic block 'tb'. If search_pc is TRUE, also generate PC
7935 information for each intermediate instruction. */
5fafdf24
TS
7936static inline int gen_intermediate_code_internal(CPUState *env,
7937 TranslationBlock *tb,
2c0262af
FB
7938 int search_pc)
7939{
7940 DisasContext dc1, *dc = &dc1;
7941 uint16_t *gen_opc_end;
7942 int j, lj;
0fa85d43 7943 target_ulong pc_start;
b5ff1b31 7944 uint32_t next_page_start;
3b46e624 7945
2c0262af 7946 /* generate intermediate code */
b26eefb6
PB
7947 num_temps = 0;
7948 memset(temps, 0, sizeof(temps));
7949
0fa85d43 7950 pc_start = tb->pc;
3b46e624 7951
2c0262af
FB
7952 dc->tb = tb;
7953
2c0262af 7954 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2c0262af
FB
7955
7956 dc->is_jmp = DISAS_NEXT;
7957 dc->pc = pc_start;
8aaca4c0 7958 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 7959 dc->condjmp = 0;
5899f386 7960 dc->thumb = env->thumb;
9ee6e8bb
PB
7961 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
7962 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 7963 dc->is_mem = 0;
b5ff1b31 7964#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
7965 if (IS_M(env)) {
7966 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
7967 } else {
7968 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
7969 }
b5ff1b31
FB
7970#endif
7971 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
2c0262af 7972 lj = -1;
9ee6e8bb
PB
7973 /* Reset the conditional execution bits immediately. This avoids
7974 complications trying to do it at the end of the block. */
7975 if (env->condexec_bits)
8f01245e
PB
7976 {
7977 TCGv tmp = new_tmp();
7978 tcg_gen_movi_i32(tmp, 0);
d9ba4830 7979 store_cpu_field(tmp, condexec_bits);
8f01245e 7980 }
2c0262af 7981 do {
9ee6e8bb
PB
7982#ifndef CONFIG_USER_ONLY
7983 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
7984 /* We always get here via a jump, so know we are not in a
7985 conditional execution block. */
d9ba4830 7986 gen_exception(EXCP_EXCEPTION_EXIT);
9ee6e8bb
PB
7987 }
7988#endif
7989
1fddef4b
FB
7990 if (env->nb_breakpoints > 0) {
7991 for(j = 0; j < env->nb_breakpoints; j++) {
7992 if (env->breakpoints[j] == dc->pc) {
9ee6e8bb 7993 gen_set_condexec(dc);
1fddef4b 7994 gen_op_movl_T0_im((long)dc->pc);
b26eefb6 7995 gen_set_pc_T0();
d9ba4830 7996 gen_exception(EXCP_DEBUG);
1fddef4b 7997 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
7998 /* Advance PC so that clearing the breakpoint will
7999 invalidate this TB. */
8000 dc->pc += 2;
8001 goto done_generating;
1fddef4b
FB
8002 break;
8003 }
8004 }
8005 }
2c0262af
FB
8006 if (search_pc) {
8007 j = gen_opc_ptr - gen_opc_buf;
8008 if (lj < j) {
8009 lj++;
8010 while (lj < j)
8011 gen_opc_instr_start[lj++] = 0;
8012 }
0fa85d43 8013 gen_opc_pc[lj] = dc->pc;
2c0262af
FB
8014 gen_opc_instr_start[lj] = 1;
8015 }
e50e6a20 8016
9ee6e8bb
PB
8017 if (env->thumb) {
8018 disas_thumb_insn(env, dc);
8019 if (dc->condexec_mask) {
8020 dc->condexec_cond = (dc->condexec_cond & 0xe)
8021 | ((dc->condexec_mask >> 4) & 1);
8022 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
8023 if (dc->condexec_mask == 0) {
8024 dc->condexec_cond = 0;
8025 }
8026 }
8027 } else {
8028 disas_arm_insn(env, dc);
8029 }
b26eefb6
PB
8030 if (num_temps) {
8031 fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
8032 num_temps = 0;
8033 }
e50e6a20
FB
8034
8035 if (dc->condjmp && !dc->is_jmp) {
8036 gen_set_label(dc->condlabel);
8037 dc->condjmp = 0;
8038 }
6658ffb8
PB
8039 /* Terminate the TB on memory ops if watchpoints are present. */
8040 /* FIXME: This should be replacd by the deterministic execution
8041 * IRQ raising bits. */
8042 if (dc->is_mem && env->nb_watchpoints)
8043 break;
8044
e50e6a20
FB
8045 /* Translation stops when a conditional branch is enoutered.
8046 * Otherwise the subsequent code could get translated several times.
b5ff1b31
FB
8047 * Also stop translation when a page boundary is reached. This
8048 * ensures prefech aborts occur at the right place. */
1fddef4b
FB
8049 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
8050 !env->singlestep_enabled &&
b5ff1b31 8051 dc->pc < next_page_start);
9ee6e8bb 8052
b5ff1b31 8053 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
8054 instruction was a conditional branch or trap, and the PC has
8055 already been written. */
8aaca4c0
FB
8056 if (__builtin_expect(env->singlestep_enabled, 0)) {
8057 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 8058 if (dc->condjmp) {
9ee6e8bb
PB
8059 gen_set_condexec(dc);
8060 if (dc->is_jmp == DISAS_SWI) {
d9ba4830 8061 gen_exception(EXCP_SWI);
9ee6e8bb 8062 } else {
d9ba4830 8063 gen_exception(EXCP_DEBUG);
9ee6e8bb 8064 }
e50e6a20
FB
8065 gen_set_label(dc->condlabel);
8066 }
8067 if (dc->condjmp || !dc->is_jmp) {
8aaca4c0 8068 gen_op_movl_T0_im((long)dc->pc);
b26eefb6 8069 gen_set_pc_T0();
e50e6a20 8070 dc->condjmp = 0;
8aaca4c0 8071 }
9ee6e8bb
PB
8072 gen_set_condexec(dc);
8073 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
d9ba4830 8074 gen_exception(EXCP_SWI);
9ee6e8bb
PB
8075 } else {
8076 /* FIXME: Single stepping a WFI insn will not halt
8077 the CPU. */
d9ba4830 8078 gen_exception(EXCP_DEBUG);
9ee6e8bb 8079 }
8aaca4c0 8080 } else {
9ee6e8bb
PB
8081 /* While branches must always occur at the end of an IT block,
8082 there are a few other things that can cause us to terminate
8083 the TB in the middel of an IT block:
8084 - Exception generating instructions (bkpt, swi, undefined).
8085 - Page boundaries.
8086 - Hardware watchpoints.
8087 Hardware breakpoints have already been handled and skip this code.
8088 */
8089 gen_set_condexec(dc);
8aaca4c0 8090 switch(dc->is_jmp) {
8aaca4c0 8091 case DISAS_NEXT:
6e256c93 8092 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
8093 break;
8094 default:
8095 case DISAS_JUMP:
8096 case DISAS_UPDATE:
8097 /* indicate that the hash table must be used to find the next TB */
57fec1fe 8098 tcg_gen_exit_tb(0);
8aaca4c0
FB
8099 break;
8100 case DISAS_TB_JUMP:
8101 /* nothing more to generate */
8102 break;
9ee6e8bb 8103 case DISAS_WFI:
d9ba4830 8104 gen_helper_wfi();
9ee6e8bb
PB
8105 break;
8106 case DISAS_SWI:
d9ba4830 8107 gen_exception(EXCP_SWI);
9ee6e8bb 8108 break;
8aaca4c0 8109 }
e50e6a20
FB
8110 if (dc->condjmp) {
8111 gen_set_label(dc->condlabel);
9ee6e8bb 8112 gen_set_condexec(dc);
6e256c93 8113 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
8114 dc->condjmp = 0;
8115 }
2c0262af 8116 }
9ee6e8bb 8117done_generating:
2c0262af
FB
8118 *gen_opc_ptr = INDEX_op_end;
8119
8120#ifdef DEBUG_DISAS
e19e89a5 8121 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
8122 fprintf(logfile, "----------------\n");
8123 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 8124 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af
FB
8125 fprintf(logfile, "\n");
8126 }
8127#endif
b5ff1b31
FB
8128 if (search_pc) {
8129 j = gen_opc_ptr - gen_opc_buf;
8130 lj++;
8131 while (lj <= j)
8132 gen_opc_instr_start[lj++] = 0;
b5ff1b31 8133 } else {
2c0262af 8134 tb->size = dc->pc - pc_start;
b5ff1b31 8135 }
2c0262af
FB
8136 return 0;
8137}
8138
8139int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
8140{
8141 return gen_intermediate_code_internal(env, tb, 0);
8142}
8143
8144int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
8145{
8146 return gen_intermediate_code_internal(env, tb, 1);
8147}
8148
b5ff1b31
FB
8149static const char *cpu_mode_names[16] = {
8150 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
8151 "???", "???", "???", "und", "???", "???", "???", "sys"
8152};
9ee6e8bb 8153
5fafdf24 8154void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
8155 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
8156 int flags)
2c0262af
FB
8157{
8158 int i;
bc380d17 8159 union {
b7bcbe95
FB
8160 uint32_t i;
8161 float s;
8162 } s0, s1;
8163 CPU_DoubleU d;
a94a6abf
PB
8164 /* ??? This assumes float64 and double have the same layout.
8165 Oh well, it's only debug dumps. */
8166 union {
8167 float64 f64;
8168 double d;
8169 } d0;
b5ff1b31 8170 uint32_t psr;
2c0262af
FB
8171
8172 for(i=0;i<16;i++) {
7fe48483 8173 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 8174 if ((i % 4) == 3)
7fe48483 8175 cpu_fprintf(f, "\n");
2c0262af 8176 else
7fe48483 8177 cpu_fprintf(f, " ");
2c0262af 8178 }
b5ff1b31 8179 psr = cpsr_read(env);
687fa640
TS
8180 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
8181 psr,
b5ff1b31
FB
8182 psr & (1 << 31) ? 'N' : '-',
8183 psr & (1 << 30) ? 'Z' : '-',
8184 psr & (1 << 29) ? 'C' : '-',
8185 psr & (1 << 28) ? 'V' : '-',
5fafdf24 8186 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 8187 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95
FB
8188
8189 for (i = 0; i < 16; i++) {
8e96005d
FB
8190 d.d = env->vfp.regs[i];
8191 s0.i = d.l.lower;
8192 s1.i = d.l.upper;
a94a6abf
PB
8193 d0.f64 = d.d;
8194 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 8195 i * 2, (int)s0.i, s0.s,
a94a6abf 8196 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 8197 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 8198 d0.d);
b7bcbe95 8199 }
40f137e1 8200 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
2c0262af 8201}
a6b025d3 8202