]> git.proxmox.com Git - qemu.git/blame - target-arm/translate.c
TCG code generator
[qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
31
9ee6e8bb
PB
/* Architecture-feature gates, evaluated against the translating CPU's
   feature bits.  ARCH(x) rejects the insn when the feature is absent. */
#define ENABLE_ARCH_5J   0
#define ENABLE_ARCH_6    arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K   arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2  arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7    arm_feature(env, ARM_FEATURE_V7)

#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
39
2c0262af
FB
40/* internal defines */
41typedef struct DisasContext {
0fa85d43 42 target_ulong pc;
2c0262af 43 int is_jmp;
e50e6a20
FB
44 /* Nonzero if this instruction has been conditionally skipped. */
45 int condjmp;
46 /* The label that will be jumped to when the instruction is skipped. */
47 int condlabel;
9ee6e8bb
PB
48 /* Thumb-2 condtional execution bits. */
49 int condexec_mask;
50 int condexec_cond;
2c0262af 51 struct TranslationBlock *tb;
8aaca4c0 52 int singlestep_enabled;
5899f386 53 int thumb;
6658ffb8 54 int is_mem;
b5ff1b31
FB
55#if !defined(CONFIG_USER_ONLY)
56 int user;
57#endif
2c0262af
FB
58} DisasContext;
59
b5ff1b31
FB
/* In user-only builds every access is a user access. */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) ((s)->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated. */
#define DISAS_WFI 4
#define DISAS_SWI 5
2c0262af 70
c53be334
FB
71#ifdef USE_DIRECT_JUMP
72#define TBPARAM(x)
73#else
74#define TBPARAM(x) (long)(x)
75#endif
76
2c0262af
FB
77/* XXX: move that elsewhere */
78static uint16_t *gen_opc_ptr;
79static uint32_t *gen_opparam_ptr;
80extern FILE *logfile;
81extern int loglevel;
82
83enum {
84#define DEF(s, n, copy_size) INDEX_op_ ## s,
85#include "opc.h"
86#undef DEF
87 NB_OPS,
88};
89
90#include "gen-op.h"
91
9ee6e8bb
PB
92#define PAS_OP(pfx) { \
93 gen_op_ ## pfx ## add16_T0_T1, \
94 gen_op_ ## pfx ## addsubx_T0_T1, \
95 gen_op_ ## pfx ## subaddx_T0_T1, \
96 gen_op_ ## pfx ## sub16_T0_T1, \
97 gen_op_ ## pfx ## add8_T0_T1, \
98 NULL, \
99 NULL, \
100 gen_op_ ## pfx ## sub8_T0_T1 }
101
102static GenOpFunc *gen_arm_parallel_addsub[8][8] = {
103 {},
104 PAS_OP(s),
105 PAS_OP(q),
106 PAS_OP(sh),
107 {},
108 PAS_OP(u),
109 PAS_OP(uq),
110 PAS_OP(uh),
111};
112#undef PAS_OP
113
114/* For unknown reasons Arm and Thumb-2 use arbitrarily diffenet encodings. */
115#define PAS_OP(pfx) { \
116 gen_op_ ## pfx ## add8_T0_T1, \
117 gen_op_ ## pfx ## add16_T0_T1, \
118 gen_op_ ## pfx ## addsubx_T0_T1, \
119 NULL, \
120 gen_op_ ## pfx ## sub8_T0_T1, \
121 gen_op_ ## pfx ## sub16_T0_T1, \
122 gen_op_ ## pfx ## subaddx_T0_T1, \
123 NULL }
124
125static GenOpFunc *gen_thumb2_parallel_addsub[8][8] = {
126 PAS_OP(s),
127 PAS_OP(q),
128 PAS_OP(sh),
129 {},
130 PAS_OP(u),
131 PAS_OP(uq),
132 PAS_OP(uh),
133 {}
134};
135#undef PAS_OP
136
e50e6a20 137static GenOpFunc1 *gen_test_cc[14] = {
2c0262af
FB
138 gen_op_test_eq,
139 gen_op_test_ne,
140 gen_op_test_cs,
141 gen_op_test_cc,
142 gen_op_test_mi,
143 gen_op_test_pl,
144 gen_op_test_vs,
145 gen_op_test_vc,
146 gen_op_test_hi,
147 gen_op_test_ls,
148 gen_op_test_ge,
149 gen_op_test_lt,
150 gen_op_test_gt,
151 gen_op_test_le,
152};
153
/* Per data-processing opcode: 1 if the operation is logical,
   0 if arithmetic. */
const uint8_t table_logic_cc[16] = {
    /* and  xor  sub  rsb  add  adc  sbc  rsc */
       1,   1,   0,   0,   0,   0,   0,   0,
    /* andl xorl cmp  cmn  orr  mov  bic  mvn */
       1,   1,   0,   0,   1,   1,   1,   1,
};
3b46e624 172
2c0262af
FB
173static GenOpFunc1 *gen_shift_T1_im[4] = {
174 gen_op_shll_T1_im,
175 gen_op_shrl_T1_im,
176 gen_op_sarl_T1_im,
177 gen_op_rorl_T1_im,
178};
179
1e8d4eec
FB
180static GenOpFunc *gen_shift_T1_0[4] = {
181 NULL,
182 gen_op_shrl_T1_0,
183 gen_op_sarl_T1_0,
184 gen_op_rrxl_T1,
185};
186
2c0262af
FB
187static GenOpFunc1 *gen_shift_T2_im[4] = {
188 gen_op_shll_T2_im,
189 gen_op_shrl_T2_im,
190 gen_op_sarl_T2_im,
191 gen_op_rorl_T2_im,
192};
193
1e8d4eec
FB
194static GenOpFunc *gen_shift_T2_0[4] = {
195 NULL,
196 gen_op_shrl_T2_0,
197 gen_op_sarl_T2_0,
198 gen_op_rrxl_T2,
199};
200
2c0262af
FB
201static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
202 gen_op_shll_T1_im_cc,
203 gen_op_shrl_T1_im_cc,
204 gen_op_sarl_T1_im_cc,
205 gen_op_rorl_T1_im_cc,
206};
207
1e8d4eec
FB
208static GenOpFunc *gen_shift_T1_0_cc[4] = {
209 NULL,
210 gen_op_shrl_T1_0_cc,
211 gen_op_sarl_T1_0_cc,
212 gen_op_rrxl_T1_cc,
213};
214
2c0262af
FB
215static GenOpFunc *gen_shift_T1_T0[4] = {
216 gen_op_shll_T1_T0,
217 gen_op_shrl_T1_T0,
218 gen_op_sarl_T1_T0,
219 gen_op_rorl_T1_T0,
220};
221
222static GenOpFunc *gen_shift_T1_T0_cc[4] = {
223 gen_op_shll_T1_T0_cc,
224 gen_op_shrl_T1_T0_cc,
225 gen_op_sarl_T1_T0_cc,
226 gen_op_rorl_T1_T0_cc,
227};
228
229static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
230 {
231 gen_op_movl_T0_r0,
232 gen_op_movl_T0_r1,
233 gen_op_movl_T0_r2,
234 gen_op_movl_T0_r3,
235 gen_op_movl_T0_r4,
236 gen_op_movl_T0_r5,
237 gen_op_movl_T0_r6,
238 gen_op_movl_T0_r7,
239 gen_op_movl_T0_r8,
240 gen_op_movl_T0_r9,
241 gen_op_movl_T0_r10,
242 gen_op_movl_T0_r11,
243 gen_op_movl_T0_r12,
244 gen_op_movl_T0_r13,
245 gen_op_movl_T0_r14,
246 gen_op_movl_T0_r15,
247 },
248 {
249 gen_op_movl_T1_r0,
250 gen_op_movl_T1_r1,
251 gen_op_movl_T1_r2,
252 gen_op_movl_T1_r3,
253 gen_op_movl_T1_r4,
254 gen_op_movl_T1_r5,
255 gen_op_movl_T1_r6,
256 gen_op_movl_T1_r7,
257 gen_op_movl_T1_r8,
258 gen_op_movl_T1_r9,
259 gen_op_movl_T1_r10,
260 gen_op_movl_T1_r11,
261 gen_op_movl_T1_r12,
262 gen_op_movl_T1_r13,
263 gen_op_movl_T1_r14,
264 gen_op_movl_T1_r15,
265 },
266 {
267 gen_op_movl_T2_r0,
268 gen_op_movl_T2_r1,
269 gen_op_movl_T2_r2,
270 gen_op_movl_T2_r3,
271 gen_op_movl_T2_r4,
272 gen_op_movl_T2_r5,
273 gen_op_movl_T2_r6,
274 gen_op_movl_T2_r7,
275 gen_op_movl_T2_r8,
276 gen_op_movl_T2_r9,
277 gen_op_movl_T2_r10,
278 gen_op_movl_T2_r11,
279 gen_op_movl_T2_r12,
280 gen_op_movl_T2_r13,
281 gen_op_movl_T2_r14,
282 gen_op_movl_T2_r15,
283 },
284};
285
286static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
287 {
288 gen_op_movl_r0_T0,
289 gen_op_movl_r1_T0,
290 gen_op_movl_r2_T0,
291 gen_op_movl_r3_T0,
292 gen_op_movl_r4_T0,
293 gen_op_movl_r5_T0,
294 gen_op_movl_r6_T0,
295 gen_op_movl_r7_T0,
296 gen_op_movl_r8_T0,
297 gen_op_movl_r9_T0,
298 gen_op_movl_r10_T0,
299 gen_op_movl_r11_T0,
300 gen_op_movl_r12_T0,
301 gen_op_movl_r13_T0,
302 gen_op_movl_r14_T0,
303 gen_op_movl_r15_T0,
304 },
305 {
306 gen_op_movl_r0_T1,
307 gen_op_movl_r1_T1,
308 gen_op_movl_r2_T1,
309 gen_op_movl_r3_T1,
310 gen_op_movl_r4_T1,
311 gen_op_movl_r5_T1,
312 gen_op_movl_r6_T1,
313 gen_op_movl_r7_T1,
314 gen_op_movl_r8_T1,
315 gen_op_movl_r9_T1,
316 gen_op_movl_r10_T1,
317 gen_op_movl_r11_T1,
318 gen_op_movl_r12_T1,
319 gen_op_movl_r13_T1,
320 gen_op_movl_r14_T1,
321 gen_op_movl_r15_T1,
322 },
323};
324
325static GenOpFunc1 *gen_op_movl_TN_im[3] = {
326 gen_op_movl_T0_im,
327 gen_op_movl_T1_im,
328 gen_op_movl_T2_im,
329};
330
9ee6e8bb
PB
331static GenOpFunc1 *gen_shift_T0_im_thumb_cc[3] = {
332 gen_op_shll_T0_im_thumb_cc,
333 gen_op_shrl_T0_im_thumb_cc,
334 gen_op_sarl_T0_im_thumb_cc,
335};
336
99c475ab
FB
337static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
338 gen_op_shll_T0_im_thumb,
339 gen_op_shrl_T0_im_thumb,
340 gen_op_sarl_T0_im_thumb,
341};
342
343static inline void gen_bx(DisasContext *s)
344{
345 s->is_jmp = DISAS_UPDATE;
346 gen_op_bx_T0();
347}
348
b5ff1b31
FB
349
/* Emit a load/store micro-op.  System mode picks the user/kernel variant
   from the current privilege level and flags the TB as touching memory;
   user-only builds always use the _raw variant. */
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
        (s)->is_mem = 1; \
        if (IS_USER(s)) \
            gen_op_##name##_user(); \
        else \
            gen_op_##name##_kernel(); \
    } while (0)
#endif
361
2c0262af
FB
362static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
363{
364 int val;
365
366 if (reg == 15) {
5899f386
FB
367 /* normaly, since we updated PC, we need only to add one insn */
368 if (s->thumb)
369 val = (long)s->pc + 2;
370 else
371 val = (long)s->pc + 4;
2c0262af
FB
372 gen_op_movl_TN_im[t](val);
373 } else {
374 gen_op_movl_TN_reg[t][reg]();
375 }
376}
377
378static inline void gen_movl_T0_reg(DisasContext *s, int reg)
379{
380 gen_movl_TN_reg(s, reg, 0);
381}
382
383static inline void gen_movl_T1_reg(DisasContext *s, int reg)
384{
385 gen_movl_TN_reg(s, reg, 1);
386}
387
388static inline void gen_movl_T2_reg(DisasContext *s, int reg)
389{
390 gen_movl_TN_reg(s, reg, 2);
391}
392
393static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
394{
395 gen_op_movl_reg_TN[t][reg]();
396 if (reg == 15) {
397 s->is_jmp = DISAS_JUMP;
398 }
399}
400
401static inline void gen_movl_reg_T0(DisasContext *s, int reg)
402{
403 gen_movl_reg_TN(s, reg, 0);
404}
405
406static inline void gen_movl_reg_T1(DisasContext *s, int reg)
407{
408 gen_movl_reg_TN(s, reg, 1);
409}
410
b5ff1b31
FB
411/* Force a TB lookup after an instruction that changes the CPU state. */
412static inline void gen_lookup_tb(DisasContext *s)
413{
414 gen_op_movl_T0_im(s->pc);
415 gen_movl_reg_T0(s, 15);
416 s->is_jmp = DISAS_UPDATE;
417}
418
2c0262af
FB
419static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
420{
1e8d4eec 421 int val, rm, shift, shiftop;
2c0262af
FB
422
423 if (!(insn & (1 << 25))) {
424 /* immediate */
425 val = insn & 0xfff;
426 if (!(insn & (1 << 23)))
427 val = -val;
537730b9
FB
428 if (val != 0)
429 gen_op_addl_T1_im(val);
2c0262af
FB
430 } else {
431 /* shift/register */
432 rm = (insn) & 0xf;
433 shift = (insn >> 7) & 0x1f;
434 gen_movl_T2_reg(s, rm);
1e8d4eec 435 shiftop = (insn >> 5) & 3;
2c0262af 436 if (shift != 0) {
1e8d4eec
FB
437 gen_shift_T2_im[shiftop](shift);
438 } else if (shiftop != 0) {
439 gen_shift_T2_0[shiftop]();
2c0262af
FB
440 }
441 if (!(insn & (1 << 23)))
442 gen_op_subl_T1_T2();
443 else
444 gen_op_addl_T1_T2();
445 }
446}
447
191f9a93
PB
448static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
449 int extra)
2c0262af
FB
450{
451 int val, rm;
3b46e624 452
2c0262af
FB
453 if (insn & (1 << 22)) {
454 /* immediate */
455 val = (insn & 0xf) | ((insn >> 4) & 0xf0);
456 if (!(insn & (1 << 23)))
457 val = -val;
18acad92 458 val += extra;
537730b9
FB
459 if (val != 0)
460 gen_op_addl_T1_im(val);
2c0262af
FB
461 } else {
462 /* register */
191f9a93
PB
463 if (extra)
464 gen_op_addl_T1_im(extra);
2c0262af
FB
465 rm = (insn) & 0xf;
466 gen_movl_T2_reg(s, rm);
467 if (!(insn & (1 << 23)))
468 gen_op_subl_T1_T2();
469 else
470 gen_op_addl_T1_T2();
471 }
472}
473
b7bcbe95
FB
/* Define gen_vfp_<name>(dp): dispatch to the double- (##d) or
   single-precision (##s) micro-op according to 'dp'. */
#define VFP_OP(name)                          \
static inline void gen_vfp_##name(int dp)     \
{                                             \
    if (dp)                                   \
        gen_op_vfp_##name##d();               \
    else                                      \
        gen_op_vfp_##name##s();               \
}
482
9ee6e8bb
PB
/* Like VFP_OP, but for micro-ops taking one integer argument. */
#define VFP_OP1(name)                              \
static inline void gen_vfp_##name(int dp, int arg) \
{                                                  \
    if (dp)                                        \
        gen_op_vfp_##name##d(arg);                 \
    else                                           \
        gen_op_vfp_##name##s(arg);                 \
}
491
b7bcbe95
FB
492VFP_OP(add)
493VFP_OP(sub)
494VFP_OP(mul)
495VFP_OP(div)
496VFP_OP(neg)
497VFP_OP(abs)
498VFP_OP(sqrt)
499VFP_OP(cmp)
500VFP_OP(cmpe)
501VFP_OP(F1_ld0)
502VFP_OP(uito)
503VFP_OP(sito)
504VFP_OP(toui)
505VFP_OP(touiz)
506VFP_OP(tosi)
507VFP_OP(tosiz)
9ee6e8bb
PB
508VFP_OP1(tosh)
509VFP_OP1(tosl)
510VFP_OP1(touh)
511VFP_OP1(toul)
512VFP_OP1(shto)
513VFP_OP1(slto)
514VFP_OP1(uhto)
515VFP_OP1(ulto)
b7bcbe95
FB
516
517#undef VFP_OP
518
9ee6e8bb
PB
519static inline void gen_vfp_fconst(int dp, uint32_t val)
520{
521 if (dp)
522 gen_op_vfp_fconstd(val);
523 else
524 gen_op_vfp_fconsts(val);
525}
526
b5ff1b31
FB
527static inline void gen_vfp_ld(DisasContext *s, int dp)
528{
529 if (dp)
530 gen_ldst(vfp_ldd, s);
531 else
532 gen_ldst(vfp_lds, s);
533}
534
535static inline void gen_vfp_st(DisasContext *s, int dp)
536{
537 if (dp)
538 gen_ldst(vfp_std, s);
539 else
540 gen_ldst(vfp_sts, s);
541}
542
8e96005d
FB
543static inline long
544vfp_reg_offset (int dp, int reg)
545{
546 if (dp)
547 return offsetof(CPUARMState, vfp.regs[reg]);
548 else if (reg & 1) {
549 return offsetof(CPUARMState, vfp.regs[reg >> 1])
550 + offsetof(CPU_DoubleU, l.upper);
551 } else {
552 return offsetof(CPUARMState, vfp.regs[reg >> 1])
553 + offsetof(CPU_DoubleU, l.lower);
554 }
555}
9ee6e8bb
PB
556
/* Return the offset of a 32-bit piece of a NEON register;
   zero is the least significant end of the register. */
static inline long
neon_reg_offset (int reg, int n)
{
    return vfp_reg_offset(0, reg * 2 + n);
}
566
/* Read/write a 32-bit piece 'n' of NEON register 'reg' using a T-sized op. */
#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
569
b7bcbe95
FB
/* F0 <- VFP register 'reg' (double if dp, else single). */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);
    if (dp)
        gen_op_vfp_getreg_F0d(off);
    else
        gen_op_vfp_getreg_F0s(off);
}
577
/* F1 <- VFP register 'reg' (double if dp, else single). */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);
    if (dp)
        gen_op_vfp_getreg_F1d(off);
    else
        gen_op_vfp_getreg_F1s(off);
}
585
/* VFP register 'reg' <- F0 (double if dp, else single). */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);
    if (dp)
        gen_op_vfp_setreg_F0d(off);
    else
        gen_op_vfp_setreg_F0s(off);
}
593
18c9b560
AZ
/* Coprocessor insn direction bit: set for coprocessor-to-ARM transfers
   and loads (e.g. TMRRC below). */
#define ARM_CP_RW_BIT (1 << 20)
595
596static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
597{
598 int rd;
599 uint32_t offset;
600
601 rd = (insn >> 16) & 0xf;
602 gen_movl_T1_reg(s, rd);
603
604 offset = (insn & 0xff) << ((insn >> 7) & 2);
605 if (insn & (1 << 24)) {
606 /* Pre indexed */
607 if (insn & (1 << 23))
608 gen_op_addl_T1_im(offset);
609 else
610 gen_op_addl_T1_im(-offset);
611
612 if (insn & (1 << 21))
613 gen_movl_reg_T1(s, rd);
614 } else if (insn & (1 << 21)) {
615 /* Post indexed */
616 if (insn & (1 << 23))
617 gen_op_movl_T0_im(offset);
618 else
619 gen_op_movl_T0_im(- offset);
620 gen_op_addl_T0_T1();
621 gen_movl_reg_T0(s, rd);
622 } else if (!(insn & (1 << 23)))
623 return 1;
624 return 0;
625}
626
627static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
628{
629 int rd = (insn >> 0) & 0xf;
630
631 if (insn & (1 << 8))
632 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
633 return 1;
634 else
635 gen_op_iwmmxt_movl_T0_wCx(rd);
636 else
637 gen_op_iwmmxt_movl_T0_T1_wRn(rd);
638
639 gen_op_movl_T1_im(mask);
640 gen_op_andl_T0_T1();
641 return 0;
642}
643
 644/* Disassemble an iwMMXt instruction.  Returns nonzero if an error occurred
 645 (i.e. an undefined instruction). */
646static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
647{
648 int rd, wrd;
649 int rdhi, rdlo, rd0, rd1, i;
650
651 if ((insn & 0x0e000e00) == 0x0c000000) {
652 if ((insn & 0x0fe00ff0) == 0x0c400000) {
653 wrd = insn & 0xf;
654 rdlo = (insn >> 12) & 0xf;
655 rdhi = (insn >> 16) & 0xf;
656 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
657 gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
658 gen_movl_reg_T0(s, rdlo);
659 gen_movl_reg_T1(s, rdhi);
660 } else { /* TMCRR */
661 gen_movl_T0_reg(s, rdlo);
662 gen_movl_T1_reg(s, rdhi);
663 gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
664 gen_op_iwmmxt_set_mup();
665 }
666 return 0;
667 }
668
669 wrd = (insn >> 12) & 0xf;
670 if (gen_iwmmxt_address(s, insn))
671 return 1;
672 if (insn & ARM_CP_RW_BIT) {
673 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
674 gen_ldst(ldl, s);
675 gen_op_iwmmxt_movl_wCx_T0(wrd);
676 } else {
677 if (insn & (1 << 8))
678 if (insn & (1 << 22)) /* WLDRD */
679 gen_ldst(iwmmxt_ldq, s);
680 else /* WLDRW wRd */
681 gen_ldst(iwmmxt_ldl, s);
682 else
683 if (insn & (1 << 22)) /* WLDRH */
684 gen_ldst(iwmmxt_ldw, s);
685 else /* WLDRB */
686 gen_ldst(iwmmxt_ldb, s);
687 gen_op_iwmmxt_movq_wRn_M0(wrd);
688 }
689 } else {
690 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
691 gen_op_iwmmxt_movl_T0_wCx(wrd);
692 gen_ldst(stl, s);
693 } else {
694 gen_op_iwmmxt_movq_M0_wRn(wrd);
695 if (insn & (1 << 8))
696 if (insn & (1 << 22)) /* WSTRD */
697 gen_ldst(iwmmxt_stq, s);
698 else /* WSTRW wRd */
699 gen_ldst(iwmmxt_stl, s);
700 else
701 if (insn & (1 << 22)) /* WSTRH */
702 gen_ldst(iwmmxt_ldw, s);
703 else /* WSTRB */
704 gen_ldst(iwmmxt_stb, s);
705 }
706 }
707 return 0;
708 }
709
710 if ((insn & 0x0f000000) != 0x0e000000)
711 return 1;
712
713 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
714 case 0x000: /* WOR */
715 wrd = (insn >> 12) & 0xf;
716 rd0 = (insn >> 0) & 0xf;
717 rd1 = (insn >> 16) & 0xf;
718 gen_op_iwmmxt_movq_M0_wRn(rd0);
719 gen_op_iwmmxt_orq_M0_wRn(rd1);
720 gen_op_iwmmxt_setpsr_nz();
721 gen_op_iwmmxt_movq_wRn_M0(wrd);
722 gen_op_iwmmxt_set_mup();
723 gen_op_iwmmxt_set_cup();
724 break;
725 case 0x011: /* TMCR */
726 if (insn & 0xf)
727 return 1;
728 rd = (insn >> 12) & 0xf;
729 wrd = (insn >> 16) & 0xf;
730 switch (wrd) {
731 case ARM_IWMMXT_wCID:
732 case ARM_IWMMXT_wCASF:
733 break;
734 case ARM_IWMMXT_wCon:
735 gen_op_iwmmxt_set_cup();
736 /* Fall through. */
737 case ARM_IWMMXT_wCSSF:
738 gen_op_iwmmxt_movl_T0_wCx(wrd);
739 gen_movl_T1_reg(s, rd);
740 gen_op_bicl_T0_T1();
741 gen_op_iwmmxt_movl_wCx_T0(wrd);
742 break;
743 case ARM_IWMMXT_wCGR0:
744 case ARM_IWMMXT_wCGR1:
745 case ARM_IWMMXT_wCGR2:
746 case ARM_IWMMXT_wCGR3:
747 gen_op_iwmmxt_set_cup();
748 gen_movl_reg_T0(s, rd);
749 gen_op_iwmmxt_movl_wCx_T0(wrd);
750 break;
751 default:
752 return 1;
753 }
754 break;
755 case 0x100: /* WXOR */
756 wrd = (insn >> 12) & 0xf;
757 rd0 = (insn >> 0) & 0xf;
758 rd1 = (insn >> 16) & 0xf;
759 gen_op_iwmmxt_movq_M0_wRn(rd0);
760 gen_op_iwmmxt_xorq_M0_wRn(rd1);
761 gen_op_iwmmxt_setpsr_nz();
762 gen_op_iwmmxt_movq_wRn_M0(wrd);
763 gen_op_iwmmxt_set_mup();
764 gen_op_iwmmxt_set_cup();
765 break;
766 case 0x111: /* TMRC */
767 if (insn & 0xf)
768 return 1;
769 rd = (insn >> 12) & 0xf;
770 wrd = (insn >> 16) & 0xf;
771 gen_op_iwmmxt_movl_T0_wCx(wrd);
772 gen_movl_reg_T0(s, rd);
773 break;
774 case 0x300: /* WANDN */
775 wrd = (insn >> 12) & 0xf;
776 rd0 = (insn >> 0) & 0xf;
777 rd1 = (insn >> 16) & 0xf;
778 gen_op_iwmmxt_movq_M0_wRn(rd0);
779 gen_op_iwmmxt_negq_M0();
780 gen_op_iwmmxt_andq_M0_wRn(rd1);
781 gen_op_iwmmxt_setpsr_nz();
782 gen_op_iwmmxt_movq_wRn_M0(wrd);
783 gen_op_iwmmxt_set_mup();
784 gen_op_iwmmxt_set_cup();
785 break;
786 case 0x200: /* WAND */
787 wrd = (insn >> 12) & 0xf;
788 rd0 = (insn >> 0) & 0xf;
789 rd1 = (insn >> 16) & 0xf;
790 gen_op_iwmmxt_movq_M0_wRn(rd0);
791 gen_op_iwmmxt_andq_M0_wRn(rd1);
792 gen_op_iwmmxt_setpsr_nz();
793 gen_op_iwmmxt_movq_wRn_M0(wrd);
794 gen_op_iwmmxt_set_mup();
795 gen_op_iwmmxt_set_cup();
796 break;
797 case 0x810: case 0xa10: /* WMADD */
798 wrd = (insn >> 12) & 0xf;
799 rd0 = (insn >> 0) & 0xf;
800 rd1 = (insn >> 16) & 0xf;
801 gen_op_iwmmxt_movq_M0_wRn(rd0);
802 if (insn & (1 << 21))
803 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
804 else
805 gen_op_iwmmxt_madduq_M0_wRn(rd1);
806 gen_op_iwmmxt_movq_wRn_M0(wrd);
807 gen_op_iwmmxt_set_mup();
808 break;
809 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
810 wrd = (insn >> 12) & 0xf;
811 rd0 = (insn >> 16) & 0xf;
812 rd1 = (insn >> 0) & 0xf;
813 gen_op_iwmmxt_movq_M0_wRn(rd0);
814 switch ((insn >> 22) & 3) {
815 case 0:
816 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
817 break;
818 case 1:
819 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
820 break;
821 case 2:
822 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
823 break;
824 case 3:
825 return 1;
826 }
827 gen_op_iwmmxt_movq_wRn_M0(wrd);
828 gen_op_iwmmxt_set_mup();
829 gen_op_iwmmxt_set_cup();
830 break;
831 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
832 wrd = (insn >> 12) & 0xf;
833 rd0 = (insn >> 16) & 0xf;
834 rd1 = (insn >> 0) & 0xf;
835 gen_op_iwmmxt_movq_M0_wRn(rd0);
836 switch ((insn >> 22) & 3) {
837 case 0:
838 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
839 break;
840 case 1:
841 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
842 break;
843 case 2:
844 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
845 break;
846 case 3:
847 return 1;
848 }
849 gen_op_iwmmxt_movq_wRn_M0(wrd);
850 gen_op_iwmmxt_set_mup();
851 gen_op_iwmmxt_set_cup();
852 break;
853 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
854 wrd = (insn >> 12) & 0xf;
855 rd0 = (insn >> 16) & 0xf;
856 rd1 = (insn >> 0) & 0xf;
857 gen_op_iwmmxt_movq_M0_wRn(rd0);
858 if (insn & (1 << 22))
859 gen_op_iwmmxt_sadw_M0_wRn(rd1);
860 else
861 gen_op_iwmmxt_sadb_M0_wRn(rd1);
862 if (!(insn & (1 << 20)))
863 gen_op_iwmmxt_addl_M0_wRn(wrd);
864 gen_op_iwmmxt_movq_wRn_M0(wrd);
865 gen_op_iwmmxt_set_mup();
866 break;
867 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
868 wrd = (insn >> 12) & 0xf;
869 rd0 = (insn >> 16) & 0xf;
870 rd1 = (insn >> 0) & 0xf;
871 gen_op_iwmmxt_movq_M0_wRn(rd0);
872 if (insn & (1 << 21))
873 gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
874 else
875 gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
876 gen_op_iwmmxt_movq_wRn_M0(wrd);
877 gen_op_iwmmxt_set_mup();
878 break;
879 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
880 wrd = (insn >> 12) & 0xf;
881 rd0 = (insn >> 16) & 0xf;
882 rd1 = (insn >> 0) & 0xf;
883 gen_op_iwmmxt_movq_M0_wRn(rd0);
884 if (insn & (1 << 21))
885 gen_op_iwmmxt_macsw_M0_wRn(rd1);
886 else
887 gen_op_iwmmxt_macuw_M0_wRn(rd1);
888 if (!(insn & (1 << 20))) {
889 if (insn & (1 << 21))
890 gen_op_iwmmxt_addsq_M0_wRn(wrd);
891 else
892 gen_op_iwmmxt_adduq_M0_wRn(wrd);
893 }
894 gen_op_iwmmxt_movq_wRn_M0(wrd);
895 gen_op_iwmmxt_set_mup();
896 break;
897 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
898 wrd = (insn >> 12) & 0xf;
899 rd0 = (insn >> 16) & 0xf;
900 rd1 = (insn >> 0) & 0xf;
901 gen_op_iwmmxt_movq_M0_wRn(rd0);
902 switch ((insn >> 22) & 3) {
903 case 0:
904 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
905 break;
906 case 1:
907 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
908 break;
909 case 2:
910 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
911 break;
912 case 3:
913 return 1;
914 }
915 gen_op_iwmmxt_movq_wRn_M0(wrd);
916 gen_op_iwmmxt_set_mup();
917 gen_op_iwmmxt_set_cup();
918 break;
919 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
920 wrd = (insn >> 12) & 0xf;
921 rd0 = (insn >> 16) & 0xf;
922 rd1 = (insn >> 0) & 0xf;
923 gen_op_iwmmxt_movq_M0_wRn(rd0);
924 if (insn & (1 << 22))
925 gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
926 else
927 gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
928 gen_op_iwmmxt_movq_wRn_M0(wrd);
929 gen_op_iwmmxt_set_mup();
930 gen_op_iwmmxt_set_cup();
931 break;
932 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
933 wrd = (insn >> 12) & 0xf;
934 rd0 = (insn >> 16) & 0xf;
935 rd1 = (insn >> 0) & 0xf;
936 gen_op_iwmmxt_movq_M0_wRn(rd0);
937 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
938 gen_op_movl_T1_im(7);
939 gen_op_andl_T0_T1();
940 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
941 gen_op_iwmmxt_movq_wRn_M0(wrd);
942 gen_op_iwmmxt_set_mup();
943 break;
944 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
945 rd = (insn >> 12) & 0xf;
946 wrd = (insn >> 16) & 0xf;
947 gen_movl_T0_reg(s, rd);
948 gen_op_iwmmxt_movq_M0_wRn(wrd);
949 switch ((insn >> 6) & 3) {
950 case 0:
951 gen_op_movl_T1_im(0xff);
952 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
953 break;
954 case 1:
955 gen_op_movl_T1_im(0xffff);
956 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
957 break;
958 case 2:
959 gen_op_movl_T1_im(0xffffffff);
960 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
961 break;
962 case 3:
963 return 1;
964 }
965 gen_op_iwmmxt_movq_wRn_M0(wrd);
966 gen_op_iwmmxt_set_mup();
967 break;
968 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
969 rd = (insn >> 12) & 0xf;
970 wrd = (insn >> 16) & 0xf;
971 if (rd == 15)
972 return 1;
973 gen_op_iwmmxt_movq_M0_wRn(wrd);
974 switch ((insn >> 22) & 3) {
975 case 0:
976 if (insn & 8)
977 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
978 else {
979 gen_op_movl_T1_im(0xff);
980 gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
981 }
982 break;
983 case 1:
984 if (insn & 8)
985 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
986 else {
987 gen_op_movl_T1_im(0xffff);
988 gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
989 }
990 break;
991 case 2:
992 gen_op_movl_T1_im(0xffffffff);
993 gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
994 break;
995 case 3:
996 return 1;
997 }
998 gen_op_movl_reg_TN[0][rd]();
999 break;
1000 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1001 if ((insn & 0x000ff008) != 0x0003f000)
1002 return 1;
1003 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1004 switch ((insn >> 22) & 3) {
1005 case 0:
1006 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1007 break;
1008 case 1:
1009 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1010 break;
1011 case 2:
1012 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1013 break;
1014 case 3:
1015 return 1;
1016 }
1017 gen_op_shll_T1_im(28);
1018 gen_op_movl_T0_T1();
1019 gen_op_movl_cpsr_T0(0xf0000000);
1020 break;
1021 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1022 rd = (insn >> 12) & 0xf;
1023 wrd = (insn >> 16) & 0xf;
1024 gen_movl_T0_reg(s, rd);
1025 switch ((insn >> 6) & 3) {
1026 case 0:
1027 gen_op_iwmmxt_bcstb_M0_T0();
1028 break;
1029 case 1:
1030 gen_op_iwmmxt_bcstw_M0_T0();
1031 break;
1032 case 2:
1033 gen_op_iwmmxt_bcstl_M0_T0();
1034 break;
1035 case 3:
1036 return 1;
1037 }
1038 gen_op_iwmmxt_movq_wRn_M0(wrd);
1039 gen_op_iwmmxt_set_mup();
1040 break;
1041 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1042 if ((insn & 0x000ff00f) != 0x0003f000)
1043 return 1;
1044 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1045 switch ((insn >> 22) & 3) {
1046 case 0:
1047 for (i = 0; i < 7; i ++) {
1048 gen_op_shll_T1_im(4);
1049 gen_op_andl_T0_T1();
1050 }
1051 break;
1052 case 1:
1053 for (i = 0; i < 3; i ++) {
1054 gen_op_shll_T1_im(8);
1055 gen_op_andl_T0_T1();
1056 }
1057 break;
1058 case 2:
1059 gen_op_shll_T1_im(16);
1060 gen_op_andl_T0_T1();
1061 break;
1062 case 3:
1063 return 1;
1064 }
1065 gen_op_movl_cpsr_T0(0xf0000000);
1066 break;
1067 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1068 wrd = (insn >> 12) & 0xf;
1069 rd0 = (insn >> 16) & 0xf;
1070 gen_op_iwmmxt_movq_M0_wRn(rd0);
1071 switch ((insn >> 22) & 3) {
1072 case 0:
1073 gen_op_iwmmxt_addcb_M0();
1074 break;
1075 case 1:
1076 gen_op_iwmmxt_addcw_M0();
1077 break;
1078 case 2:
1079 gen_op_iwmmxt_addcl_M0();
1080 break;
1081 case 3:
1082 return 1;
1083 }
1084 gen_op_iwmmxt_movq_wRn_M0(wrd);
1085 gen_op_iwmmxt_set_mup();
1086 break;
1087 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1088 if ((insn & 0x000ff00f) != 0x0003f000)
1089 return 1;
1090 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1091 switch ((insn >> 22) & 3) {
1092 case 0:
1093 for (i = 0; i < 7; i ++) {
1094 gen_op_shll_T1_im(4);
1095 gen_op_orl_T0_T1();
1096 }
1097 break;
1098 case 1:
1099 for (i = 0; i < 3; i ++) {
1100 gen_op_shll_T1_im(8);
1101 gen_op_orl_T0_T1();
1102 }
1103 break;
1104 case 2:
1105 gen_op_shll_T1_im(16);
1106 gen_op_orl_T0_T1();
1107 break;
1108 case 3:
1109 return 1;
1110 }
1111 gen_op_movl_T1_im(0xf0000000);
1112 gen_op_andl_T0_T1();
1113 gen_op_movl_cpsr_T0(0xf0000000);
1114 break;
1115 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1116 rd = (insn >> 12) & 0xf;
1117 rd0 = (insn >> 16) & 0xf;
1118 if ((insn & 0xf) != 0)
1119 return 1;
1120 gen_op_iwmmxt_movq_M0_wRn(rd0);
1121 switch ((insn >> 22) & 3) {
1122 case 0:
1123 gen_op_iwmmxt_msbb_T0_M0();
1124 break;
1125 case 1:
1126 gen_op_iwmmxt_msbw_T0_M0();
1127 break;
1128 case 2:
1129 gen_op_iwmmxt_msbl_T0_M0();
1130 break;
1131 case 3:
1132 return 1;
1133 }
1134 gen_movl_reg_T0(s, rd);
1135 break;
1136 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1137 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1138 wrd = (insn >> 12) & 0xf;
1139 rd0 = (insn >> 16) & 0xf;
1140 rd1 = (insn >> 0) & 0xf;
1141 gen_op_iwmmxt_movq_M0_wRn(rd0);
1142 switch ((insn >> 22) & 3) {
1143 case 0:
1144 if (insn & (1 << 21))
1145 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1146 else
1147 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1148 break;
1149 case 1:
1150 if (insn & (1 << 21))
1151 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1152 else
1153 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1154 break;
1155 case 2:
1156 if (insn & (1 << 21))
1157 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1158 else
1159 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1160 break;
1161 case 3:
1162 return 1;
1163 }
1164 gen_op_iwmmxt_movq_wRn_M0(wrd);
1165 gen_op_iwmmxt_set_mup();
1166 gen_op_iwmmxt_set_cup();
1167 break;
1168 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1169 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1170 wrd = (insn >> 12) & 0xf;
1171 rd0 = (insn >> 16) & 0xf;
1172 gen_op_iwmmxt_movq_M0_wRn(rd0);
1173 switch ((insn >> 22) & 3) {
1174 case 0:
1175 if (insn & (1 << 21))
1176 gen_op_iwmmxt_unpacklsb_M0();
1177 else
1178 gen_op_iwmmxt_unpacklub_M0();
1179 break;
1180 case 1:
1181 if (insn & (1 << 21))
1182 gen_op_iwmmxt_unpacklsw_M0();
1183 else
1184 gen_op_iwmmxt_unpackluw_M0();
1185 break;
1186 case 2:
1187 if (insn & (1 << 21))
1188 gen_op_iwmmxt_unpacklsl_M0();
1189 else
1190 gen_op_iwmmxt_unpacklul_M0();
1191 break;
1192 case 3:
1193 return 1;
1194 }
1195 gen_op_iwmmxt_movq_wRn_M0(wrd);
1196 gen_op_iwmmxt_set_mup();
1197 gen_op_iwmmxt_set_cup();
1198 break;
1199 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1200 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1201 wrd = (insn >> 12) & 0xf;
1202 rd0 = (insn >> 16) & 0xf;
1203 gen_op_iwmmxt_movq_M0_wRn(rd0);
1204 switch ((insn >> 22) & 3) {
1205 case 0:
1206 if (insn & (1 << 21))
1207 gen_op_iwmmxt_unpackhsb_M0();
1208 else
1209 gen_op_iwmmxt_unpackhub_M0();
1210 break;
1211 case 1:
1212 if (insn & (1 << 21))
1213 gen_op_iwmmxt_unpackhsw_M0();
1214 else
1215 gen_op_iwmmxt_unpackhuw_M0();
1216 break;
1217 case 2:
1218 if (insn & (1 << 21))
1219 gen_op_iwmmxt_unpackhsl_M0();
1220 else
1221 gen_op_iwmmxt_unpackhul_M0();
1222 break;
1223 case 3:
1224 return 1;
1225 }
1226 gen_op_iwmmxt_movq_wRn_M0(wrd);
1227 gen_op_iwmmxt_set_mup();
1228 gen_op_iwmmxt_set_cup();
1229 break;
1230 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1231 case 0x214: case 0x614: case 0xa14: case 0xe14:
1232 wrd = (insn >> 12) & 0xf;
1233 rd0 = (insn >> 16) & 0xf;
1234 gen_op_iwmmxt_movq_M0_wRn(rd0);
1235 if (gen_iwmmxt_shift(insn, 0xff))
1236 return 1;
1237 switch ((insn >> 22) & 3) {
1238 case 0:
1239 return 1;
1240 case 1:
1241 gen_op_iwmmxt_srlw_M0_T0();
1242 break;
1243 case 2:
1244 gen_op_iwmmxt_srll_M0_T0();
1245 break;
1246 case 3:
1247 gen_op_iwmmxt_srlq_M0_T0();
1248 break;
1249 }
1250 gen_op_iwmmxt_movq_wRn_M0(wrd);
1251 gen_op_iwmmxt_set_mup();
1252 gen_op_iwmmxt_set_cup();
1253 break;
1254 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1255 case 0x014: case 0x414: case 0x814: case 0xc14:
1256 wrd = (insn >> 12) & 0xf;
1257 rd0 = (insn >> 16) & 0xf;
1258 gen_op_iwmmxt_movq_M0_wRn(rd0);
1259 if (gen_iwmmxt_shift(insn, 0xff))
1260 return 1;
1261 switch ((insn >> 22) & 3) {
1262 case 0:
1263 return 1;
1264 case 1:
1265 gen_op_iwmmxt_sraw_M0_T0();
1266 break;
1267 case 2:
1268 gen_op_iwmmxt_sral_M0_T0();
1269 break;
1270 case 3:
1271 gen_op_iwmmxt_sraq_M0_T0();
1272 break;
1273 }
1274 gen_op_iwmmxt_movq_wRn_M0(wrd);
1275 gen_op_iwmmxt_set_mup();
1276 gen_op_iwmmxt_set_cup();
1277 break;
1278 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1279 case 0x114: case 0x514: case 0x914: case 0xd14:
1280 wrd = (insn >> 12) & 0xf;
1281 rd0 = (insn >> 16) & 0xf;
1282 gen_op_iwmmxt_movq_M0_wRn(rd0);
1283 if (gen_iwmmxt_shift(insn, 0xff))
1284 return 1;
1285 switch ((insn >> 22) & 3) {
1286 case 0:
1287 return 1;
1288 case 1:
1289 gen_op_iwmmxt_sllw_M0_T0();
1290 break;
1291 case 2:
1292 gen_op_iwmmxt_slll_M0_T0();
1293 break;
1294 case 3:
1295 gen_op_iwmmxt_sllq_M0_T0();
1296 break;
1297 }
1298 gen_op_iwmmxt_movq_wRn_M0(wrd);
1299 gen_op_iwmmxt_set_mup();
1300 gen_op_iwmmxt_set_cup();
1301 break;
1302 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1303 case 0x314: case 0x714: case 0xb14: case 0xf14:
1304 wrd = (insn >> 12) & 0xf;
1305 rd0 = (insn >> 16) & 0xf;
1306 gen_op_iwmmxt_movq_M0_wRn(rd0);
1307 switch ((insn >> 22) & 3) {
1308 case 0:
1309 return 1;
1310 case 1:
1311 if (gen_iwmmxt_shift(insn, 0xf))
1312 return 1;
1313 gen_op_iwmmxt_rorw_M0_T0();
1314 break;
1315 case 2:
1316 if (gen_iwmmxt_shift(insn, 0x1f))
1317 return 1;
1318 gen_op_iwmmxt_rorl_M0_T0();
1319 break;
1320 case 3:
1321 if (gen_iwmmxt_shift(insn, 0x3f))
1322 return 1;
1323 gen_op_iwmmxt_rorq_M0_T0();
1324 break;
1325 }
1326 gen_op_iwmmxt_movq_wRn_M0(wrd);
1327 gen_op_iwmmxt_set_mup();
1328 gen_op_iwmmxt_set_cup();
1329 break;
1330 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1331 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1332 wrd = (insn >> 12) & 0xf;
1333 rd0 = (insn >> 16) & 0xf;
1334 rd1 = (insn >> 0) & 0xf;
1335 gen_op_iwmmxt_movq_M0_wRn(rd0);
1336 switch ((insn >> 22) & 3) {
1337 case 0:
1338 if (insn & (1 << 21))
1339 gen_op_iwmmxt_minsb_M0_wRn(rd1);
1340 else
1341 gen_op_iwmmxt_minub_M0_wRn(rd1);
1342 break;
1343 case 1:
1344 if (insn & (1 << 21))
1345 gen_op_iwmmxt_minsw_M0_wRn(rd1);
1346 else
1347 gen_op_iwmmxt_minuw_M0_wRn(rd1);
1348 break;
1349 case 2:
1350 if (insn & (1 << 21))
1351 gen_op_iwmmxt_minsl_M0_wRn(rd1);
1352 else
1353 gen_op_iwmmxt_minul_M0_wRn(rd1);
1354 break;
1355 case 3:
1356 return 1;
1357 }
1358 gen_op_iwmmxt_movq_wRn_M0(wrd);
1359 gen_op_iwmmxt_set_mup();
1360 break;
1361 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1362 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1363 wrd = (insn >> 12) & 0xf;
1364 rd0 = (insn >> 16) & 0xf;
1365 rd1 = (insn >> 0) & 0xf;
1366 gen_op_iwmmxt_movq_M0_wRn(rd0);
1367 switch ((insn >> 22) & 3) {
1368 case 0:
1369 if (insn & (1 << 21))
1370 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1371 else
1372 gen_op_iwmmxt_maxub_M0_wRn(rd1);
1373 break;
1374 case 1:
1375 if (insn & (1 << 21))
1376 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1377 else
1378 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1379 break;
1380 case 2:
1381 if (insn & (1 << 21))
1382 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1383 else
1384 gen_op_iwmmxt_maxul_M0_wRn(rd1);
1385 break;
1386 case 3:
1387 return 1;
1388 }
1389 gen_op_iwmmxt_movq_wRn_M0(wrd);
1390 gen_op_iwmmxt_set_mup();
1391 break;
1392 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1393 case 0x402: case 0x502: case 0x602: case 0x702:
1394 wrd = (insn >> 12) & 0xf;
1395 rd0 = (insn >> 16) & 0xf;
1396 rd1 = (insn >> 0) & 0xf;
1397 gen_op_iwmmxt_movq_M0_wRn(rd0);
1398 gen_op_movl_T0_im((insn >> 20) & 3);
1399 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1400 gen_op_iwmmxt_movq_wRn_M0(wrd);
1401 gen_op_iwmmxt_set_mup();
1402 break;
1403 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1404 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1405 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1406 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1407 wrd = (insn >> 12) & 0xf;
1408 rd0 = (insn >> 16) & 0xf;
1409 rd1 = (insn >> 0) & 0xf;
1410 gen_op_iwmmxt_movq_M0_wRn(rd0);
1411 switch ((insn >> 20) & 0xf) {
1412 case 0x0:
1413 gen_op_iwmmxt_subnb_M0_wRn(rd1);
1414 break;
1415 case 0x1:
1416 gen_op_iwmmxt_subub_M0_wRn(rd1);
1417 break;
1418 case 0x3:
1419 gen_op_iwmmxt_subsb_M0_wRn(rd1);
1420 break;
1421 case 0x4:
1422 gen_op_iwmmxt_subnw_M0_wRn(rd1);
1423 break;
1424 case 0x5:
1425 gen_op_iwmmxt_subuw_M0_wRn(rd1);
1426 break;
1427 case 0x7:
1428 gen_op_iwmmxt_subsw_M0_wRn(rd1);
1429 break;
1430 case 0x8:
1431 gen_op_iwmmxt_subnl_M0_wRn(rd1);
1432 break;
1433 case 0x9:
1434 gen_op_iwmmxt_subul_M0_wRn(rd1);
1435 break;
1436 case 0xb:
1437 gen_op_iwmmxt_subsl_M0_wRn(rd1);
1438 break;
1439 default:
1440 return 1;
1441 }
1442 gen_op_iwmmxt_movq_wRn_M0(wrd);
1443 gen_op_iwmmxt_set_mup();
1444 gen_op_iwmmxt_set_cup();
1445 break;
1446 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1447 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1448 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1449 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1450 wrd = (insn >> 12) & 0xf;
1451 rd0 = (insn >> 16) & 0xf;
1452 gen_op_iwmmxt_movq_M0_wRn(rd0);
1453 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
1454 gen_op_iwmmxt_shufh_M0_T0();
1455 gen_op_iwmmxt_movq_wRn_M0(wrd);
1456 gen_op_iwmmxt_set_mup();
1457 gen_op_iwmmxt_set_cup();
1458 break;
1459 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1460 case 0x418: case 0x518: case 0x618: case 0x718:
1461 case 0x818: case 0x918: case 0xa18: case 0xb18:
1462 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1463 wrd = (insn >> 12) & 0xf;
1464 rd0 = (insn >> 16) & 0xf;
1465 rd1 = (insn >> 0) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0);
1467 switch ((insn >> 20) & 0xf) {
1468 case 0x0:
1469 gen_op_iwmmxt_addnb_M0_wRn(rd1);
1470 break;
1471 case 0x1:
1472 gen_op_iwmmxt_addub_M0_wRn(rd1);
1473 break;
1474 case 0x3:
1475 gen_op_iwmmxt_addsb_M0_wRn(rd1);
1476 break;
1477 case 0x4:
1478 gen_op_iwmmxt_addnw_M0_wRn(rd1);
1479 break;
1480 case 0x5:
1481 gen_op_iwmmxt_adduw_M0_wRn(rd1);
1482 break;
1483 case 0x7:
1484 gen_op_iwmmxt_addsw_M0_wRn(rd1);
1485 break;
1486 case 0x8:
1487 gen_op_iwmmxt_addnl_M0_wRn(rd1);
1488 break;
1489 case 0x9:
1490 gen_op_iwmmxt_addul_M0_wRn(rd1);
1491 break;
1492 case 0xb:
1493 gen_op_iwmmxt_addsl_M0_wRn(rd1);
1494 break;
1495 default:
1496 return 1;
1497 }
1498 gen_op_iwmmxt_movq_wRn_M0(wrd);
1499 gen_op_iwmmxt_set_mup();
1500 gen_op_iwmmxt_set_cup();
1501 break;
1502 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1503 case 0x408: case 0x508: case 0x608: case 0x708:
1504 case 0x808: case 0x908: case 0xa08: case 0xb08:
1505 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1506 wrd = (insn >> 12) & 0xf;
1507 rd0 = (insn >> 16) & 0xf;
1508 rd1 = (insn >> 0) & 0xf;
1509 gen_op_iwmmxt_movq_M0_wRn(rd0);
1510 if (!(insn & (1 << 20)))
1511 return 1;
1512 switch ((insn >> 22) & 3) {
1513 case 0:
1514 return 1;
1515 case 1:
1516 if (insn & (1 << 21))
1517 gen_op_iwmmxt_packsw_M0_wRn(rd1);
1518 else
1519 gen_op_iwmmxt_packuw_M0_wRn(rd1);
1520 break;
1521 case 2:
1522 if (insn & (1 << 21))
1523 gen_op_iwmmxt_packsl_M0_wRn(rd1);
1524 else
1525 gen_op_iwmmxt_packul_M0_wRn(rd1);
1526 break;
1527 case 3:
1528 if (insn & (1 << 21))
1529 gen_op_iwmmxt_packsq_M0_wRn(rd1);
1530 else
1531 gen_op_iwmmxt_packuq_M0_wRn(rd1);
1532 break;
1533 }
1534 gen_op_iwmmxt_movq_wRn_M0(wrd);
1535 gen_op_iwmmxt_set_mup();
1536 gen_op_iwmmxt_set_cup();
1537 break;
1538 case 0x201: case 0x203: case 0x205: case 0x207:
1539 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1540 case 0x211: case 0x213: case 0x215: case 0x217:
1541 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1542 wrd = (insn >> 5) & 0xf;
1543 rd0 = (insn >> 12) & 0xf;
1544 rd1 = (insn >> 0) & 0xf;
1545 if (rd0 == 0xf || rd1 == 0xf)
1546 return 1;
1547 gen_op_iwmmxt_movq_M0_wRn(wrd);
1548 switch ((insn >> 16) & 0xf) {
1549 case 0x0: /* TMIA */
1550 gen_op_movl_TN_reg[0][rd0]();
1551 gen_op_movl_TN_reg[1][rd1]();
1552 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1553 break;
1554 case 0x8: /* TMIAPH */
1555 gen_op_movl_TN_reg[0][rd0]();
1556 gen_op_movl_TN_reg[1][rd1]();
1557 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1558 break;
1559 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
1560 gen_op_movl_TN_reg[1][rd0]();
1561 if (insn & (1 << 16))
1562 gen_op_shrl_T1_im(16);
1563 gen_op_movl_T0_T1();
1564 gen_op_movl_TN_reg[1][rd1]();
1565 if (insn & (1 << 17))
1566 gen_op_shrl_T1_im(16);
1567 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1568 break;
1569 default:
1570 return 1;
1571 }
1572 gen_op_iwmmxt_movq_wRn_M0(wrd);
1573 gen_op_iwmmxt_set_mup();
1574 break;
1575 default:
1576 return 1;
1577 }
1578
1579 return 0;
1580}
1581
1582/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
1583 (ie. an undefined instruction). */
1584static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1585{
1586 int acc, rd0, rd1, rdhi, rdlo;
1587
1588 if ((insn & 0x0ff00f10) == 0x0e200010) {
1589 /* Multiply with Internal Accumulate Format */
1590 rd0 = (insn >> 12) & 0xf;
1591 rd1 = insn & 0xf;
1592 acc = (insn >> 5) & 7;
1593
1594 if (acc != 0)
1595 return 1;
1596
1597 switch ((insn >> 16) & 0xf) {
1598 case 0x0: /* MIA */
1599 gen_op_movl_TN_reg[0][rd0]();
1600 gen_op_movl_TN_reg[1][rd1]();
1601 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1602 break;
1603 case 0x8: /* MIAPH */
1604 gen_op_movl_TN_reg[0][rd0]();
1605 gen_op_movl_TN_reg[1][rd1]();
1606 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1607 break;
1608 case 0xc: /* MIABB */
1609 case 0xd: /* MIABT */
1610 case 0xe: /* MIATB */
1611 case 0xf: /* MIATT */
1612 gen_op_movl_TN_reg[1][rd0]();
1613 if (insn & (1 << 16))
1614 gen_op_shrl_T1_im(16);
1615 gen_op_movl_T0_T1();
1616 gen_op_movl_TN_reg[1][rd1]();
1617 if (insn & (1 << 17))
1618 gen_op_shrl_T1_im(16);
1619 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1620 break;
1621 default:
1622 return 1;
1623 }
1624
1625 gen_op_iwmmxt_movq_wRn_M0(acc);
1626 return 0;
1627 }
1628
1629 if ((insn & 0x0fe00ff8) == 0x0c400000) {
1630 /* Internal Accumulator Access Format */
1631 rdhi = (insn >> 16) & 0xf;
1632 rdlo = (insn >> 12) & 0xf;
1633 acc = insn & 7;
1634
1635 if (acc != 0)
1636 return 1;
1637
1638 if (insn & ARM_CP_RW_BIT) { /* MRA */
1639 gen_op_iwmmxt_movl_T0_T1_wRn(acc);
1640 gen_op_movl_reg_TN[0][rdlo]();
1641 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1642 gen_op_andl_T0_T1();
1643 gen_op_movl_reg_TN[0][rdhi]();
1644 } else { /* MAR */
1645 gen_op_movl_TN_reg[0][rdlo]();
1646 gen_op_movl_TN_reg[1][rdhi]();
1647 gen_op_iwmmxt_movl_wRn_T0_T1(acc);
1648 }
1649 return 0;
1650 }
1651
1652 return 1;
1653}
1654
c1713132
AZ
1655/* Disassemble system coprocessor instruction. Return nonzero if
1656 instruction is not defined. */
1657static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1658{
1659 uint32_t rd = (insn >> 12) & 0xf;
1660 uint32_t cp = (insn >> 8) & 0xf;
1661 if (IS_USER(s)) {
1662 return 1;
1663 }
1664
18c9b560 1665 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
1666 if (!env->cp[cp].cp_read)
1667 return 1;
1668 gen_op_movl_T0_im((uint32_t) s->pc);
1669 gen_op_movl_reg_TN[0][15]();
1670 gen_op_movl_T0_cp(insn);
1671 gen_movl_reg_T0(s, rd);
1672 } else {
1673 if (!env->cp[cp].cp_write)
1674 return 1;
1675 gen_op_movl_T0_im((uint32_t) s->pc);
1676 gen_op_movl_reg_TN[0][15]();
1677 gen_movl_T0_reg(s, rd);
1678 gen_op_movl_cp_T0(insn);
1679 }
1680 return 0;
1681}
1682
9ee6e8bb
PB
1683static int cp15_user_ok(uint32_t insn)
1684{
1685 int cpn = (insn >> 16) & 0xf;
1686 int cpm = insn & 0xf;
1687 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
1688
1689 if (cpn == 13 && cpm == 0) {
1690 /* TLS register. */
1691 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
1692 return 1;
1693 }
1694 if (cpn == 7) {
1695 /* ISB, DSB, DMB. */
1696 if ((cpm == 5 && op == 4)
1697 || (cpm == 10 && (op == 4 || op == 5)))
1698 return 1;
1699 }
1700 return 0;
1701}
1702
b5ff1b31
FB
1703/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
1704 instruction is not defined. */
a90b7318 1705static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
1706{
1707 uint32_t rd;
1708
9ee6e8bb
PB
1709 /* M profile cores use memory mapped registers instead of cp15. */
1710 if (arm_feature(env, ARM_FEATURE_M))
1711 return 1;
1712
1713 if ((insn & (1 << 25)) == 0) {
1714 if (insn & (1 << 20)) {
1715 /* mrrc */
1716 return 1;
1717 }
1718 /* mcrr. Used for block cache operations, so implement as no-op. */
1719 return 0;
1720 }
1721 if ((insn & (1 << 4)) == 0) {
1722 /* cdp */
1723 return 1;
1724 }
1725 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
1726 return 1;
1727 }
9332f9da
FB
1728 if ((insn & 0x0fff0fff) == 0x0e070f90
1729 || (insn & 0x0fff0fff) == 0x0e070f58) {
1730 /* Wait for interrupt. */
1731 gen_op_movl_T0_im((long)s->pc);
1732 gen_op_movl_reg_TN[0][15]();
9ee6e8bb 1733 s->is_jmp = DISAS_WFI;
9332f9da
FB
1734 return 0;
1735 }
b5ff1b31 1736 rd = (insn >> 12) & 0xf;
18c9b560 1737 if (insn & ARM_CP_RW_BIT) {
b5ff1b31
FB
1738 gen_op_movl_T0_cp15(insn);
1739 /* If the destination register is r15 then sets condition codes. */
1740 if (rd != 15)
1741 gen_movl_reg_T0(s, rd);
1742 } else {
1743 gen_movl_T0_reg(s, rd);
1744 gen_op_movl_cp15_T0(insn);
a90b7318
AZ
1745 /* Normally we would always end the TB here, but Linux
1746 * arch/arm/mach-pxa/sleep.S expects two instructions following
1747 * an MMU enable to execute from cache. Imitate this behaviour. */
1748 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
1749 (insn & 0x0fff0fff) != 0x0e010f10)
1750 gen_lookup_tb(s);
b5ff1b31 1751 }
b5ff1b31
FB
1752 return 0;
1753}
1754
/* Shift X right by N bits when N > 0, otherwise left by -N bits.  Lets
   the register-field extractors below handle fields on either side of
   the low "extra" bit.  */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* Extract a single-precision register number: 4-bit field at BIGBIT
   forms the high bits, the bit at SMALLBIT is the LSB.  */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Extract a double-precision register number into REG.  VFP3 supports
   32 D registers, using the bit at SMALLBIT as the high register bit;
   on pre-VFP3 cores that bit must be zero, otherwise the instruction
   is undefined (hence the `return 1').  Requires `env' in scope.  */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if ((insn) & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

/* Destination, first-operand and second-operand register fields.  */
#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16,  7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn,  0,  5)
1774
1775static inline int
1776vfp_enabled(CPUState * env)
1777{
1778 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
1779}
1780
b7bcbe95
FB
1781/* Disassemble a VFP instruction. Returns nonzero if an error occured
1782 (ie. an undefined instruction). */
1783static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
1784{
1785 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
1786 int dp, veclen;
1787
40f137e1
PB
1788 if (!arm_feature(env, ARM_FEATURE_VFP))
1789 return 1;
1790
9ee6e8bb
PB
1791 if (!vfp_enabled(env)) {
1792 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
1793 if ((insn & 0x0fe00fff) != 0x0ee00a10)
1794 return 1;
1795 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
1796 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
1797 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
1798 return 1;
1799 }
b7bcbe95
FB
1800 dp = ((insn & 0xf00) == 0xb00);
1801 switch ((insn >> 24) & 0xf) {
1802 case 0xe:
1803 if (insn & (1 << 4)) {
1804 /* single register transfer */
b7bcbe95
FB
1805 rd = (insn >> 12) & 0xf;
1806 if (dp) {
9ee6e8bb
PB
1807 int size;
1808 int pass;
1809
1810 VFP_DREG_N(rn, insn);
1811 if (insn & 0xf)
b7bcbe95 1812 return 1;
9ee6e8bb
PB
1813 if (insn & 0x00c00060
1814 && !arm_feature(env, ARM_FEATURE_NEON))
1815 return 1;
1816
1817 pass = (insn >> 21) & 1;
1818 if (insn & (1 << 22)) {
1819 size = 0;
1820 offset = ((insn >> 5) & 3) * 8;
1821 } else if (insn & (1 << 5)) {
1822 size = 1;
1823 offset = (insn & (1 << 6)) ? 16 : 0;
1824 } else {
1825 size = 2;
1826 offset = 0;
1827 }
18c9b560 1828 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 1829 /* vfp->arm */
9ee6e8bb
PB
1830 switch (size) {
1831 case 0:
1832 NEON_GET_REG(T1, rn, pass);
1833 if (offset)
1834 gen_op_shrl_T1_im(offset);
1835 if (insn & (1 << 23))
1836 gen_op_uxtb_T1();
1837 else
1838 gen_op_sxtb_T1();
1839 break;
1840 case 1:
1841 NEON_GET_REG(T1, rn, pass);
1842 if (insn & (1 << 23)) {
1843 if (offset) {
1844 gen_op_shrl_T1_im(16);
1845 } else {
1846 gen_op_uxth_T1();
1847 }
1848 } else {
1849 if (offset) {
1850 gen_op_sarl_T1_im(16);
1851 } else {
1852 gen_op_sxth_T1();
1853 }
1854 }
1855 break;
1856 case 2:
1857 NEON_GET_REG(T1, rn, pass);
1858 break;
1859 }
1860 gen_movl_reg_T1(s, rd);
b7bcbe95
FB
1861 } else {
1862 /* arm->vfp */
9ee6e8bb
PB
1863 gen_movl_T0_reg(s, rd);
1864 if (insn & (1 << 23)) {
1865 /* VDUP */
1866 if (size == 0) {
1867 gen_op_neon_dup_u8(0);
1868 } else if (size == 1) {
1869 gen_op_neon_dup_low16();
1870 }
1871 NEON_SET_REG(T0, rn, 0);
1872 NEON_SET_REG(T0, rn, 1);
1873 } else {
1874 /* VMOV */
1875 switch (size) {
1876 case 0:
1877 NEON_GET_REG(T2, rn, pass);
1878 gen_op_movl_T1_im(0xff);
1879 gen_op_andl_T0_T1();
1880 gen_op_neon_insert_elt(offset, ~(0xff << offset));
1881 NEON_SET_REG(T2, rn, pass);
1882 break;
1883 case 1:
1884 NEON_GET_REG(T2, rn, pass);
1885 gen_op_movl_T1_im(0xffff);
1886 gen_op_andl_T0_T1();
1887 bank_mask = offset ? 0xffff : 0xffff0000;
1888 gen_op_neon_insert_elt(offset, bank_mask);
1889 NEON_SET_REG(T2, rn, pass);
1890 break;
1891 case 2:
1892 NEON_SET_REG(T0, rn, pass);
1893 break;
1894 }
1895 }
b7bcbe95 1896 }
9ee6e8bb
PB
1897 } else { /* !dp */
1898 if ((insn & 0x6f) != 0x00)
1899 return 1;
1900 rn = VFP_SREG_N(insn);
18c9b560 1901 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
1902 /* vfp->arm */
1903 if (insn & (1 << 21)) {
1904 /* system register */
40f137e1 1905 rn >>= 1;
9ee6e8bb 1906
b7bcbe95 1907 switch (rn) {
40f137e1 1908 case ARM_VFP_FPSID:
9ee6e8bb
PB
1909 /* VFP2 allows access for FSID from userspace.
1910 VFP3 restricts all id registers to privileged
1911 accesses. */
1912 if (IS_USER(s)
1913 && arm_feature(env, ARM_FEATURE_VFP3))
1914 return 1;
1915 gen_op_vfp_movl_T0_xreg(rn);
1916 break;
40f137e1 1917 case ARM_VFP_FPEXC:
9ee6e8bb
PB
1918 if (IS_USER(s))
1919 return 1;
1920 gen_op_vfp_movl_T0_xreg(rn);
1921 break;
40f137e1
PB
1922 case ARM_VFP_FPINST:
1923 case ARM_VFP_FPINST2:
9ee6e8bb
PB
1924 /* Not present in VFP3. */
1925 if (IS_USER(s)
1926 || arm_feature(env, ARM_FEATURE_VFP3))
1927 return 1;
40f137e1 1928 gen_op_vfp_movl_T0_xreg(rn);
b7bcbe95 1929 break;
40f137e1 1930 case ARM_VFP_FPSCR:
b7bcbe95
FB
1931 if (rd == 15)
1932 gen_op_vfp_movl_T0_fpscr_flags();
1933 else
1934 gen_op_vfp_movl_T0_fpscr();
1935 break;
9ee6e8bb
PB
1936 case ARM_VFP_MVFR0:
1937 case ARM_VFP_MVFR1:
1938 if (IS_USER(s)
1939 || !arm_feature(env, ARM_FEATURE_VFP3))
1940 return 1;
1941 gen_op_vfp_movl_T0_xreg(rn);
1942 break;
b7bcbe95
FB
1943 default:
1944 return 1;
1945 }
1946 } else {
1947 gen_mov_F0_vreg(0, rn);
1948 gen_op_vfp_mrs();
1949 }
1950 if (rd == 15) {
b5ff1b31
FB
1951 /* Set the 4 flag bits in the CPSR. */
1952 gen_op_movl_cpsr_T0(0xf0000000);
b7bcbe95
FB
1953 } else
1954 gen_movl_reg_T0(s, rd);
1955 } else {
1956 /* arm->vfp */
1957 gen_movl_T0_reg(s, rd);
1958 if (insn & (1 << 21)) {
40f137e1 1959 rn >>= 1;
b7bcbe95
FB
1960 /* system register */
1961 switch (rn) {
40f137e1 1962 case ARM_VFP_FPSID:
9ee6e8bb
PB
1963 case ARM_VFP_MVFR0:
1964 case ARM_VFP_MVFR1:
b7bcbe95
FB
1965 /* Writes are ignored. */
1966 break;
40f137e1 1967 case ARM_VFP_FPSCR:
b7bcbe95 1968 gen_op_vfp_movl_fpscr_T0();
b5ff1b31 1969 gen_lookup_tb(s);
b7bcbe95 1970 break;
40f137e1 1971 case ARM_VFP_FPEXC:
9ee6e8bb
PB
1972 if (IS_USER(s))
1973 return 1;
40f137e1
PB
1974 gen_op_vfp_movl_xreg_T0(rn);
1975 gen_lookup_tb(s);
1976 break;
1977 case ARM_VFP_FPINST:
1978 case ARM_VFP_FPINST2:
1979 gen_op_vfp_movl_xreg_T0(rn);
1980 break;
b7bcbe95
FB
1981 default:
1982 return 1;
1983 }
1984 } else {
1985 gen_op_vfp_msr();
1986 gen_mov_vreg_F0(0, rn);
1987 }
1988 }
1989 }
1990 } else {
1991 /* data processing */
1992 /* The opcode is in bits 23, 21, 20 and 6. */
1993 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
1994 if (dp) {
1995 if (op == 15) {
1996 /* rn is opcode */
1997 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
1998 } else {
1999 /* rn is register number */
9ee6e8bb 2000 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2001 }
2002
2003 if (op == 15 && (rn == 15 || rn > 17)) {
2004 /* Integer or single precision destination. */
9ee6e8bb 2005 rd = VFP_SREG_D(insn);
b7bcbe95 2006 } else {
9ee6e8bb 2007 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2008 }
2009
2010 if (op == 15 && (rn == 16 || rn == 17)) {
2011 /* Integer source. */
2012 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2013 } else {
9ee6e8bb 2014 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2015 }
2016 } else {
9ee6e8bb 2017 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2018 if (op == 15 && rn == 15) {
2019 /* Double precision destination. */
9ee6e8bb
PB
2020 VFP_DREG_D(rd, insn);
2021 } else {
2022 rd = VFP_SREG_D(insn);
2023 }
2024 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2025 }
2026
2027 veclen = env->vfp.vec_len;
2028 if (op == 15 && rn > 3)
2029 veclen = 0;
2030
2031 /* Shut up compiler warnings. */
2032 delta_m = 0;
2033 delta_d = 0;
2034 bank_mask = 0;
3b46e624 2035
b7bcbe95
FB
2036 if (veclen > 0) {
2037 if (dp)
2038 bank_mask = 0xc;
2039 else
2040 bank_mask = 0x18;
2041
2042 /* Figure out what type of vector operation this is. */
2043 if ((rd & bank_mask) == 0) {
2044 /* scalar */
2045 veclen = 0;
2046 } else {
2047 if (dp)
2048 delta_d = (env->vfp.vec_stride >> 1) + 1;
2049 else
2050 delta_d = env->vfp.vec_stride + 1;
2051
2052 if ((rm & bank_mask) == 0) {
2053 /* mixed scalar/vector */
2054 delta_m = 0;
2055 } else {
2056 /* vector */
2057 delta_m = delta_d;
2058 }
2059 }
2060 }
2061
2062 /* Load the initial operands. */
2063 if (op == 15) {
2064 switch (rn) {
2065 case 16:
2066 case 17:
2067 /* Integer source */
2068 gen_mov_F0_vreg(0, rm);
2069 break;
2070 case 8:
2071 case 9:
2072 /* Compare */
2073 gen_mov_F0_vreg(dp, rd);
2074 gen_mov_F1_vreg(dp, rm);
2075 break;
2076 case 10:
2077 case 11:
2078 /* Compare with zero */
2079 gen_mov_F0_vreg(dp, rd);
2080 gen_vfp_F1_ld0(dp);
2081 break;
9ee6e8bb
PB
2082 case 20:
2083 case 21:
2084 case 22:
2085 case 23:
2086 /* Source and destination the same. */
2087 gen_mov_F0_vreg(dp, rd);
2088 break;
b7bcbe95
FB
2089 default:
2090 /* One source operand. */
2091 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2092 break;
b7bcbe95
FB
2093 }
2094 } else {
2095 /* Two source operands. */
2096 gen_mov_F0_vreg(dp, rn);
2097 gen_mov_F1_vreg(dp, rm);
2098 }
2099
2100 for (;;) {
2101 /* Perform the calculation. */
2102 switch (op) {
2103 case 0: /* mac: fd + (fn * fm) */
2104 gen_vfp_mul(dp);
2105 gen_mov_F1_vreg(dp, rd);
2106 gen_vfp_add(dp);
2107 break;
2108 case 1: /* nmac: fd - (fn * fm) */
2109 gen_vfp_mul(dp);
2110 gen_vfp_neg(dp);
2111 gen_mov_F1_vreg(dp, rd);
2112 gen_vfp_add(dp);
2113 break;
2114 case 2: /* msc: -fd + (fn * fm) */
2115 gen_vfp_mul(dp);
2116 gen_mov_F1_vreg(dp, rd);
2117 gen_vfp_sub(dp);
2118 break;
2119 case 3: /* nmsc: -fd - (fn * fm) */
2120 gen_vfp_mul(dp);
2121 gen_mov_F1_vreg(dp, rd);
2122 gen_vfp_add(dp);
2123 gen_vfp_neg(dp);
2124 break;
2125 case 4: /* mul: fn * fm */
2126 gen_vfp_mul(dp);
2127 break;
2128 case 5: /* nmul: -(fn * fm) */
2129 gen_vfp_mul(dp);
2130 gen_vfp_neg(dp);
2131 break;
2132 case 6: /* add: fn + fm */
2133 gen_vfp_add(dp);
2134 break;
2135 case 7: /* sub: fn - fm */
2136 gen_vfp_sub(dp);
2137 break;
2138 case 8: /* div: fn / fm */
2139 gen_vfp_div(dp);
2140 break;
9ee6e8bb
PB
2141 case 14: /* fconst */
2142 if (!arm_feature(env, ARM_FEATURE_VFP3))
2143 return 1;
2144
2145 n = (insn << 12) & 0x80000000;
2146 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2147 if (dp) {
2148 if (i & 0x40)
2149 i |= 0x3f80;
2150 else
2151 i |= 0x4000;
2152 n |= i << 16;
2153 } else {
2154 if (i & 0x40)
2155 i |= 0x780;
2156 else
2157 i |= 0x800;
2158 n |= i << 19;
2159 }
2160 gen_vfp_fconst(dp, n);
2161 break;
b7bcbe95
FB
2162 case 15: /* extension space */
2163 switch (rn) {
2164 case 0: /* cpy */
2165 /* no-op */
2166 break;
2167 case 1: /* abs */
2168 gen_vfp_abs(dp);
2169 break;
2170 case 2: /* neg */
2171 gen_vfp_neg(dp);
2172 break;
2173 case 3: /* sqrt */
2174 gen_vfp_sqrt(dp);
2175 break;
2176 case 8: /* cmp */
2177 gen_vfp_cmp(dp);
2178 break;
2179 case 9: /* cmpe */
2180 gen_vfp_cmpe(dp);
2181 break;
2182 case 10: /* cmpz */
2183 gen_vfp_cmp(dp);
2184 break;
2185 case 11: /* cmpez */
2186 gen_vfp_F1_ld0(dp);
2187 gen_vfp_cmpe(dp);
2188 break;
2189 case 15: /* single<->double conversion */
2190 if (dp)
2191 gen_op_vfp_fcvtsd();
2192 else
2193 gen_op_vfp_fcvtds();
2194 break;
2195 case 16: /* fuito */
2196 gen_vfp_uito(dp);
2197 break;
2198 case 17: /* fsito */
2199 gen_vfp_sito(dp);
2200 break;
9ee6e8bb
PB
2201 case 20: /* fshto */
2202 if (!arm_feature(env, ARM_FEATURE_VFP3))
2203 return 1;
2204 gen_vfp_shto(dp, rm);
2205 break;
2206 case 21: /* fslto */
2207 if (!arm_feature(env, ARM_FEATURE_VFP3))
2208 return 1;
2209 gen_vfp_slto(dp, rm);
2210 break;
2211 case 22: /* fuhto */
2212 if (!arm_feature(env, ARM_FEATURE_VFP3))
2213 return 1;
2214 gen_vfp_uhto(dp, rm);
2215 break;
2216 case 23: /* fulto */
2217 if (!arm_feature(env, ARM_FEATURE_VFP3))
2218 return 1;
2219 gen_vfp_ulto(dp, rm);
2220 break;
b7bcbe95
FB
2221 case 24: /* ftoui */
2222 gen_vfp_toui(dp);
2223 break;
2224 case 25: /* ftouiz */
2225 gen_vfp_touiz(dp);
2226 break;
2227 case 26: /* ftosi */
2228 gen_vfp_tosi(dp);
2229 break;
2230 case 27: /* ftosiz */
2231 gen_vfp_tosiz(dp);
2232 break;
9ee6e8bb
PB
2233 case 28: /* ftosh */
2234 if (!arm_feature(env, ARM_FEATURE_VFP3))
2235 return 1;
2236 gen_vfp_tosh(dp, rm);
2237 break;
2238 case 29: /* ftosl */
2239 if (!arm_feature(env, ARM_FEATURE_VFP3))
2240 return 1;
2241 gen_vfp_tosl(dp, rm);
2242 break;
2243 case 30: /* ftouh */
2244 if (!arm_feature(env, ARM_FEATURE_VFP3))
2245 return 1;
2246 gen_vfp_touh(dp, rm);
2247 break;
2248 case 31: /* ftoul */
2249 if (!arm_feature(env, ARM_FEATURE_VFP3))
2250 return 1;
2251 gen_vfp_toul(dp, rm);
2252 break;
b7bcbe95
FB
2253 default: /* undefined */
2254 printf ("rn:%d\n", rn);
2255 return 1;
2256 }
2257 break;
2258 default: /* undefined */
2259 printf ("op:%d\n", op);
2260 return 1;
2261 }
2262
2263 /* Write back the result. */
2264 if (op == 15 && (rn >= 8 && rn <= 11))
2265 ; /* Comparison, do nothing. */
2266 else if (op == 15 && rn > 17)
2267 /* Integer result. */
2268 gen_mov_vreg_F0(0, rd);
2269 else if (op == 15 && rn == 15)
2270 /* conversion */
2271 gen_mov_vreg_F0(!dp, rd);
2272 else
2273 gen_mov_vreg_F0(dp, rd);
2274
2275 /* break out of the loop if we have finished */
2276 if (veclen == 0)
2277 break;
2278
2279 if (op == 15 && delta_m == 0) {
2280 /* single source one-many */
2281 while (veclen--) {
2282 rd = ((rd + delta_d) & (bank_mask - 1))
2283 | (rd & bank_mask);
2284 gen_mov_vreg_F0(dp, rd);
2285 }
2286 break;
2287 }
2288 /* Setup the next operands. */
2289 veclen--;
2290 rd = ((rd + delta_d) & (bank_mask - 1))
2291 | (rd & bank_mask);
2292
2293 if (op == 15) {
2294 /* One source operand. */
2295 rm = ((rm + delta_m) & (bank_mask - 1))
2296 | (rm & bank_mask);
2297 gen_mov_F0_vreg(dp, rm);
2298 } else {
2299 /* Two source operands. */
2300 rn = ((rn + delta_d) & (bank_mask - 1))
2301 | (rn & bank_mask);
2302 gen_mov_F0_vreg(dp, rn);
2303 if (delta_m) {
2304 rm = ((rm + delta_m) & (bank_mask - 1))
2305 | (rm & bank_mask);
2306 gen_mov_F1_vreg(dp, rm);
2307 }
2308 }
2309 }
2310 }
2311 break;
2312 case 0xc:
2313 case 0xd:
9ee6e8bb 2314 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
2315 /* two-register transfer */
2316 rn = (insn >> 16) & 0xf;
2317 rd = (insn >> 12) & 0xf;
2318 if (dp) {
9ee6e8bb
PB
2319 VFP_DREG_M(rm, insn);
2320 } else {
2321 rm = VFP_SREG_M(insn);
2322 }
b7bcbe95 2323
18c9b560 2324 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2325 /* vfp->arm */
2326 if (dp) {
2327 gen_mov_F0_vreg(1, rm);
2328 gen_op_vfp_mrrd();
2329 gen_movl_reg_T0(s, rd);
2330 gen_movl_reg_T1(s, rn);
2331 } else {
2332 gen_mov_F0_vreg(0, rm);
2333 gen_op_vfp_mrs();
2334 gen_movl_reg_T0(s, rn);
2335 gen_mov_F0_vreg(0, rm + 1);
2336 gen_op_vfp_mrs();
2337 gen_movl_reg_T0(s, rd);
2338 }
2339 } else {
2340 /* arm->vfp */
2341 if (dp) {
2342 gen_movl_T0_reg(s, rd);
2343 gen_movl_T1_reg(s, rn);
2344 gen_op_vfp_mdrr();
2345 gen_mov_vreg_F0(1, rm);
2346 } else {
2347 gen_movl_T0_reg(s, rn);
2348 gen_op_vfp_msr();
2349 gen_mov_vreg_F0(0, rm);
2350 gen_movl_T0_reg(s, rd);
2351 gen_op_vfp_msr();
2352 gen_mov_vreg_F0(0, rm + 1);
2353 }
2354 }
2355 } else {
2356 /* Load/store */
2357 rn = (insn >> 16) & 0xf;
2358 if (dp)
9ee6e8bb 2359 VFP_DREG_D(rd, insn);
b7bcbe95 2360 else
9ee6e8bb
PB
2361 rd = VFP_SREG_D(insn);
2362 if (s->thumb && rn == 15) {
2363 gen_op_movl_T1_im(s->pc & ~2);
2364 } else {
2365 gen_movl_T1_reg(s, rn);
2366 }
b7bcbe95
FB
2367 if ((insn & 0x01200000) == 0x01000000) {
2368 /* Single load/store */
2369 offset = (insn & 0xff) << 2;
2370 if ((insn & (1 << 23)) == 0)
2371 offset = -offset;
2372 gen_op_addl_T1_im(offset);
2373 if (insn & (1 << 20)) {
b5ff1b31 2374 gen_vfp_ld(s, dp);
b7bcbe95
FB
2375 gen_mov_vreg_F0(dp, rd);
2376 } else {
2377 gen_mov_F0_vreg(dp, rd);
b5ff1b31 2378 gen_vfp_st(s, dp);
b7bcbe95
FB
2379 }
2380 } else {
2381 /* load/store multiple */
2382 if (dp)
2383 n = (insn >> 1) & 0x7f;
2384 else
2385 n = insn & 0xff;
2386
2387 if (insn & (1 << 24)) /* pre-decrement */
2388 gen_op_addl_T1_im(-((insn & 0xff) << 2));
2389
2390 if (dp)
2391 offset = 8;
2392 else
2393 offset = 4;
2394 for (i = 0; i < n; i++) {
18c9b560 2395 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2396 /* load */
b5ff1b31 2397 gen_vfp_ld(s, dp);
b7bcbe95
FB
2398 gen_mov_vreg_F0(dp, rd + i);
2399 } else {
2400 /* store */
2401 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 2402 gen_vfp_st(s, dp);
b7bcbe95
FB
2403 }
2404 gen_op_addl_T1_im(offset);
2405 }
2406 if (insn & (1 << 21)) {
2407 /* writeback */
2408 if (insn & (1 << 24))
2409 offset = -offset * n;
2410 else if (dp && (insn & 1))
2411 offset = 4;
2412 else
2413 offset = 0;
2414
2415 if (offset != 0)
2416 gen_op_addl_T1_im(offset);
2417 gen_movl_reg_T1(s, rn);
2418 }
2419 }
2420 }
2421 break;
2422 default:
2423 /* Should never happen. */
2424 return 1;
2425 }
2426 return 0;
2427}
2428
6e256c93 2429static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 2430{
6e256c93
FB
2431 TranslationBlock *tb;
2432
2433 tb = s->tb;
2434 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
2435 if (n == 0)
2436 gen_op_goto_tb0(TBPARAM(tb));
2437 else
2438 gen_op_goto_tb1(TBPARAM(tb));
2439 gen_op_movl_T0_im(dest);
2440 gen_op_movl_r15_T0();
2441 gen_op_movl_T0_im((long)tb + n);
2442 gen_op_exit_tb();
2443 } else {
2444 gen_op_movl_T0_im(dest);
2445 gen_op_movl_r15_T0();
2446 gen_op_movl_T0_0();
2447 gen_op_exit_tb();
2448 }
c53be334
FB
2449}
2450
8aaca4c0
FB
2451static inline void gen_jmp (DisasContext *s, uint32_t dest)
2452{
2453 if (__builtin_expect(s->singlestep_enabled, 0)) {
2454 /* An indirect jump so that we still trigger the debug exception. */
5899f386
FB
2455 if (s->thumb)
2456 dest |= 1;
8aaca4c0
FB
2457 gen_op_movl_T0_im(dest);
2458 gen_bx(s);
2459 } else {
6e256c93 2460 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
2461 s->is_jmp = DISAS_TB_JUMP;
2462 }
2463}
2464
/* Emit a 16x16->32 signed multiply of T0 and T1, taking the top half
   of each operand when X resp. Y is nonzero, the (sign-extended)
   bottom half otherwise.  Result is left in T0.  */
static inline void gen_mulxy(int x, int y)
{
    if (x)
        gen_op_sarl_T0_im(16);
    else
        gen_op_sxth_T0();
    if (y)
        gen_op_sarl_T1_im(16);
    else
        gen_op_sxth_T1();
    gen_op_mul_T0_T1();
}
2477
2478/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 2479static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31
FB
2480 uint32_t mask;
2481
2482 mask = 0;
2483 if (flags & (1 << 0))
2484 mask |= 0xff;
2485 if (flags & (1 << 1))
2486 mask |= 0xff00;
2487 if (flags & (1 << 2))
2488 mask |= 0xff0000;
2489 if (flags & (1 << 3))
2490 mask |= 0xff000000;
9ee6e8bb 2491
2ae23e75 2492 /* Mask out undefined bits. */
9ee6e8bb
PB
2493 mask &= ~CPSR_RESERVED;
2494 if (!arm_feature(env, ARM_FEATURE_V6))
e160c51c 2495 mask &= ~(CPSR_E | CPSR_GE);
9ee6e8bb 2496 if (!arm_feature(env, ARM_FEATURE_THUMB2))
e160c51c 2497 mask &= ~CPSR_IT;
9ee6e8bb 2498 /* Mask out execution state bits. */
2ae23e75 2499 if (!spsr)
e160c51c 2500 mask &= ~CPSR_EXEC;
b5ff1b31
FB
2501 /* Mask out privileged bits. */
2502 if (IS_USER(s))
9ee6e8bb 2503 mask &= CPSR_USER;
b5ff1b31
FB
2504 return mask;
2505}
2506
/* Returns nonzero if access to the PSR is not permitted.  Writes T0
   into the CPSR or SPSR under MASK, then forces a TB lookup because a
   PSR write may change the execution mode.  */
static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
{
    if (spsr) {
        /* ??? This is also undefined in system mode.  */
        if (IS_USER(s))
            return 1;
        gen_op_movl_spsr_T0(mask);
    } else {
        gen_op_movl_cpsr_T0(mask);
    }
    gen_lookup_tb(s);
    return 0;
}
2521
/* Generate an old-style exception return.  T0 holds the return address:
   copy it into the PC, then restore the full CPSR from the SPSR and end
   the TB so the new CPU state takes effect.  */
static void gen_exception_return(DisasContext *s)
{
    gen_op_movl_reg_TN[0][15]();      /* PC = T0 */
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);  /* restore every CPSR bit */
    s->is_jmp = DISAS_UPDATE;
}
2530
9ee6e8bb
PB
/* Generate a v6 exception return (RFE).  On entry T0 holds the new
   CPSR value and T2 the return address; write the CPSR first, then
   move the return address into the PC and end the TB.  */
static void gen_rfe(DisasContext *s)
{
    gen_op_movl_cpsr_T0(0xffffffff);  /* write every CPSR bit */
    gen_op_movl_T0_T2();
    gen_op_movl_reg_TN[0][15]();      /* PC = T2 */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 2539
9ee6e8bb
PB
/* Sync the Thumb-2 IT (if-then) state into the CPU state, so that an
   exception raised mid-IT-block observes the correct condexec bits.
   No-op when not inside an IT block (condexec_mask == 0).  */
static inline void
gen_set_condexec (DisasContext *s)
{
    if (s->condexec_mask) {
        /* Repack into the architected encoding: condition in the high
           nibble, remaining mask bits below it.  */
        gen_op_set_condexec((s->condexec_cond << 4) | (s->condexec_mask >> 1));
    }
}
3b46e624 2547
9ee6e8bb
PB
2548static void gen_nop_hint(DisasContext *s, int val)
2549{
2550 switch (val) {
2551 case 3: /* wfi */
2552 gen_op_movl_T0_im((long)s->pc);
2553 gen_op_movl_reg_TN[0][15]();
2554 s->is_jmp = DISAS_WFI;
2555 break;
2556 case 2: /* wfe */
2557 case 4: /* sev */
2558 /* TODO: Implement SEV and WFE. May help SMP performance. */
2559 default: /* nop */
2560 break;
2561 }
2562}
99c475ab 2563
9ee6e8bb
PB
/* Neon shift by constant.  The actual ops are the same as used for variable
   shifts.  [OP][U][SIZE].  Right-shift forms (VSHR/VSRA/VRSHR/VRSRA/VSRI)
   reuse the left-shift ops: the caller passes a negative shift count.
   The NULL row for op 4/u==0 marks an invalid encoding.  */
static GenOpFunc *gen_neon_shift_im[8][2][4] = {
    { /* 0 */ /* VSHR */
        {
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64
        }, {
            gen_op_neon_shl_s8,
            gen_op_neon_shl_s16,
            gen_op_neon_shl_s32,
            gen_op_neon_shl_s64
        }
    }, { /* 1 */ /* VSRA */
        {
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64
        }, {
            gen_op_neon_shl_s8,
            gen_op_neon_shl_s16,
            gen_op_neon_shl_s32,
            gen_op_neon_shl_s64
        }
    }, { /* 2 */ /* VRSHR */
        {
            gen_op_neon_rshl_u8,
            gen_op_neon_rshl_u16,
            gen_op_neon_rshl_u32,
            gen_op_neon_rshl_u64
        }, {
            gen_op_neon_rshl_s8,
            gen_op_neon_rshl_s16,
            gen_op_neon_rshl_s32,
            gen_op_neon_rshl_s64
        }
    }, { /* 3 */ /* VRSRA */
        {
            gen_op_neon_rshl_u8,
            gen_op_neon_rshl_u16,
            gen_op_neon_rshl_u32,
            gen_op_neon_rshl_u64
        }, {
            gen_op_neon_rshl_s8,
            gen_op_neon_rshl_s16,
            gen_op_neon_rshl_s32,
            gen_op_neon_rshl_s64
        }
    }, { /* 4 */
        {
            NULL, NULL, NULL, NULL
        }, { /* VSRI */
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64,
        }
    }, { /* 5 */
        { /* VSHL */
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64,
        }, { /* VSLI */
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64,
        }
    }, { /* 6 */ /* VQSHL */
        {
            gen_op_neon_qshl_u8,
            gen_op_neon_qshl_u16,
            gen_op_neon_qshl_u32,
            gen_op_neon_qshl_u64
        }, {
            gen_op_neon_qshl_s8,
            gen_op_neon_qshl_s16,
            gen_op_neon_qshl_s32,
            gen_op_neon_qshl_s64
        }
    }, { /* 7 */ /* VQSHLU */
        {
            gen_op_neon_qshl_u8,
            gen_op_neon_qshl_u16,
            gen_op_neon_qshl_u32,
            gen_op_neon_qshl_u64
        }, {
            gen_op_neon_qshl_u8,
            gen_op_neon_qshl_u16,
            gen_op_neon_qshl_u32,
            gen_op_neon_qshl_u64
        }
    }
};
2662
/* Shift ops used by the shift-and-narrow insns (VSHRN/VRSHRN/VQSHRN/
   VQRSHRN).  Indexed [R][U][size - 1]: R selects rounding, U selects
   signedness, and size is the (widened) source element size.  */
static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
    {
        {
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64
        }, {
            gen_op_neon_shl_s16,
            gen_op_neon_shl_s32,
            gen_op_neon_shl_s64
        }
    }, {
        {
            gen_op_neon_rshl_u16,
            gen_op_neon_rshl_u32,
            gen_op_neon_rshl_u64
        }, {
            gen_op_neon_rshl_s16,
            gen_op_neon_rshl_s32,
            gen_op_neon_rshl_s64
        }
    }
};
99c475ab 2687
9ee6e8bb
PB
/* Narrow a 32-bit element to 32 bits: the value already in T0 is the
   result, so nothing is emitted.  This exists only so the
   gen_neon_narrow[] table below has an entry for every size.
   Fixed to use a proper (void) prototype instead of the old-style
   empty parameter list, matching gen_op_neon_widen_u32(void).  */
static inline void
gen_op_neon_narrow_u32 (void)
{
    /* No-op.  */
}
2693
/* Narrowing ops indexed by destination element size (0=u8, 1=u16, 2=u32).  */
static GenOpFunc *gen_neon_narrow[3] = {
    gen_op_neon_narrow_u8,
    gen_op_neon_narrow_u16,
    gen_op_neon_narrow_u32
};

/* Saturating narrow with unsigned result range.  */
static GenOpFunc *gen_neon_narrow_satu[3] = {
    gen_op_neon_narrow_sat_u8,
    gen_op_neon_narrow_sat_u16,
    gen_op_neon_narrow_sat_u32
};

/* Saturating narrow with signed result range.  */
static GenOpFunc *gen_neon_narrow_sats[3] = {
    gen_op_neon_narrow_sat_s8,
    gen_op_neon_narrow_sat_s16,
    gen_op_neon_narrow_sat_s32
};
2711
/* Emit a vector add of T0 and T1 for the given element size
   (0=u8, 1=u16, 2=u32).  Returns nonzero for an unsupported size.  */
static inline int gen_neon_add(int size)
{
    if (size == 0) {
        gen_op_neon_add_u8();
    } else if (size == 1) {
        gen_op_neon_add_u16();
    } else if (size == 2) {
        gen_op_addl_T0_T1();
    } else {
        return 1;
    }
    return 0;
}
2722
/* 32-bit pairwise ops end up the same as the elementwise versions.  */
#define gen_op_neon_pmax_s32 gen_op_neon_max_s32
#define gen_op_neon_pmax_u32 gen_op_neon_max_u32
#define gen_op_neon_pmin_s32 gen_op_neon_min_s32
#define gen_op_neon_pmin_u32 gen_op_neon_min_u32

/* Dispatch the Neon integer op NAME on the local variables `size`
   (0/1/2 = 8/16/32-bit elements) and `u` (unsignedness flag).
   Returns 1 from the *enclosing* function for invalid combinations
   (e.g. 64-bit element sizes).  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_op_neon_##name##_s8(); break; \
    case 1: gen_op_neon_##name##_u8(); break; \
    case 2: gen_op_neon_##name##_s16(); break; \
    case 3: gen_op_neon_##name##_u16(); break; \
    case 4: gen_op_neon_##name##_s32(); break; \
    case 5: gen_op_neon_##name##_u32(); break; \
    default: return 1; \
    }} while (0)
2739
2740static inline void
2741gen_neon_movl_scratch_T0(int scratch)
2742{
2743 uint32_t offset;
2744
2745 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2746 gen_op_neon_setreg_T0(offset);
2747}
2748
2749static inline void
2750gen_neon_movl_scratch_T1(int scratch)
2751{
2752 uint32_t offset;
2753
2754 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2755 gen_op_neon_setreg_T1(offset);
2756}
2757
2758static inline void
2759gen_neon_movl_T0_scratch(int scratch)
2760{
2761 uint32_t offset;
2762
2763 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2764 gen_op_neon_getreg_T0(offset);
2765}
2766
2767static inline void
2768gen_neon_movl_T1_scratch(int scratch)
2769{
2770 uint32_t offset;
2771
2772 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2773 gen_op_neon_getreg_T1(offset);
2774}
2775
/* Widen the unsigned 32-bit value in T0 to 64 bits across {T0, T1}
   by zeroing the high half (T1).  */
static inline void gen_op_neon_widen_u32(void)
{
    gen_op_movl_T1_im(0);
}
2780
/* Load a Neon scalar operand into T0.  For size == 1 the containing
   32-bit chunk is loaded directly (reg indexes 32-bit halves of a
   D register); otherwise the 32-bit element is loaded and the selected
   halfword is duplicated into both halves of T0.
   NOTE(review): the low16/high16 choice for odd vs even reg looks
   counter-intuitive relative to the op names -- confirm against the
   gen_op_neon_dup_*16 implementations before relying on it.  */
static inline void gen_neon_get_scalar(int size, int reg)
{
    if (size == 1) {
        NEON_GET_REG(T0, reg >> 1, reg & 1);
    } else {
        NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
        if (reg & 1)
            gen_op_neon_dup_low16();
        else
            gen_op_neon_dup_high16();
    }
}
2793
2794static void gen_neon_unzip(int reg, int q, int tmp, int size)
2795{
2796 int n;
2797
2798 for (n = 0; n < q + 1; n += 2) {
2799 NEON_GET_REG(T0, reg, n);
2800 NEON_GET_REG(T0, reg, n + n);
2801 switch (size) {
2802 case 0: gen_op_neon_unzip_u8(); break;
2803 case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same. */
2804 case 2: /* no-op */; break;
2805 default: abort();
2806 }
2807 gen_neon_movl_scratch_T0(tmp + n);
2808 gen_neon_movl_scratch_T1(tmp + n + 1);
2809 }
2810}
2811
/* Properties of the VLDn/VSTn "multiple structures" encodings, indexed
   by the op field (bits [11:8] of the instruction): number of registers
   transferred, interleave factor between them, and D-register spacing.
   Rows 0..10 correspond to the valid op values checked by
   disas_neon_ls_insn (op > 10 is rejected there).  */
static struct {
    int nregs;      /* registers per structure transfer */
    int interleave; /* element interleave factor */
    int spacing;    /* D-register step between structure members */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
2829
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   Three encodings are handled: load/store of multiple structures,
   load of a single element to all lanes, and single-element transfer.
   T1 is used throughout as the memory address; post-index writeback to
   rn is emitted at the end when rm != 15.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;
    int interleave;
    int stride;
    int size;
    int reg;
    int pass;
    int load;
    int shift;
    uint32_t mask;
    int n;

    if (!vfp_enabled(env))
        return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;
    rm = insn & 0xf;
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements.  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10 || size == 3)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        gen_movl_T1_reg(s, rn);
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved layouts, restart the address at the base
               plus this register's element offset.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im((1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im(1 << size);
            }
            for (pass = 0; pass < 2; pass++) {
                if (size == 2) {
                    /* 32-bit elements: one access per pass.  */
                    if (load) {
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T0, rd, pass);
                        gen_ldst(stl, s);
                    }
                    gen_op_addl_T1_im(stride);
                } else if (size == 1) {
                    /* 16-bit elements: two accesses, merged in T2 with
                       the second halfword inserted at bit 16.  */
                    if (load) {
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_movl_T2_T0();
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_insert_elt(16, 0xffff);
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_movl_T0_T2();
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_extract_elt(16, 0xffff0000);
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    /* 8-bit elements: four accesses assembled byte by
                       byte in T2, mask tracking the current lane.  */
                    if (load) {
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            gen_ldst(ldub, s);
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
                                gen_op_movl_T2_T0();
                            } else {
                                gen_op_neon_insert_elt(n * 8, ~mask);
                            }
                            mask <<= 8;
                        }
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            if (n == 0) {
                                gen_op_movl_T0_T2();
                            } else {
                                gen_op_neon_extract_elt(n * 8, mask);
                            }
                            gen_ldst(stb, s);
                            gen_op_addl_T1_im(stride);
                            mask <<= 8;
                        }
                    }
                }
            }
            rd += neon_ls_element_type[op].spacing;
        }
        stride = nregs * 8;
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes.  */
            if (!load)
                return 1;
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    gen_ldst(ldub, s);
                    gen_op_neon_dup_u8(0);
                    break;
                case 1:
                    gen_ldst(lduw, s);
                    gen_op_neon_dup_low16();
                    break;
                case 2:
                    gen_ldst(ldl, s);
                    break;
                case 3:
                    return 1;
                }
                gen_op_addl_T1_im(1 << size);
                /* Replicate the duplicated value into both halves of
                   the destination D register.  */
                NEON_SET_REG(T0, rd, 0);
                NEON_SET_REG(T0, rd, 1);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element.  */
            pass = (insn >> 7) & 1;
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                mask = 0xff << shift;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                mask = shift ? 0xffff0000 : 0xffff;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                mask = 0xffffffff;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    if (size != 2) {
                        /* Read-modify-write of the containing 32-bit
                           element: current value kept in T2.  */
                        NEON_GET_REG(T2, rd, pass);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(ldub, s);
                        break;
                    case 1:
                        gen_ldst(lduw, s);
                        break;
                    case 2:
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                        break;
                    }
                    if (size != 2) {
                        gen_op_neon_insert_elt(shift, ~mask);
                        NEON_SET_REG(T0, rd, pass);
                    }
                } else { /* Store */
                    if (size == 2) {
                        NEON_GET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_neon_extract_elt(shift, mask);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(stb, s);
                        break;
                    case 1:
                        gen_ldst(stw, s);
                        break;
                    case 2:
                        gen_ldst(stl, s);
                        break;
                    }
                }
                rd += stride;
                gen_op_addl_T1_im(1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    /* Post-index writeback: rm == 15 means none, rm == 13 means advance
       by the transfer size, any other rm adds that register.  */
    if (rm != 15) {
        gen_movl_T1_reg(s, rn);
        if (rm == 13) {
            gen_op_addl_T1_im(stride);
        } else {
            gen_movl_T2_reg(s, rm);
            gen_op_addl_T1_T2();
        }
        gen_movl_reg_T1(s, rn);
    }
    return 0;
}
3b46e624 3046
9ee6e8bb
PB
3047/* Translate a NEON data processing instruction. Return nonzero if the
3048 instruction is invalid.
3049 In general we process vectors in 32-bit chunks. This means we can reuse
3050 some of the scalar ops, and hopefully the code generated for 32-bit
3051 hosts won't be too awful. The downside is that the few 64-bit operations
3052 (mainly shifts) get complicated. */
2c0262af 3053
9ee6e8bb
PB
3054static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
3055{
3056 int op;
3057 int q;
3058 int rd, rn, rm;
3059 int size;
3060 int shift;
3061 int pass;
3062 int count;
3063 int pairwise;
3064 int u;
3065 int n;
3066 uint32_t imm;
3067
3068 if (!vfp_enabled(env))
3069 return 1;
3070 q = (insn & (1 << 6)) != 0;
3071 u = (insn >> 24) & 1;
3072 VFP_DREG_D(rd, insn);
3073 VFP_DREG_N(rn, insn);
3074 VFP_DREG_M(rm, insn);
3075 size = (insn >> 20) & 3;
3076 if ((insn & (1 << 23)) == 0) {
3077 /* Three register same length. */
3078 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
3079 if (size == 3 && (op == 1 || op == 5 || op == 16)) {
3080 for (pass = 0; pass < (q ? 2 : 1); pass++) {
3081 NEON_GET_REG(T0, rm, pass * 2);
3082 NEON_GET_REG(T1, rm, pass * 2 + 1);
3083 gen_neon_movl_scratch_T0(0);
3084 gen_neon_movl_scratch_T1(1);
3085 NEON_GET_REG(T0, rn, pass * 2);
3086 NEON_GET_REG(T1, rn, pass * 2 + 1);
3087 switch (op) {
3088 case 1: /* VQADD */
3089 if (u) {
3090 gen_op_neon_addl_saturate_u64();
2c0262af 3091 } else {
9ee6e8bb 3092 gen_op_neon_addl_saturate_s64();
2c0262af 3093 }
9ee6e8bb
PB
3094 break;
3095 case 5: /* VQSUB */
3096 if (u) {
3097 gen_op_neon_subl_saturate_u64();
1e8d4eec 3098 } else {
9ee6e8bb 3099 gen_op_neon_subl_saturate_s64();
1e8d4eec 3100 }
9ee6e8bb
PB
3101 break;
3102 case 16:
3103 if (u) {
3104 gen_op_neon_subl_u64();
3105 } else {
3106 gen_op_neon_addl_u64();
3107 }
3108 break;
3109 default:
3110 abort();
2c0262af 3111 }
9ee6e8bb
PB
3112 NEON_SET_REG(T0, rd, pass * 2);
3113 NEON_SET_REG(T1, rd, pass * 2 + 1);
2c0262af 3114 }
9ee6e8bb 3115 return 0;
2c0262af 3116 }
9ee6e8bb
PB
3117 switch (op) {
3118 case 8: /* VSHL */
3119 case 9: /* VQSHL */
3120 case 10: /* VRSHL */
3121 case 11: /* VQSHL */
3122 /* Shift operations have Rn and Rm reversed. */
3123 {
3124 int tmp;
3125 tmp = rn;
3126 rn = rm;
3127 rm = tmp;
3128 pairwise = 0;
3129 }
2c0262af 3130 break;
9ee6e8bb
PB
3131 case 20: /* VPMAX */
3132 case 21: /* VPMIN */
3133 case 23: /* VPADD */
3134 pairwise = 1;
2c0262af 3135 break;
9ee6e8bb
PB
3136 case 26: /* VPADD (float) */
3137 pairwise = (u && size < 2);
2c0262af 3138 break;
9ee6e8bb
PB
3139 case 30: /* VPMIN/VPMAX (float) */
3140 pairwise = u;
2c0262af 3141 break;
9ee6e8bb
PB
3142 default:
3143 pairwise = 0;
2c0262af 3144 break;
9ee6e8bb
PB
3145 }
3146 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3147
3148 if (pairwise) {
3149 /* Pairwise. */
3150 if (q)
3151 n = (pass & 1) * 2;
2c0262af 3152 else
9ee6e8bb
PB
3153 n = 0;
3154 if (pass < q + 1) {
3155 NEON_GET_REG(T0, rn, n);
3156 NEON_GET_REG(T1, rn, n + 1);
3157 } else {
3158 NEON_GET_REG(T0, rm, n);
3159 NEON_GET_REG(T1, rm, n + 1);
3160 }
3161 } else {
3162 /* Elementwise. */
3163 NEON_GET_REG(T0, rn, pass);
3164 NEON_GET_REG(T1, rm, pass);
3165 }
3166 switch (op) {
3167 case 0: /* VHADD */
3168 GEN_NEON_INTEGER_OP(hadd);
3169 break;
3170 case 1: /* VQADD */
3171 switch (size << 1| u) {
3172 case 0: gen_op_neon_qadd_s8(); break;
3173 case 1: gen_op_neon_qadd_u8(); break;
3174 case 2: gen_op_neon_qadd_s16(); break;
3175 case 3: gen_op_neon_qadd_u16(); break;
3176 case 4: gen_op_addl_T0_T1_saturate(); break;
3177 case 5: gen_op_addl_T0_T1_usaturate(); break;
3178 default: abort();
3179 }
2c0262af 3180 break;
9ee6e8bb
PB
3181 case 2: /* VRHADD */
3182 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 3183 break;
9ee6e8bb
PB
3184 case 3: /* Logic ops. */
3185 switch ((u << 2) | size) {
3186 case 0: /* VAND */
2c0262af 3187 gen_op_andl_T0_T1();
9ee6e8bb
PB
3188 break;
3189 case 1: /* BIC */
3190 gen_op_bicl_T0_T1();
3191 break;
3192 case 2: /* VORR */
3193 gen_op_orl_T0_T1();
3194 break;
3195 case 3: /* VORN */
3196 gen_op_notl_T1();
3197 gen_op_orl_T0_T1();
3198 break;
3199 case 4: /* VEOR */
3200 gen_op_xorl_T0_T1();
3201 break;
3202 case 5: /* VBSL */
3203 NEON_GET_REG(T2, rd, pass);
3204 gen_op_neon_bsl();
3205 break;
3206 case 6: /* VBIT */
3207 NEON_GET_REG(T2, rd, pass);
3208 gen_op_neon_bit();
3209 break;
3210 case 7: /* VBIF */
3211 NEON_GET_REG(T2, rd, pass);
3212 gen_op_neon_bif();
3213 break;
2c0262af
FB
3214 }
3215 break;
9ee6e8bb
PB
3216 case 4: /* VHSUB */
3217 GEN_NEON_INTEGER_OP(hsub);
3218 break;
3219 case 5: /* VQSUB */
3220 switch ((size << 1) | u) {
3221 case 0: gen_op_neon_qsub_s8(); break;
3222 case 1: gen_op_neon_qsub_u8(); break;
3223 case 2: gen_op_neon_qsub_s16(); break;
3224 case 3: gen_op_neon_qsub_u16(); break;
3225 case 4: gen_op_subl_T0_T1_saturate(); break;
3226 case 5: gen_op_subl_T0_T1_usaturate(); break;
3227 default: abort();
2c0262af
FB
3228 }
3229 break;
9ee6e8bb
PB
3230 case 6: /* VCGT */
3231 GEN_NEON_INTEGER_OP(cgt);
3232 break;
3233 case 7: /* VCGE */
3234 GEN_NEON_INTEGER_OP(cge);
3235 break;
3236 case 8: /* VSHL */
3237 switch ((size << 1) | u) {
3238 case 0: gen_op_neon_shl_s8(); break;
3239 case 1: gen_op_neon_shl_u8(); break;
3240 case 2: gen_op_neon_shl_s16(); break;
3241 case 3: gen_op_neon_shl_u16(); break;
3242 case 4: gen_op_neon_shl_s32(); break;
3243 case 5: gen_op_neon_shl_u32(); break;
3244#if 0
3245 /* ??? Implementing these is tricky because the vector ops work
3246 on 32-bit pieces. */
3247 case 6: gen_op_neon_shl_s64(); break;
3248 case 7: gen_op_neon_shl_u64(); break;
3249#else
3250 case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
3251#endif
2c0262af
FB
3252 }
3253 break;
9ee6e8bb
PB
3254 case 9: /* VQSHL */
3255 switch ((size << 1) | u) {
3256 case 0: gen_op_neon_qshl_s8(); break;
3257 case 1: gen_op_neon_qshl_u8(); break;
3258 case 2: gen_op_neon_qshl_s16(); break;
3259 case 3: gen_op_neon_qshl_u16(); break;
3260 case 4: gen_op_neon_qshl_s32(); break;
3261 case 5: gen_op_neon_qshl_u32(); break;
3262#if 0
3263 /* ??? Implementing these is tricky because the vector ops work
3264 on 32-bit pieces. */
3265 case 6: gen_op_neon_qshl_s64(); break;
3266 case 7: gen_op_neon_qshl_u64(); break;
3267#else
3268 case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
3269#endif
2c0262af
FB
3270 }
3271 break;
9ee6e8bb
PB
3272 case 10: /* VRSHL */
3273 switch ((size << 1) | u) {
3274 case 0: gen_op_neon_rshl_s8(); break;
3275 case 1: gen_op_neon_rshl_u8(); break;
3276 case 2: gen_op_neon_rshl_s16(); break;
3277 case 3: gen_op_neon_rshl_u16(); break;
3278 case 4: gen_op_neon_rshl_s32(); break;
3279 case 5: gen_op_neon_rshl_u32(); break;
3280#if 0
3281 /* ??? Implementing these is tricky because the vector ops work
3282 on 32-bit pieces. */
3283 case 6: gen_op_neon_rshl_s64(); break;
3284 case 7: gen_op_neon_rshl_u64(); break;
3285#else
3286 case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
3287#endif
3288 }
2c0262af 3289 break;
9ee6e8bb
PB
3290 case 11: /* VQRSHL */
3291 switch ((size << 1) | u) {
3292 case 0: gen_op_neon_qrshl_s8(); break;
3293 case 1: gen_op_neon_qrshl_u8(); break;
3294 case 2: gen_op_neon_qrshl_s16(); break;
3295 case 3: gen_op_neon_qrshl_u16(); break;
3296 case 4: gen_op_neon_qrshl_s32(); break;
3297 case 5: gen_op_neon_qrshl_u32(); break;
3298#if 0
3299 /* ??? Implementing these is tricky because the vector ops work
3300 on 32-bit pieces. */
3301 case 6: gen_op_neon_qrshl_s64(); break;
3302 case 7: gen_op_neon_qrshl_u64(); break;
3303#else
3304 case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
3305#endif
3306 }
3307 break;
3308 case 12: /* VMAX */
3309 GEN_NEON_INTEGER_OP(max);
3310 break;
3311 case 13: /* VMIN */
3312 GEN_NEON_INTEGER_OP(min);
3313 break;
3314 case 14: /* VABD */
3315 GEN_NEON_INTEGER_OP(abd);
3316 break;
3317 case 15: /* VABA */
3318 GEN_NEON_INTEGER_OP(abd);
3319 NEON_GET_REG(T1, rd, pass);
3320 gen_neon_add(size);
3321 break;
3322 case 16:
3323 if (!u) { /* VADD */
3324 if (gen_neon_add(size))
3325 return 1;
3326 } else { /* VSUB */
3327 switch (size) {
3328 case 0: gen_op_neon_sub_u8(); break;
3329 case 1: gen_op_neon_sub_u16(); break;
3330 case 2: gen_op_subl_T0_T1(); break;
3331 default: return 1;
3332 }
3333 }
3334 break;
3335 case 17:
3336 if (!u) { /* VTST */
3337 switch (size) {
3338 case 0: gen_op_neon_tst_u8(); break;
3339 case 1: gen_op_neon_tst_u16(); break;
3340 case 2: gen_op_neon_tst_u32(); break;
3341 default: return 1;
3342 }
3343 } else { /* VCEQ */
3344 switch (size) {
3345 case 0: gen_op_neon_ceq_u8(); break;
3346 case 1: gen_op_neon_ceq_u16(); break;
3347 case 2: gen_op_neon_ceq_u32(); break;
3348 default: return 1;
3349 }
3350 }
3351 break;
3352 case 18: /* Multiply. */
3353 switch (size) {
3354 case 0: gen_op_neon_mul_u8(); break;
3355 case 1: gen_op_neon_mul_u16(); break;
3356 case 2: gen_op_mul_T0_T1(); break;
3357 default: return 1;
3358 }
3359 NEON_GET_REG(T1, rd, pass);
3360 if (u) { /* VMLS */
3361 switch (size) {
3362 case 0: gen_op_neon_rsb_u8(); break;
3363 case 1: gen_op_neon_rsb_u16(); break;
3364 case 2: gen_op_rsbl_T0_T1(); break;
3365 default: return 1;
3366 }
3367 } else { /* VMLA */
3368 gen_neon_add(size);
3369 }
3370 break;
3371 case 19: /* VMUL */
3372 if (u) { /* polynomial */
3373 gen_op_neon_mul_p8();
3374 } else { /* Integer */
3375 switch (size) {
3376 case 0: gen_op_neon_mul_u8(); break;
3377 case 1: gen_op_neon_mul_u16(); break;
3378 case 2: gen_op_mul_T0_T1(); break;
3379 default: return 1;
3380 }
3381 }
3382 break;
3383 case 20: /* VPMAX */
3384 GEN_NEON_INTEGER_OP(pmax);
3385 break;
3386 case 21: /* VPMIN */
3387 GEN_NEON_INTEGER_OP(pmin);
3388 break;
3389 case 22: /* Hultiply high. */
3390 if (!u) { /* VQDMULH */
3391 switch (size) {
3392 case 1: gen_op_neon_qdmulh_s16(); break;
3393 case 2: gen_op_neon_qdmulh_s32(); break;
3394 default: return 1;
3395 }
3396 } else { /* VQRDHMUL */
3397 switch (size) {
3398 case 1: gen_op_neon_qrdmulh_s16(); break;
3399 case 2: gen_op_neon_qrdmulh_s32(); break;
3400 default: return 1;
3401 }
3402 }
3403 break;
3404 case 23: /* VPADD */
3405 if (u)
3406 return 1;
3407 switch (size) {
3408 case 0: gen_op_neon_padd_u8(); break;
3409 case 1: gen_op_neon_padd_u16(); break;
3410 case 2: gen_op_addl_T0_T1(); break;
3411 default: return 1;
3412 }
3413 break;
3414 case 26: /* Floating point arithnetic. */
3415 switch ((u << 2) | size) {
3416 case 0: /* VADD */
3417 gen_op_neon_add_f32();
3418 break;
3419 case 2: /* VSUB */
3420 gen_op_neon_sub_f32();
3421 break;
3422 case 4: /* VPADD */
3423 gen_op_neon_add_f32();
3424 break;
3425 case 6: /* VABD */
3426 gen_op_neon_abd_f32();
3427 break;
3428 default:
3429 return 1;
3430 }
3431 break;
3432 case 27: /* Float multiply. */
3433 gen_op_neon_mul_f32();
3434 if (!u) {
3435 NEON_GET_REG(T1, rd, pass);
3436 if (size == 0) {
3437 gen_op_neon_add_f32();
3438 } else {
3439 gen_op_neon_rsb_f32();
3440 }
3441 }
3442 break;
3443 case 28: /* Float compare. */
3444 if (!u) {
3445 gen_op_neon_ceq_f32();
b5ff1b31 3446 } else {
9ee6e8bb
PB
3447 if (size == 0)
3448 gen_op_neon_cge_f32();
3449 else
3450 gen_op_neon_cgt_f32();
b5ff1b31 3451 }
2c0262af 3452 break;
9ee6e8bb
PB
3453 case 29: /* Float compare absolute. */
3454 if (!u)
3455 return 1;
3456 if (size == 0)
3457 gen_op_neon_acge_f32();
3458 else
3459 gen_op_neon_acgt_f32();
2c0262af 3460 break;
9ee6e8bb
PB
3461 case 30: /* Float min/max. */
3462 if (size == 0)
3463 gen_op_neon_max_f32();
3464 else
3465 gen_op_neon_min_f32();
3466 break;
3467 case 31:
3468 if (size == 0)
3469 gen_op_neon_recps_f32();
3470 else
3471 gen_op_neon_rsqrts_f32();
2c0262af 3472 break;
9ee6e8bb
PB
3473 default:
3474 abort();
2c0262af 3475 }
9ee6e8bb
PB
3476 /* Save the result. For elementwise operations we can put it
3477 straight into the destination register. For pairwise operations
3478 we have to be careful to avoid clobbering the source operands. */
3479 if (pairwise && rd == rm) {
3480 gen_neon_movl_scratch_T0(pass);
3481 } else {
3482 NEON_SET_REG(T0, rd, pass);
3483 }
3484
3485 } /* for pass */
3486 if (pairwise && rd == rm) {
3487 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3488 gen_neon_movl_T0_scratch(pass);
3489 NEON_SET_REG(T0, rd, pass);
3490 }
3491 }
3492 } else if (insn & (1 << 4)) {
3493 if ((insn & 0x00380080) != 0) {
3494 /* Two registers and shift. */
3495 op = (insn >> 8) & 0xf;
3496 if (insn & (1 << 7)) {
3497 /* 64-bit shift. */
3498 size = 3;
3499 } else {
3500 size = 2;
3501 while ((insn & (1 << (size + 19))) == 0)
3502 size--;
3503 }
3504 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
3505 /* To avoid excessive dumplication of ops we implement shift
3506 by immediate using the variable shift operations. */
3507 if (op < 8) {
3508 /* Shift by immediate:
3509 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
3510 /* Right shifts are encoded as N - shift, where N is the
3511 element size in bits. */
3512 if (op <= 4)
3513 shift = shift - (1 << (size + 3));
3514 else
3515 shift++;
3516 if (size == 3) {
3517 count = q + 1;
3518 } else {
3519 count = q ? 4: 2;
3520 }
3521 switch (size) {
3522 case 0:
3523 imm = (uint8_t) shift;
3524 imm |= imm << 8;
3525 imm |= imm << 16;
3526 break;
3527 case 1:
3528 imm = (uint16_t) shift;
3529 imm |= imm << 16;
3530 break;
3531 case 2:
3532 case 3:
3533 imm = shift;
3534 break;
3535 default:
3536 abort();
3537 }
3538
3539 for (pass = 0; pass < count; pass++) {
3540 if (size < 3) {
3541 /* Operands in T0 and T1. */
3542 gen_op_movl_T1_im(imm);
3543 NEON_GET_REG(T0, rm, pass);
2c0262af 3544 } else {
9ee6e8bb
PB
3545 /* Operands in {T0, T1} and env->vfp.scratch. */
3546 gen_op_movl_T0_im(imm);
3547 gen_neon_movl_scratch_T0(0);
3548 gen_op_movl_T0_im((int32_t)imm >> 31);
3549 gen_neon_movl_scratch_T0(1);
3550 NEON_GET_REG(T0, rm, pass * 2);
3551 NEON_GET_REG(T1, rm, pass * 2 + 1);
3552 }
3553
3554 if (gen_neon_shift_im[op][u][size] == NULL)
3555 return 1;
3556 gen_neon_shift_im[op][u][size]();
3557
3558 if (op == 1 || op == 3) {
3559 /* Accumulate. */
3560 if (size == 3) {
3561 gen_neon_movl_scratch_T0(0);
3562 gen_neon_movl_scratch_T1(1);
3563 NEON_GET_REG(T0, rd, pass * 2);
3564 NEON_GET_REG(T1, rd, pass * 2 + 1);
3565 gen_op_neon_addl_u64();
3566 } else {
3567 NEON_GET_REG(T1, rd, pass);
3568 gen_neon_add(size);
99c475ab 3569 }
9ee6e8bb
PB
3570 } else if (op == 4 || (op == 5 && u)) {
3571 /* Insert */
3572 if (size == 3) {
3573 cpu_abort(env, "VS[LR]I.64 not implemented");
3574 }
3575 switch (size) {
3576 case 0:
3577 if (op == 4)
3578 imm = 0xff >> -shift;
3579 else
3580 imm = (uint8_t)(0xff << shift);
3581 imm |= imm << 8;
3582 imm |= imm << 16;
3583 break;
3584 case 1:
3585 if (op == 4)
3586 imm = 0xffff >> -shift;
3587 else
3588 imm = (uint16_t)(0xffff << shift);
3589 imm |= imm << 16;
3590 break;
3591 case 2:
3592 if (op == 4)
3593 imm = 0xffffffffu >> -shift;
3594 else
3595 imm = 0xffffffffu << shift;
3596 break;
3597 default:
3598 abort();
3599 }
3600 NEON_GET_REG(T1, rd, pass);
3601 gen_op_movl_T2_im(imm);
3602 gen_op_neon_bsl();
2c0262af 3603 }
9ee6e8bb
PB
3604 if (size == 3) {
3605 NEON_SET_REG(T0, rd, pass * 2);
3606 NEON_SET_REG(T1, rd, pass * 2 + 1);
3607 } else {
3608 NEON_SET_REG(T0, rd, pass);
3609 }
3610 } /* for pass */
3611 } else if (op < 10) {
3612 /* Shift by immedaiate and narrow:
3613 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
3614 shift = shift - (1 << (size + 3));
3615 size++;
3616 if (size == 3) {
3617 count = q + 1;
2c0262af 3618 } else {
9ee6e8bb
PB
3619 count = q ? 4: 2;
3620 }
3621 switch (size) {
3622 case 1:
3623 imm = (uint16_t) shift;
3624 imm |= imm << 16;
3625 break;
3626 case 2:
3627 case 3:
3628 imm = shift;
3629 break;
3630 default:
3631 abort();
3632 }
3633
3634 /* Processing MSB first means we need to do less shuffling at
3635 the end. */
3636 for (pass = count - 1; pass >= 0; pass--) {
3637 /* Avoid clobbering the second operand before it has been
3638 written. */
3639 n = pass;
3640 if (rd == rm)
3641 n ^= (count - 1);
3642 else
3643 n = pass;
3644
3645 if (size < 3) {
3646 /* Operands in T0 and T1. */
3647 gen_op_movl_T1_im(imm);
3648 NEON_GET_REG(T0, rm, n);
2c0262af 3649 } else {
9ee6e8bb
PB
3650 /* Operands in {T0, T1} and env->vfp.scratch. */
3651 gen_op_movl_T0_im(imm);
3652 gen_neon_movl_scratch_T0(0);
3653 gen_op_movl_T0_im((int32_t)imm >> 31);
3654 gen_neon_movl_scratch_T0(1);
3655 NEON_GET_REG(T0, rm, n * 2);
3656 NEON_GET_REG(T0, rm, n * 2 + 1);
3657 }
3b46e624 3658
9ee6e8bb
PB
3659 gen_neon_shift_im_narrow[q][u][size - 1]();
3660
3661 if (size < 3 && (pass & 1) == 0) {
3662 gen_neon_movl_scratch_T0(0);
3663 } else {
3664 uint32_t offset;
3665
3666 if (size < 3)
3667 gen_neon_movl_T1_scratch(0);
3668
3669 if (op == 8 && !u) {
3670 gen_neon_narrow[size - 1]();
99c475ab 3671 } else {
9ee6e8bb
PB
3672 if (op == 8)
3673 gen_neon_narrow_sats[size - 2]();
3674 else
3675 gen_neon_narrow_satu[size - 1]();
99c475ab 3676 }
9ee6e8bb
PB
3677 if (size == 3)
3678 offset = neon_reg_offset(rd, n);
3679 else
3680 offset = neon_reg_offset(rd, n >> 1);
3681 gen_op_neon_setreg_T0(offset);
3682 }
3683 } /* for pass */
3684 } else if (op == 10) {
3685 /* VSHLL */
3686 if (q)
3687 return 1;
3688 for (pass = 0; pass < 2; pass++) {
3689 /* Avoid clobbering the input operand. */
3690 if (rd == rm)
3691 n = 1 - pass;
3692 else
3693 n = pass;
3694
3695 NEON_GET_REG(T0, rm, n);
3696 GEN_NEON_INTEGER_OP(widen);
3697 if (shift != 0) {
3698 /* The shift is less than the width of the source
3699 type, so in some cases we can just
3700 shift the whole register. */
3701 if (size == 1 || (size == 0 && u)) {
3702 gen_op_shll_T0_im(shift);
3703 gen_op_shll_T1_im(shift);
3704 } else {
3705 switch (size) {
3706 case 0: gen_op_neon_shll_u16(shift); break;
3707 case 2: gen_op_neon_shll_u64(shift); break;
3708 default: abort();
3709 }
3710 }
3711 }
3712 NEON_SET_REG(T0, rd, n * 2);
3713 NEON_SET_REG(T1, rd, n * 2 + 1);
3714 }
3715 } else if (op == 15 || op == 16) {
3716 /* VCVT fixed-point. */
3717 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3718 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
3719 if (op & 1) {
3720 if (u)
3721 gen_op_vfp_ultos(shift);
3722 else
3723 gen_op_vfp_sltos(shift);
3724 } else {
3725 if (u)
3726 gen_op_vfp_touls(shift);
3727 else
3728 gen_op_vfp_tosls(shift);
2c0262af 3729 }
9ee6e8bb 3730 gen_op_vfp_setreg_F0s(neon_reg_offset(rd, pass));
2c0262af
FB
3731 }
3732 } else {
9ee6e8bb
PB
3733 return 1;
3734 }
3735 } else { /* (insn & 0x00380080) == 0 */
3736 int invert;
3737
3738 op = (insn >> 8) & 0xf;
3739 /* One register and immediate. */
3740 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
3741 invert = (insn & (1 << 5)) != 0;
3742 switch (op) {
3743 case 0: case 1:
3744 /* no-op */
3745 break;
3746 case 2: case 3:
3747 imm <<= 8;
3748 break;
3749 case 4: case 5:
3750 imm <<= 16;
3751 break;
3752 case 6: case 7:
3753 imm <<= 24;
3754 break;
3755 case 8: case 9:
3756 imm |= imm << 16;
3757 break;
3758 case 10: case 11:
3759 imm = (imm << 8) | (imm << 24);
3760 break;
3761 case 12:
3762 imm = (imm < 8) | 0xff;
3763 break;
3764 case 13:
3765 imm = (imm << 16) | 0xffff;
3766 break;
3767 case 14:
3768 imm |= (imm << 8) | (imm << 16) | (imm << 24);
3769 if (invert)
3770 imm = ~imm;
3771 break;
3772 case 15:
3773 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
3774 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
3775 break;
3776 }
3777 if (invert)
3778 imm = ~imm;
3779
3780 if (op != 14 || !invert)
3781 gen_op_movl_T1_im(imm);
3782
3783 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3784 if (op & 1 && op < 12) {
3785 NEON_GET_REG(T0, rd, pass);
3786 if (invert) {
3787 /* The immediate value has already been inverted, so
3788 BIC becomes AND. */
3789 gen_op_andl_T0_T1();
3790 } else {
3791 gen_op_orl_T0_T1();
3792 }
3793 NEON_SET_REG(T0, rd, pass);
3794 } else {
3795 if (op == 14 && invert) {
3796 uint32_t tmp;
3797 tmp = 0;
3798 for (n = 0; n < 4; n++) {
3799 if (imm & (1 << (n + (pass & 1) * 4)))
3800 tmp |= 0xff << (n * 8);
3801 }
3802 gen_op_movl_T1_im(tmp);
3803 }
3804 /* VMOV, VMVN. */
3805 NEON_SET_REG(T1, rd, pass);
3806 }
3807 }
3808 }
3809 } else { /* (insn & 0x00800010 == 0x00800010) */
3810 if (size != 3) {
3811 op = (insn >> 8) & 0xf;
3812 if ((insn & (1 << 6)) == 0) {
3813 /* Three registers of different lengths. */
3814 int src1_wide;
3815 int src2_wide;
3816 int prewiden;
3817 /* prewiden, src1_wide, src2_wide */
3818 static const int neon_3reg_wide[16][3] = {
3819 {1, 0, 0}, /* VADDL */
3820 {1, 1, 0}, /* VADDW */
3821 {1, 0, 0}, /* VSUBL */
3822 {1, 1, 0}, /* VSUBW */
3823 {0, 1, 1}, /* VADDHN */
3824 {0, 0, 0}, /* VABAL */
3825 {0, 1, 1}, /* VSUBHN */
3826 {0, 0, 0}, /* VABDL */
3827 {0, 0, 0}, /* VMLAL */
3828 {0, 0, 0}, /* VQDMLAL */
3829 {0, 0, 0}, /* VMLSL */
3830 {0, 0, 0}, /* VQDMLSL */
3831 {0, 0, 0}, /* Integer VMULL */
3832 {0, 0, 0}, /* VQDMULL */
3833 {0, 0, 0} /* Polynomial VMULL */
3834 };
3835
3836 prewiden = neon_3reg_wide[op][0];
3837 src1_wide = neon_3reg_wide[op][1];
3838 src2_wide = neon_3reg_wide[op][2];
3839
3840 /* Avoid overlapping operands. Wide source operands are
3841 always aligned so will never overlap with wide
3842 destinations in problematic ways. */
3843 if (rd == rm) {
3844 NEON_GET_REG(T2, rm, 1);
3845 } else if (rd == rn) {
3846 NEON_GET_REG(T2, rn, 1);
3847 }
3848 for (pass = 0; pass < 2; pass++) {
3849 /* Load the second operand into env->vfp.scratch.
3850 Also widen narrow operands. */
3851 if (pass == 1 && rd == rm) {
3852 if (prewiden) {
3853 gen_op_movl_T0_T2();
3854 } else {
3855 gen_op_movl_T1_T2();
3856 }
3857 } else {
3858 if (src2_wide) {
3859 NEON_GET_REG(T0, rm, pass * 2);
3860 NEON_GET_REG(T1, rm, pass * 2 + 1);
3861 } else {
3862 if (prewiden) {
3863 NEON_GET_REG(T0, rm, pass);
3864 } else {
3865 NEON_GET_REG(T1, rm, pass);
3866 }
3867 }
3868 }
3869 if (prewiden && !src2_wide) {
3870 GEN_NEON_INTEGER_OP(widen);
3871 }
3872 if (prewiden || src2_wide) {
3873 gen_neon_movl_scratch_T0(0);
3874 gen_neon_movl_scratch_T1(1);
3875 }
3876
3877 /* Load the first operand. */
3878 if (pass == 1 && rd == rn) {
3879 gen_op_movl_T0_T2();
3880 } else {
3881 if (src1_wide) {
3882 NEON_GET_REG(T0, rn, pass * 2);
3883 NEON_GET_REG(T1, rn, pass * 2 + 1);
3884 } else {
3885 NEON_GET_REG(T0, rn, pass);
3886 }
3887 }
3888 if (prewiden && !src1_wide) {
3889 GEN_NEON_INTEGER_OP(widen);
3890 }
3891 switch (op) {
3892 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
3893 switch (size) {
3894 case 0: gen_op_neon_addl_u16(); break;
3895 case 1: gen_op_neon_addl_u32(); break;
3896 case 2: gen_op_neon_addl_u64(); break;
3897 default: abort();
3898 }
3899 break;
3900 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
3901 switch (size) {
3902 case 0: gen_op_neon_subl_u16(); break;
3903 case 1: gen_op_neon_subl_u32(); break;
3904 case 2: gen_op_neon_subl_u64(); break;
3905 default: abort();
3906 }
3907 break;
3908 case 5: case 7: /* VABAL, VABDL */
3909 switch ((size << 1) | u) {
3910 case 0: gen_op_neon_abdl_s16(); break;
3911 case 1: gen_op_neon_abdl_u16(); break;
3912 case 2: gen_op_neon_abdl_s32(); break;
3913 case 3: gen_op_neon_abdl_u32(); break;
3914 case 4: gen_op_neon_abdl_s64(); break;
3915 case 5: gen_op_neon_abdl_u64(); break;
3916 default: abort();
3917 }
3918 break;
3919 case 8: case 9: case 10: case 11: case 12: case 13:
3920 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
3921 switch ((size << 1) | u) {
3922 case 0: gen_op_neon_mull_s8(); break;
3923 case 1: gen_op_neon_mull_u8(); break;
3924 case 2: gen_op_neon_mull_s16(); break;
3925 case 3: gen_op_neon_mull_u16(); break;
3926 case 4: gen_op_imull_T0_T1(); break;
3927 case 5: gen_op_mull_T0_T1(); break;
3928 default: abort();
3929 }
3930 break;
3931 case 14: /* Polynomial VMULL */
3932 cpu_abort(env, "Polynomial VMULL not implemented");
3933
3934 default: /* 15 is RESERVED. */
3935 return 1;
3936 }
3937 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
3938 /* Accumulate. */
3939 if (op == 10 || op == 11) {
3940 switch (size) {
3941 case 0: gen_op_neon_negl_u16(); break;
3942 case 1: gen_op_neon_negl_u32(); break;
3943 case 2: gen_op_neon_negl_u64(); break;
3944 default: abort();
3945 }
3946 }
3947
3948 gen_neon_movl_scratch_T0(0);
3949 gen_neon_movl_scratch_T1(1);
3950
3951 if (op != 13) {
3952 NEON_GET_REG(T0, rd, pass * 2);
3953 NEON_GET_REG(T1, rd, pass * 2 + 1);
3954 }
3955
3956 switch (op) {
3957 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
3958 switch (size) {
3959 case 0: gen_op_neon_addl_u16(); break;
3960 case 1: gen_op_neon_addl_u32(); break;
3961 case 2: gen_op_neon_addl_u64(); break;
3962 default: abort();
3963 }
3964 break;
3965 case 9: case 11: /* VQDMLAL, VQDMLSL */
3966 switch (size) {
3967 case 1: gen_op_neon_addl_saturate_s32(); break;
3968 case 2: gen_op_neon_addl_saturate_s64(); break;
3969 default: abort();
3970 }
3971 /* Fall through. */
3972 case 13: /* VQDMULL */
3973 switch (size) {
3974 case 1: gen_op_neon_addl_saturate_s32(); break;
3975 case 2: gen_op_neon_addl_saturate_s64(); break;
3976 default: abort();
3977 }
3978 break;
3979 default:
3980 abort();
3981 }
3982 NEON_SET_REG(T0, rd, pass * 2);
3983 NEON_SET_REG(T1, rd, pass * 2 + 1);
3984 } else if (op == 4 || op == 6) {
3985 /* Narrowing operation. */
3986 if (u) {
3987 switch (size) {
3988 case 0: gen_op_neon_narrow_high_u8(); break;
3989 case 1: gen_op_neon_narrow_high_u16(); break;
3990 case 2: gen_op_movl_T0_T1(); break;
3991 default: abort();
3992 }
3993 } else {
3994 switch (size) {
3995 case 0: gen_op_neon_narrow_high_round_u8(); break;
3996 case 1: gen_op_neon_narrow_high_round_u16(); break;
3997 case 2: gen_op_neon_narrow_high_round_u32(); break;
3998 default: abort();
3999 }
4000 }
4001 NEON_SET_REG(T0, rd, pass);
4002 } else {
4003 /* Write back the result. */
4004 NEON_SET_REG(T0, rd, pass * 2);
4005 NEON_SET_REG(T1, rd, pass * 2 + 1);
4006 }
4007 }
4008 } else {
4009 /* Two registers and a scalar. */
4010 switch (op) {
4011 case 0: /* Integer VMLA scalar */
4012 case 1: /* Float VMLA scalar */
4013 case 4: /* Integer VMLS scalar */
4014 case 5: /* Floating point VMLS scalar */
4015 case 8: /* Integer VMUL scalar */
4016 case 9: /* Floating point VMUL scalar */
4017 case 12: /* VQDMULH scalar */
4018 case 13: /* VQRDMULH scalar */
4019 gen_neon_get_scalar(size, rm);
4020 gen_op_movl_T2_T0();
4021 for (pass = 0; pass < (u ? 4 : 2); pass++) {
4022 if (pass != 0)
4023 gen_op_movl_T0_T2();
4024 NEON_GET_REG(T1, rn, pass);
4025 if (op == 12) {
4026 if (size == 1) {
4027 gen_op_neon_qdmulh_s16();
4028 } else {
4029 gen_op_neon_qdmulh_s32();
4030 }
4031 } else if (op == 13) {
4032 if (size == 1) {
4033 gen_op_neon_qrdmulh_s16();
4034 } else {
4035 gen_op_neon_qrdmulh_s32();
4036 }
4037 } else if (op & 1) {
4038 gen_op_neon_mul_f32();
4039 } else {
4040 switch (size) {
4041 case 0: gen_op_neon_mul_u8(); break;
4042 case 1: gen_op_neon_mul_u16(); break;
4043 case 2: gen_op_mul_T0_T1(); break;
4044 default: return 1;
4045 }
4046 }
4047 if (op < 8) {
4048 /* Accumulate. */
4049 NEON_GET_REG(T1, rd, pass);
4050 switch (op) {
4051 case 0:
4052 gen_neon_add(size);
4053 break;
4054 case 1:
4055 gen_op_neon_add_f32();
4056 break;
4057 case 4:
4058 switch (size) {
4059 case 0: gen_op_neon_rsb_u8(); break;
4060 case 1: gen_op_neon_rsb_u16(); break;
4061 case 2: gen_op_rsbl_T0_T1(); break;
4062 default: return 1;
4063 }
4064 break;
4065 case 5:
4066 gen_op_neon_rsb_f32();
4067 break;
4068 default:
4069 abort();
4070 }
4071 }
4072 NEON_SET_REG(T0, rd, pass);
4073 }
4074 break;
4075 case 2: /* VMLAL sclar */
4076 case 3: /* VQDMLAL scalar */
4077 case 6: /* VMLSL scalar */
4078 case 7: /* VQDMLSL scalar */
4079 case 10: /* VMULL scalar */
4080 case 11: /* VQDMULL scalar */
4081 if (rd == rn) {
4082 /* Save overlapping operands before they are
4083 clobbered. */
4084 NEON_GET_REG(T0, rn, 1);
4085 gen_neon_movl_scratch_T0(2);
4086 }
4087 gen_neon_get_scalar(size, rm);
4088 gen_op_movl_T2_T0();
4089 for (pass = 0; pass < 2; pass++) {
4090 if (pass != 0) {
4091 gen_op_movl_T0_T2();
4092 }
4093 if (pass != 0 && rd == rn) {
4094 gen_neon_movl_T1_scratch(2);
4095 } else {
4096 NEON_GET_REG(T1, rn, pass);
4097 }
4098 switch ((size << 1) | u) {
4099 case 0: gen_op_neon_mull_s8(); break;
4100 case 1: gen_op_neon_mull_u8(); break;
4101 case 2: gen_op_neon_mull_s16(); break;
4102 case 3: gen_op_neon_mull_u16(); break;
4103 case 4: gen_op_imull_T0_T1(); break;
4104 case 5: gen_op_mull_T0_T1(); break;
4105 default: abort();
4106 }
4107 if (op == 6 || op == 7) {
4108 switch (size) {
4109 case 0: gen_op_neon_negl_u16(); break;
4110 case 1: gen_op_neon_negl_u32(); break;
4111 case 2: gen_op_neon_negl_u64(); break;
4112 default: abort();
4113 }
4114 }
4115 gen_neon_movl_scratch_T0(0);
4116 gen_neon_movl_scratch_T1(1);
4117 NEON_GET_REG(T0, rd, pass * 2);
4118 NEON_GET_REG(T1, rd, pass * 2 + 1);
4119 switch (op) {
4120 case 2: case 6:
4121 switch (size) {
4122 case 0: gen_op_neon_addl_u16(); break;
4123 case 1: gen_op_neon_addl_u32(); break;
4124 case 2: gen_op_neon_addl_u64(); break;
4125 default: abort();
4126 }
4127 break;
4128 case 3: case 7:
4129 switch (size) {
4130 case 1:
4131 gen_op_neon_addl_saturate_s32();
4132 gen_op_neon_addl_saturate_s32();
4133 break;
4134 case 2:
4135 gen_op_neon_addl_saturate_s64();
4136 gen_op_neon_addl_saturate_s64();
4137 break;
4138 default: abort();
4139 }
4140 break;
4141 case 10:
4142 /* no-op */
4143 break;
4144 case 11:
4145 switch (size) {
4146 case 1: gen_op_neon_addl_saturate_s32(); break;
4147 case 2: gen_op_neon_addl_saturate_s64(); break;
4148 default: abort();
4149 }
4150 break;
4151 default:
4152 abort();
4153 }
4154 NEON_SET_REG(T0, rd, pass * 2);
4155 NEON_SET_REG(T1, rd, pass * 2 + 1);
4156 }
4157 break;
4158 default: /* 14 and 15 are RESERVED */
4159 return 1;
4160 }
4161 }
4162 } else { /* size == 3 */
4163 if (!u) {
4164 /* Extract. */
4165 int reg;
4166 imm = (insn >> 8) & 0xf;
4167 reg = rn;
4168 count = q ? 4 : 2;
4169 n = imm >> 2;
4170 NEON_GET_REG(T0, reg, n);
4171 for (pass = 0; pass < count; pass++) {
4172 n++;
4173 if (n > count) {
4174 reg = rm;
4175 n -= count;
4176 }
4177 if (imm & 3) {
4178 NEON_GET_REG(T1, reg, n);
4179 gen_op_neon_extract((insn << 3) & 0x1f);
4180 }
4181 /* ??? This is broken if rd and rm overlap */
4182 NEON_SET_REG(T0, rd, pass);
4183 if (imm & 3) {
4184 gen_op_movl_T0_T1();
4185 } else {
4186 NEON_GET_REG(T0, reg, n);
4187 }
4188 }
4189 } else if ((insn & (1 << 11)) == 0) {
4190 /* Two register misc. */
4191 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
4192 size = (insn >> 18) & 3;
4193 switch (op) {
4194 case 0: /* VREV64 */
4195 if (size == 3)
4196 return 1;
4197 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4198 NEON_GET_REG(T0, rm, pass * 2);
4199 NEON_GET_REG(T1, rm, pass * 2 + 1);
4200 switch (size) {
4201 case 0: gen_op_rev_T0(); break;
4202 case 1: gen_op_revh_T0(); break;
4203 case 2: /* no-op */ break;
4204 default: abort();
4205 }
4206 NEON_SET_REG(T0, rd, pass * 2 + 1);
4207 if (size == 2) {
4208 NEON_SET_REG(T1, rd, pass * 2);
4209 } else {
4210 gen_op_movl_T0_T1();
4211 switch (size) {
4212 case 0: gen_op_rev_T0(); break;
4213 case 1: gen_op_revh_T0(); break;
4214 default: abort();
4215 }
4216 NEON_SET_REG(T0, rd, pass * 2);
4217 }
4218 }
4219 break;
4220 case 4: case 5: /* VPADDL */
4221 case 12: case 13: /* VPADAL */
4222 if (size < 2)
4223 goto elementwise;
4224 if (size == 3)
4225 return 1;
4226 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4227 NEON_GET_REG(T0, rm, pass * 2);
4228 NEON_GET_REG(T1, rm, pass * 2 + 1);
4229 if (op & 1)
4230 gen_op_neon_paddl_u32();
4231 else
4232 gen_op_neon_paddl_s32();
4233 if (op >= 12) {
4234 /* Accumulate. */
4235 gen_neon_movl_scratch_T0(0);
4236 gen_neon_movl_scratch_T1(1);
4237
4238 NEON_GET_REG(T0, rd, pass * 2);
4239 NEON_GET_REG(T1, rd, pass * 2 + 1);
4240 gen_op_neon_addl_u64();
4241 }
4242 NEON_SET_REG(T0, rd, pass * 2);
4243 NEON_SET_REG(T1, rd, pass * 2 + 1);
4244 }
4245 break;
4246 case 33: /* VTRN */
4247 if (size == 2) {
4248 for (n = 0; n < (q ? 4 : 2); n += 2) {
4249 NEON_GET_REG(T0, rm, n);
4250 NEON_GET_REG(T1, rd, n + 1);
4251 NEON_SET_REG(T1, rm, n);
4252 NEON_SET_REG(T0, rd, n + 1);
4253 }
4254 } else {
4255 goto elementwise;
4256 }
4257 break;
4258 case 34: /* VUZP */
4259 /* Reg Before After
4260 Rd A3 A2 A1 A0 B2 B0 A2 A0
4261 Rm B3 B2 B1 B0 B3 B1 A3 A1
4262 */
4263 if (size == 3)
4264 return 1;
4265 gen_neon_unzip(rd, q, 0, size);
4266 gen_neon_unzip(rm, q, 4, size);
4267 if (q) {
4268 static int unzip_order_q[8] =
4269 {0, 2, 4, 6, 1, 3, 5, 7};
4270 for (n = 0; n < 8; n++) {
4271 int reg = (n < 4) ? rd : rm;
4272 gen_neon_movl_T0_scratch(unzip_order_q[n]);
4273 NEON_SET_REG(T0, reg, n % 4);
4274 }
4275 } else {
4276 static int unzip_order[4] =
4277 {0, 4, 1, 5};
4278 for (n = 0; n < 4; n++) {
4279 int reg = (n < 2) ? rd : rm;
4280 gen_neon_movl_T0_scratch(unzip_order[n]);
4281 NEON_SET_REG(T0, reg, n % 2);
4282 }
4283 }
4284 break;
4285 case 35: /* VZIP */
4286 /* Reg Before After
4287 Rd A3 A2 A1 A0 B1 A1 B0 A0
4288 Rm B3 B2 B1 B0 B3 A3 B2 A2
4289 */
4290 if (size == 3)
4291 return 1;
4292 count = (q ? 4 : 2);
4293 for (n = 0; n < count; n++) {
4294 NEON_GET_REG(T0, rd, n);
4295 NEON_GET_REG(T1, rd, n);
4296 switch (size) {
4297 case 0: gen_op_neon_zip_u8(); break;
4298 case 1: gen_op_neon_zip_u16(); break;
4299 case 2: /* no-op */; break;
4300 default: abort();
4301 }
4302 gen_neon_movl_scratch_T0(n * 2);
4303 gen_neon_movl_scratch_T1(n * 2 + 1);
4304 }
4305 for (n = 0; n < count * 2; n++) {
4306 int reg = (n < count) ? rd : rm;
4307 gen_neon_movl_T0_scratch(n);
4308 NEON_SET_REG(T0, reg, n % count);
4309 }
4310 break;
4311 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4312 for (pass = 0; pass < 2; pass++) {
4313 if (rd == rm + 1) {
4314 n = 1 - pass;
4315 } else {
4316 n = pass;
4317 }
4318 NEON_GET_REG(T0, rm, n * 2);
4319 NEON_GET_REG(T1, rm, n * 2 + 1);
4320 if (op == 36 && q == 0) {
4321 switch (size) {
4322 case 0: gen_op_neon_narrow_u8(); break;
4323 case 1: gen_op_neon_narrow_u16(); break;
4324 case 2: /* no-op */ break;
4325 default: return 1;
4326 }
4327 } else if (q) {
4328 switch (size) {
4329 case 0: gen_op_neon_narrow_sat_u8(); break;
4330 case 1: gen_op_neon_narrow_sat_u16(); break;
4331 case 2: gen_op_neon_narrow_sat_u32(); break;
4332 default: return 1;
4333 }
4334 } else {
4335 switch (size) {
4336 case 0: gen_op_neon_narrow_sat_s8(); break;
4337 case 1: gen_op_neon_narrow_sat_s16(); break;
4338 case 2: gen_op_neon_narrow_sat_s32(); break;
4339 default: return 1;
4340 }
4341 }
4342 NEON_SET_REG(T0, rd, n);
4343 }
4344 break;
4345 case 38: /* VSHLL */
4346 if (q)
4347 return 1;
4348 if (rm == rd) {
4349 NEON_GET_REG(T2, rm, 1);
4350 }
4351 for (pass = 0; pass < 2; pass++) {
4352 if (pass == 1 && rm == rd) {
4353 gen_op_movl_T0_T2();
4354 } else {
4355 NEON_GET_REG(T0, rm, pass);
4356 }
4357 switch (size) {
4358 case 0: gen_op_neon_widen_high_u8(); break;
4359 case 1: gen_op_neon_widen_high_u16(); break;
4360 case 2:
4361 gen_op_movl_T1_T0();
4362 gen_op_movl_T0_im(0);
4363 break;
4364 default: return 1;
4365 }
4366 NEON_SET_REG(T0, rd, pass * 2);
4367 NEON_SET_REG(T1, rd, pass * 2 + 1);
4368 }
4369 break;
4370 default:
4371 elementwise:
4372 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4373 if (op == 30 || op == 31 || op >= 58) {
4374 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
4375 } else {
4376 NEON_GET_REG(T0, rm, pass);
4377 }
4378 switch (op) {
4379 case 1: /* VREV32 */
4380 switch (size) {
4381 case 0: gen_op_rev_T0(); break;
4382 case 1: gen_op_revh_T0(); break;
4383 default: return 1;
4384 }
4385 break;
4386 case 2: /* VREV16 */
4387 if (size != 0)
4388 return 1;
4389 gen_op_rev16_T0();
4390 break;
4391 case 4: case 5: /* VPADDL */
4392 case 12: case 13: /* VPADAL */
4393 switch ((size << 1) | (op & 1)) {
4394 case 0: gen_op_neon_paddl_s8(); break;
4395 case 1: gen_op_neon_paddl_u8(); break;
4396 case 2: gen_op_neon_paddl_s16(); break;
4397 case 3: gen_op_neon_paddl_u16(); break;
4398 default: abort();
4399 }
4400 if (op >= 12) {
4401 /* Accumulate */
4402 NEON_GET_REG(T1, rd, pass);
4403 switch (size) {
4404 case 0: gen_op_neon_add_u16(); break;
4405 case 1: gen_op_addl_T0_T1(); break;
4406 default: abort();
4407 }
4408 }
4409 break;
4410 case 8: /* CLS */
4411 switch (size) {
4412 case 0: gen_op_neon_cls_s8(); break;
4413 case 1: gen_op_neon_cls_s16(); break;
4414 case 2: gen_op_neon_cls_s32(); break;
4415 default: return 1;
4416 }
4417 break;
4418 case 9: /* CLZ */
4419 switch (size) {
4420 case 0: gen_op_neon_clz_u8(); break;
4421 case 1: gen_op_neon_clz_u16(); break;
4422 case 2: gen_op_clz_T0(); break;
4423 default: return 1;
4424 }
4425 break;
4426 case 10: /* CNT */
4427 if (size != 0)
4428 return 1;
4429 gen_op_neon_cnt_u8();
4430 break;
4431 case 11: /* VNOT */
4432 if (size != 0)
4433 return 1;
4434 gen_op_notl_T0();
4435 break;
4436 case 14: /* VQABS */
4437 switch (size) {
4438 case 0: gen_op_neon_qabs_s8(); break;
4439 case 1: gen_op_neon_qabs_s16(); break;
4440 case 2: gen_op_neon_qabs_s32(); break;
4441 default: return 1;
4442 }
4443 break;
4444 case 15: /* VQNEG */
4445 switch (size) {
4446 case 0: gen_op_neon_qneg_s8(); break;
4447 case 1: gen_op_neon_qneg_s16(); break;
4448 case 2: gen_op_neon_qneg_s32(); break;
4449 default: return 1;
4450 }
4451 break;
4452 case 16: case 19: /* VCGT #0, VCLE #0 */
4453 gen_op_movl_T1_im(0);
4454 switch(size) {
4455 case 0: gen_op_neon_cgt_s8(); break;
4456 case 1: gen_op_neon_cgt_s16(); break;
4457 case 2: gen_op_neon_cgt_s32(); break;
4458 default: return 1;
4459 }
4460 if (op == 19)
4461 gen_op_notl_T0();
4462 break;
4463 case 17: case 20: /* VCGE #0, VCLT #0 */
4464 gen_op_movl_T1_im(0);
4465 switch(size) {
4466 case 0: gen_op_neon_cge_s8(); break;
4467 case 1: gen_op_neon_cge_s16(); break;
4468 case 2: gen_op_neon_cge_s32(); break;
4469 default: return 1;
4470 }
4471 if (op == 20)
4472 gen_op_notl_T0();
4473 break;
4474 case 18: /* VCEQ #0 */
4475 gen_op_movl_T1_im(0);
4476 switch(size) {
4477 case 0: gen_op_neon_ceq_u8(); break;
4478 case 1: gen_op_neon_ceq_u16(); break;
4479 case 2: gen_op_neon_ceq_u32(); break;
4480 default: return 1;
4481 }
4482 break;
4483 case 22: /* VABS */
4484 switch(size) {
4485 case 0: gen_op_neon_abs_s8(); break;
4486 case 1: gen_op_neon_abs_s16(); break;
4487 case 2: gen_op_neon_abs_s32(); break;
4488 default: return 1;
4489 }
4490 break;
4491 case 23: /* VNEG */
4492 gen_op_movl_T1_im(0);
4493 switch(size) {
4494 case 0: gen_op_neon_rsb_u8(); break;
4495 case 1: gen_op_neon_rsb_u16(); break;
4496 case 2: gen_op_rsbl_T0_T1(); break;
4497 default: return 1;
4498 }
4499 break;
4500 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
4501 gen_op_movl_T1_im(0);
4502 gen_op_neon_cgt_f32();
4503 if (op == 27)
4504 gen_op_notl_T0();
4505 break;
4506 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
4507 gen_op_movl_T1_im(0);
4508 gen_op_neon_cge_f32();
4509 if (op == 28)
4510 gen_op_notl_T0();
4511 break;
4512 case 26: /* Float VCEQ #0 */
4513 gen_op_movl_T1_im(0);
4514 gen_op_neon_ceq_f32();
4515 break;
4516 case 30: /* Float VABS */
4517 gen_op_vfp_abss();
4518 break;
4519 case 31: /* Float VNEG */
4520 gen_op_vfp_negs();
4521 break;
4522 case 32: /* VSWP */
4523 NEON_GET_REG(T1, rd, pass);
4524 NEON_SET_REG(T1, rm, pass);
4525 break;
4526 case 33: /* VTRN */
4527 NEON_GET_REG(T1, rd, pass);
4528 switch (size) {
4529 case 0: gen_op_neon_trn_u8(); break;
4530 case 1: gen_op_neon_trn_u16(); break;
4531 case 2: abort();
4532 default: return 1;
4533 }
4534 NEON_SET_REG(T1, rm, pass);
4535 break;
4536 case 56: /* Integer VRECPE */
4537 gen_op_neon_recpe_u32();
4538 break;
4539 case 57: /* Integer VRSQRTE */
4540 gen_op_neon_rsqrte_u32();
4541 break;
4542 case 58: /* Float VRECPE */
4543 gen_op_neon_recpe_f32();
4544 break;
4545 case 59: /* Float VRSQRTE */
4546 gen_op_neon_rsqrte_f32();
4547 break;
4548 case 60: /* VCVT.F32.S32 */
4549 gen_op_vfp_tosizs();
4550 break;
4551 case 61: /* VCVT.F32.U32 */
4552 gen_op_vfp_touizs();
4553 break;
4554 case 62: /* VCVT.S32.F32 */
4555 gen_op_vfp_sitos();
4556 break;
4557 case 63: /* VCVT.U32.F32 */
4558 gen_op_vfp_uitos();
4559 break;
4560 default:
4561 /* Reserved: 21, 29, 39-56 */
4562 return 1;
4563 }
4564 if (op == 30 || op == 31 || op >= 58) {
4565 gen_op_vfp_setreg_F0s(neon_reg_offset(rm, pass));
4566 } else {
4567 NEON_SET_REG(T0, rd, pass);
4568 }
4569 }
4570 break;
4571 }
4572 } else if ((insn & (1 << 10)) == 0) {
4573 /* VTBL, VTBX. */
4574 n = (insn >> 5) & 0x18;
4575 NEON_GET_REG(T1, rm, 0);
4576 if (insn & (1 << 6)) {
4577 NEON_GET_REG(T0, rd, 0);
4578 } else {
4579 gen_op_movl_T0_im(0);
4580 }
4581 gen_op_neon_tbl(rn, n);
4582 gen_op_movl_T2_T0();
4583 NEON_GET_REG(T1, rm, 1);
4584 if (insn & (1 << 6)) {
4585 NEON_GET_REG(T0, rd, 0);
4586 } else {
4587 gen_op_movl_T0_im(0);
4588 }
4589 gen_op_neon_tbl(rn, n);
4590 NEON_SET_REG(T2, rd, 0);
4591 NEON_SET_REG(T0, rd, 1);
4592 } else if ((insn & 0x380) == 0) {
4593 /* VDUP */
4594 if (insn & (1 << 19)) {
4595 NEON_SET_REG(T0, rm, 1);
4596 } else {
4597 NEON_SET_REG(T0, rm, 0);
4598 }
4599 if (insn & (1 << 16)) {
4600 gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
4601 } else if (insn & (1 << 17)) {
4602 if ((insn >> 18) & 1)
4603 gen_op_neon_dup_high16();
4604 else
4605 gen_op_neon_dup_low16();
4606 }
4607 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4608 NEON_SET_REG(T0, rd, pass);
4609 }
4610 } else {
4611 return 1;
4612 }
4613 }
4614 }
4615 return 0;
4616}
4617
4618static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
4619{
4620 int cpnum;
4621
4622 cpnum = (insn >> 8) & 0xf;
4623 if (arm_feature(env, ARM_FEATURE_XSCALE)
4624 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
4625 return 1;
4626
4627 switch (cpnum) {
4628 case 0:
4629 case 1:
4630 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4631 return disas_iwmmxt_insn(env, s, insn);
4632 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
4633 return disas_dsp_insn(env, s, insn);
4634 }
4635 return 1;
4636 case 10:
4637 case 11:
4638 return disas_vfp_insn (env, s, insn);
4639 case 15:
4640 return disas_cp15_insn (env, s, insn);
4641 default:
4642 /* Unknown coprocessor. See if the board has hooked it. */
4643 return disas_cp_insn (env, s, insn);
4644 }
4645}
4646
4647static void disas_arm_insn(CPUState * env, DisasContext *s)
4648{
4649 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
4650
4651 insn = ldl_code(s->pc);
4652 s->pc += 4;
4653
4654 /* M variants do not implement ARM mode. */
4655 if (IS_M(env))
4656 goto illegal_op;
4657 cond = insn >> 28;
4658 if (cond == 0xf){
4659 /* Unconditional instructions. */
4660 if (((insn >> 25) & 7) == 1) {
4661 /* NEON Data processing. */
4662 if (!arm_feature(env, ARM_FEATURE_NEON))
4663 goto illegal_op;
4664
4665 if (disas_neon_data_insn(env, s, insn))
4666 goto illegal_op;
4667 return;
4668 }
4669 if ((insn & 0x0f100000) == 0x04000000) {
4670 /* NEON load/store. */
4671 if (!arm_feature(env, ARM_FEATURE_NEON))
4672 goto illegal_op;
4673
4674 if (disas_neon_ls_insn(env, s, insn))
4675 goto illegal_op;
4676 return;
4677 }
4678 if ((insn & 0x0d70f000) == 0x0550f000)
4679 return; /* PLD */
4680 else if ((insn & 0x0ffffdff) == 0x01010000) {
4681 ARCH(6);
4682 /* setend */
4683 if (insn & (1 << 9)) {
4684 /* BE8 mode not implemented. */
4685 goto illegal_op;
4686 }
4687 return;
4688 } else if ((insn & 0x0fffff00) == 0x057ff000) {
4689 switch ((insn >> 4) & 0xf) {
4690 case 1: /* clrex */
4691 ARCH(6K);
4692 gen_op_clrex();
4693 return;
4694 case 4: /* dsb */
4695 case 5: /* dmb */
4696 case 6: /* isb */
4697 ARCH(7);
4698 /* We don't emulate caches so these are a no-op. */
4699 return;
4700 default:
4701 goto illegal_op;
4702 }
4703 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
4704 /* srs */
4705 uint32_t offset;
4706 if (IS_USER(s))
4707 goto illegal_op;
4708 ARCH(6);
4709 op1 = (insn & 0x1f);
4710 if (op1 == (env->uncached_cpsr & CPSR_M)) {
4711 gen_movl_T1_reg(s, 13);
4712 } else {
4713 gen_op_movl_T1_r13_banked(op1);
4714 }
4715 i = (insn >> 23) & 3;
4716 switch (i) {
4717 case 0: offset = -4; break; /* DA */
4718 case 1: offset = -8; break; /* DB */
4719 case 2: offset = 0; break; /* IA */
4720 case 3: offset = 4; break; /* IB */
4721 default: abort();
4722 }
4723 if (offset)
4724 gen_op_addl_T1_im(offset);
4725 gen_movl_T0_reg(s, 14);
4726 gen_ldst(stl, s);
4727 gen_op_movl_T0_cpsr();
4728 gen_op_addl_T1_im(4);
4729 gen_ldst(stl, s);
4730 if (insn & (1 << 21)) {
4731 /* Base writeback. */
4732 switch (i) {
4733 case 0: offset = -8; break;
4734 case 1: offset = -4; break;
4735 case 2: offset = 4; break;
4736 case 3: offset = 0; break;
4737 default: abort();
4738 }
4739 if (offset)
4740 gen_op_addl_T1_im(offset);
4741 if (op1 == (env->uncached_cpsr & CPSR_M)) {
4742 gen_movl_reg_T1(s, 13);
4743 } else {
4744 gen_op_movl_r13_T1_banked(op1);
4745 }
4746 }
4747 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
4748 /* rfe */
4749 uint32_t offset;
4750 if (IS_USER(s))
4751 goto illegal_op;
4752 ARCH(6);
4753 rn = (insn >> 16) & 0xf;
4754 gen_movl_T1_reg(s, rn);
4755 i = (insn >> 23) & 3;
4756 switch (i) {
4757 case 0: offset = 0; break; /* DA */
4758 case 1: offset = -4; break; /* DB */
4759 case 2: offset = 4; break; /* IA */
4760 case 3: offset = 8; break; /* IB */
4761 default: abort();
4762 }
4763 if (offset)
4764 gen_op_addl_T1_im(offset);
4765 /* Load CPSR into T2 and PC into T0. */
4766 gen_ldst(ldl, s);
4767 gen_op_movl_T2_T0();
4768 gen_op_addl_T1_im(-4);
4769 gen_ldst(ldl, s);
4770 if (insn & (1 << 21)) {
4771 /* Base writeback. */
4772 switch (i) {
4773 case 0: offset = -4; break;
4774 case 1: offset = 0; break;
4775 case 2: offset = 8; break;
4776 case 3: offset = 4; break;
4777 default: abort();
4778 }
4779 if (offset)
4780 gen_op_addl_T1_im(offset);
4781 gen_movl_reg_T1(s, rn);
4782 }
4783 gen_rfe(s);
4784 } else if ((insn & 0x0e000000) == 0x0a000000) {
4785 /* branch link and change to thumb (blx <offset>) */
4786 int32_t offset;
4787
4788 val = (uint32_t)s->pc;
4789 gen_op_movl_T0_im(val);
4790 gen_movl_reg_T0(s, 14);
4791 /* Sign-extend the 24-bit offset */
4792 offset = (((int32_t)insn) << 8) >> 8;
4793 /* offset * 4 + bit24 * 2 + (thumb bit) */
4794 val += (offset << 2) | ((insn >> 23) & 2) | 1;
4795 /* pipeline offset */
4796 val += 4;
4797 gen_op_movl_T0_im(val);
4798 gen_bx(s);
4799 return;
4800 } else if ((insn & 0x0e000f00) == 0x0c000100) {
4801 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4802 /* iWMMXt register transfer. */
4803 if (env->cp15.c15_cpar & (1 << 1))
4804 if (!disas_iwmmxt_insn(env, s, insn))
4805 return;
4806 }
4807 } else if ((insn & 0x0fe00000) == 0x0c400000) {
4808 /* Coprocessor double register transfer. */
4809 } else if ((insn & 0x0f000010) == 0x0e000010) {
4810 /* Additional coprocessor register transfer. */
4811 } else if ((insn & 0x0ff10010) == 0x01000000) {
4812 uint32_t mask;
4813 uint32_t val;
4814 /* cps (privileged) */
4815 if (IS_USER(s))
4816 return;
4817 mask = val = 0;
4818 if (insn & (1 << 19)) {
4819 if (insn & (1 << 8))
4820 mask |= CPSR_A;
4821 if (insn & (1 << 7))
4822 mask |= CPSR_I;
4823 if (insn & (1 << 6))
4824 mask |= CPSR_F;
4825 if (insn & (1 << 18))
4826 val |= mask;
4827 }
4828 if (insn & (1 << 14)) {
4829 mask |= CPSR_M;
4830 val |= (insn & 0x1f);
4831 }
4832 if (mask) {
4833 gen_op_movl_T0_im(val);
4834 gen_set_psr_T0(s, mask, 0);
4835 }
4836 return;
4837 }
4838 goto illegal_op;
4839 }
4840 if (cond != 0xe) {
4841 /* if not always execute, we generate a conditional jump to
4842 next instruction */
4843 s->condlabel = gen_new_label();
4844 gen_test_cc[cond ^ 1](s->condlabel);
4845 s->condjmp = 1;
4846 }
4847 if ((insn & 0x0f900000) == 0x03000000) {
4848 if ((insn & (1 << 21)) == 0) {
4849 ARCH(6T2);
4850 rd = (insn >> 12) & 0xf;
4851 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
4852 if ((insn & (1 << 22)) == 0) {
4853 /* MOVW */
4854 gen_op_movl_T0_im(val);
4855 } else {
4856 /* MOVT */
4857 gen_movl_T0_reg(s, rd);
4858 gen_op_movl_T1_im(0xffff);
4859 gen_op_andl_T0_T1();
4860 gen_op_movl_T1_im(val << 16);
4861 gen_op_orl_T0_T1();
4862 }
4863 gen_movl_reg_T0(s, rd);
4864 } else {
4865 if (((insn >> 12) & 0xf) != 0xf)
4866 goto illegal_op;
4867 if (((insn >> 16) & 0xf) == 0) {
4868 gen_nop_hint(s, insn & 0xff);
4869 } else {
4870 /* CPSR = immediate */
4871 val = insn & 0xff;
4872 shift = ((insn >> 8) & 0xf) * 2;
4873 if (shift)
4874 val = (val >> shift) | (val << (32 - shift));
4875 gen_op_movl_T0_im(val);
4876 i = ((insn & (1 << 22)) != 0);
4877 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4878 goto illegal_op;
4879 }
4880 }
4881 } else if ((insn & 0x0f900000) == 0x01000000
4882 && (insn & 0x00000090) != 0x00000090) {
4883 /* miscellaneous instructions */
4884 op1 = (insn >> 21) & 3;
4885 sh = (insn >> 4) & 0xf;
4886 rm = insn & 0xf;
4887 switch (sh) {
4888 case 0x0: /* move program status register */
4889 if (op1 & 1) {
4890 /* PSR = reg */
4891 gen_movl_T0_reg(s, rm);
4892 i = ((op1 & 2) != 0);
4893 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4894 goto illegal_op;
4895 } else {
4896 /* reg = PSR */
4897 rd = (insn >> 12) & 0xf;
4898 if (op1 & 2) {
4899 if (IS_USER(s))
4900 goto illegal_op;
4901 gen_op_movl_T0_spsr();
4902 } else {
4903 gen_op_movl_T0_cpsr();
4904 }
4905 gen_movl_reg_T0(s, rd);
4906 }
4907 break;
4908 case 0x1:
4909 if (op1 == 1) {
4910 /* branch/exchange thumb (bx). */
4911 gen_movl_T0_reg(s, rm);
4912 gen_bx(s);
4913 } else if (op1 == 3) {
4914 /* clz */
4915 rd = (insn >> 12) & 0xf;
4916 gen_movl_T0_reg(s, rm);
4917 gen_op_clz_T0();
4918 gen_movl_reg_T0(s, rd);
4919 } else {
4920 goto illegal_op;
4921 }
4922 break;
4923 case 0x2:
4924 if (op1 == 1) {
4925 ARCH(5J); /* bxj */
4926 /* Trivial implementation equivalent to bx. */
4927 gen_movl_T0_reg(s, rm);
4928 gen_bx(s);
4929 } else {
4930 goto illegal_op;
4931 }
4932 break;
4933 case 0x3:
4934 if (op1 != 1)
4935 goto illegal_op;
4936
4937 /* branch link/exchange thumb (blx) */
4938 val = (uint32_t)s->pc;
4939 gen_op_movl_T1_im(val);
4940 gen_movl_T0_reg(s, rm);
4941 gen_movl_reg_T1(s, 14);
4942 gen_bx(s);
4943 break;
4944 case 0x5: /* saturating add/subtract */
4945 rd = (insn >> 12) & 0xf;
4946 rn = (insn >> 16) & 0xf;
4947 gen_movl_T0_reg(s, rm);
4948 gen_movl_T1_reg(s, rn);
4949 if (op1 & 2)
4950 gen_op_double_T1_saturate();
4951 if (op1 & 1)
4952 gen_op_subl_T0_T1_saturate();
4953 else
4954 gen_op_addl_T0_T1_saturate();
4955 gen_movl_reg_T0(s, rd);
4956 break;
4957 case 7: /* bkpt */
4958 gen_set_condexec(s);
4959 gen_op_movl_T0_im((long)s->pc - 4);
4960 gen_op_movl_reg_TN[0][15]();
4961 gen_op_bkpt();
4962 s->is_jmp = DISAS_JUMP;
4963 break;
4964 case 0x8: /* signed multiply */
4965 case 0xa:
4966 case 0xc:
4967 case 0xe:
4968 rs = (insn >> 8) & 0xf;
4969 rn = (insn >> 12) & 0xf;
4970 rd = (insn >> 16) & 0xf;
4971 if (op1 == 1) {
4972 /* (32 * 16) >> 16 */
4973 gen_movl_T0_reg(s, rm);
4974 gen_movl_T1_reg(s, rs);
4975 if (sh & 4)
4976 gen_op_sarl_T1_im(16);
4977 else
4978 gen_op_sxth_T1();
4979 gen_op_imulw_T0_T1();
4980 if ((sh & 2) == 0) {
4981 gen_movl_T1_reg(s, rn);
4982 gen_op_addl_T0_T1_setq();
4983 }
4984 gen_movl_reg_T0(s, rd);
4985 } else {
4986 /* 16 * 16 */
4987 gen_movl_T0_reg(s, rm);
4988 gen_movl_T1_reg(s, rs);
4989 gen_mulxy(sh & 2, sh & 4);
4990 if (op1 == 2) {
4991 gen_op_signbit_T1_T0();
4992 gen_op_addq_T0_T1(rn, rd);
4993 gen_movl_reg_T0(s, rn);
4994 gen_movl_reg_T1(s, rd);
4995 } else {
4996 if (op1 == 0) {
4997 gen_movl_T1_reg(s, rn);
4998 gen_op_addl_T0_T1_setq();
4999 }
5000 gen_movl_reg_T0(s, rd);
5001 }
5002 }
5003 break;
5004 default:
5005 goto illegal_op;
5006 }
5007 } else if (((insn & 0x0e000000) == 0 &&
5008 (insn & 0x00000090) != 0x90) ||
5009 ((insn & 0x0e000000) == (1 << 25))) {
5010 int set_cc, logic_cc, shiftop;
5011
5012 op1 = (insn >> 21) & 0xf;
5013 set_cc = (insn >> 20) & 1;
5014 logic_cc = table_logic_cc[op1] & set_cc;
5015
5016 /* data processing instruction */
5017 if (insn & (1 << 25)) {
5018 /* immediate operand */
5019 val = insn & 0xff;
5020 shift = ((insn >> 8) & 0xf) * 2;
5021 if (shift)
5022 val = (val >> shift) | (val << (32 - shift));
5023 gen_op_movl_T1_im(val);
5024 if (logic_cc && shift)
5025 gen_op_mov_CF_T1();
5026 } else {
5027 /* register */
5028 rm = (insn) & 0xf;
5029 gen_movl_T1_reg(s, rm);
5030 shiftop = (insn >> 5) & 3;
5031 if (!(insn & (1 << 4))) {
5032 shift = (insn >> 7) & 0x1f;
5033 if (shift != 0) {
5034 if (logic_cc) {
5035 gen_shift_T1_im_cc[shiftop](shift);
5036 } else {
5037 gen_shift_T1_im[shiftop](shift);
5038 }
5039 } else if (shiftop != 0) {
5040 if (logic_cc) {
5041 gen_shift_T1_0_cc[shiftop]();
5042 } else {
5043 gen_shift_T1_0[shiftop]();
5044 }
5045 }
5046 } else {
5047 rs = (insn >> 8) & 0xf;
5048 gen_movl_T0_reg(s, rs);
5049 if (logic_cc) {
5050 gen_shift_T1_T0_cc[shiftop]();
5051 } else {
5052 gen_shift_T1_T0[shiftop]();
5053 }
5054 }
5055 }
5056 if (op1 != 0x0f && op1 != 0x0d) {
5057 rn = (insn >> 16) & 0xf;
5058 gen_movl_T0_reg(s, rn);
5059 }
5060 rd = (insn >> 12) & 0xf;
5061 switch(op1) {
5062 case 0x00:
5063 gen_op_andl_T0_T1();
5064 gen_movl_reg_T0(s, rd);
5065 if (logic_cc)
5066 gen_op_logic_T0_cc();
5067 break;
5068 case 0x01:
5069 gen_op_xorl_T0_T1();
5070 gen_movl_reg_T0(s, rd);
5071 if (logic_cc)
5072 gen_op_logic_T0_cc();
5073 break;
5074 case 0x02:
5075 if (set_cc && rd == 15) {
5076 /* SUBS r15, ... is used for exception return. */
5077 if (IS_USER(s))
5078 goto illegal_op;
5079 gen_op_subl_T0_T1_cc();
5080 gen_exception_return(s);
5081 } else {
5082 if (set_cc)
5083 gen_op_subl_T0_T1_cc();
5084 else
5085 gen_op_subl_T0_T1();
5086 gen_movl_reg_T0(s, rd);
5087 }
5088 break;
5089 case 0x03:
5090 if (set_cc)
5091 gen_op_rsbl_T0_T1_cc();
5092 else
5093 gen_op_rsbl_T0_T1();
5094 gen_movl_reg_T0(s, rd);
5095 break;
5096 case 0x04:
5097 if (set_cc)
5098 gen_op_addl_T0_T1_cc();
5099 else
5100 gen_op_addl_T0_T1();
5101 gen_movl_reg_T0(s, rd);
5102 break;
5103 case 0x05:
5104 if (set_cc)
5105 gen_op_adcl_T0_T1_cc();
5106 else
5107 gen_op_adcl_T0_T1();
5108 gen_movl_reg_T0(s, rd);
5109 break;
5110 case 0x06:
5111 if (set_cc)
5112 gen_op_sbcl_T0_T1_cc();
5113 else
5114 gen_op_sbcl_T0_T1();
5115 gen_movl_reg_T0(s, rd);
5116 break;
5117 case 0x07:
5118 if (set_cc)
5119 gen_op_rscl_T0_T1_cc();
5120 else
5121 gen_op_rscl_T0_T1();
5122 gen_movl_reg_T0(s, rd);
5123 break;
5124 case 0x08:
5125 if (set_cc) {
5126 gen_op_andl_T0_T1();
5127 gen_op_logic_T0_cc();
5128 }
5129 break;
5130 case 0x09:
5131 if (set_cc) {
5132 gen_op_xorl_T0_T1();
5133 gen_op_logic_T0_cc();
5134 }
5135 break;
5136 case 0x0a:
5137 if (set_cc) {
5138 gen_op_subl_T0_T1_cc();
5139 }
5140 break;
5141 case 0x0b:
5142 if (set_cc) {
5143 gen_op_addl_T0_T1_cc();
5144 }
5145 break;
5146 case 0x0c:
5147 gen_op_orl_T0_T1();
5148 gen_movl_reg_T0(s, rd);
5149 if (logic_cc)
5150 gen_op_logic_T0_cc();
5151 break;
5152 case 0x0d:
5153 if (logic_cc && rd == 15) {
5154 /* MOVS r15, ... is used for exception return. */
5155 if (IS_USER(s))
5156 goto illegal_op;
5157 gen_op_movl_T0_T1();
5158 gen_exception_return(s);
5159 } else {
5160 gen_movl_reg_T1(s, rd);
5161 if (logic_cc)
5162 gen_op_logic_T1_cc();
5163 }
5164 break;
5165 case 0x0e:
5166 gen_op_bicl_T0_T1();
5167 gen_movl_reg_T0(s, rd);
5168 if (logic_cc)
5169 gen_op_logic_T0_cc();
5170 break;
5171 default:
5172 case 0x0f:
5173 gen_op_notl_T1();
5174 gen_movl_reg_T1(s, rd);
5175 if (logic_cc)
5176 gen_op_logic_T1_cc();
5177 break;
5178 }
5179 } else {
5180 /* other instructions */
5181 op1 = (insn >> 24) & 0xf;
5182 switch(op1) {
5183 case 0x0:
5184 case 0x1:
5185 /* multiplies, extra load/stores */
5186 sh = (insn >> 5) & 3;
5187 if (sh == 0) {
5188 if (op1 == 0x0) {
5189 rd = (insn >> 16) & 0xf;
5190 rn = (insn >> 12) & 0xf;
5191 rs = (insn >> 8) & 0xf;
5192 rm = (insn) & 0xf;
5193 op1 = (insn >> 20) & 0xf;
5194 switch (op1) {
5195 case 0: case 1: case 2: case 3: case 6:
5196 /* 32 bit mul */
5197 gen_movl_T0_reg(s, rs);
5198 gen_movl_T1_reg(s, rm);
5199 gen_op_mul_T0_T1();
5200 if (insn & (1 << 22)) {
5201 /* Subtract (mls) */
5202 ARCH(6T2);
5203 gen_movl_T1_reg(s, rn);
5204 gen_op_rsbl_T0_T1();
5205 } else if (insn & (1 << 21)) {
5206 /* Add */
5207 gen_movl_T1_reg(s, rn);
5208 gen_op_addl_T0_T1();
5209 }
5210 if (insn & (1 << 20))
5211 gen_op_logic_T0_cc();
5212 gen_movl_reg_T0(s, rd);
5213 break;
5214 default:
5215 /* 64 bit mul */
5216 gen_movl_T0_reg(s, rs);
5217 gen_movl_T1_reg(s, rm);
5218 if (insn & (1 << 22))
5219 gen_op_imull_T0_T1();
5220 else
5221 gen_op_mull_T0_T1();
5222 if (insn & (1 << 21)) /* mult accumulate */
5223 gen_op_addq_T0_T1(rn, rd);
5224 if (!(insn & (1 << 23))) { /* double accumulate */
5225 ARCH(6);
5226 gen_op_addq_lo_T0_T1(rn);
5227 gen_op_addq_lo_T0_T1(rd);
5228 }
5229 if (insn & (1 << 20))
5230 gen_op_logicq_cc();
5231 gen_movl_reg_T0(s, rn);
5232 gen_movl_reg_T1(s, rd);
5233 break;
5234 }
5235 } else {
5236 rn = (insn >> 16) & 0xf;
5237 rd = (insn >> 12) & 0xf;
5238 if (insn & (1 << 23)) {
5239 /* load/store exclusive */
5240 gen_movl_T1_reg(s, rn);
5241 if (insn & (1 << 20)) {
5242 gen_ldst(ldlex, s);
5243 } else {
5244 rm = insn & 0xf;
5245 gen_movl_T0_reg(s, rm);
5246 gen_ldst(stlex, s);
5247 }
5248 gen_movl_reg_T0(s, rd);
5249 } else {
5250 /* SWP instruction */
5251 rm = (insn) & 0xf;
5252
5253 gen_movl_T0_reg(s, rm);
5254 gen_movl_T1_reg(s, rn);
5255 if (insn & (1 << 22)) {
5256 gen_ldst(swpb, s);
5257 } else {
5258 gen_ldst(swpl, s);
5259 }
5260 gen_movl_reg_T0(s, rd);
5261 }
5262 }
5263 } else {
5264 int address_offset;
5265 int load;
5266 /* Misc load/store */
5267 rn = (insn >> 16) & 0xf;
5268 rd = (insn >> 12) & 0xf;
5269 gen_movl_T1_reg(s, rn);
5270 if (insn & (1 << 24))
5271 gen_add_datah_offset(s, insn, 0);
5272 address_offset = 0;
5273 if (insn & (1 << 20)) {
5274 /* load */
5275 switch(sh) {
5276 case 1:
5277 gen_ldst(lduw, s);
5278 break;
5279 case 2:
5280 gen_ldst(ldsb, s);
5281 break;
5282 default:
5283 case 3:
5284 gen_ldst(ldsw, s);
5285 break;
5286 }
5287 load = 1;
5288 } else if (sh & 2) {
5289 /* doubleword */
5290 if (sh & 1) {
5291 /* store */
5292 gen_movl_T0_reg(s, rd);
5293 gen_ldst(stl, s);
5294 gen_op_addl_T1_im(4);
5295 gen_movl_T0_reg(s, rd + 1);
5296 gen_ldst(stl, s);
5297 load = 0;
5298 } else {
5299 /* load */
5300 gen_ldst(ldl, s);
5301 gen_movl_reg_T0(s, rd);
5302 gen_op_addl_T1_im(4);
5303 gen_ldst(ldl, s);
5304 rd++;
5305 load = 1;
5306 }
5307 address_offset = -4;
5308 } else {
5309 /* store */
5310 gen_movl_T0_reg(s, rd);
5311 gen_ldst(stw, s);
5312 load = 0;
5313 }
5314 /* Perform base writeback before the loaded value to
5315 ensure correct behavior with overlapping index registers.
 5316 ldrd with base writeback is undefined if the
5317 destination and index registers overlap. */
5318 if (!(insn & (1 << 24))) {
5319 gen_add_datah_offset(s, insn, address_offset);
5320 gen_movl_reg_T1(s, rn);
5321 } else if (insn & (1 << 21)) {
5322 if (address_offset)
5323 gen_op_addl_T1_im(address_offset);
5324 gen_movl_reg_T1(s, rn);
5325 }
5326 if (load) {
5327 /* Complete the load. */
5328 gen_movl_reg_T0(s, rd);
5329 }
5330 }
5331 break;
5332 case 0x4:
5333 case 0x5:
5334 goto do_ldst;
5335 case 0x6:
5336 case 0x7:
5337 if (insn & (1 << 4)) {
5338 ARCH(6);
5339 /* Armv6 Media instructions. */
5340 rm = insn & 0xf;
5341 rn = (insn >> 16) & 0xf;
2c0262af 5342 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
5343 rs = (insn >> 8) & 0xf;
5344 switch ((insn >> 23) & 3) {
5345 case 0: /* Parallel add/subtract. */
5346 op1 = (insn >> 20) & 7;
5347 gen_movl_T0_reg(s, rn);
5348 gen_movl_T1_reg(s, rm);
5349 sh = (insn >> 5) & 7;
5350 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
5351 goto illegal_op;
5352 gen_arm_parallel_addsub[op1][sh]();
5353 gen_movl_reg_T0(s, rd);
5354 break;
5355 case 1:
5356 if ((insn & 0x00700020) == 0) {
 5357 /* Halfword pack. */
5358 gen_movl_T0_reg(s, rn);
5359 gen_movl_T1_reg(s, rm);
5360 shift = (insn >> 7) & 0x1f;
5361 if (shift)
5362 gen_op_shll_T1_im(shift);
5363 if (insn & (1 << 6))
5364 gen_op_pkhtb_T0_T1();
5365 else
5366 gen_op_pkhbt_T0_T1();
5367 gen_movl_reg_T0(s, rd);
5368 } else if ((insn & 0x00200020) == 0x00200000) {
5369 /* [us]sat */
5370 gen_movl_T1_reg(s, rm);
5371 shift = (insn >> 7) & 0x1f;
5372 if (insn & (1 << 6)) {
5373 if (shift == 0)
5374 shift = 31;
5375 gen_op_sarl_T1_im(shift);
5376 } else {
5377 gen_op_shll_T1_im(shift);
5378 }
5379 sh = (insn >> 16) & 0x1f;
5380 if (sh != 0) {
5381 if (insn & (1 << 22))
5382 gen_op_usat_T1(sh);
5383 else
5384 gen_op_ssat_T1(sh);
5385 }
5386 gen_movl_T1_reg(s, rd);
5387 } else if ((insn & 0x00300fe0) == 0x00200f20) {
5388 /* [us]sat16 */
5389 gen_movl_T1_reg(s, rm);
5390 sh = (insn >> 16) & 0x1f;
5391 if (sh != 0) {
5392 if (insn & (1 << 22))
5393 gen_op_usat16_T1(sh);
5394 else
5395 gen_op_ssat16_T1(sh);
5396 }
5397 gen_movl_T1_reg(s, rd);
5398 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
5399 /* Select bytes. */
5400 gen_movl_T0_reg(s, rn);
5401 gen_movl_T1_reg(s, rm);
5402 gen_op_sel_T0_T1();
5403 gen_movl_reg_T0(s, rd);
5404 } else if ((insn & 0x000003e0) == 0x00000060) {
5405 gen_movl_T1_reg(s, rm);
5406 shift = (insn >> 10) & 3;
 5407 /* ??? In many cases it's not necessary to do a
5408 rotate, a shift is sufficient. */
5409 if (shift != 0)
5410 gen_op_rorl_T1_im(shift * 8);
5411 op1 = (insn >> 20) & 7;
5412 switch (op1) {
5413 case 0: gen_op_sxtb16_T1(); break;
5414 case 2: gen_op_sxtb_T1(); break;
5415 case 3: gen_op_sxth_T1(); break;
5416 case 4: gen_op_uxtb16_T1(); break;
5417 case 6: gen_op_uxtb_T1(); break;
5418 case 7: gen_op_uxth_T1(); break;
5419 default: goto illegal_op;
5420 }
5421 if (rn != 15) {
5422 gen_movl_T2_reg(s, rn);
5423 if ((op1 & 3) == 0) {
5424 gen_op_add16_T1_T2();
5425 } else {
5426 gen_op_addl_T1_T2();
5427 }
5428 }
5429 gen_movl_reg_T1(s, rd);
5430 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
5431 /* rev */
5432 gen_movl_T0_reg(s, rm);
5433 if (insn & (1 << 22)) {
5434 if (insn & (1 << 7)) {
5435 gen_op_revsh_T0();
5436 } else {
5437 ARCH(6T2);
5438 gen_op_rbit_T0();
5439 }
5440 } else {
5441 if (insn & (1 << 7))
5442 gen_op_rev16_T0();
5443 else
5444 gen_op_rev_T0();
5445 }
5446 gen_movl_reg_T0(s, rd);
5447 } else {
5448 goto illegal_op;
5449 }
5450 break;
5451 case 2: /* Multiplies (Type 3). */
5452 gen_movl_T0_reg(s, rm);
5453 gen_movl_T1_reg(s, rs);
5454 if (insn & (1 << 20)) {
5455 /* Signed multiply most significant [accumulate]. */
5456 gen_op_imull_T0_T1();
5457 if (insn & (1 << 5))
5458 gen_op_roundqd_T0_T1();
5459 else
5460 gen_op_movl_T0_T1();
5461 if (rn != 15) {
5462 gen_movl_T1_reg(s, rn);
5463 if (insn & (1 << 6)) {
5464 gen_op_addl_T0_T1();
5465 } else {
5466 gen_op_rsbl_T0_T1();
5467 }
5468 }
5469 gen_movl_reg_T0(s, rd);
5470 } else {
5471 if (insn & (1 << 5))
5472 gen_op_swap_half_T1();
5473 gen_op_mul_dual_T0_T1();
5474 if (insn & (1 << 22)) {
5475 if (insn & (1 << 6)) {
5476 /* smlald */
5477 gen_op_addq_T0_T1_dual(rn, rd);
5478 } else {
5479 /* smlsld */
5480 gen_op_subq_T0_T1_dual(rn, rd);
5481 }
5482 } else {
5483 /* This addition cannot overflow. */
5484 if (insn & (1 << 6)) {
5485 /* sm[ul]sd */
5486 gen_op_subl_T0_T1();
5487 } else {
5488 /* sm[ul]ad */
5489 gen_op_addl_T0_T1();
5490 }
5491 if (rn != 15)
5492 {
5493 gen_movl_T1_reg(s, rn);
5494 gen_op_addl_T0_T1_setq();
5495 }
5496 gen_movl_reg_T0(s, rd);
5497 }
5498 }
5499 break;
5500 case 3:
5501 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
5502 switch (op1) {
5503 case 0: /* Unsigned sum of absolute differences. */
5504 goto illegal_op;
5505 gen_movl_T0_reg(s, rm);
5506 gen_movl_T1_reg(s, rs);
5507 gen_op_usad8_T0_T1();
5508 if (rn != 15) {
5509 gen_movl_T1_reg(s, rn);
5510 gen_op_addl_T0_T1();
5511 }
5512 gen_movl_reg_T0(s, rd);
5513 break;
5514 case 0x20: case 0x24: case 0x28: case 0x2c:
5515 /* Bitfield insert/clear. */
5516 ARCH(6T2);
5517 shift = (insn >> 7) & 0x1f;
5518 i = (insn >> 16) & 0x1f;
5519 i = i + 1 - shift;
5520 if (rm == 15) {
5521 gen_op_movl_T1_im(0);
5522 } else {
5523 gen_movl_T1_reg(s, rm);
5524 }
5525 if (i != 32) {
5526 gen_movl_T0_reg(s, rd);
5527 gen_op_bfi_T1_T0(shift, ((1u << i) - 1) << shift);
5528 }
5529 gen_movl_reg_T1(s, rd);
5530 break;
5531 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
5532 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5533 gen_movl_T1_reg(s, rm);
5534 shift = (insn >> 7) & 0x1f;
5535 i = ((insn >> 16) & 0x1f) + 1;
5536 if (shift + i > 32)
5537 goto illegal_op;
5538 if (i < 32) {
5539 if (op1 & 0x20) {
5540 gen_op_ubfx_T1(shift, (1u << i) - 1);
5541 } else {
5542 gen_op_sbfx_T1(shift, i);
5543 }
5544 }
5545 gen_movl_reg_T1(s, rd);
5546 break;
5547 default:
5548 goto illegal_op;
5549 }
5550 break;
5551 }
5552 break;
5553 }
5554 do_ldst:
5555 /* Check for undefined extension instructions
5556 * per the ARM Bible IE:
5557 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
5558 */
5559 sh = (0xf << 20) | (0xf << 4);
5560 if (op1 == 0x7 && ((insn & sh) == sh))
5561 {
5562 goto illegal_op;
5563 }
5564 /* load/store byte/word */
5565 rn = (insn >> 16) & 0xf;
5566 rd = (insn >> 12) & 0xf;
5567 gen_movl_T1_reg(s, rn);
5568 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
5569 if (insn & (1 << 24))
5570 gen_add_data_offset(s, insn);
5571 if (insn & (1 << 20)) {
5572 /* load */
5573 s->is_mem = 1;
5574#if defined(CONFIG_USER_ONLY)
5575 if (insn & (1 << 22))
5576 gen_op_ldub_raw();
5577 else
5578 gen_op_ldl_raw();
5579#else
5580 if (insn & (1 << 22)) {
5581 if (i)
5582 gen_op_ldub_user();
5583 else
5584 gen_op_ldub_kernel();
5585 } else {
5586 if (i)
5587 gen_op_ldl_user();
5588 else
5589 gen_op_ldl_kernel();
5590 }
5591#endif
5592 } else {
5593 /* store */
5594 gen_movl_T0_reg(s, rd);
5595#if defined(CONFIG_USER_ONLY)
5596 if (insn & (1 << 22))
5597 gen_op_stb_raw();
5598 else
5599 gen_op_stl_raw();
5600#else
5601 if (insn & (1 << 22)) {
5602 if (i)
5603 gen_op_stb_user();
5604 else
5605 gen_op_stb_kernel();
5606 } else {
5607 if (i)
5608 gen_op_stl_user();
5609 else
5610 gen_op_stl_kernel();
5611 }
5612#endif
5613 }
5614 if (!(insn & (1 << 24))) {
5615 gen_add_data_offset(s, insn);
5616 gen_movl_reg_T1(s, rn);
5617 } else if (insn & (1 << 21))
5618 gen_movl_reg_T1(s, rn); {
5619 }
5620 if (insn & (1 << 20)) {
5621 /* Complete the load. */
5622 if (rd == 15)
5623 gen_bx(s);
5624 else
5625 gen_movl_reg_T0(s, rd);
5626 }
5627 break;
5628 case 0x08:
5629 case 0x09:
5630 {
5631 int j, n, user, loaded_base;
5632 /* load/store multiple words */
5633 /* XXX: store correct base if write back */
5634 user = 0;
5635 if (insn & (1 << 22)) {
5636 if (IS_USER(s))
5637 goto illegal_op; /* only usable in supervisor mode */
5638
5639 if ((insn & (1 << 15)) == 0)
5640 user = 1;
5641 }
5642 rn = (insn >> 16) & 0xf;
5643 gen_movl_T1_reg(s, rn);
5644
5645 /* compute total size */
5646 loaded_base = 0;
5647 n = 0;
5648 for(i=0;i<16;i++) {
5649 if (insn & (1 << i))
5650 n++;
5651 }
5652 /* XXX: test invalid n == 0 case ? */
5653 if (insn & (1 << 23)) {
5654 if (insn & (1 << 24)) {
5655 /* pre increment */
5656 gen_op_addl_T1_im(4);
5657 } else {
5658 /* post increment */
5659 }
5660 } else {
5661 if (insn & (1 << 24)) {
5662 /* pre decrement */
5663 gen_op_addl_T1_im(-(n * 4));
5664 } else {
5665 /* post decrement */
5666 if (n != 1)
5667 gen_op_addl_T1_im(-((n - 1) * 4));
5668 }
5669 }
5670 j = 0;
5671 for(i=0;i<16;i++) {
5672 if (insn & (1 << i)) {
5673 if (insn & (1 << 20)) {
5674 /* load */
5675 gen_ldst(ldl, s);
5676 if (i == 15) {
5677 gen_bx(s);
5678 } else if (user) {
5679 gen_op_movl_user_T0(i);
5680 } else if (i == rn) {
5681 gen_op_movl_T2_T0();
5682 loaded_base = 1;
5683 } else {
5684 gen_movl_reg_T0(s, i);
5685 }
5686 } else {
5687 /* store */
5688 if (i == 15) {
5689 /* special case: r15 = PC + 8 */
5690 val = (long)s->pc + 4;
5691 gen_op_movl_TN_im[0](val);
5692 } else if (user) {
5693 gen_op_movl_T0_user(i);
5694 } else {
5695 gen_movl_T0_reg(s, i);
5696 }
5697 gen_ldst(stl, s);
5698 }
5699 j++;
5700 /* no need to add after the last transfer */
5701 if (j != n)
5702 gen_op_addl_T1_im(4);
5703 }
5704 }
5705 if (insn & (1 << 21)) {
5706 /* write back */
5707 if (insn & (1 << 23)) {
5708 if (insn & (1 << 24)) {
5709 /* pre increment */
5710 } else {
5711 /* post increment */
5712 gen_op_addl_T1_im(4);
5713 }
5714 } else {
5715 if (insn & (1 << 24)) {
5716 /* pre decrement */
5717 if (n != 1)
5718 gen_op_addl_T1_im(-((n - 1) * 4));
5719 } else {
5720 /* post decrement */
5721 gen_op_addl_T1_im(-(n * 4));
5722 }
5723 }
5724 gen_movl_reg_T1(s, rn);
5725 }
5726 if (loaded_base) {
5727 gen_op_movl_T0_T2();
5728 gen_movl_reg_T0(s, rn);
5729 }
5730 if ((insn & (1 << 22)) && !user) {
5731 /* Restore CPSR from SPSR. */
5732 gen_op_movl_T0_spsr();
5733 gen_op_movl_cpsr_T0(0xffffffff);
5734 s->is_jmp = DISAS_UPDATE;
5735 }
5736 }
5737 break;
5738 case 0xa:
5739 case 0xb:
5740 {
5741 int32_t offset;
5742
5743 /* branch (and link) */
5744 val = (int32_t)s->pc;
5745 if (insn & (1 << 24)) {
5746 gen_op_movl_T0_im(val);
5747 gen_op_movl_reg_TN[0][14]();
5748 }
5749 offset = (((int32_t)insn << 8) >> 8);
5750 val += (offset << 2) + 4;
5751 gen_jmp(s, val);
5752 }
5753 break;
5754 case 0xc:
5755 case 0xd:
5756 case 0xe:
5757 /* Coprocessor. */
5758 if (disas_coproc_insn(env, s, insn))
5759 goto illegal_op;
5760 break;
5761 case 0xf:
5762 /* swi */
5763 gen_op_movl_T0_im((long)s->pc);
5764 gen_op_movl_reg_TN[0][15]();
5765 s->is_jmp = DISAS_SWI;
5766 break;
5767 default:
5768 illegal_op:
5769 gen_set_condexec(s);
5770 gen_op_movl_T0_im((long)s->pc - 4);
5771 gen_op_movl_reg_TN[0][15]();
5772 gen_op_undef_insn();
5773 s->is_jmp = DISAS_JUMP;
5774 break;
5775 }
5776 }
5777}
5778
/* Predicate: nonzero iff OP is one of the Thumb-2 logical data-processing
   opcodes (0..7 — the AND/BIC/ORR/ORN/EOR group handled by
   gen_thumb2_data_op), as opposed to the arithmetic group at 8 and above. */
static int
thumb2_logic_op(int op)
{
    return op <= 7;
}
5785
5786/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
5787 then set condition code flags based on the result of the operation.
5788 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
5789 to the high bit of T1.
5790 Returns zero if the opcode is valid. */
5791
5792static int
5793gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
5794{
5795 int logic_cc;
5796
5797 logic_cc = 0;
5798 switch (op) {
5799 case 0: /* and */
5800 gen_op_andl_T0_T1();
5801 logic_cc = conds;
5802 break;
5803 case 1: /* bic */
5804 gen_op_bicl_T0_T1();
5805 logic_cc = conds;
5806 break;
5807 case 2: /* orr */
5808 gen_op_orl_T0_T1();
5809 logic_cc = conds;
5810 break;
5811 case 3: /* orn */
5812 gen_op_notl_T1();
5813 gen_op_orl_T0_T1();
5814 logic_cc = conds;
5815 break;
5816 case 4: /* eor */
5817 gen_op_xorl_T0_T1();
5818 logic_cc = conds;
5819 break;
5820 case 8: /* add */
5821 if (conds)
5822 gen_op_addl_T0_T1_cc();
5823 else
5824 gen_op_addl_T0_T1();
5825 break;
5826 case 10: /* adc */
5827 if (conds)
5828 gen_op_adcl_T0_T1_cc();
5829 else
5830 gen_op_adcl_T0_T1();
5831 break;
5832 case 11: /* sbc */
5833 if (conds)
5834 gen_op_sbcl_T0_T1_cc();
5835 else
5836 gen_op_sbcl_T0_T1();
5837 break;
5838 case 13: /* sub */
5839 if (conds)
5840 gen_op_subl_T0_T1_cc();
5841 else
5842 gen_op_subl_T0_T1();
5843 break;
5844 case 14: /* rsb */
5845 if (conds)
5846 gen_op_rsbl_T0_T1_cc();
5847 else
5848 gen_op_rsbl_T0_T1();
5849 break;
5850 default: /* 5, 6, 7, 9, 12, 15. */
5851 return 1;
5852 }
5853 if (logic_cc) {
5854 gen_op_logic_T0_cc();
5855 if (shifter_out)
5856 gen_op_mov_CF_T1();
5857 }
5858 return 0;
5859}
5860
5861/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
5862 is not legal. */
5863static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
5864{
5865 uint32_t insn, imm, shift, offset, addr;
5866 uint32_t rd, rn, rm, rs;
5867 int op;
5868 int shiftop;
5869 int conds;
5870 int logic_cc;
5871
5872 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
5873 || arm_feature (env, ARM_FEATURE_M))) {
 5874 /* Thumb-1 cores may need to treat bl and blx as a pair of
5875 16-bit instructions to get correct prefetch abort behavior. */
5876 insn = insn_hw1;
5877 if ((insn & (1 << 12)) == 0) {
5878 /* Second half of blx. */
5879 offset = ((insn & 0x7ff) << 1);
5880 gen_movl_T0_reg(s, 14);
5881 gen_op_movl_T1_im(offset);
5882 gen_op_addl_T0_T1();
5883 gen_op_movl_T1_im(0xfffffffc);
5884 gen_op_andl_T0_T1();
5885
5886 addr = (uint32_t)s->pc;
5887 gen_op_movl_T1_im(addr | 1);
5888 gen_movl_reg_T1(s, 14);
5889 gen_bx(s);
5890 return 0;
5891 }
5892 if (insn & (1 << 11)) {
5893 /* Second half of bl. */
5894 offset = ((insn & 0x7ff) << 1) | 1;
5895 gen_movl_T0_reg(s, 14);
5896 gen_op_movl_T1_im(offset);
5897 gen_op_addl_T0_T1();
5898
5899 addr = (uint32_t)s->pc;
5900 gen_op_movl_T1_im(addr | 1);
5901 gen_movl_reg_T1(s, 14);
5902 gen_bx(s);
5903 return 0;
5904 }
5905 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
5906 /* Instruction spans a page boundary. Implement it as two
5907 16-bit instructions in case the second half causes an
5908 prefetch abort. */
5909 offset = ((int32_t)insn << 21) >> 9;
5910 addr = s->pc + 2 + offset;
5911 gen_op_movl_T0_im(addr);
5912 gen_movl_reg_T0(s, 14);
5913 return 0;
5914 }
5915 /* Fall through to 32-bit decode. */
5916 }
5917
5918 insn = lduw_code(s->pc);
5919 s->pc += 2;
5920 insn |= (uint32_t)insn_hw1 << 16;
5921
5922 if ((insn & 0xf800e800) != 0xf000e800) {
5923 ARCH(6T2);
5924 }
5925
5926 rn = (insn >> 16) & 0xf;
5927 rs = (insn >> 12) & 0xf;
5928 rd = (insn >> 8) & 0xf;
5929 rm = insn & 0xf;
5930 switch ((insn >> 25) & 0xf) {
5931 case 0: case 1: case 2: case 3:
5932 /* 16-bit instructions. Should never happen. */
5933 abort();
5934 case 4:
5935 if (insn & (1 << 22)) {
5936 /* Other load/store, table branch. */
5937 if (insn & 0x01200000) {
5938 /* Load/store doubleword. */
5939 if (rn == 15) {
5940 gen_op_movl_T1_im(s->pc & ~3);
5941 } else {
5942 gen_movl_T1_reg(s, rn);
5943 }
5944 offset = (insn & 0xff) * 4;
5945 if ((insn & (1 << 23)) == 0)
5946 offset = -offset;
5947 if (insn & (1 << 24)) {
5948 gen_op_addl_T1_im(offset);
5949 offset = 0;
5950 }
5951 if (insn & (1 << 20)) {
5952 /* ldrd */
5953 gen_ldst(ldl, s);
5954 gen_movl_reg_T0(s, rs);
5955 gen_op_addl_T1_im(4);
5956 gen_ldst(ldl, s);
5957 gen_movl_reg_T0(s, rd);
5958 } else {
5959 /* strd */
5960 gen_movl_T0_reg(s, rs);
5961 gen_ldst(stl, s);
5962 gen_op_addl_T1_im(4);
5963 gen_movl_T0_reg(s, rd);
5964 gen_ldst(stl, s);
5965 }
5966 if (insn & (1 << 21)) {
5967 /* Base writeback. */
5968 if (rn == 15)
5969 goto illegal_op;
5970 gen_op_addl_T1_im(offset - 4);
5971 gen_movl_reg_T1(s, rn);
5972 }
5973 } else if ((insn & (1 << 23)) == 0) {
5974 /* Load/store exclusive word. */
5975 gen_movl_T0_reg(s, rd);
2c0262af 5976 gen_movl_T1_reg(s, rn);
2c0262af 5977 if (insn & (1 << 20)) {
9ee6e8bb
PB
5978 gen_ldst(ldlex, s);
5979 } else {
5980 gen_ldst(stlex, s);
5981 }
5982 gen_movl_reg_T0(s, rd);
5983 } else if ((insn & (1 << 6)) == 0) {
5984 /* Table Branch. */
5985 if (rn == 15) {
5986 gen_op_movl_T1_im(s->pc);
5987 } else {
5988 gen_movl_T1_reg(s, rn);
5989 }
5990 gen_movl_T2_reg(s, rm);
5991 gen_op_addl_T1_T2();
5992 if (insn & (1 << 4)) {
5993 /* tbh */
5994 gen_op_addl_T1_T2();
5995 gen_ldst(lduw, s);
5996 } else { /* tbb */
5997 gen_ldst(ldub, s);
5998 }
5999 gen_op_jmp_T0_im(s->pc);
6000 s->is_jmp = DISAS_JUMP;
6001 } else {
6002 /* Load/store exclusive byte/halfword/doubleword. */
6003 op = (insn >> 4) & 0x3;
6004 gen_movl_T1_reg(s, rn);
6005 if (insn & (1 << 20)) {
6006 switch (op) {
6007 case 0:
6008 gen_ldst(ldbex, s);
6009 break;
2c0262af 6010 case 1:
9ee6e8bb 6011 gen_ldst(ldwex, s);
2c0262af 6012 break;
9ee6e8bb
PB
6013 case 3:
6014 gen_ldst(ldqex, s);
6015 gen_movl_reg_T1(s, rd);
2c0262af
FB
6016 break;
6017 default:
9ee6e8bb
PB
6018 goto illegal_op;
6019 }
6020 gen_movl_reg_T0(s, rs);
6021 } else {
6022 gen_movl_T0_reg(s, rs);
6023 switch (op) {
6024 case 0:
6025 gen_ldst(stbex, s);
6026 break;
6027 case 1:
6028 gen_ldst(stwex, s);
6029 break;
2c0262af 6030 case 3:
9ee6e8bb
PB
6031 gen_movl_T2_reg(s, rd);
6032 gen_ldst(stqex, s);
2c0262af 6033 break;
9ee6e8bb
PB
6034 default:
6035 goto illegal_op;
2c0262af 6036 }
9ee6e8bb
PB
6037 gen_movl_reg_T0(s, rm);
6038 }
6039 }
6040 } else {
6041 /* Load/store multiple, RFE, SRS. */
6042 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6043 /* Not available in user mode. */
6044 if (!IS_USER(s))
6045 goto illegal_op;
6046 if (insn & (1 << 20)) {
6047 /* rfe */
6048 gen_movl_T1_reg(s, rn);
6049 if (insn & (1 << 24)) {
99c475ab 6050 gen_op_addl_T1_im(4);
9ee6e8bb
PB
6051 } else {
6052 gen_op_addl_T1_im(-4);
6053 }
6054 /* Load CPSR into T2 and PC into T0. */
6055 gen_ldst(ldl, s);
6056 gen_op_movl_T2_T0();
6057 gen_op_addl_T1_im(-4);
6058 gen_ldst(ldl, s);
6059 if (insn & (1 << 21)) {
6060 /* Base writeback. */
6061 if (insn & (1 << 24))
6062 gen_op_addl_T1_im(8);
6063 gen_movl_reg_T1(s, rn);
6064 }
6065 gen_rfe(s);
6066 } else {
6067 /* srs */
6068 op = (insn & 0x1f);
6069 if (op == (env->uncached_cpsr & CPSR_M)) {
6070 gen_movl_T1_reg(s, 13);
6071 } else {
6072 gen_op_movl_T1_r13_banked(op);
6073 }
6074 if ((insn & (1 << 24)) == 0) {
6075 gen_op_addl_T1_im(-8);
6076 }
6077 gen_movl_T0_reg(s, 14);
6078 gen_ldst(stl, s);
6079 gen_op_movl_T0_cpsr();
6080 gen_op_addl_T1_im(4);
6081 gen_ldst(stl, s);
6082 if (insn & (1 << 21)) {
6083 if ((insn & (1 << 24)) == 0) {
6084 gen_op_addl_T1_im(-4);
6085 } else {
6086 gen_op_addl_T1_im(4);
6087 }
6088 if (op == (env->uncached_cpsr & CPSR_M)) {
6089 gen_movl_reg_T1(s, 13);
6090 } else {
6091 gen_op_movl_r13_T1_banked(op);
6092 }
6093 }
6094 }
6095 } else {
6096 int i;
6097 /* Load/store multiple. */
6098 gen_movl_T1_reg(s, rn);
6099 offset = 0;
6100 for (i = 0; i < 16; i++) {
6101 if (insn & (1 << i))
6102 offset += 4;
6103 }
6104 if (insn & (1 << 24)) {
6105 gen_op_addl_T1_im(-offset);
6106 }
6107
6108 for (i = 0; i < 16; i++) {
6109 if ((insn & (1 << i)) == 0)
6110 continue;
6111 if (insn & (1 << 20)) {
6112 /* Load. */
6113 gen_ldst(ldl, s);
6114 if (i == 15) {
6115 gen_bx(s);
6116 } else {
6117 gen_movl_reg_T0(s, i);
6118 }
6119 } else {
6120 /* Store. */
6121 gen_movl_T0_reg(s, i);
b5ff1b31 6122 gen_ldst(stl, s);
9ee6e8bb
PB
6123 }
6124 gen_op_addl_T1_im(4);
6125 }
6126 if (insn & (1 << 21)) {
6127 /* Base register writeback. */
6128 if (insn & (1 << 24)) {
6129 gen_op_addl_T1_im(-offset);
6130 }
6131 /* Fault if writeback register is in register list. */
6132 if (insn & (1 << rn))
6133 goto illegal_op;
6134 gen_movl_reg_T1(s, rn);
6135 }
6136 }
6137 }
6138 break;
6139 case 5: /* Data processing register constant shift. */
6140 if (rn == 15)
6141 gen_op_movl_T0_im(0);
6142 else
6143 gen_movl_T0_reg(s, rn);
6144 gen_movl_T1_reg(s, rm);
6145 op = (insn >> 21) & 0xf;
6146 shiftop = (insn >> 4) & 3;
6147 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6148 conds = (insn & (1 << 20)) != 0;
6149 logic_cc = (conds && thumb2_logic_op(op));
6150 if (shift != 0) {
6151 if (logic_cc) {
6152 gen_shift_T1_im_cc[shiftop](shift);
6153 } else {
6154 gen_shift_T1_im[shiftop](shift);
6155 }
6156 } else if (shiftop != 0) {
6157 if (logic_cc) {
6158 gen_shift_T1_0_cc[shiftop]();
6159 } else {
6160 gen_shift_T1_0[shiftop]();
6161 }
6162 }
6163 if (gen_thumb2_data_op(s, op, conds, 0))
6164 goto illegal_op;
6165 if (rd != 15)
6166 gen_movl_reg_T0(s, rd);
6167 break;
6168 case 13: /* Misc data processing. */
6169 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
6170 if (op < 4 && (insn & 0xf000) != 0xf000)
6171 goto illegal_op;
6172 switch (op) {
6173 case 0: /* Register controlled shift. */
6174 gen_movl_T0_reg(s, rm);
6175 gen_movl_T1_reg(s, rn);
6176 if ((insn & 0x70) != 0)
6177 goto illegal_op;
6178 op = (insn >> 21) & 3;
6179 if (insn & (1 << 20)) {
6180 gen_shift_T1_T0_cc[op]();
6181 gen_op_logic_T1_cc();
6182 } else {
6183 gen_shift_T1_T0[op]();
6184 }
6185 gen_movl_reg_T1(s, rd);
6186 break;
6187 case 1: /* Sign/zero extend. */
6188 gen_movl_T1_reg(s, rm);
6189 shift = (insn >> 4) & 3;
6190 /* ??? In many cases it's not neccessary to do a
6191 rotate, a shift is sufficient. */
6192 if (shift != 0)
6193 gen_op_rorl_T1_im(shift * 8);
6194 op = (insn >> 20) & 7;
6195 switch (op) {
6196 case 0: gen_op_sxth_T1(); break;
6197 case 1: gen_op_uxth_T1(); break;
6198 case 2: gen_op_sxtb16_T1(); break;
6199 case 3: gen_op_uxtb16_T1(); break;
6200 case 4: gen_op_sxtb_T1(); break;
6201 case 5: gen_op_uxtb_T1(); break;
6202 default: goto illegal_op;
6203 }
6204 if (rn != 15) {
6205 gen_movl_T2_reg(s, rn);
6206 if ((op >> 1) == 1) {
6207 gen_op_add16_T1_T2();
6208 } else {
6209 gen_op_addl_T1_T2();
6210 }
6211 }
6212 gen_movl_reg_T1(s, rd);
6213 break;
6214 case 2: /* SIMD add/subtract. */
6215 op = (insn >> 20) & 7;
6216 shift = (insn >> 4) & 7;
6217 if ((op & 3) == 3 || (shift & 3) == 3)
6218 goto illegal_op;
6219 gen_movl_T0_reg(s, rn);
6220 gen_movl_T1_reg(s, rm);
6221 gen_thumb2_parallel_addsub[op][shift]();
6222 gen_movl_reg_T0(s, rd);
6223 break;
6224 case 3: /* Other data processing. */
6225 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
6226 if (op < 4) {
6227 /* Saturating add/subtract. */
6228 gen_movl_T0_reg(s, rm);
6229 gen_movl_T1_reg(s, rn);
6230 if (op & 2)
6231 gen_op_double_T1_saturate();
6232 if (op & 1)
6233 gen_op_subl_T0_T1_saturate();
6234 else
6235 gen_op_addl_T0_T1_saturate();
6236 } else {
6237 gen_movl_T0_reg(s, rn);
6238 switch (op) {
6239 case 0x0a: /* rbit */
6240 gen_op_rbit_T0();
6241 break;
6242 case 0x08: /* rev */
6243 gen_op_rev_T0();
6244 break;
6245 case 0x09: /* rev16 */
6246 gen_op_rev16_T0();
6247 break;
6248 case 0x0b: /* revsh */
6249 gen_op_revsh_T0();
6250 break;
6251 case 0x10: /* sel */
6252 gen_movl_T1_reg(s, rm);
6253 gen_op_sel_T0_T1();
6254 break;
6255 case 0x18: /* clz */
6256 gen_op_clz_T0();
6257 break;
6258 default:
6259 goto illegal_op;
6260 }
6261 }
6262 gen_movl_reg_T0(s, rd);
6263 break;
6264 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
6265 op = (insn >> 4) & 0xf;
6266 gen_movl_T0_reg(s, rn);
6267 gen_movl_T1_reg(s, rm);
6268 switch ((insn >> 20) & 7) {
6269 case 0: /* 32 x 32 -> 32 */
6270 gen_op_mul_T0_T1();
6271 if (rs != 15) {
6272 gen_movl_T1_reg(s, rs);
6273 if (op)
6274 gen_op_rsbl_T0_T1();
6275 else
6276 gen_op_addl_T0_T1();
6277 }
6278 gen_movl_reg_T0(s, rd);
6279 break;
6280 case 1: /* 16 x 16 -> 32 */
6281 gen_mulxy(op & 2, op & 1);
6282 if (rs != 15) {
6283 gen_movl_T1_reg(s, rs);
6284 gen_op_addl_T0_T1_setq();
6285 }
6286 gen_movl_reg_T0(s, rd);
6287 break;
6288 case 2: /* Dual multiply add. */
6289 case 4: /* Dual multiply subtract. */
6290 if (op)
6291 gen_op_swap_half_T1();
6292 gen_op_mul_dual_T0_T1();
6293 /* This addition cannot overflow. */
6294 if (insn & (1 << 22)) {
6295 gen_op_subl_T0_T1();
6296 } else {
6297 gen_op_addl_T0_T1();
6298 }
6299 if (rs != 15)
6300 {
6301 gen_movl_T1_reg(s, rs);
6302 gen_op_addl_T0_T1_setq();
6303 }
6304 gen_movl_reg_T0(s, rd);
6305 break;
6306 case 3: /* 32 * 16 -> 32msb */
6307 if (op)
6308 gen_op_sarl_T1_im(16);
6309 else
6310 gen_op_sxth_T1();
6311 gen_op_imulw_T0_T1();
6312 if (rs != 15)
6313 {
6314 gen_movl_T1_reg(s, rs);
6315 gen_op_addl_T0_T1_setq();
6316 }
6317 gen_movl_reg_T0(s, rd);
6318 break;
6319 case 5: case 6: /* 32 * 32 -> 32msb */
6320 gen_op_imull_T0_T1();
6321 if (insn & (1 << 5))
6322 gen_op_roundqd_T0_T1();
6323 else
6324 gen_op_movl_T0_T1();
6325 if (rs != 15) {
6326 gen_movl_T1_reg(s, rs);
6327 if (insn & (1 << 21)) {
6328 gen_op_addl_T0_T1();
99c475ab 6329 } else {
9ee6e8bb 6330 gen_op_rsbl_T0_T1();
99c475ab 6331 }
2c0262af 6332 }
9ee6e8bb
PB
6333 gen_movl_reg_T0(s, rd);
6334 break;
6335 case 7: /* Unsigned sum of absolute differences. */
6336 gen_op_usad8_T0_T1();
6337 if (rs != 15) {
6338 gen_movl_T1_reg(s, rs);
6339 gen_op_addl_T0_T1();
5fd46862 6340 }
9ee6e8bb
PB
6341 gen_movl_reg_T0(s, rd);
6342 break;
2c0262af
FB
6343 }
6344 break;
9ee6e8bb
PB
6345 case 6: case 7: /* 64-bit multiply, Divide. */
6346 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
6347 gen_movl_T0_reg(s, rn);
6348 gen_movl_T1_reg(s, rm);
6349 if ((op & 0x50) == 0x10) {
6350 /* sdiv, udiv */
6351 if (!arm_feature(env, ARM_FEATURE_DIV))
6352 goto illegal_op;
6353 if (op & 0x20)
6354 gen_op_udivl_T0_T1();
2c0262af 6355 else
9ee6e8bb
PB
6356 gen_op_sdivl_T0_T1();
6357 gen_movl_reg_T0(s, rd);
6358 } else if ((op & 0xe) == 0xc) {
6359 /* Dual multiply accumulate long. */
6360 if (op & 1)
6361 gen_op_swap_half_T1();
6362 gen_op_mul_dual_T0_T1();
6363 if (op & 0x10) {
6364 gen_op_subl_T0_T1();
b5ff1b31 6365 } else {
9ee6e8bb 6366 gen_op_addl_T0_T1();
b5ff1b31 6367 }
9ee6e8bb
PB
6368 gen_op_signbit_T1_T0();
6369 gen_op_addq_T0_T1(rs, rd);
6370 gen_movl_reg_T0(s, rs);
6371 gen_movl_reg_T1(s, rd);
2c0262af 6372 } else {
9ee6e8bb
PB
6373 if (op & 0x20) {
6374 /* Unsigned 64-bit multiply */
6375 gen_op_mull_T0_T1();
b5ff1b31 6376 } else {
9ee6e8bb
PB
6377 if (op & 8) {
6378 /* smlalxy */
6379 gen_mulxy(op & 2, op & 1);
6380 gen_op_signbit_T1_T0();
6381 } else {
6382 /* Signed 64-bit multiply */
6383 gen_op_imull_T0_T1();
6384 }
b5ff1b31 6385 }
9ee6e8bb
PB
6386 if (op & 4) {
6387 /* umaal */
6388 gen_op_addq_lo_T0_T1(rs);
6389 gen_op_addq_lo_T0_T1(rd);
6390 } else if (op & 0x40) {
6391 /* 64-bit accumulate. */
6392 gen_op_addq_T0_T1(rs, rd);
6393 }
6394 gen_movl_reg_T0(s, rs);
6395 gen_movl_reg_T1(s, rd);
5fd46862 6396 }
2c0262af 6397 break;
9ee6e8bb
PB
6398 }
6399 break;
6400 case 6: case 7: case 14: case 15:
6401 /* Coprocessor. */
6402 if (((insn >> 24) & 3) == 3) {
6403 /* Translate into the equivalent ARM encoding. */
6404 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
6405 if (disas_neon_data_insn(env, s, insn))
6406 goto illegal_op;
6407 } else {
6408 if (insn & (1 << 28))
6409 goto illegal_op;
6410 if (disas_coproc_insn (env, s, insn))
6411 goto illegal_op;
6412 }
6413 break;
6414 case 8: case 9: case 10: case 11:
6415 if (insn & (1 << 15)) {
6416 /* Branches, misc control. */
6417 if (insn & 0x5000) {
6418 /* Unconditional branch. */
6419 /* signextend(hw1[10:0]) -> offset[:12]. */
6420 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
6421 /* hw1[10:0] -> offset[11:1]. */
6422 offset |= (insn & 0x7ff) << 1;
6423 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
6424 offset[24:22] already have the same value because of the
6425 sign extension above. */
6426 offset ^= ((~insn) & (1 << 13)) << 10;
6427 offset ^= ((~insn) & (1 << 11)) << 11;
6428
6429 addr = s->pc;
6430 if (insn & (1 << 14)) {
6431 /* Branch and link. */
6432 gen_op_movl_T1_im(addr | 1);
6433 gen_movl_reg_T1(s, 14);
b5ff1b31 6434 }
3b46e624 6435
9ee6e8bb
PB
6436 addr += offset;
6437 if (insn & (1 << 12)) {
6438 /* b/bl */
6439 gen_jmp(s, addr);
6440 } else {
6441 /* blx */
6442 addr &= ~(uint32_t)2;
6443 gen_op_movl_T0_im(addr);
6444 gen_bx(s);
2c0262af 6445 }
9ee6e8bb
PB
6446 } else if (((insn >> 23) & 7) == 7) {
6447 /* Misc control */
6448 if (insn & (1 << 13))
6449 goto illegal_op;
6450
6451 if (insn & (1 << 26)) {
6452 /* Secure monitor call (v6Z) */
6453 goto illegal_op; /* not implemented. */
2c0262af 6454 } else {
9ee6e8bb
PB
6455 op = (insn >> 20) & 7;
6456 switch (op) {
6457 case 0: /* msr cpsr. */
6458 if (IS_M(env)) {
6459 gen_op_v7m_msr_T0(insn & 0xff);
6460 gen_movl_reg_T0(s, rn);
6461 gen_lookup_tb(s);
6462 break;
6463 }
6464 /* fall through */
6465 case 1: /* msr spsr. */
6466 if (IS_M(env))
6467 goto illegal_op;
6468 gen_movl_T0_reg(s, rn);
6469 if (gen_set_psr_T0(s,
6470 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
6471 op == 1))
6472 goto illegal_op;
6473 break;
6474 case 2: /* cps, nop-hint. */
6475 if (((insn >> 8) & 7) == 0) {
6476 gen_nop_hint(s, insn & 0xff);
6477 }
6478 /* Implemented as NOP in user mode. */
6479 if (IS_USER(s))
6480 break;
6481 offset = 0;
6482 imm = 0;
6483 if (insn & (1 << 10)) {
6484 if (insn & (1 << 7))
6485 offset |= CPSR_A;
6486 if (insn & (1 << 6))
6487 offset |= CPSR_I;
6488 if (insn & (1 << 5))
6489 offset |= CPSR_F;
6490 if (insn & (1 << 9))
6491 imm = CPSR_A | CPSR_I | CPSR_F;
6492 }
6493 if (insn & (1 << 8)) {
6494 offset |= 0x1f;
6495 imm |= (insn & 0x1f);
6496 }
6497 if (offset) {
6498 gen_op_movl_T0_im(imm);
6499 gen_set_psr_T0(s, offset, 0);
6500 }
6501 break;
6502 case 3: /* Special control operations. */
6503 op = (insn >> 4) & 0xf;
6504 switch (op) {
6505 case 2: /* clrex */
6506 gen_op_clrex();
6507 break;
6508 case 4: /* dsb */
6509 case 5: /* dmb */
6510 case 6: /* isb */
6511 /* These execute as NOPs. */
6512 ARCH(7);
6513 break;
6514 default:
6515 goto illegal_op;
6516 }
6517 break;
6518 case 4: /* bxj */
6519 /* Trivial implementation equivalent to bx. */
6520 gen_movl_T0_reg(s, rn);
6521 gen_bx(s);
6522 break;
6523 case 5: /* Exception return. */
6524 /* Unpredictable in user mode. */
6525 goto illegal_op;
6526 case 6: /* mrs cpsr. */
6527 if (IS_M(env)) {
6528 gen_op_v7m_mrs_T0(insn & 0xff);
6529 } else {
6530 gen_op_movl_T0_cpsr();
6531 }
6532 gen_movl_reg_T0(s, rd);
6533 break;
6534 case 7: /* mrs spsr. */
6535 /* Not accessible in user mode. */
6536 if (IS_USER(s) || IS_M(env))
6537 goto illegal_op;
6538 gen_op_movl_T0_spsr();
6539 gen_movl_reg_T0(s, rd);
6540 break;
2c0262af
FB
6541 }
6542 }
9ee6e8bb
PB
6543 } else {
6544 /* Conditional branch. */
6545 op = (insn >> 22) & 0xf;
6546 /* Generate a conditional jump to next instruction. */
6547 s->condlabel = gen_new_label();
6548 gen_test_cc[op ^ 1](s->condlabel);
6549 s->condjmp = 1;
6550
6551 /* offset[11:1] = insn[10:0] */
6552 offset = (insn & 0x7ff) << 1;
6553 /* offset[17:12] = insn[21:16]. */
6554 offset |= (insn & 0x003f0000) >> 4;
6555 /* offset[31:20] = insn[26]. */
6556 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
6557 /* offset[18] = insn[13]. */
6558 offset |= (insn & (1 << 13)) << 5;
6559 /* offset[19] = insn[11]. */
6560 offset |= (insn & (1 << 11)) << 8;
6561
6562 /* jump to the offset */
6563 addr = s->pc + offset;
6564 gen_jmp(s, addr);
6565 }
6566 } else {
6567 /* Data processing immediate. */
6568 if (insn & (1 << 25)) {
6569 if (insn & (1 << 24)) {
6570 if (insn & (1 << 20))
6571 goto illegal_op;
6572 /* Bitfield/Saturate. */
6573 op = (insn >> 21) & 7;
6574 imm = insn & 0x1f;
6575 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6576 if (rn == 15)
6577 gen_op_movl_T1_im(0);
6578 else
6579 gen_movl_T1_reg(s, rn);
6580 switch (op) {
6581 case 2: /* Signed bitfield extract. */
6582 imm++;
6583 if (shift + imm > 32)
6584 goto illegal_op;
6585 if (imm < 32)
6586 gen_op_sbfx_T1(shift, imm);
6587 break;
6588 case 6: /* Unsigned bitfield extract. */
6589 imm++;
6590 if (shift + imm > 32)
6591 goto illegal_op;
6592 if (imm < 32)
6593 gen_op_ubfx_T1(shift, (1u << imm) - 1);
6594 break;
6595 case 3: /* Bitfield insert/clear. */
6596 if (imm < shift)
6597 goto illegal_op;
6598 imm = imm + 1 - shift;
6599 if (imm != 32) {
6600 gen_movl_T0_reg(s, rd);
6601 gen_op_bfi_T1_T0(shift, ((1u << imm) - 1) << shift);
6602 }
6603 break;
6604 case 7:
6605 goto illegal_op;
6606 default: /* Saturate. */
6607 gen_movl_T1_reg(s, rn);
6608 if (shift) {
6609 if (op & 1)
6610 gen_op_sarl_T1_im(shift);
6611 else
6612 gen_op_shll_T1_im(shift);
6613 }
6614 if (op & 4) {
6615 /* Unsigned. */
6616 gen_op_ssat_T1(imm);
6617 if ((op & 1) && shift == 0)
6618 gen_op_usat16_T1(imm);
6619 else
6620 gen_op_usat_T1(imm);
2c0262af 6621 } else {
9ee6e8bb
PB
6622 /* Signed. */
6623 gen_op_ssat_T1(imm);
6624 if ((op & 1) && shift == 0)
6625 gen_op_ssat16_T1(imm);
6626 else
6627 gen_op_ssat_T1(imm);
2c0262af 6628 }
9ee6e8bb 6629 break;
2c0262af 6630 }
9ee6e8bb
PB
6631 gen_movl_reg_T1(s, rd);
6632 } else {
6633 imm = ((insn & 0x04000000) >> 15)
6634 | ((insn & 0x7000) >> 4) | (insn & 0xff);
6635 if (insn & (1 << 22)) {
6636 /* 16-bit immediate. */
6637 imm |= (insn >> 4) & 0xf000;
6638 if (insn & (1 << 23)) {
6639 /* movt */
6640 gen_movl_T0_reg(s, rd);
6641 gen_op_movtop_T0_im(imm << 16);
2c0262af 6642 } else {
9ee6e8bb
PB
6643 /* movw */
6644 gen_op_movl_T0_im(imm);
2c0262af
FB
6645 }
6646 } else {
9ee6e8bb
PB
6647 /* Add/sub 12-bit immediate. */
6648 if (rn == 15) {
6649 addr = s->pc & ~(uint32_t)3;
6650 if (insn & (1 << 23))
6651 addr -= imm;
6652 else
6653 addr += imm;
6654 gen_op_movl_T0_im(addr);
2c0262af 6655 } else {
9ee6e8bb
PB
6656 gen_movl_T0_reg(s, rn);
6657 gen_op_movl_T1_im(imm);
6658 if (insn & (1 << 23))
6659 gen_op_subl_T0_T1();
6660 else
6661 gen_op_addl_T0_T1();
2c0262af 6662 }
9ee6e8bb
PB
6663 }
6664 gen_movl_reg_T0(s, rd);
191abaa2 6665 }
9ee6e8bb
PB
6666 } else {
6667 int shifter_out = 0;
6668 /* modified 12-bit immediate. */
6669 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
6670 imm = (insn & 0xff);
6671 switch (shift) {
6672 case 0: /* XY */
6673 /* Nothing to do. */
6674 break;
6675 case 1: /* 00XY00XY */
6676 imm |= imm << 16;
6677 break;
6678 case 2: /* XY00XY00 */
6679 imm |= imm << 16;
6680 imm <<= 8;
6681 break;
6682 case 3: /* XYXYXYXY */
6683 imm |= imm << 16;
6684 imm |= imm << 8;
6685 break;
6686 default: /* Rotated constant. */
6687 shift = (shift << 1) | (imm >> 7);
6688 imm |= 0x80;
6689 imm = imm << (32 - shift);
6690 shifter_out = 1;
6691 break;
b5ff1b31 6692 }
9ee6e8bb
PB
6693 gen_op_movl_T1_im(imm);
6694 rn = (insn >> 16) & 0xf;
6695 if (rn == 15)
6696 gen_op_movl_T0_im(0);
6697 else
6698 gen_movl_T0_reg(s, rn);
6699 op = (insn >> 21) & 0xf;
6700 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
6701 shifter_out))
6702 goto illegal_op;
6703 rd = (insn >> 8) & 0xf;
6704 if (rd != 15) {
6705 gen_movl_reg_T0(s, rd);
2c0262af 6706 }
2c0262af 6707 }
9ee6e8bb
PB
6708 }
6709 break;
6710 case 12: /* Load/store single data item. */
6711 {
6712 int postinc = 0;
6713 int writeback = 0;
6714 if ((insn & 0x01100000) == 0x01000000) {
6715 if (disas_neon_ls_insn(env, s, insn))
c1713132 6716 goto illegal_op;
9ee6e8bb
PB
6717 break;
6718 }
6719 if (rn == 15) {
6720 /* PC relative. */
6721 /* s->pc has already been incremented by 4. */
6722 imm = s->pc & 0xfffffffc;
6723 if (insn & (1 << 23))
6724 imm += insn & 0xfff;
6725 else
6726 imm -= insn & 0xfff;
6727 gen_op_movl_T1_im(imm);
6728 } else {
6729 gen_movl_T1_reg(s, rn);
6730 if (insn & (1 << 23)) {
6731 /* Positive offset. */
6732 imm = insn & 0xfff;
6733 gen_op_addl_T1_im(imm);
6734 } else {
6735 op = (insn >> 8) & 7;
6736 imm = insn & 0xff;
6737 switch (op) {
6738 case 0: case 8: /* Shifted Register. */
6739 shift = (insn >> 4) & 0xf;
6740 if (shift > 3)
18c9b560 6741 goto illegal_op;
9ee6e8bb
PB
6742 gen_movl_T2_reg(s, rm);
6743 if (shift)
6744 gen_op_shll_T2_im(shift);
6745 gen_op_addl_T1_T2();
6746 break;
6747 case 4: /* Negative offset. */
6748 gen_op_addl_T1_im(-imm);
6749 break;
6750 case 6: /* User privilege. */
6751 gen_op_addl_T1_im(imm);
6752 break;
6753 case 1: /* Post-decrement. */
6754 imm = -imm;
6755 /* Fall through. */
6756 case 3: /* Post-increment. */
6757 gen_op_movl_T2_im(imm);
6758 postinc = 1;
6759 writeback = 1;
6760 break;
6761 case 5: /* Pre-decrement. */
6762 imm = -imm;
6763 /* Fall through. */
6764 case 7: /* Pre-increment. */
6765 gen_op_addl_T1_im(imm);
6766 writeback = 1;
6767 break;
6768 default:
b7bcbe95 6769 goto illegal_op;
9ee6e8bb
PB
6770 }
6771 }
6772 }
6773 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
6774 if (insn & (1 << 20)) {
6775 /* Load. */
6776 if (rs == 15 && op != 2) {
6777 if (op & 2)
b5ff1b31 6778 goto illegal_op;
9ee6e8bb
PB
6779 /* Memory hint. Implemented as NOP. */
6780 } else {
6781 switch (op) {
6782 case 0: gen_ldst(ldub, s); break;
6783 case 4: gen_ldst(ldsb, s); break;
6784 case 1: gen_ldst(lduw, s); break;
6785 case 5: gen_ldst(ldsw, s); break;
6786 case 2: gen_ldst(ldl, s); break;
6787 default: goto illegal_op;
6788 }
6789 if (rs == 15) {
6790 gen_bx(s);
6791 } else {
6792 gen_movl_reg_T0(s, rs);
6793 }
6794 }
6795 } else {
6796 /* Store. */
6797 if (rs == 15)
b7bcbe95 6798 goto illegal_op;
9ee6e8bb
PB
6799 gen_movl_T0_reg(s, rs);
6800 switch (op) {
6801 case 0: gen_ldst(stb, s); break;
6802 case 1: gen_ldst(stw, s); break;
6803 case 2: gen_ldst(stl, s); break;
6804 default: goto illegal_op;
b7bcbe95 6805 }
2c0262af 6806 }
9ee6e8bb
PB
6807 if (postinc)
6808 gen_op_addl_T1_im(imm);
6809 if (writeback)
6810 gen_movl_reg_T1(s, rn);
6811 }
6812 break;
6813 default:
6814 goto illegal_op;
2c0262af 6815 }
9ee6e8bb
PB
6816 return 0;
6817illegal_op:
6818 return 1;
2c0262af
FB
6819}
6820
9ee6e8bb 6821static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
6822{
6823 uint32_t val, insn, op, rm, rn, rd, shift, cond;
6824 int32_t offset;
6825 int i;
6826
9ee6e8bb
PB
6827 if (s->condexec_mask) {
6828 cond = s->condexec_cond;
6829 s->condlabel = gen_new_label();
6830 gen_test_cc[cond ^ 1](s->condlabel);
6831 s->condjmp = 1;
6832 }
6833
b5ff1b31 6834 insn = lduw_code(s->pc);
99c475ab 6835 s->pc += 2;
b5ff1b31 6836
99c475ab
FB
6837 switch (insn >> 12) {
6838 case 0: case 1:
6839 rd = insn & 7;
6840 op = (insn >> 11) & 3;
6841 if (op == 3) {
6842 /* add/subtract */
6843 rn = (insn >> 3) & 7;
6844 gen_movl_T0_reg(s, rn);
6845 if (insn & (1 << 10)) {
6846 /* immediate */
6847 gen_op_movl_T1_im((insn >> 6) & 7);
6848 } else {
6849 /* reg */
6850 rm = (insn >> 6) & 7;
6851 gen_movl_T1_reg(s, rm);
6852 }
9ee6e8bb
PB
6853 if (insn & (1 << 9)) {
6854 if (s->condexec_mask)
6855 gen_op_subl_T0_T1();
6856 else
6857 gen_op_subl_T0_T1_cc();
6858 } else {
6859 if (s->condexec_mask)
6860 gen_op_addl_T0_T1();
6861 else
6862 gen_op_addl_T0_T1_cc();
6863 }
99c475ab
FB
6864 gen_movl_reg_T0(s, rd);
6865 } else {
6866 /* shift immediate */
6867 rm = (insn >> 3) & 7;
6868 shift = (insn >> 6) & 0x1f;
6869 gen_movl_T0_reg(s, rm);
9ee6e8bb
PB
6870 if (s->condexec_mask)
6871 gen_shift_T0_im_thumb[op](shift);
6872 else
6873 gen_shift_T0_im_thumb_cc[op](shift);
99c475ab
FB
6874 gen_movl_reg_T0(s, rd);
6875 }
6876 break;
6877 case 2: case 3:
6878 /* arithmetic large immediate */
6879 op = (insn >> 11) & 3;
6880 rd = (insn >> 8) & 0x7;
6881 if (op == 0) {
6882 gen_op_movl_T0_im(insn & 0xff);
6883 } else {
6884 gen_movl_T0_reg(s, rd);
6885 gen_op_movl_T1_im(insn & 0xff);
6886 }
6887 switch (op) {
6888 case 0: /* mov */
9ee6e8bb
PB
6889 if (!s->condexec_mask)
6890 gen_op_logic_T0_cc();
99c475ab
FB
6891 break;
6892 case 1: /* cmp */
6893 gen_op_subl_T0_T1_cc();
6894 break;
6895 case 2: /* add */
9ee6e8bb
PB
6896 if (s->condexec_mask)
6897 gen_op_addl_T0_T1();
6898 else
6899 gen_op_addl_T0_T1_cc();
99c475ab
FB
6900 break;
6901 case 3: /* sub */
9ee6e8bb
PB
6902 if (s->condexec_mask)
6903 gen_op_subl_T0_T1();
6904 else
6905 gen_op_subl_T0_T1_cc();
99c475ab
FB
6906 break;
6907 }
6908 if (op != 1)
6909 gen_movl_reg_T0(s, rd);
6910 break;
6911 case 4:
6912 if (insn & (1 << 11)) {
6913 rd = (insn >> 8) & 7;
5899f386
FB
6914 /* load pc-relative. Bit 1 of PC is ignored. */
6915 val = s->pc + 2 + ((insn & 0xff) * 4);
6916 val &= ~(uint32_t)2;
99c475ab 6917 gen_op_movl_T1_im(val);
b5ff1b31 6918 gen_ldst(ldl, s);
99c475ab
FB
6919 gen_movl_reg_T0(s, rd);
6920 break;
6921 }
6922 if (insn & (1 << 10)) {
6923 /* data processing extended or blx */
6924 rd = (insn & 7) | ((insn >> 4) & 8);
6925 rm = (insn >> 3) & 0xf;
6926 op = (insn >> 8) & 3;
6927 switch (op) {
6928 case 0: /* add */
6929 gen_movl_T0_reg(s, rd);
6930 gen_movl_T1_reg(s, rm);
6931 gen_op_addl_T0_T1();
6932 gen_movl_reg_T0(s, rd);
6933 break;
6934 case 1: /* cmp */
6935 gen_movl_T0_reg(s, rd);
6936 gen_movl_T1_reg(s, rm);
6937 gen_op_subl_T0_T1_cc();
6938 break;
6939 case 2: /* mov/cpy */
6940 gen_movl_T0_reg(s, rm);
6941 gen_movl_reg_T0(s, rd);
6942 break;
6943 case 3:/* branch [and link] exchange thumb register */
6944 if (insn & (1 << 7)) {
6945 val = (uint32_t)s->pc | 1;
6946 gen_op_movl_T1_im(val);
6947 gen_movl_reg_T1(s, 14);
6948 }
6949 gen_movl_T0_reg(s, rm);
6950 gen_bx(s);
6951 break;
6952 }
6953 break;
6954 }
6955
6956 /* data processing register */
6957 rd = insn & 7;
6958 rm = (insn >> 3) & 7;
6959 op = (insn >> 6) & 0xf;
6960 if (op == 2 || op == 3 || op == 4 || op == 7) {
6961 /* the shift/rotate ops want the operands backwards */
6962 val = rm;
6963 rm = rd;
6964 rd = val;
6965 val = 1;
6966 } else {
6967 val = 0;
6968 }
6969
6970 if (op == 9) /* neg */
6971 gen_op_movl_T0_im(0);
6972 else if (op != 0xf) /* mvn doesn't read its first operand */
6973 gen_movl_T0_reg(s, rd);
6974
6975 gen_movl_T1_reg(s, rm);
5899f386 6976 switch (op) {
99c475ab
FB
6977 case 0x0: /* and */
6978 gen_op_andl_T0_T1();
9ee6e8bb
PB
6979 if (!s->condexec_mask)
6980 gen_op_logic_T0_cc();
99c475ab
FB
6981 break;
6982 case 0x1: /* eor */
6983 gen_op_xorl_T0_T1();
9ee6e8bb
PB
6984 if (!s->condexec_mask)
6985 gen_op_logic_T0_cc();
99c475ab
FB
6986 break;
6987 case 0x2: /* lsl */
9ee6e8bb
PB
6988 if (s->condexec_mask) {
6989 gen_op_shll_T1_T0();
6990 } else {
6991 gen_op_shll_T1_T0_cc();
6992 gen_op_logic_T1_cc();
6993 }
99c475ab
FB
6994 break;
6995 case 0x3: /* lsr */
9ee6e8bb
PB
6996 if (s->condexec_mask) {
6997 gen_op_shrl_T1_T0();
6998 } else {
6999 gen_op_shrl_T1_T0_cc();
7000 gen_op_logic_T1_cc();
7001 }
99c475ab
FB
7002 break;
7003 case 0x4: /* asr */
9ee6e8bb
PB
7004 if (s->condexec_mask) {
7005 gen_op_sarl_T1_T0();
7006 } else {
7007 gen_op_sarl_T1_T0_cc();
7008 gen_op_logic_T1_cc();
7009 }
99c475ab
FB
7010 break;
7011 case 0x5: /* adc */
9ee6e8bb
PB
7012 if (s->condexec_mask)
7013 gen_op_adcl_T0_T1();
7014 else
7015 gen_op_adcl_T0_T1_cc();
99c475ab
FB
7016 break;
7017 case 0x6: /* sbc */
9ee6e8bb
PB
7018 if (s->condexec_mask)
7019 gen_op_sbcl_T0_T1();
7020 else
7021 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
7022 break;
7023 case 0x7: /* ror */
9ee6e8bb
PB
7024 if (s->condexec_mask) {
7025 gen_op_rorl_T1_T0();
7026 } else {
7027 gen_op_rorl_T1_T0_cc();
7028 gen_op_logic_T1_cc();
7029 }
99c475ab
FB
7030 break;
7031 case 0x8: /* tst */
7032 gen_op_andl_T0_T1();
7033 gen_op_logic_T0_cc();
7034 rd = 16;
5899f386 7035 break;
99c475ab 7036 case 0x9: /* neg */
9ee6e8bb
PB
7037 if (s->condexec_mask)
7038 gen_op_subl_T0_T1();
7039 else
7040 gen_op_subl_T0_T1_cc();
99c475ab
FB
7041 break;
7042 case 0xa: /* cmp */
7043 gen_op_subl_T0_T1_cc();
7044 rd = 16;
7045 break;
7046 case 0xb: /* cmn */
7047 gen_op_addl_T0_T1_cc();
7048 rd = 16;
7049 break;
7050 case 0xc: /* orr */
7051 gen_op_orl_T0_T1();
9ee6e8bb
PB
7052 if (!s->condexec_mask)
7053 gen_op_logic_T0_cc();
99c475ab
FB
7054 break;
7055 case 0xd: /* mul */
7056 gen_op_mull_T0_T1();
9ee6e8bb
PB
7057 if (!s->condexec_mask)
7058 gen_op_logic_T0_cc();
99c475ab
FB
7059 break;
7060 case 0xe: /* bic */
7061 gen_op_bicl_T0_T1();
9ee6e8bb
PB
7062 if (!s->condexec_mask)
7063 gen_op_logic_T0_cc();
99c475ab
FB
7064 break;
7065 case 0xf: /* mvn */
7066 gen_op_notl_T1();
9ee6e8bb
PB
7067 if (!s->condexec_mask)
7068 gen_op_logic_T1_cc();
99c475ab 7069 val = 1;
5899f386 7070 rm = rd;
99c475ab
FB
7071 break;
7072 }
7073 if (rd != 16) {
7074 if (val)
5899f386 7075 gen_movl_reg_T1(s, rm);
99c475ab
FB
7076 else
7077 gen_movl_reg_T0(s, rd);
7078 }
7079 break;
7080
7081 case 5:
7082 /* load/store register offset. */
7083 rd = insn & 7;
7084 rn = (insn >> 3) & 7;
7085 rm = (insn >> 6) & 7;
7086 op = (insn >> 9) & 7;
7087 gen_movl_T1_reg(s, rn);
7088 gen_movl_T2_reg(s, rm);
7089 gen_op_addl_T1_T2();
7090
7091 if (op < 3) /* store */
7092 gen_movl_T0_reg(s, rd);
7093
7094 switch (op) {
7095 case 0: /* str */
b5ff1b31 7096 gen_ldst(stl, s);
99c475ab
FB
7097 break;
7098 case 1: /* strh */
b5ff1b31 7099 gen_ldst(stw, s);
99c475ab
FB
7100 break;
7101 case 2: /* strb */
b5ff1b31 7102 gen_ldst(stb, s);
99c475ab
FB
7103 break;
7104 case 3: /* ldrsb */
b5ff1b31 7105 gen_ldst(ldsb, s);
99c475ab
FB
7106 break;
7107 case 4: /* ldr */
b5ff1b31 7108 gen_ldst(ldl, s);
99c475ab
FB
7109 break;
7110 case 5: /* ldrh */
b5ff1b31 7111 gen_ldst(lduw, s);
99c475ab
FB
7112 break;
7113 case 6: /* ldrb */
b5ff1b31 7114 gen_ldst(ldub, s);
99c475ab
FB
7115 break;
7116 case 7: /* ldrsh */
b5ff1b31 7117 gen_ldst(ldsw, s);
99c475ab
FB
7118 break;
7119 }
7120 if (op >= 3) /* load */
7121 gen_movl_reg_T0(s, rd);
7122 break;
7123
7124 case 6:
7125 /* load/store word immediate offset */
7126 rd = insn & 7;
7127 rn = (insn >> 3) & 7;
7128 gen_movl_T1_reg(s, rn);
7129 val = (insn >> 4) & 0x7c;
7130 gen_op_movl_T2_im(val);
7131 gen_op_addl_T1_T2();
7132
7133 if (insn & (1 << 11)) {
7134 /* load */
b5ff1b31 7135 gen_ldst(ldl, s);
99c475ab
FB
7136 gen_movl_reg_T0(s, rd);
7137 } else {
7138 /* store */
7139 gen_movl_T0_reg(s, rd);
b5ff1b31 7140 gen_ldst(stl, s);
99c475ab
FB
7141 }
7142 break;
7143
7144 case 7:
7145 /* load/store byte immediate offset */
7146 rd = insn & 7;
7147 rn = (insn >> 3) & 7;
7148 gen_movl_T1_reg(s, rn);
7149 val = (insn >> 6) & 0x1f;
7150 gen_op_movl_T2_im(val);
7151 gen_op_addl_T1_T2();
7152
7153 if (insn & (1 << 11)) {
7154 /* load */
b5ff1b31 7155 gen_ldst(ldub, s);
99c475ab
FB
7156 gen_movl_reg_T0(s, rd);
7157 } else {
7158 /* store */
7159 gen_movl_T0_reg(s, rd);
b5ff1b31 7160 gen_ldst(stb, s);
99c475ab
FB
7161 }
7162 break;
7163
7164 case 8:
7165 /* load/store halfword immediate offset */
7166 rd = insn & 7;
7167 rn = (insn >> 3) & 7;
7168 gen_movl_T1_reg(s, rn);
7169 val = (insn >> 5) & 0x3e;
7170 gen_op_movl_T2_im(val);
7171 gen_op_addl_T1_T2();
7172
7173 if (insn & (1 << 11)) {
7174 /* load */
b5ff1b31 7175 gen_ldst(lduw, s);
99c475ab
FB
7176 gen_movl_reg_T0(s, rd);
7177 } else {
7178 /* store */
7179 gen_movl_T0_reg(s, rd);
b5ff1b31 7180 gen_ldst(stw, s);
99c475ab
FB
7181 }
7182 break;
7183
7184 case 9:
7185 /* load/store from stack */
7186 rd = (insn >> 8) & 7;
7187 gen_movl_T1_reg(s, 13);
7188 val = (insn & 0xff) * 4;
7189 gen_op_movl_T2_im(val);
7190 gen_op_addl_T1_T2();
7191
7192 if (insn & (1 << 11)) {
7193 /* load */
b5ff1b31 7194 gen_ldst(ldl, s);
99c475ab
FB
7195 gen_movl_reg_T0(s, rd);
7196 } else {
7197 /* store */
7198 gen_movl_T0_reg(s, rd);
b5ff1b31 7199 gen_ldst(stl, s);
99c475ab
FB
7200 }
7201 break;
7202
7203 case 10:
7204 /* add to high reg */
7205 rd = (insn >> 8) & 7;
5899f386
FB
7206 if (insn & (1 << 11)) {
7207 /* SP */
7208 gen_movl_T0_reg(s, 13);
7209 } else {
7210 /* PC. bit 1 is ignored. */
7211 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
7212 }
99c475ab
FB
7213 val = (insn & 0xff) * 4;
7214 gen_op_movl_T1_im(val);
7215 gen_op_addl_T0_T1();
7216 gen_movl_reg_T0(s, rd);
7217 break;
7218
7219 case 11:
7220 /* misc */
7221 op = (insn >> 8) & 0xf;
7222 switch (op) {
7223 case 0:
7224 /* adjust stack pointer */
7225 gen_movl_T1_reg(s, 13);
7226 val = (insn & 0x7f) * 4;
7227 if (insn & (1 << 7))
7228 val = -(int32_t)val;
7229 gen_op_movl_T2_im(val);
7230 gen_op_addl_T1_T2();
7231 gen_movl_reg_T1(s, 13);
7232 break;
7233
9ee6e8bb
PB
7234 case 2: /* sign/zero extend. */
7235 ARCH(6);
7236 rd = insn & 7;
7237 rm = (insn >> 3) & 7;
7238 gen_movl_T1_reg(s, rm);
7239 switch ((insn >> 6) & 3) {
7240 case 0: gen_op_sxth_T1(); break;
7241 case 1: gen_op_sxtb_T1(); break;
7242 case 2: gen_op_uxth_T1(); break;
7243 case 3: gen_op_uxtb_T1(); break;
7244 }
7245 gen_movl_reg_T1(s, rd);
7246 break;
99c475ab
FB
7247 case 4: case 5: case 0xc: case 0xd:
7248 /* push/pop */
7249 gen_movl_T1_reg(s, 13);
5899f386
FB
7250 if (insn & (1 << 8))
7251 offset = 4;
99c475ab 7252 else
5899f386
FB
7253 offset = 0;
7254 for (i = 0; i < 8; i++) {
7255 if (insn & (1 << i))
7256 offset += 4;
7257 }
7258 if ((insn & (1 << 11)) == 0) {
7259 gen_op_movl_T2_im(-offset);
7260 gen_op_addl_T1_T2();
7261 }
7262 gen_op_movl_T2_im(4);
99c475ab
FB
7263 for (i = 0; i < 8; i++) {
7264 if (insn & (1 << i)) {
7265 if (insn & (1 << 11)) {
7266 /* pop */
b5ff1b31 7267 gen_ldst(ldl, s);
99c475ab
FB
7268 gen_movl_reg_T0(s, i);
7269 } else {
7270 /* push */
7271 gen_movl_T0_reg(s, i);
b5ff1b31 7272 gen_ldst(stl, s);
99c475ab 7273 }
5899f386 7274 /* advance to the next address. */
99c475ab
FB
7275 gen_op_addl_T1_T2();
7276 }
7277 }
7278 if (insn & (1 << 8)) {
7279 if (insn & (1 << 11)) {
7280 /* pop pc */
b5ff1b31 7281 gen_ldst(ldl, s);
99c475ab
FB
7282 /* don't set the pc until the rest of the instruction
7283 has completed */
7284 } else {
7285 /* push lr */
7286 gen_movl_T0_reg(s, 14);
b5ff1b31 7287 gen_ldst(stl, s);
99c475ab
FB
7288 }
7289 gen_op_addl_T1_T2();
7290 }
5899f386
FB
7291 if ((insn & (1 << 11)) == 0) {
7292 gen_op_movl_T2_im(-offset);
7293 gen_op_addl_T1_T2();
7294 }
99c475ab
FB
7295 /* write back the new stack pointer */
7296 gen_movl_reg_T1(s, 13);
7297 /* set the new PC value */
7298 if ((insn & 0x0900) == 0x0900)
7299 gen_bx(s);
7300 break;
7301
9ee6e8bb
PB
7302 case 1: case 3: case 9: case 11: /* czb */
7303 rm = insn & 7;
7304 gen_movl_T0_reg(s, rm);
7305 s->condlabel = gen_new_label();
7306 s->condjmp = 1;
7307 if (insn & (1 << 11))
7308 gen_op_testn_T0(s->condlabel);
7309 else
7310 gen_op_test_T0(s->condlabel);
7311
7312 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
7313 val = (uint32_t)s->pc + 2;
7314 val += offset;
7315 gen_jmp(s, val);
7316 break;
7317
7318 case 15: /* IT, nop-hint. */
7319 if ((insn & 0xf) == 0) {
7320 gen_nop_hint(s, (insn >> 4) & 0xf);
7321 break;
7322 }
7323 /* If Then. */
7324 s->condexec_cond = (insn >> 4) & 0xe;
7325 s->condexec_mask = insn & 0x1f;
7326 /* No actual code generated for this insn, just setup state. */
7327 break;
7328
06c949e6 7329 case 0xe: /* bkpt */
9ee6e8bb 7330 gen_set_condexec(s);
06c949e6
PB
7331 gen_op_movl_T0_im((long)s->pc - 2);
7332 gen_op_movl_reg_TN[0][15]();
7333 gen_op_bkpt();
7334 s->is_jmp = DISAS_JUMP;
7335 break;
7336
9ee6e8bb
PB
7337 case 0xa: /* rev */
7338 ARCH(6);
7339 rn = (insn >> 3) & 0x7;
7340 rd = insn & 0x7;
7341 gen_movl_T0_reg(s, rn);
7342 switch ((insn >> 6) & 3) {
7343 case 0: gen_op_rev_T0(); break;
7344 case 1: gen_op_rev16_T0(); break;
7345 case 3: gen_op_revsh_T0(); break;
7346 default: goto illegal_op;
7347 }
7348 gen_movl_reg_T0(s, rd);
7349 break;
7350
7351 case 6: /* cps */
7352 ARCH(6);
7353 if (IS_USER(s))
7354 break;
7355 if (IS_M(env)) {
7356 val = (insn & (1 << 4)) != 0;
7357 gen_op_movl_T0_im(val);
7358 /* PRIMASK */
7359 if (insn & 1)
7360 gen_op_v7m_msr_T0(16);
7361 /* FAULTMASK */
7362 if (insn & 2)
7363 gen_op_v7m_msr_T0(17);
7364
7365 gen_lookup_tb(s);
7366 } else {
7367 if (insn & (1 << 4))
7368 shift = CPSR_A | CPSR_I | CPSR_F;
7369 else
7370 shift = 0;
7371
7372 val = ((insn & 7) << 6) & shift;
7373 gen_op_movl_T0_im(val);
7374 gen_set_psr_T0(s, shift, 0);
7375 }
7376 break;
7377
99c475ab
FB
7378 default:
7379 goto undef;
7380 }
7381 break;
7382
7383 case 12:
7384 /* load/store multiple */
7385 rn = (insn >> 8) & 0x7;
7386 gen_movl_T1_reg(s, rn);
7387 gen_op_movl_T2_im(4);
99c475ab
FB
7388 for (i = 0; i < 8; i++) {
7389 if (insn & (1 << i)) {
99c475ab
FB
7390 if (insn & (1 << 11)) {
7391 /* load */
b5ff1b31 7392 gen_ldst(ldl, s);
99c475ab
FB
7393 gen_movl_reg_T0(s, i);
7394 } else {
7395 /* store */
7396 gen_movl_T0_reg(s, i);
b5ff1b31 7397 gen_ldst(stl, s);
99c475ab 7398 }
5899f386
FB
7399 /* advance to the next address */
7400 gen_op_addl_T1_T2();
99c475ab
FB
7401 }
7402 }
5899f386 7403 /* Base register writeback. */
b5ff1b31
FB
7404 if ((insn & (1 << rn)) == 0)
7405 gen_movl_reg_T1(s, rn);
99c475ab
FB
7406 break;
7407
7408 case 13:
7409 /* conditional branch or swi */
7410 cond = (insn >> 8) & 0xf;
7411 if (cond == 0xe)
7412 goto undef;
7413
7414 if (cond == 0xf) {
7415 /* swi */
9ee6e8bb 7416 gen_set_condexec(s);
99c475ab
FB
7417 gen_op_movl_T0_im((long)s->pc | 1);
7418 /* Don't set r15. */
7419 gen_op_movl_reg_TN[0][15]();
9ee6e8bb 7420 s->is_jmp = DISAS_SWI;
99c475ab
FB
7421 break;
7422 }
7423 /* generate a conditional jump to next instruction */
e50e6a20
FB
7424 s->condlabel = gen_new_label();
7425 gen_test_cc[cond ^ 1](s->condlabel);
7426 s->condjmp = 1;
99c475ab
FB
7427 gen_movl_T1_reg(s, 15);
7428
7429 /* jump to the offset */
5899f386 7430 val = (uint32_t)s->pc + 2;
99c475ab 7431 offset = ((int32_t)insn << 24) >> 24;
5899f386 7432 val += offset << 1;
8aaca4c0 7433 gen_jmp(s, val);
99c475ab
FB
7434 break;
7435
7436 case 14:
358bf29e 7437 if (insn & (1 << 11)) {
9ee6e8bb
PB
7438 if (disas_thumb2_insn(env, s, insn))
7439 goto undef32;
358bf29e
PB
7440 break;
7441 }
9ee6e8bb 7442 /* unconditional branch */
99c475ab
FB
7443 val = (uint32_t)s->pc;
7444 offset = ((int32_t)insn << 21) >> 21;
7445 val += (offset << 1) + 2;
8aaca4c0 7446 gen_jmp(s, val);
99c475ab
FB
7447 break;
7448
7449 case 15:
9ee6e8bb
PB
7450 if (disas_thumb2_insn(env, s, insn))
7451 goto undef32;
7452 break;
99c475ab
FB
7453 }
7454 return;
9ee6e8bb
PB
7455undef32:
7456 gen_set_condexec(s);
7457 gen_op_movl_T0_im((long)s->pc - 4);
7458 gen_op_movl_reg_TN[0][15]();
7459 gen_op_undef_insn();
7460 s->is_jmp = DISAS_JUMP;
7461 return;
7462illegal_op:
99c475ab 7463undef:
9ee6e8bb 7464 gen_set_condexec(s);
5899f386 7465 gen_op_movl_T0_im((long)s->pc - 2);
99c475ab
FB
7466 gen_op_movl_reg_TN[0][15]();
7467 gen_op_undef_insn();
7468 s->is_jmp = DISAS_JUMP;
7469}
7470
2c0262af
FB
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'.  If search_pc is TRUE, also generate PC
   information for each intermediate instruction. */
static inline int gen_intermediate_code_internal(CPUState *env,
                                                 TranslationBlock *tb,
                                                 int search_pc)
{
    DisasContext dc1, *dc = &dc1;
    uint16_t *gen_opc_end;
    int j, lj;
    target_ulong pc_start;
    uint32_t next_page_start;

    /* generate intermediate code */
    pc_start = tb->pc;

    dc->tb = tb;

    gen_opc_ptr = gen_opc_buf;
    gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
    gen_opparam_ptr = gen_opparam_buf;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = env->singlestep_enabled;
    dc->condjmp = 0;
    dc->thumb = env->thumb;
    /* Thumb-2 IT-block state: the mask is pre-shifted by one so that the
       next condition bit can be extracted with (mask >> 4) & 1 below.  */
    dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
    dc->condexec_cond = env->condexec_bits >> 4;
    dc->is_mem = 0;
#if !defined(CONFIG_USER_ONLY)
    if (IS_M(env)) {
        /* M-profile: unprivileged iff in thread mode with CONTROL.nPRIV set.  */
        dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
    } else {
        dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
    }
#endif
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
    nb_gen_labels = 0;
    lj = -1;
    /* Reset the conditional execution bits immediately.  This avoids
       complications trying to do it at the end of the block.  */
    if (env->condexec_bits)
        gen_op_set_condexec(0);
    do {
#ifndef CONFIG_USER_ONLY
        if (dc->pc >= 0xfffffff0 && IS_M(env)) {
            /* M-profile magic exception-return addresses.  We always get
               here via a jump, so know we are not in a conditional
               execution block.  */
            gen_op_exception_exit();
        }
#endif

        if (env->nb_breakpoints > 0) {
            for(j = 0; j < env->nb_breakpoints; j++) {
                if (env->breakpoints[j] == dc->pc) {
                    gen_set_condexec(dc);
                    gen_op_movl_T0_im((long)dc->pc);
                    gen_op_movl_reg_TN[0][15]();
                    gen_op_debug();
                    dc->is_jmp = DISAS_JUMP;
                    /* Advance PC so that clearing the breakpoint will
                       invalidate this TB.  */
                    dc->pc += 2;
                    goto done_generating;
                    break;
                }
            }
        }
        if (search_pc) {
            /* Record the mapping from intermediate-op index to guest PC,
               zero-filling indices that belong to the previous insn.  */
            j = gen_opc_ptr - gen_opc_buf;
            if (lj < j) {
                lj++;
                while (lj < j)
                    gen_opc_instr_start[lj++] = 0;
            }
            gen_opc_pc[lj] = dc->pc;
            gen_opc_instr_start[lj] = 1;
        }

        if (env->thumb) {
            disas_thumb_insn(env, dc);
            if (dc->condexec_mask) {
                /* Advance the IT-block state: shift in the next condition
                   bit and consume one entry from the mask.  */
                dc->condexec_cond = (dc->condexec_cond & 0xe)
                                   | ((dc->condexec_mask >> 4) & 1);
                dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
                if (dc->condexec_mask == 0) {
                    dc->condexec_cond = 0;
                }
            }
        } else {
            disas_arm_insn(env, dc);
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Terminate the TB on memory ops if watchpoints are present.  */
        /* FIXME: This should be replaced by the deterministic execution
         * IRQ raising bits.  */
        if (dc->is_mem && env->nb_watchpoints)
            break;

        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
             !env->singlestep_enabled &&
             dc->pc < next_page_start);

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written.  */
    if (__builtin_expect(env->singlestep_enabled, 0)) {
        /* Make sure the pc is updated, and raise a debug exception.  */
        if (dc->condjmp) {
            gen_set_condexec(dc);
            if (dc->is_jmp == DISAS_SWI) {
                gen_op_swi();
            } else {
                gen_op_debug();
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_op_movl_T0_im((long)dc->pc);
            gen_op_movl_reg_TN[0][15]();
            dc->condjmp = 0;
        }
        gen_set_condexec(dc);
        if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
            gen_op_swi();
        } else {
            /* FIXME: Single stepping a WFI insn will not halt
               the CPU.  */
            gen_op_debug();
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middle of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        gen_set_condexec(dc);
        switch(dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            gen_op_movl_T0_0();
            gen_op_exit_tb();
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_WFI:
            gen_op_wfi();
            break;
        case DISAS_SWI:
            gen_op_swi();
            break;
        }
        if (dc->condjmp) {
            /* Emit the not-taken path of a trailing conditional insn:
               fall through to the next sequential instruction.  */
            gen_set_label(dc->condlabel);
            gen_set_condexec(dc);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }
done_generating:
    *gen_opc_ptr = INDEX_op_end;

#ifdef DEBUG_DISAS
    if (loglevel & CPU_LOG_TB_IN_ASM) {
        fprintf(logfile, "----------------\n");
        fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
        target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
        fprintf(logfile, "\n");
        if (loglevel & (CPU_LOG_TB_OP)) {
            fprintf(logfile, "OP:\n");
            dump_ops(gen_opc_buf, gen_opparam_buf);
            fprintf(logfile, "\n");
        }
    }
#endif
    if (search_pc) {
        /* Zero-fill the PC map for any trailing intermediate ops.  */
        j = gen_opc_ptr - gen_opc_buf;
        lj++;
        while (lj <= j)
            gen_opc_instr_start[lj++] = 0;
    } else {
        tb->size = dc->pc - pc_start;
    }
    return 0;
}
7674
7675int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7676{
7677 return gen_intermediate_code_internal(env, tb, 0);
7678}
7679
7680int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7681{
7682 return gen_intermediate_code_internal(env, tb, 1);
7683}
7684
/* Printable names for the low 4 bits of the CPSR mode field, indexed by
   (psr & 0xf) in cpu_dump_state below.  Values with no corresponding
   ARM processor mode are shown as "???".  */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 7689
5fafdf24 7690void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
7691 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
7692 int flags)
2c0262af
FB
7693{
7694 int i;
bc380d17 7695 union {
b7bcbe95
FB
7696 uint32_t i;
7697 float s;
7698 } s0, s1;
7699 CPU_DoubleU d;
a94a6abf
PB
7700 /* ??? This assumes float64 and double have the same layout.
7701 Oh well, it's only debug dumps. */
7702 union {
7703 float64 f64;
7704 double d;
7705 } d0;
b5ff1b31 7706 uint32_t psr;
2c0262af
FB
7707
7708 for(i=0;i<16;i++) {
7fe48483 7709 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 7710 if ((i % 4) == 3)
7fe48483 7711 cpu_fprintf(f, "\n");
2c0262af 7712 else
7fe48483 7713 cpu_fprintf(f, " ");
2c0262af 7714 }
b5ff1b31 7715 psr = cpsr_read(env);
687fa640
TS
7716 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
7717 psr,
b5ff1b31
FB
7718 psr & (1 << 31) ? 'N' : '-',
7719 psr & (1 << 30) ? 'Z' : '-',
7720 psr & (1 << 29) ? 'C' : '-',
7721 psr & (1 << 28) ? 'V' : '-',
5fafdf24 7722 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 7723 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95
FB
7724
7725 for (i = 0; i < 16; i++) {
8e96005d
FB
7726 d.d = env->vfp.regs[i];
7727 s0.i = d.l.lower;
7728 s1.i = d.l.upper;
a94a6abf
PB
7729 d0.f64 = d.d;
7730 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 7731 i * 2, (int)s0.i, s0.s,
a94a6abf 7732 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 7733 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 7734 d0.d);
b7bcbe95 7735 }
40f137e1 7736 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
2c0262af 7737}
a6b025d3 7738