]> git.proxmox.com Git - mirror_qemu.git/blame - target-arm/translate.c
Linux user memory access API change (initial patch by Thayne Harbaugh)
[mirror_qemu.git] / target-arm / translate.c
CommitLineData
2c0262af
FB
1/*
2 * ARM translation
5fafdf24 3 *
2c0262af 4 * Copyright (c) 2003 Fabrice Bellard
9ee6e8bb 5 * Copyright (c) 2005-2007 CodeSourcery
18c9b560 6 * Copyright (c) 2007 OpenedHand, Ltd.
2c0262af
FB
7 *
8 * This library is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU Lesser General Public
10 * License as published by the Free Software Foundation; either
11 * version 2 of the License, or (at your option) any later version.
12 *
13 * This library is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * Lesser General Public License for more details.
17 *
18 * You should have received a copy of the GNU Lesser General Public
19 * License along with this library; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22#include <stdarg.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <string.h>
26#include <inttypes.h>
27
28#include "cpu.h"
29#include "exec-all.h"
30#include "disas.h"
31
9ee6e8bb
PB
/* Feature-availability predicates used throughout the decoder.  Each
   expands to a runtime test of the CPU's feature bits, so `env' must be
   in scope at every use site.  5J (Jazelle) is hard-disabled.  */
#define ENABLE_ARCH_5J  0
#define ENABLE_ARCH_6   arm_feature(env, ARM_FEATURE_V6)
#define ENABLE_ARCH_6K  arm_feature(env, ARM_FEATURE_V6K)
#define ENABLE_ARCH_6T2 arm_feature(env, ARM_FEATURE_THUMB2)
#define ENABLE_ARCH_7   arm_feature(env, ARM_FEATURE_V7)

/* Reject the current instruction unless architecture revision `x' is
   present; jumps to the decoder's local `illegal_op' label otherwise.  */
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
39
2c0262af
FB
/* internal defines */

/* Per-instruction decoder state, threaded through all disas_* routines.  */
typedef struct DisasContext {
    target_ulong pc;            /* address of the next instruction (already
                                   advanced past the one being decoded) */
    int is_jmp;                 /* DISAS_* code saying how translation ends */
    /* Nonzero if this instruction has been conditionally skipped.  */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped.  */
    int condlabel;
    /* Thumb-2 conditional execution bits.  */
    int condexec_mask;
    int condexec_cond;
    struct TranslationBlock *tb;        /* the TB being generated */
    int singlestep_enabled;
    int thumb;                  /* nonzero when decoding Thumb code */
    int is_mem;                 /* set once a memory-access op is emitted
                                   (see gen_ldst) */
#if !defined(CONFIG_USER_ONLY)
    int user;                   /* nonzero when translating user-mode code;
                                   selects _user/_kernel load/store ops */
#endif
} DisasContext;
59
b5ff1b31
FB
/* In user-mode emulation everything runs unprivileged; in system mode
   the privilege level is tracked per-translation in DisasContext.  */
#if defined(CONFIG_USER_ONLY)
#define IS_USER(s) 1
#else
#define IS_USER(s) (s->user)
#endif

/* These instructions trap after executing, so defer them until after the
   conditional execution state has been updated.  */
#define DISAS_WFI 4
#define DISAS_SWI 5
2c0262af 70
c53be334
FB
/* With direct jumps the TB parameter is patched into the code stream, so
   the op takes no explicit argument; otherwise pass it as a long.  */
#ifdef USE_DIRECT_JUMP
#define TBPARAM(x)
#else
#define TBPARAM(x) (long)(x)
#endif

/* XXX: move that elsewhere */
static uint16_t *gen_opc_ptr;       /* output cursor for generated opcodes */
static uint32_t *gen_opparam_ptr;   /* output cursor for opcode parameters */
extern FILE *logfile;
extern int loglevel;

/* Build the INDEX_op_* opcode enumeration from the dyngen op list.  */
enum {
#define DEF(s, n, copy_size) INDEX_op_ ## s,
#include "opc.h"
#undef DEF
    NB_OPS,
};

#include "gen-op.h"
91
9ee6e8bb
PB
/* Expand to one 8-entry row of parallel add/subtract ops for
   saturation/halving variant `pfx' (s/q/sh/u/uq/uh).  NULL marks
   sub-encodings with no corresponding operation.  */
#define PAS_OP(pfx) { \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## add8_T0_T1, \
    NULL, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1 }

/* ARM encoding: rows indexed by the variant field (empty rows are
   unallocated encodings), columns by the operation field.  */
static GenOpFunc *gen_arm_parallel_addsub[8][8] = {
    {},
    PAS_OP(s),
    PAS_OP(q),
    PAS_OP(sh),
    {},
    PAS_OP(u),
    PAS_OP(uq),
    PAS_OP(uh),
};
#undef PAS_OP

/* For unknown reasons Arm and Thumb-2 use arbitrarily different encodings.  */
#define PAS_OP(pfx) { \
    gen_op_ ## pfx ## add8_T0_T1, \
    gen_op_ ## pfx ## add16_T0_T1, \
    gen_op_ ## pfx ## addsubx_T0_T1, \
    NULL, \
    gen_op_ ## pfx ## sub8_T0_T1, \
    gen_op_ ## pfx ## sub16_T0_T1, \
    gen_op_ ## pfx ## subaddx_T0_T1, \
    NULL }

static GenOpFunc *gen_thumb2_parallel_addsub[8][8] = {
    PAS_OP(s),
    PAS_OP(q),
    PAS_OP(sh),
    {},
    PAS_OP(u),
    PAS_OP(uq),
    PAS_OP(uh),
    {}
};
#undef PAS_OP
136
e50e6a20 137static GenOpFunc1 *gen_test_cc[14] = {
2c0262af
FB
138 gen_op_test_eq,
139 gen_op_test_ne,
140 gen_op_test_cs,
141 gen_op_test_cc,
142 gen_op_test_mi,
143 gen_op_test_pl,
144 gen_op_test_vs,
145 gen_op_test_vc,
146 gen_op_test_hi,
147 gen_op_test_ls,
148 gen_op_test_ge,
149 gen_op_test_lt,
150 gen_op_test_gt,
151 gen_op_test_le,
152};
153
154const uint8_t table_logic_cc[16] = {
155 1, /* and */
156 1, /* xor */
157 0, /* sub */
158 0, /* rsb */
159 0, /* add */
160 0, /* adc */
161 0, /* sbc */
162 0, /* rsc */
163 1, /* andl */
164 1, /* xorl */
165 0, /* cmp */
166 0, /* cmn */
167 1, /* orr */
168 1, /* mov */
169 1, /* bic */
170 1, /* mvn */
171};
3b46e624 172
2c0262af
FB
/* Immediate shifts of T1, indexed by the 2-bit shift type
   (0=LSL, 1=LSR, 2=ASR, 3=ROR).  */
static GenOpFunc1 *gen_shift_T1_im[4] = {
    gen_op_shll_T1_im,
    gen_op_shrl_T1_im,
    gen_op_sarl_T1_im,
    gen_op_rorl_T1_im,
};

/* Shift-amount-0 special cases for T1; LSL #0 is a no-op so slot 0 is
   NULL.  NOTE(review): presumably these implement the ARM "#0 means
   #32 / RRX" encodings — confirm against the op implementations.  */
static GenOpFunc *gen_shift_T1_0[4] = {
    NULL,
    gen_op_shrl_T1_0,
    gen_op_sarl_T1_0,
    gen_op_rrxl_T1,
};

/* Same tables for T2 (no flag-setting variants needed).  */
static GenOpFunc1 *gen_shift_T2_im[4] = {
    gen_op_shll_T2_im,
    gen_op_shrl_T2_im,
    gen_op_sarl_T2_im,
    gen_op_rorl_T2_im,
};

static GenOpFunc *gen_shift_T2_0[4] = {
    NULL,
    gen_op_shrl_T2_0,
    gen_op_sarl_T2_0,
    gen_op_rrxl_T2,
};

/* Flag-setting (S-bit) variants of the T1 immediate shifts.  */
static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
    gen_op_shll_T1_im_cc,
    gen_op_shrl_T1_im_cc,
    gen_op_sarl_T1_im_cc,
    gen_op_rorl_T1_im_cc,
};

static GenOpFunc *gen_shift_T1_0_cc[4] = {
    NULL,
    gen_op_shrl_T1_0_cc,
    gen_op_sarl_T1_0_cc,
    gen_op_rrxl_T1_cc,
};

/* Register-specified shifts: T1 shifted by the amount in T0.  */
static GenOpFunc *gen_shift_T1_T0[4] = {
    gen_op_shll_T1_T0,
    gen_op_shrl_T1_T0,
    gen_op_sarl_T1_T0,
    gen_op_rorl_T1_T0,
};

static GenOpFunc *gen_shift_T1_T0_cc[4] = {
    gen_op_shll_T1_T0_cc,
    gen_op_shrl_T1_T0_cc,
    gen_op_sarl_T1_T0_cc,
    gen_op_rorl_T1_T0_cc,
};
228
/* Ops that copy CPU register rN into temporary Tn, indexed
   [temporary 0..2][register 0..15].  */
static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
    {
        gen_op_movl_T0_r0,
        gen_op_movl_T0_r1,
        gen_op_movl_T0_r2,
        gen_op_movl_T0_r3,
        gen_op_movl_T0_r4,
        gen_op_movl_T0_r5,
        gen_op_movl_T0_r6,
        gen_op_movl_T0_r7,
        gen_op_movl_T0_r8,
        gen_op_movl_T0_r9,
        gen_op_movl_T0_r10,
        gen_op_movl_T0_r11,
        gen_op_movl_T0_r12,
        gen_op_movl_T0_r13,
        gen_op_movl_T0_r14,
        gen_op_movl_T0_r15,
    },
    {
        gen_op_movl_T1_r0,
        gen_op_movl_T1_r1,
        gen_op_movl_T1_r2,
        gen_op_movl_T1_r3,
        gen_op_movl_T1_r4,
        gen_op_movl_T1_r5,
        gen_op_movl_T1_r6,
        gen_op_movl_T1_r7,
        gen_op_movl_T1_r8,
        gen_op_movl_T1_r9,
        gen_op_movl_T1_r10,
        gen_op_movl_T1_r11,
        gen_op_movl_T1_r12,
        gen_op_movl_T1_r13,
        gen_op_movl_T1_r14,
        gen_op_movl_T1_r15,
    },
    {
        gen_op_movl_T2_r0,
        gen_op_movl_T2_r1,
        gen_op_movl_T2_r2,
        gen_op_movl_T2_r3,
        gen_op_movl_T2_r4,
        gen_op_movl_T2_r5,
        gen_op_movl_T2_r6,
        gen_op_movl_T2_r7,
        gen_op_movl_T2_r8,
        gen_op_movl_T2_r9,
        gen_op_movl_T2_r10,
        gen_op_movl_T2_r11,
        gen_op_movl_T2_r12,
        gen_op_movl_T2_r13,
        gen_op_movl_T2_r14,
        gen_op_movl_T2_r15,
    },
};

/* Ops that copy temporary Tn back into CPU register rN, indexed
   [temporary 0..1][register 0..15] (T2 is never written back).  */
static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
    {
        gen_op_movl_r0_T0,
        gen_op_movl_r1_T0,
        gen_op_movl_r2_T0,
        gen_op_movl_r3_T0,
        gen_op_movl_r4_T0,
        gen_op_movl_r5_T0,
        gen_op_movl_r6_T0,
        gen_op_movl_r7_T0,
        gen_op_movl_r8_T0,
        gen_op_movl_r9_T0,
        gen_op_movl_r10_T0,
        gen_op_movl_r11_T0,
        gen_op_movl_r12_T0,
        gen_op_movl_r13_T0,
        gen_op_movl_r14_T0,
        gen_op_movl_r15_T0,
    },
    {
        gen_op_movl_r0_T1,
        gen_op_movl_r1_T1,
        gen_op_movl_r2_T1,
        gen_op_movl_r3_T1,
        gen_op_movl_r4_T1,
        gen_op_movl_r5_T1,
        gen_op_movl_r6_T1,
        gen_op_movl_r7_T1,
        gen_op_movl_r8_T1,
        gen_op_movl_r9_T1,
        gen_op_movl_r10_T1,
        gen_op_movl_r11_T1,
        gen_op_movl_r12_T1,
        gen_op_movl_r13_T1,
        gen_op_movl_r14_T1,
        gen_op_movl_r15_T1,
    },
};

/* Ops that load an immediate into temporary Tn, indexed by temporary.  */
static GenOpFunc1 *gen_op_movl_TN_im[3] = {
    gen_op_movl_T0_im,
    gen_op_movl_T1_im,
    gen_op_movl_T2_im,
};
330
9ee6e8bb
PB
/* Thumb immediate shifts of T0 (0=LSL, 1=LSR, 2=ASR); flag-setting
   variant first, plain variant below.  */
static GenOpFunc1 *gen_shift_T0_im_thumb_cc[3] = {
    gen_op_shll_T0_im_thumb_cc,
    gen_op_shrl_T0_im_thumb_cc,
    gen_op_sarl_T0_im_thumb_cc,
};

static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
    gen_op_shll_T0_im_thumb,
    gen_op_shrl_T0_im_thumb,
    gen_op_sarl_T0_im_thumb,
};
342
/* Emit a branch-and-exchange through T0 and mark the TB as needing a
   full state update (the op can change the ARM/Thumb execution state,
   so translation must stop here).  */
static inline void gen_bx(DisasContext *s)
{
    s->is_jmp = DISAS_UPDATE;
    gen_op_bx_T0();
}
348
b5ff1b31
FB
349
/* Emit load/store op `name'.  User-only builds always use the _raw
   variant; system builds select the _user or _kernel variant from the
   translation-time privilege level and record that this TB performs a
   memory access (s->is_mem).  */
#if defined(CONFIG_USER_ONLY)
#define gen_ldst(name, s) gen_op_##name##_raw()
#else
#define gen_ldst(name, s) do { \
    s->is_mem = 1; \
    if (IS_USER(s)) \
        gen_op_##name##_user(); \
    else \
        gen_op_##name##_kernel(); \
    } while (0)
#endif
361
2c0262af
FB
/* Copy CPU register `reg' into temporary Tt.  Reads of r15 are
   materialized as an immediate, since the architectural PC value is
   fixed at translation time.  */
static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
{
    int val;

    if (reg == 15) {
        /* normally, since we updated PC, we need only to add one insn */
        if (s->thumb)
            val = (long)s->pc + 2;
        else
            val = (long)s->pc + 4;
        gen_op_movl_TN_im[t](val);
    } else {
        gen_op_movl_TN_reg[t][reg]();
    }
}
377
/* Convenience wrappers: load register `reg' into T0/T1/T2.  */
static inline void gen_movl_T0_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 0);
}

static inline void gen_movl_T1_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 1);
}

static inline void gen_movl_T2_reg(DisasContext *s, int reg)
{
    gen_movl_TN_reg(s, reg, 2);
}
392
/* Copy temporary Tt into CPU register `reg'.  Writing r15 is a branch,
   so the TB must end (DISAS_JUMP).  */
static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
{
    gen_op_movl_reg_TN[t][reg]();
    if (reg == 15) {
        s->is_jmp = DISAS_JUMP;
    }
}

/* Convenience wrappers: store T0/T1 into register `reg'.  */
static inline void gen_movl_reg_T0(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 0);
}

static inline void gen_movl_reg_T1(DisasContext *s, int reg)
{
    gen_movl_reg_TN(s, reg, 1);
}
410
b5ff1b31
FB
/* Force a TB lookup after an instruction that changes the CPU state.
   Stores the next PC into r15 and ends the TB with DISAS_UPDATE so the
   main loop re-resolves the next translation block.  */
static inline void gen_lookup_tb(DisasContext *s)
{
    gen_op_movl_T0_im(s->pc);
    gen_movl_reg_T0(s, 15);
    s->is_jmp = DISAS_UPDATE;
}
418
2c0262af
FB
/* Apply the addressing-mode offset of a single data transfer to the
   base address in T1.  Bit 23 (U) selects add vs. subtract.  Immediate
   form: 12-bit offset.  Register form: rm, optionally shifted by a
   5-bit immediate, computed via T2; a shift amount of 0 with a non-LSL
   type dispatches to the shift-by-zero special-case ops.  */
static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
{
    int val, rm, shift, shiftop;

    if (!(insn & (1 << 25))) {
        /* immediate */
        val = insn & 0xfff;
        if (!(insn & (1 << 23)))
            val = -val;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* shift/register */
        rm = (insn) & 0xf;
        shift = (insn >> 7) & 0x1f;
        gen_movl_T2_reg(s, rm);
        shiftop = (insn >> 5) & 3;
        if (shift != 0) {
            gen_shift_T2_im[shiftop](shift);
        } else if (shiftop != 0) {
            gen_shift_T2_0[shiftop]();
        }
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}
447
191f9a93
PB
/* As gen_add_data_offset, but for the halfword/doubleword transfer
   encoding: split 8-bit immediate (bit 22 set) or plain register
   offset.  `extra' is an additional byte offset folded in after the
   sign is applied — NOTE(review): appears to account for the second
   word of 64-bit transfers; confirm at call sites.  */
static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
                                        int extra)
{
    int val, rm;

    if (insn & (1 << 22)) {
        /* immediate */
        val = (insn & 0xf) | ((insn >> 4) & 0xf0);
        if (!(insn & (1 << 23)))
            val = -val;
        val += extra;
        if (val != 0)
            gen_op_addl_T1_im(val);
    } else {
        /* register */
        if (extra)
            gen_op_addl_T1_im(extra);
        rm = (insn) & 0xf;
        gen_movl_T2_reg(s, rm);
        if (!(insn & (1 << 23)))
            gen_op_subl_T1_T2();
        else
            gen_op_addl_T1_T2();
    }
}
473
b7bcbe95
FB
/* Define gen_vfp_<name>(dp): emit the double-precision op when dp is
   nonzero, the single-precision op otherwise.  */
#define VFP_OP(name) \
static inline void gen_vfp_##name(int dp) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(); \
    else \
        gen_op_vfp_##name##s(); \
}

/* Same, for ops that take one integer argument (used by the
   fixed-point conversion ops below).  */
#define VFP_OP1(name) \
static inline void gen_vfp_##name(int dp, int arg) \
{ \
    if (dp) \
        gen_op_vfp_##name##d(arg); \
    else \
        gen_op_vfp_##name##s(arg); \
}

VFP_OP(add)
VFP_OP(sub)
VFP_OP(mul)
VFP_OP(div)
VFP_OP(neg)
VFP_OP(abs)
VFP_OP(sqrt)
VFP_OP(cmp)
VFP_OP(cmpe)
VFP_OP(F1_ld0)
VFP_OP(uito)
VFP_OP(sito)
VFP_OP(toui)
VFP_OP(touiz)
VFP_OP(tosi)
VFP_OP(tosiz)
VFP_OP1(tosh)
VFP_OP1(tosl)
VFP_OP1(touh)
VFP_OP1(toul)
VFP_OP1(shto)
VFP_OP1(slto)
VFP_OP1(uhto)
VFP_OP1(ulto)

#undef VFP_OP
518
9ee6e8bb
PB
/* Emit the single- or double-precision fconst op for encoding `val',
   selected by `dp'.  */
static inline void gen_vfp_fconst(int dp, uint32_t val)
{
    if (dp) {
        gen_op_vfp_fconstd(val);
        return;
    }
    gen_op_vfp_fconsts(val);
}
526
b5ff1b31
FB
527static inline void gen_vfp_ld(DisasContext *s, int dp)
528{
529 if (dp)
530 gen_ldst(vfp_ldd, s);
531 else
532 gen_ldst(vfp_lds, s);
533}
534
535static inline void gen_vfp_st(DisasContext *s, int dp)
536{
537 if (dp)
538 gen_ldst(vfp_std, s);
539 else
540 gen_ldst(vfp_sts, s);
541}
542
8e96005d
FB
543static inline long
544vfp_reg_offset (int dp, int reg)
545{
546 if (dp)
547 return offsetof(CPUARMState, vfp.regs[reg]);
548 else if (reg & 1) {
549 return offsetof(CPUARMState, vfp.regs[reg >> 1])
550 + offsetof(CPU_DoubleU, l.upper);
551 } else {
552 return offsetof(CPUARMState, vfp.regs[reg >> 1])
553 + offsetof(CPU_DoubleU, l.lower);
554 }
555}
9ee6e8bb
PB
556
557/* Return the offset of a 32-bit piece of a NEON register.
558 zero is the least significant end of the register. */
559static inline long
560neon_reg_offset (int reg, int n)
561{
562 int sreg;
563 sreg = reg * 2 + n;
564 return vfp_reg_offset(0, sreg);
565}
566
567#define NEON_GET_REG(T, reg, n) gen_op_neon_getreg_##T(neon_reg_offset(reg, n))
568#define NEON_SET_REG(T, reg, n) gen_op_neon_setreg_##T(neon_reg_offset(reg, n))
569
b7bcbe95
FB
/* Copy VFP register `reg' into the F0 working register (`dp' selects
   double vs. single precision).  */
static inline void gen_mov_F0_vreg(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);

    if (dp) {
        gen_op_vfp_getreg_F0d(off);
    } else {
        gen_op_vfp_getreg_F0s(off);
    }
}

/* Copy VFP register `reg' into the F1 working register.  */
static inline void gen_mov_F1_vreg(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);

    if (dp) {
        gen_op_vfp_getreg_F1d(off);
    } else {
        gen_op_vfp_getreg_F1s(off);
    }
}

/* Copy the F0 working register back into VFP register `reg'.  */
static inline void gen_mov_vreg_F0(int dp, int reg)
{
    long off = vfp_reg_offset(dp, reg);

    if (dp) {
        gen_op_vfp_setreg_F0d(off);
    } else {
        gen_op_vfp_setreg_F0s(off);
    }
}
593
18c9b560
AZ
/* Coprocessor transfer direction bit (L): set for loads/reads.  */
#define ARM_CP_RW_BIT (1 << 20)

/* Compute the effective address of an iwMMXt load/store into T1 and
   apply base-register writeback.  Returns nonzero for an invalid
   addressing-mode encoding (undefined instruction), zero on success.  */
static inline int gen_iwmmxt_address(DisasContext *s, uint32_t insn)
{
    int rd;
    uint32_t offset;

    rd = (insn >> 16) & 0xf;            /* base register */
    gen_movl_T1_reg(s, rd);

    /* 8-bit immediate offset, scaled by 4 when bit 8 is set.  */
    offset = (insn & 0xff) << ((insn >> 7) & 2);
    if (insn & (1 << 24)) {
        /* Pre indexed */
        if (insn & (1 << 23))
            gen_op_addl_T1_im(offset);
        else
            gen_op_addl_T1_im(-offset);

        /* Writeback when the W bit is set.  */
        if (insn & (1 << 21))
            gen_movl_reg_T1(s, rd);
    } else if (insn & (1 << 21)) {
        /* Post indexed */
        if (insn & (1 << 23))
            gen_op_movl_T0_im(offset);
        else
            gen_op_movl_T0_im(- offset);
        gen_op_addl_T0_T1();
        gen_movl_reg_T0(s, rd);
    } else if (!(insn & (1 << 23)))
        /* Plain offset addressing requires the U (add) bit.  */
        return 1;
    return 0;
}
626
627static inline int gen_iwmmxt_shift(uint32_t insn, uint32_t mask)
628{
629 int rd = (insn >> 0) & 0xf;
630
631 if (insn & (1 << 8))
632 if (rd < ARM_IWMMXT_wCGR0 || rd > ARM_IWMMXT_wCGR3)
633 return 1;
634 else
635 gen_op_iwmmxt_movl_T0_wCx(rd);
636 else
637 gen_op_iwmmxt_movl_T0_T1_wRn(rd);
638
639 gen_op_movl_T1_im(mask);
640 gen_op_andl_T0_T1();
641 return 0;
642}
643
644/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
645 (ie. an undefined instruction). */
646static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
647{
648 int rd, wrd;
649 int rdhi, rdlo, rd0, rd1, i;
650
651 if ((insn & 0x0e000e00) == 0x0c000000) {
652 if ((insn & 0x0fe00ff0) == 0x0c400000) {
653 wrd = insn & 0xf;
654 rdlo = (insn >> 12) & 0xf;
655 rdhi = (insn >> 16) & 0xf;
656 if (insn & ARM_CP_RW_BIT) { /* TMRRC */
657 gen_op_iwmmxt_movl_T0_T1_wRn(wrd);
658 gen_movl_reg_T0(s, rdlo);
659 gen_movl_reg_T1(s, rdhi);
660 } else { /* TMCRR */
661 gen_movl_T0_reg(s, rdlo);
662 gen_movl_T1_reg(s, rdhi);
663 gen_op_iwmmxt_movl_wRn_T0_T1(wrd);
664 gen_op_iwmmxt_set_mup();
665 }
666 return 0;
667 }
668
669 wrd = (insn >> 12) & 0xf;
670 if (gen_iwmmxt_address(s, insn))
671 return 1;
672 if (insn & ARM_CP_RW_BIT) {
673 if ((insn >> 28) == 0xf) { /* WLDRW wCx */
674 gen_ldst(ldl, s);
675 gen_op_iwmmxt_movl_wCx_T0(wrd);
676 } else {
677 if (insn & (1 << 8))
678 if (insn & (1 << 22)) /* WLDRD */
679 gen_ldst(iwmmxt_ldq, s);
680 else /* WLDRW wRd */
681 gen_ldst(iwmmxt_ldl, s);
682 else
683 if (insn & (1 << 22)) /* WLDRH */
684 gen_ldst(iwmmxt_ldw, s);
685 else /* WLDRB */
686 gen_ldst(iwmmxt_ldb, s);
687 gen_op_iwmmxt_movq_wRn_M0(wrd);
688 }
689 } else {
690 if ((insn >> 28) == 0xf) { /* WSTRW wCx */
691 gen_op_iwmmxt_movl_T0_wCx(wrd);
692 gen_ldst(stl, s);
693 } else {
694 gen_op_iwmmxt_movq_M0_wRn(wrd);
695 if (insn & (1 << 8))
696 if (insn & (1 << 22)) /* WSTRD */
697 gen_ldst(iwmmxt_stq, s);
698 else /* WSTRW wRd */
699 gen_ldst(iwmmxt_stl, s);
700 else
701 if (insn & (1 << 22)) /* WSTRH */
702 gen_ldst(iwmmxt_ldw, s);
703 else /* WSTRB */
704 gen_ldst(iwmmxt_stb, s);
705 }
706 }
707 return 0;
708 }
709
710 if ((insn & 0x0f000000) != 0x0e000000)
711 return 1;
712
713 switch (((insn >> 12) & 0xf00) | ((insn >> 4) & 0xff)) {
714 case 0x000: /* WOR */
715 wrd = (insn >> 12) & 0xf;
716 rd0 = (insn >> 0) & 0xf;
717 rd1 = (insn >> 16) & 0xf;
718 gen_op_iwmmxt_movq_M0_wRn(rd0);
719 gen_op_iwmmxt_orq_M0_wRn(rd1);
720 gen_op_iwmmxt_setpsr_nz();
721 gen_op_iwmmxt_movq_wRn_M0(wrd);
722 gen_op_iwmmxt_set_mup();
723 gen_op_iwmmxt_set_cup();
724 break;
725 case 0x011: /* TMCR */
726 if (insn & 0xf)
727 return 1;
728 rd = (insn >> 12) & 0xf;
729 wrd = (insn >> 16) & 0xf;
730 switch (wrd) {
731 case ARM_IWMMXT_wCID:
732 case ARM_IWMMXT_wCASF:
733 break;
734 case ARM_IWMMXT_wCon:
735 gen_op_iwmmxt_set_cup();
736 /* Fall through. */
737 case ARM_IWMMXT_wCSSF:
738 gen_op_iwmmxt_movl_T0_wCx(wrd);
739 gen_movl_T1_reg(s, rd);
740 gen_op_bicl_T0_T1();
741 gen_op_iwmmxt_movl_wCx_T0(wrd);
742 break;
743 case ARM_IWMMXT_wCGR0:
744 case ARM_IWMMXT_wCGR1:
745 case ARM_IWMMXT_wCGR2:
746 case ARM_IWMMXT_wCGR3:
747 gen_op_iwmmxt_set_cup();
748 gen_movl_reg_T0(s, rd);
749 gen_op_iwmmxt_movl_wCx_T0(wrd);
750 break;
751 default:
752 return 1;
753 }
754 break;
755 case 0x100: /* WXOR */
756 wrd = (insn >> 12) & 0xf;
757 rd0 = (insn >> 0) & 0xf;
758 rd1 = (insn >> 16) & 0xf;
759 gen_op_iwmmxt_movq_M0_wRn(rd0);
760 gen_op_iwmmxt_xorq_M0_wRn(rd1);
761 gen_op_iwmmxt_setpsr_nz();
762 gen_op_iwmmxt_movq_wRn_M0(wrd);
763 gen_op_iwmmxt_set_mup();
764 gen_op_iwmmxt_set_cup();
765 break;
766 case 0x111: /* TMRC */
767 if (insn & 0xf)
768 return 1;
769 rd = (insn >> 12) & 0xf;
770 wrd = (insn >> 16) & 0xf;
771 gen_op_iwmmxt_movl_T0_wCx(wrd);
772 gen_movl_reg_T0(s, rd);
773 break;
774 case 0x300: /* WANDN */
775 wrd = (insn >> 12) & 0xf;
776 rd0 = (insn >> 0) & 0xf;
777 rd1 = (insn >> 16) & 0xf;
778 gen_op_iwmmxt_movq_M0_wRn(rd0);
779 gen_op_iwmmxt_negq_M0();
780 gen_op_iwmmxt_andq_M0_wRn(rd1);
781 gen_op_iwmmxt_setpsr_nz();
782 gen_op_iwmmxt_movq_wRn_M0(wrd);
783 gen_op_iwmmxt_set_mup();
784 gen_op_iwmmxt_set_cup();
785 break;
786 case 0x200: /* WAND */
787 wrd = (insn >> 12) & 0xf;
788 rd0 = (insn >> 0) & 0xf;
789 rd1 = (insn >> 16) & 0xf;
790 gen_op_iwmmxt_movq_M0_wRn(rd0);
791 gen_op_iwmmxt_andq_M0_wRn(rd1);
792 gen_op_iwmmxt_setpsr_nz();
793 gen_op_iwmmxt_movq_wRn_M0(wrd);
794 gen_op_iwmmxt_set_mup();
795 gen_op_iwmmxt_set_cup();
796 break;
797 case 0x810: case 0xa10: /* WMADD */
798 wrd = (insn >> 12) & 0xf;
799 rd0 = (insn >> 0) & 0xf;
800 rd1 = (insn >> 16) & 0xf;
801 gen_op_iwmmxt_movq_M0_wRn(rd0);
802 if (insn & (1 << 21))
803 gen_op_iwmmxt_maddsq_M0_wRn(rd1);
804 else
805 gen_op_iwmmxt_madduq_M0_wRn(rd1);
806 gen_op_iwmmxt_movq_wRn_M0(wrd);
807 gen_op_iwmmxt_set_mup();
808 break;
809 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
810 wrd = (insn >> 12) & 0xf;
811 rd0 = (insn >> 16) & 0xf;
812 rd1 = (insn >> 0) & 0xf;
813 gen_op_iwmmxt_movq_M0_wRn(rd0);
814 switch ((insn >> 22) & 3) {
815 case 0:
816 gen_op_iwmmxt_unpacklb_M0_wRn(rd1);
817 break;
818 case 1:
819 gen_op_iwmmxt_unpacklw_M0_wRn(rd1);
820 break;
821 case 2:
822 gen_op_iwmmxt_unpackll_M0_wRn(rd1);
823 break;
824 case 3:
825 return 1;
826 }
827 gen_op_iwmmxt_movq_wRn_M0(wrd);
828 gen_op_iwmmxt_set_mup();
829 gen_op_iwmmxt_set_cup();
830 break;
831 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
832 wrd = (insn >> 12) & 0xf;
833 rd0 = (insn >> 16) & 0xf;
834 rd1 = (insn >> 0) & 0xf;
835 gen_op_iwmmxt_movq_M0_wRn(rd0);
836 switch ((insn >> 22) & 3) {
837 case 0:
838 gen_op_iwmmxt_unpackhb_M0_wRn(rd1);
839 break;
840 case 1:
841 gen_op_iwmmxt_unpackhw_M0_wRn(rd1);
842 break;
843 case 2:
844 gen_op_iwmmxt_unpackhl_M0_wRn(rd1);
845 break;
846 case 3:
847 return 1;
848 }
849 gen_op_iwmmxt_movq_wRn_M0(wrd);
850 gen_op_iwmmxt_set_mup();
851 gen_op_iwmmxt_set_cup();
852 break;
853 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
854 wrd = (insn >> 12) & 0xf;
855 rd0 = (insn >> 16) & 0xf;
856 rd1 = (insn >> 0) & 0xf;
857 gen_op_iwmmxt_movq_M0_wRn(rd0);
858 if (insn & (1 << 22))
859 gen_op_iwmmxt_sadw_M0_wRn(rd1);
860 else
861 gen_op_iwmmxt_sadb_M0_wRn(rd1);
862 if (!(insn & (1 << 20)))
863 gen_op_iwmmxt_addl_M0_wRn(wrd);
864 gen_op_iwmmxt_movq_wRn_M0(wrd);
865 gen_op_iwmmxt_set_mup();
866 break;
867 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
868 wrd = (insn >> 12) & 0xf;
869 rd0 = (insn >> 16) & 0xf;
870 rd1 = (insn >> 0) & 0xf;
871 gen_op_iwmmxt_movq_M0_wRn(rd0);
872 if (insn & (1 << 21))
873 gen_op_iwmmxt_mulsw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
874 else
875 gen_op_iwmmxt_muluw_M0_wRn(rd1, (insn & (1 << 20)) ? 16 : 0);
876 gen_op_iwmmxt_movq_wRn_M0(wrd);
877 gen_op_iwmmxt_set_mup();
878 break;
879 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
880 wrd = (insn >> 12) & 0xf;
881 rd0 = (insn >> 16) & 0xf;
882 rd1 = (insn >> 0) & 0xf;
883 gen_op_iwmmxt_movq_M0_wRn(rd0);
884 if (insn & (1 << 21))
885 gen_op_iwmmxt_macsw_M0_wRn(rd1);
886 else
887 gen_op_iwmmxt_macuw_M0_wRn(rd1);
888 if (!(insn & (1 << 20))) {
889 if (insn & (1 << 21))
890 gen_op_iwmmxt_addsq_M0_wRn(wrd);
891 else
892 gen_op_iwmmxt_adduq_M0_wRn(wrd);
893 }
894 gen_op_iwmmxt_movq_wRn_M0(wrd);
895 gen_op_iwmmxt_set_mup();
896 break;
897 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
898 wrd = (insn >> 12) & 0xf;
899 rd0 = (insn >> 16) & 0xf;
900 rd1 = (insn >> 0) & 0xf;
901 gen_op_iwmmxt_movq_M0_wRn(rd0);
902 switch ((insn >> 22) & 3) {
903 case 0:
904 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1);
905 break;
906 case 1:
907 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1);
908 break;
909 case 2:
910 gen_op_iwmmxt_cmpeql_M0_wRn(rd1);
911 break;
912 case 3:
913 return 1;
914 }
915 gen_op_iwmmxt_movq_wRn_M0(wrd);
916 gen_op_iwmmxt_set_mup();
917 gen_op_iwmmxt_set_cup();
918 break;
919 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
920 wrd = (insn >> 12) & 0xf;
921 rd0 = (insn >> 16) & 0xf;
922 rd1 = (insn >> 0) & 0xf;
923 gen_op_iwmmxt_movq_M0_wRn(rd0);
924 if (insn & (1 << 22))
925 gen_op_iwmmxt_avgw_M0_wRn(rd1, (insn >> 20) & 1);
926 else
927 gen_op_iwmmxt_avgb_M0_wRn(rd1, (insn >> 20) & 1);
928 gen_op_iwmmxt_movq_wRn_M0(wrd);
929 gen_op_iwmmxt_set_mup();
930 gen_op_iwmmxt_set_cup();
931 break;
932 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
933 wrd = (insn >> 12) & 0xf;
934 rd0 = (insn >> 16) & 0xf;
935 rd1 = (insn >> 0) & 0xf;
936 gen_op_iwmmxt_movq_M0_wRn(rd0);
937 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0 + ((insn >> 20) & 3));
938 gen_op_movl_T1_im(7);
939 gen_op_andl_T0_T1();
940 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
941 gen_op_iwmmxt_movq_wRn_M0(wrd);
942 gen_op_iwmmxt_set_mup();
943 break;
944 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
945 rd = (insn >> 12) & 0xf;
946 wrd = (insn >> 16) & 0xf;
947 gen_movl_T0_reg(s, rd);
948 gen_op_iwmmxt_movq_M0_wRn(wrd);
949 switch ((insn >> 6) & 3) {
950 case 0:
951 gen_op_movl_T1_im(0xff);
952 gen_op_iwmmxt_insr_M0_T0_T1((insn & 7) << 3);
953 break;
954 case 1:
955 gen_op_movl_T1_im(0xffff);
956 gen_op_iwmmxt_insr_M0_T0_T1((insn & 3) << 4);
957 break;
958 case 2:
959 gen_op_movl_T1_im(0xffffffff);
960 gen_op_iwmmxt_insr_M0_T0_T1((insn & 1) << 5);
961 break;
962 case 3:
963 return 1;
964 }
965 gen_op_iwmmxt_movq_wRn_M0(wrd);
966 gen_op_iwmmxt_set_mup();
967 break;
968 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
969 rd = (insn >> 12) & 0xf;
970 wrd = (insn >> 16) & 0xf;
971 if (rd == 15)
972 return 1;
973 gen_op_iwmmxt_movq_M0_wRn(wrd);
974 switch ((insn >> 22) & 3) {
975 case 0:
976 if (insn & 8)
977 gen_op_iwmmxt_extrsb_T0_M0((insn & 7) << 3);
978 else {
979 gen_op_movl_T1_im(0xff);
980 gen_op_iwmmxt_extru_T0_M0_T1((insn & 7) << 3);
981 }
982 break;
983 case 1:
984 if (insn & 8)
985 gen_op_iwmmxt_extrsw_T0_M0((insn & 3) << 4);
986 else {
987 gen_op_movl_T1_im(0xffff);
988 gen_op_iwmmxt_extru_T0_M0_T1((insn & 3) << 4);
989 }
990 break;
991 case 2:
992 gen_op_movl_T1_im(0xffffffff);
993 gen_op_iwmmxt_extru_T0_M0_T1((insn & 1) << 5);
994 break;
995 case 3:
996 return 1;
997 }
998 gen_op_movl_reg_TN[0][rd]();
999 break;
1000 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
1001 if ((insn & 0x000ff008) != 0x0003f000)
1002 return 1;
1003 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1004 switch ((insn >> 22) & 3) {
1005 case 0:
1006 gen_op_shrl_T1_im(((insn & 7) << 2) + 0);
1007 break;
1008 case 1:
1009 gen_op_shrl_T1_im(((insn & 3) << 3) + 4);
1010 break;
1011 case 2:
1012 gen_op_shrl_T1_im(((insn & 1) << 4) + 12);
1013 break;
1014 case 3:
1015 return 1;
1016 }
1017 gen_op_shll_T1_im(28);
1018 gen_op_movl_T0_T1();
1019 gen_op_movl_cpsr_T0(0xf0000000);
1020 break;
1021 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
1022 rd = (insn >> 12) & 0xf;
1023 wrd = (insn >> 16) & 0xf;
1024 gen_movl_T0_reg(s, rd);
1025 switch ((insn >> 6) & 3) {
1026 case 0:
1027 gen_op_iwmmxt_bcstb_M0_T0();
1028 break;
1029 case 1:
1030 gen_op_iwmmxt_bcstw_M0_T0();
1031 break;
1032 case 2:
1033 gen_op_iwmmxt_bcstl_M0_T0();
1034 break;
1035 case 3:
1036 return 1;
1037 }
1038 gen_op_iwmmxt_movq_wRn_M0(wrd);
1039 gen_op_iwmmxt_set_mup();
1040 break;
1041 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
1042 if ((insn & 0x000ff00f) != 0x0003f000)
1043 return 1;
1044 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1045 switch ((insn >> 22) & 3) {
1046 case 0:
1047 for (i = 0; i < 7; i ++) {
1048 gen_op_shll_T1_im(4);
1049 gen_op_andl_T0_T1();
1050 }
1051 break;
1052 case 1:
1053 for (i = 0; i < 3; i ++) {
1054 gen_op_shll_T1_im(8);
1055 gen_op_andl_T0_T1();
1056 }
1057 break;
1058 case 2:
1059 gen_op_shll_T1_im(16);
1060 gen_op_andl_T0_T1();
1061 break;
1062 case 3:
1063 return 1;
1064 }
1065 gen_op_movl_cpsr_T0(0xf0000000);
1066 break;
1067 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
1068 wrd = (insn >> 12) & 0xf;
1069 rd0 = (insn >> 16) & 0xf;
1070 gen_op_iwmmxt_movq_M0_wRn(rd0);
1071 switch ((insn >> 22) & 3) {
1072 case 0:
1073 gen_op_iwmmxt_addcb_M0();
1074 break;
1075 case 1:
1076 gen_op_iwmmxt_addcw_M0();
1077 break;
1078 case 2:
1079 gen_op_iwmmxt_addcl_M0();
1080 break;
1081 case 3:
1082 return 1;
1083 }
1084 gen_op_iwmmxt_movq_wRn_M0(wrd);
1085 gen_op_iwmmxt_set_mup();
1086 break;
1087 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
1088 if ((insn & 0x000ff00f) != 0x0003f000)
1089 return 1;
1090 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF);
1091 switch ((insn >> 22) & 3) {
1092 case 0:
1093 for (i = 0; i < 7; i ++) {
1094 gen_op_shll_T1_im(4);
1095 gen_op_orl_T0_T1();
1096 }
1097 break;
1098 case 1:
1099 for (i = 0; i < 3; i ++) {
1100 gen_op_shll_T1_im(8);
1101 gen_op_orl_T0_T1();
1102 }
1103 break;
1104 case 2:
1105 gen_op_shll_T1_im(16);
1106 gen_op_orl_T0_T1();
1107 break;
1108 case 3:
1109 return 1;
1110 }
1111 gen_op_movl_T1_im(0xf0000000);
1112 gen_op_andl_T0_T1();
1113 gen_op_movl_cpsr_T0(0xf0000000);
1114 break;
1115 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1116 rd = (insn >> 12) & 0xf;
1117 rd0 = (insn >> 16) & 0xf;
1118 if ((insn & 0xf) != 0)
1119 return 1;
1120 gen_op_iwmmxt_movq_M0_wRn(rd0);
1121 switch ((insn >> 22) & 3) {
1122 case 0:
1123 gen_op_iwmmxt_msbb_T0_M0();
1124 break;
1125 case 1:
1126 gen_op_iwmmxt_msbw_T0_M0();
1127 break;
1128 case 2:
1129 gen_op_iwmmxt_msbl_T0_M0();
1130 break;
1131 case 3:
1132 return 1;
1133 }
1134 gen_movl_reg_T0(s, rd);
1135 break;
1136 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1137 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1138 wrd = (insn >> 12) & 0xf;
1139 rd0 = (insn >> 16) & 0xf;
1140 rd1 = (insn >> 0) & 0xf;
1141 gen_op_iwmmxt_movq_M0_wRn(rd0);
1142 switch ((insn >> 22) & 3) {
1143 case 0:
1144 if (insn & (1 << 21))
1145 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1);
1146 else
1147 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1);
1148 break;
1149 case 1:
1150 if (insn & (1 << 21))
1151 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1);
1152 else
1153 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1);
1154 break;
1155 case 2:
1156 if (insn & (1 << 21))
1157 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1);
1158 else
1159 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1);
1160 break;
1161 case 3:
1162 return 1;
1163 }
1164 gen_op_iwmmxt_movq_wRn_M0(wrd);
1165 gen_op_iwmmxt_set_mup();
1166 gen_op_iwmmxt_set_cup();
1167 break;
1168 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1169 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1170 wrd = (insn >> 12) & 0xf;
1171 rd0 = (insn >> 16) & 0xf;
1172 gen_op_iwmmxt_movq_M0_wRn(rd0);
1173 switch ((insn >> 22) & 3) {
1174 case 0:
1175 if (insn & (1 << 21))
1176 gen_op_iwmmxt_unpacklsb_M0();
1177 else
1178 gen_op_iwmmxt_unpacklub_M0();
1179 break;
1180 case 1:
1181 if (insn & (1 << 21))
1182 gen_op_iwmmxt_unpacklsw_M0();
1183 else
1184 gen_op_iwmmxt_unpackluw_M0();
1185 break;
1186 case 2:
1187 if (insn & (1 << 21))
1188 gen_op_iwmmxt_unpacklsl_M0();
1189 else
1190 gen_op_iwmmxt_unpacklul_M0();
1191 break;
1192 case 3:
1193 return 1;
1194 }
1195 gen_op_iwmmxt_movq_wRn_M0(wrd);
1196 gen_op_iwmmxt_set_mup();
1197 gen_op_iwmmxt_set_cup();
1198 break;
1199 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1200 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1201 wrd = (insn >> 12) & 0xf;
1202 rd0 = (insn >> 16) & 0xf;
1203 gen_op_iwmmxt_movq_M0_wRn(rd0);
1204 switch ((insn >> 22) & 3) {
1205 case 0:
1206 if (insn & (1 << 21))
1207 gen_op_iwmmxt_unpackhsb_M0();
1208 else
1209 gen_op_iwmmxt_unpackhub_M0();
1210 break;
1211 case 1:
1212 if (insn & (1 << 21))
1213 gen_op_iwmmxt_unpackhsw_M0();
1214 else
1215 gen_op_iwmmxt_unpackhuw_M0();
1216 break;
1217 case 2:
1218 if (insn & (1 << 21))
1219 gen_op_iwmmxt_unpackhsl_M0();
1220 else
1221 gen_op_iwmmxt_unpackhul_M0();
1222 break;
1223 case 3:
1224 return 1;
1225 }
1226 gen_op_iwmmxt_movq_wRn_M0(wrd);
1227 gen_op_iwmmxt_set_mup();
1228 gen_op_iwmmxt_set_cup();
1229 break;
1230 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1231 case 0x214: case 0x614: case 0xa14: case 0xe14:
1232 wrd = (insn >> 12) & 0xf;
1233 rd0 = (insn >> 16) & 0xf;
1234 gen_op_iwmmxt_movq_M0_wRn(rd0);
1235 if (gen_iwmmxt_shift(insn, 0xff))
1236 return 1;
1237 switch ((insn >> 22) & 3) {
1238 case 0:
1239 return 1;
1240 case 1:
1241 gen_op_iwmmxt_srlw_M0_T0();
1242 break;
1243 case 2:
1244 gen_op_iwmmxt_srll_M0_T0();
1245 break;
1246 case 3:
1247 gen_op_iwmmxt_srlq_M0_T0();
1248 break;
1249 }
1250 gen_op_iwmmxt_movq_wRn_M0(wrd);
1251 gen_op_iwmmxt_set_mup();
1252 gen_op_iwmmxt_set_cup();
1253 break;
1254 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1255 case 0x014: case 0x414: case 0x814: case 0xc14:
1256 wrd = (insn >> 12) & 0xf;
1257 rd0 = (insn >> 16) & 0xf;
1258 gen_op_iwmmxt_movq_M0_wRn(rd0);
1259 if (gen_iwmmxt_shift(insn, 0xff))
1260 return 1;
1261 switch ((insn >> 22) & 3) {
1262 case 0:
1263 return 1;
1264 case 1:
1265 gen_op_iwmmxt_sraw_M0_T0();
1266 break;
1267 case 2:
1268 gen_op_iwmmxt_sral_M0_T0();
1269 break;
1270 case 3:
1271 gen_op_iwmmxt_sraq_M0_T0();
1272 break;
1273 }
1274 gen_op_iwmmxt_movq_wRn_M0(wrd);
1275 gen_op_iwmmxt_set_mup();
1276 gen_op_iwmmxt_set_cup();
1277 break;
1278 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1279 case 0x114: case 0x514: case 0x914: case 0xd14:
1280 wrd = (insn >> 12) & 0xf;
1281 rd0 = (insn >> 16) & 0xf;
1282 gen_op_iwmmxt_movq_M0_wRn(rd0);
1283 if (gen_iwmmxt_shift(insn, 0xff))
1284 return 1;
1285 switch ((insn >> 22) & 3) {
1286 case 0:
1287 return 1;
1288 case 1:
1289 gen_op_iwmmxt_sllw_M0_T0();
1290 break;
1291 case 2:
1292 gen_op_iwmmxt_slll_M0_T0();
1293 break;
1294 case 3:
1295 gen_op_iwmmxt_sllq_M0_T0();
1296 break;
1297 }
1298 gen_op_iwmmxt_movq_wRn_M0(wrd);
1299 gen_op_iwmmxt_set_mup();
1300 gen_op_iwmmxt_set_cup();
1301 break;
1302 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1303 case 0x314: case 0x714: case 0xb14: case 0xf14:
1304 wrd = (insn >> 12) & 0xf;
1305 rd0 = (insn >> 16) & 0xf;
1306 gen_op_iwmmxt_movq_M0_wRn(rd0);
1307 switch ((insn >> 22) & 3) {
1308 case 0:
1309 return 1;
1310 case 1:
1311 if (gen_iwmmxt_shift(insn, 0xf))
1312 return 1;
1313 gen_op_iwmmxt_rorw_M0_T0();
1314 break;
1315 case 2:
1316 if (gen_iwmmxt_shift(insn, 0x1f))
1317 return 1;
1318 gen_op_iwmmxt_rorl_M0_T0();
1319 break;
1320 case 3:
1321 if (gen_iwmmxt_shift(insn, 0x3f))
1322 return 1;
1323 gen_op_iwmmxt_rorq_M0_T0();
1324 break;
1325 }
1326 gen_op_iwmmxt_movq_wRn_M0(wrd);
1327 gen_op_iwmmxt_set_mup();
1328 gen_op_iwmmxt_set_cup();
1329 break;
1330 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1331 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1332 wrd = (insn >> 12) & 0xf;
1333 rd0 = (insn >> 16) & 0xf;
1334 rd1 = (insn >> 0) & 0xf;
1335 gen_op_iwmmxt_movq_M0_wRn(rd0);
1336 switch ((insn >> 22) & 3) {
1337 case 0:
1338 if (insn & (1 << 21))
1339 gen_op_iwmmxt_minsb_M0_wRn(rd1);
1340 else
1341 gen_op_iwmmxt_minub_M0_wRn(rd1);
1342 break;
1343 case 1:
1344 if (insn & (1 << 21))
1345 gen_op_iwmmxt_minsw_M0_wRn(rd1);
1346 else
1347 gen_op_iwmmxt_minuw_M0_wRn(rd1);
1348 break;
1349 case 2:
1350 if (insn & (1 << 21))
1351 gen_op_iwmmxt_minsl_M0_wRn(rd1);
1352 else
1353 gen_op_iwmmxt_minul_M0_wRn(rd1);
1354 break;
1355 case 3:
1356 return 1;
1357 }
1358 gen_op_iwmmxt_movq_wRn_M0(wrd);
1359 gen_op_iwmmxt_set_mup();
1360 break;
1361 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1362 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1363 wrd = (insn >> 12) & 0xf;
1364 rd0 = (insn >> 16) & 0xf;
1365 rd1 = (insn >> 0) & 0xf;
1366 gen_op_iwmmxt_movq_M0_wRn(rd0);
1367 switch ((insn >> 22) & 3) {
1368 case 0:
1369 if (insn & (1 << 21))
1370 gen_op_iwmmxt_maxsb_M0_wRn(rd1);
1371 else
1372 gen_op_iwmmxt_maxub_M0_wRn(rd1);
1373 break;
1374 case 1:
1375 if (insn & (1 << 21))
1376 gen_op_iwmmxt_maxsw_M0_wRn(rd1);
1377 else
1378 gen_op_iwmmxt_maxuw_M0_wRn(rd1);
1379 break;
1380 case 2:
1381 if (insn & (1 << 21))
1382 gen_op_iwmmxt_maxsl_M0_wRn(rd1);
1383 else
1384 gen_op_iwmmxt_maxul_M0_wRn(rd1);
1385 break;
1386 case 3:
1387 return 1;
1388 }
1389 gen_op_iwmmxt_movq_wRn_M0(wrd);
1390 gen_op_iwmmxt_set_mup();
1391 break;
1392 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1393 case 0x402: case 0x502: case 0x602: case 0x702:
1394 wrd = (insn >> 12) & 0xf;
1395 rd0 = (insn >> 16) & 0xf;
1396 rd1 = (insn >> 0) & 0xf;
1397 gen_op_iwmmxt_movq_M0_wRn(rd0);
1398 gen_op_movl_T0_im((insn >> 20) & 3);
1399 gen_op_iwmmxt_align_M0_T0_wRn(rd1);
1400 gen_op_iwmmxt_movq_wRn_M0(wrd);
1401 gen_op_iwmmxt_set_mup();
1402 break;
1403 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1404 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1405 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1406 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1407 wrd = (insn >> 12) & 0xf;
1408 rd0 = (insn >> 16) & 0xf;
1409 rd1 = (insn >> 0) & 0xf;
1410 gen_op_iwmmxt_movq_M0_wRn(rd0);
1411 switch ((insn >> 20) & 0xf) {
1412 case 0x0:
1413 gen_op_iwmmxt_subnb_M0_wRn(rd1);
1414 break;
1415 case 0x1:
1416 gen_op_iwmmxt_subub_M0_wRn(rd1);
1417 break;
1418 case 0x3:
1419 gen_op_iwmmxt_subsb_M0_wRn(rd1);
1420 break;
1421 case 0x4:
1422 gen_op_iwmmxt_subnw_M0_wRn(rd1);
1423 break;
1424 case 0x5:
1425 gen_op_iwmmxt_subuw_M0_wRn(rd1);
1426 break;
1427 case 0x7:
1428 gen_op_iwmmxt_subsw_M0_wRn(rd1);
1429 break;
1430 case 0x8:
1431 gen_op_iwmmxt_subnl_M0_wRn(rd1);
1432 break;
1433 case 0x9:
1434 gen_op_iwmmxt_subul_M0_wRn(rd1);
1435 break;
1436 case 0xb:
1437 gen_op_iwmmxt_subsl_M0_wRn(rd1);
1438 break;
1439 default:
1440 return 1;
1441 }
1442 gen_op_iwmmxt_movq_wRn_M0(wrd);
1443 gen_op_iwmmxt_set_mup();
1444 gen_op_iwmmxt_set_cup();
1445 break;
1446 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1447 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1448 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1449 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1450 wrd = (insn >> 12) & 0xf;
1451 rd0 = (insn >> 16) & 0xf;
1452 gen_op_iwmmxt_movq_M0_wRn(rd0);
1453 gen_op_movl_T0_im(((insn >> 16) & 0xf0) | (insn & 0x0f));
1454 gen_op_iwmmxt_shufh_M0_T0();
1455 gen_op_iwmmxt_movq_wRn_M0(wrd);
1456 gen_op_iwmmxt_set_mup();
1457 gen_op_iwmmxt_set_cup();
1458 break;
1459 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1460 case 0x418: case 0x518: case 0x618: case 0x718:
1461 case 0x818: case 0x918: case 0xa18: case 0xb18:
1462 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1463 wrd = (insn >> 12) & 0xf;
1464 rd0 = (insn >> 16) & 0xf;
1465 rd1 = (insn >> 0) & 0xf;
1466 gen_op_iwmmxt_movq_M0_wRn(rd0);
1467 switch ((insn >> 20) & 0xf) {
1468 case 0x0:
1469 gen_op_iwmmxt_addnb_M0_wRn(rd1);
1470 break;
1471 case 0x1:
1472 gen_op_iwmmxt_addub_M0_wRn(rd1);
1473 break;
1474 case 0x3:
1475 gen_op_iwmmxt_addsb_M0_wRn(rd1);
1476 break;
1477 case 0x4:
1478 gen_op_iwmmxt_addnw_M0_wRn(rd1);
1479 break;
1480 case 0x5:
1481 gen_op_iwmmxt_adduw_M0_wRn(rd1);
1482 break;
1483 case 0x7:
1484 gen_op_iwmmxt_addsw_M0_wRn(rd1);
1485 break;
1486 case 0x8:
1487 gen_op_iwmmxt_addnl_M0_wRn(rd1);
1488 break;
1489 case 0x9:
1490 gen_op_iwmmxt_addul_M0_wRn(rd1);
1491 break;
1492 case 0xb:
1493 gen_op_iwmmxt_addsl_M0_wRn(rd1);
1494 break;
1495 default:
1496 return 1;
1497 }
1498 gen_op_iwmmxt_movq_wRn_M0(wrd);
1499 gen_op_iwmmxt_set_mup();
1500 gen_op_iwmmxt_set_cup();
1501 break;
1502 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1503 case 0x408: case 0x508: case 0x608: case 0x708:
1504 case 0x808: case 0x908: case 0xa08: case 0xb08:
1505 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1506 wrd = (insn >> 12) & 0xf;
1507 rd0 = (insn >> 16) & 0xf;
1508 rd1 = (insn >> 0) & 0xf;
1509 gen_op_iwmmxt_movq_M0_wRn(rd0);
1510 if (!(insn & (1 << 20)))
1511 return 1;
1512 switch ((insn >> 22) & 3) {
1513 case 0:
1514 return 1;
1515 case 1:
1516 if (insn & (1 << 21))
1517 gen_op_iwmmxt_packsw_M0_wRn(rd1);
1518 else
1519 gen_op_iwmmxt_packuw_M0_wRn(rd1);
1520 break;
1521 case 2:
1522 if (insn & (1 << 21))
1523 gen_op_iwmmxt_packsl_M0_wRn(rd1);
1524 else
1525 gen_op_iwmmxt_packul_M0_wRn(rd1);
1526 break;
1527 case 3:
1528 if (insn & (1 << 21))
1529 gen_op_iwmmxt_packsq_M0_wRn(rd1);
1530 else
1531 gen_op_iwmmxt_packuq_M0_wRn(rd1);
1532 break;
1533 }
1534 gen_op_iwmmxt_movq_wRn_M0(wrd);
1535 gen_op_iwmmxt_set_mup();
1536 gen_op_iwmmxt_set_cup();
1537 break;
1538 case 0x201: case 0x203: case 0x205: case 0x207:
1539 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1540 case 0x211: case 0x213: case 0x215: case 0x217:
1541 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1542 wrd = (insn >> 5) & 0xf;
1543 rd0 = (insn >> 12) & 0xf;
1544 rd1 = (insn >> 0) & 0xf;
1545 if (rd0 == 0xf || rd1 == 0xf)
1546 return 1;
1547 gen_op_iwmmxt_movq_M0_wRn(wrd);
1548 switch ((insn >> 16) & 0xf) {
1549 case 0x0: /* TMIA */
1550 gen_op_movl_TN_reg[0][rd0]();
1551 gen_op_movl_TN_reg[1][rd1]();
1552 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1553 break;
1554 case 0x8: /* TMIAPH */
1555 gen_op_movl_TN_reg[0][rd0]();
1556 gen_op_movl_TN_reg[1][rd1]();
1557 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1558 break;
1559 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
1560 gen_op_movl_TN_reg[1][rd0]();
1561 if (insn & (1 << 16))
1562 gen_op_shrl_T1_im(16);
1563 gen_op_movl_T0_T1();
1564 gen_op_movl_TN_reg[1][rd1]();
1565 if (insn & (1 << 17))
1566 gen_op_shrl_T1_im(16);
1567 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1568 break;
1569 default:
1570 return 1;
1571 }
1572 gen_op_iwmmxt_movq_wRn_M0(wrd);
1573 gen_op_iwmmxt_set_mup();
1574 break;
1575 default:
1576 return 1;
1577 }
1578
1579 return 0;
1580}
1581
1582/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
1583 (ie. an undefined instruction). */
1584static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1585{
1586 int acc, rd0, rd1, rdhi, rdlo;
1587
1588 if ((insn & 0x0ff00f10) == 0x0e200010) {
1589 /* Multiply with Internal Accumulate Format */
1590 rd0 = (insn >> 12) & 0xf;
1591 rd1 = insn & 0xf;
1592 acc = (insn >> 5) & 7;
1593
1594 if (acc != 0)
1595 return 1;
1596
1597 switch ((insn >> 16) & 0xf) {
1598 case 0x0: /* MIA */
1599 gen_op_movl_TN_reg[0][rd0]();
1600 gen_op_movl_TN_reg[1][rd1]();
1601 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1602 break;
1603 case 0x8: /* MIAPH */
1604 gen_op_movl_TN_reg[0][rd0]();
1605 gen_op_movl_TN_reg[1][rd1]();
1606 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1607 break;
1608 case 0xc: /* MIABB */
1609 case 0xd: /* MIABT */
1610 case 0xe: /* MIATB */
1611 case 0xf: /* MIATT */
1612 gen_op_movl_TN_reg[1][rd0]();
1613 if (insn & (1 << 16))
1614 gen_op_shrl_T1_im(16);
1615 gen_op_movl_T0_T1();
1616 gen_op_movl_TN_reg[1][rd1]();
1617 if (insn & (1 << 17))
1618 gen_op_shrl_T1_im(16);
1619 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1620 break;
1621 default:
1622 return 1;
1623 }
1624
1625 gen_op_iwmmxt_movq_wRn_M0(acc);
1626 return 0;
1627 }
1628
1629 if ((insn & 0x0fe00ff8) == 0x0c400000) {
1630 /* Internal Accumulator Access Format */
1631 rdhi = (insn >> 16) & 0xf;
1632 rdlo = (insn >> 12) & 0xf;
1633 acc = insn & 7;
1634
1635 if (acc != 0)
1636 return 1;
1637
1638 if (insn & ARM_CP_RW_BIT) { /* MRA */
1639 gen_op_iwmmxt_movl_T0_T1_wRn(acc);
1640 gen_op_movl_reg_TN[0][rdlo]();
1641 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1642 gen_op_andl_T0_T1();
1643 gen_op_movl_reg_TN[0][rdhi]();
1644 } else { /* MAR */
1645 gen_op_movl_TN_reg[0][rdlo]();
1646 gen_op_movl_TN_reg[1][rdhi]();
1647 gen_op_iwmmxt_movl_wRn_T0_T1(acc);
1648 }
1649 return 0;
1650 }
1651
1652 return 1;
1653}
1654
c1713132
AZ
1655/* Disassemble system coprocessor instruction. Return nonzero if
1656 instruction is not defined. */
1657static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
1658{
1659 uint32_t rd = (insn >> 12) & 0xf;
1660 uint32_t cp = (insn >> 8) & 0xf;
1661 if (IS_USER(s)) {
1662 return 1;
1663 }
1664
18c9b560 1665 if (insn & ARM_CP_RW_BIT) {
c1713132
AZ
1666 if (!env->cp[cp].cp_read)
1667 return 1;
1668 gen_op_movl_T0_im((uint32_t) s->pc);
1669 gen_op_movl_reg_TN[0][15]();
1670 gen_op_movl_T0_cp(insn);
1671 gen_movl_reg_T0(s, rd);
1672 } else {
1673 if (!env->cp[cp].cp_write)
1674 return 1;
1675 gen_op_movl_T0_im((uint32_t) s->pc);
1676 gen_op_movl_reg_TN[0][15]();
1677 gen_movl_T0_reg(s, rd);
1678 gen_op_movl_cp_T0(insn);
1679 }
1680 return 0;
1681}
1682
9ee6e8bb
PB
1683static int cp15_user_ok(uint32_t insn)
1684{
1685 int cpn = (insn >> 16) & 0xf;
1686 int cpm = insn & 0xf;
1687 int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
1688
1689 if (cpn == 13 && cpm == 0) {
1690 /* TLS register. */
1691 if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
1692 return 1;
1693 }
1694 if (cpn == 7) {
1695 /* ISB, DSB, DMB. */
1696 if ((cpm == 5 && op == 4)
1697 || (cpm == 10 && (op == 4 || op == 5)))
1698 return 1;
1699 }
1700 return 0;
1701}
1702
b5ff1b31
FB
1703/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
1704 instruction is not defined. */
a90b7318 1705static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
b5ff1b31
FB
1706{
1707 uint32_t rd;
1708
9ee6e8bb
PB
1709 /* M profile cores use memory mapped registers instead of cp15. */
1710 if (arm_feature(env, ARM_FEATURE_M))
1711 return 1;
1712
1713 if ((insn & (1 << 25)) == 0) {
1714 if (insn & (1 << 20)) {
1715 /* mrrc */
1716 return 1;
1717 }
1718 /* mcrr. Used for block cache operations, so implement as no-op. */
1719 return 0;
1720 }
1721 if ((insn & (1 << 4)) == 0) {
1722 /* cdp */
1723 return 1;
1724 }
1725 if (IS_USER(s) && !cp15_user_ok(insn)) {
b5ff1b31
FB
1726 return 1;
1727 }
9332f9da
FB
1728 if ((insn & 0x0fff0fff) == 0x0e070f90
1729 || (insn & 0x0fff0fff) == 0x0e070f58) {
1730 /* Wait for interrupt. */
1731 gen_op_movl_T0_im((long)s->pc);
1732 gen_op_movl_reg_TN[0][15]();
9ee6e8bb 1733 s->is_jmp = DISAS_WFI;
9332f9da
FB
1734 return 0;
1735 }
b5ff1b31 1736 rd = (insn >> 12) & 0xf;
18c9b560 1737 if (insn & ARM_CP_RW_BIT) {
b5ff1b31
FB
1738 gen_op_movl_T0_cp15(insn);
1739 /* If the destination register is r15 then sets condition codes. */
1740 if (rd != 15)
1741 gen_movl_reg_T0(s, rd);
1742 } else {
1743 gen_movl_T0_reg(s, rd);
1744 gen_op_movl_cp15_T0(insn);
a90b7318
AZ
1745 /* Normally we would always end the TB here, but Linux
1746 * arch/arm/mach-pxa/sleep.S expects two instructions following
1747 * an MMU enable to execute from cache. Imitate this behaviour. */
1748 if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
1749 (insn & 0x0fff0fff) != 0x0e010f10)
1750 gen_lookup_tb(s);
b5ff1b31 1751 }
b5ff1b31
FB
1752 return 0;
1753}
1754
9ee6e8bb
PB
/* Helpers to extract VFP register numbers from an instruction word.
 * Single-precision registers are 5 bits: a 4-bit field plus one extra
 * bit; double-precision registers are 4 bits, with the extra bit only
 * valid on VFP3.
 *
 * All macro arguments are now fully parenthesized (the previous
 * definitions used 'insn' and 'bigbit' unparenthesized, which would
 * misbehave for compound-expression arguments).
 */
/* Shift right by n; a negative n means shift left by -n. */
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
/* 5-bit single-precision register number: 4-bit field at bit 'bigbit',
   least-significant bit at bit 'smallbit'. */
#define VFP_SREG(insn, bigbit, smallbit) \
  ((VFP_REG_SHR((insn), (bigbit) - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
/* Double-precision register number.  Pre-VFP3 cores have only 16 D
   registers, so a set 'smallbit' makes the instruction undefined and
   the macro returns 1 from the enclosing function.  Relies on 'env'
   being in scope at the expansion site. */
#define VFP_DREG(reg, insn, bigbit, smallbit) do { \
    if (arm_feature(env, ARM_FEATURE_VFP3)) { \
        reg = (((insn) >> (bigbit)) & 0x0f) \
              | (((insn) >> ((smallbit) - 4)) & 0x10); \
    } else { \
        if ((insn) & (1 << (smallbit))) \
            return 1; \
        reg = ((insn) >> (bigbit)) & 0x0f; \
    }} while (0)

#define VFP_SREG_D(insn) VFP_SREG(insn, 12, 22)
#define VFP_DREG_D(reg, insn) VFP_DREG(reg, insn, 12, 22)
#define VFP_SREG_N(insn) VFP_SREG(insn, 16,  7)
#define VFP_DREG_N(reg, insn) VFP_DREG(reg, insn, 16, 7)
#define VFP_SREG_M(insn) VFP_SREG(insn,  0,  5)
#define VFP_DREG_M(reg, insn) VFP_DREG(reg, insn, 0, 5)
1774
1775static inline int
1776vfp_enabled(CPUState * env)
1777{
1778 return ((env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)) != 0);
1779}
1780
b7bcbe95
FB
1781/* Disassemble a VFP instruction. Returns nonzero if an error occured
1782 (ie. an undefined instruction). */
1783static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
1784{
1785 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
1786 int dp, veclen;
1787
40f137e1
PB
1788 if (!arm_feature(env, ARM_FEATURE_VFP))
1789 return 1;
1790
9ee6e8bb
PB
1791 if (!vfp_enabled(env)) {
1792 /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
40f137e1
PB
1793 if ((insn & 0x0fe00fff) != 0x0ee00a10)
1794 return 1;
1795 rn = (insn >> 16) & 0xf;
9ee6e8bb
PB
1796 if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC
1797 && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0)
40f137e1
PB
1798 return 1;
1799 }
b7bcbe95
FB
1800 dp = ((insn & 0xf00) == 0xb00);
1801 switch ((insn >> 24) & 0xf) {
1802 case 0xe:
1803 if (insn & (1 << 4)) {
1804 /* single register transfer */
b7bcbe95
FB
1805 rd = (insn >> 12) & 0xf;
1806 if (dp) {
9ee6e8bb
PB
1807 int size;
1808 int pass;
1809
1810 VFP_DREG_N(rn, insn);
1811 if (insn & 0xf)
b7bcbe95 1812 return 1;
9ee6e8bb
PB
1813 if (insn & 0x00c00060
1814 && !arm_feature(env, ARM_FEATURE_NEON))
1815 return 1;
1816
1817 pass = (insn >> 21) & 1;
1818 if (insn & (1 << 22)) {
1819 size = 0;
1820 offset = ((insn >> 5) & 3) * 8;
1821 } else if (insn & (1 << 5)) {
1822 size = 1;
1823 offset = (insn & (1 << 6)) ? 16 : 0;
1824 } else {
1825 size = 2;
1826 offset = 0;
1827 }
18c9b560 1828 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 1829 /* vfp->arm */
9ee6e8bb
PB
1830 switch (size) {
1831 case 0:
1832 NEON_GET_REG(T1, rn, pass);
1833 if (offset)
1834 gen_op_shrl_T1_im(offset);
1835 if (insn & (1 << 23))
1836 gen_op_uxtb_T1();
1837 else
1838 gen_op_sxtb_T1();
1839 break;
1840 case 1:
1841 NEON_GET_REG(T1, rn, pass);
1842 if (insn & (1 << 23)) {
1843 if (offset) {
1844 gen_op_shrl_T1_im(16);
1845 } else {
1846 gen_op_uxth_T1();
1847 }
1848 } else {
1849 if (offset) {
1850 gen_op_sarl_T1_im(16);
1851 } else {
1852 gen_op_sxth_T1();
1853 }
1854 }
1855 break;
1856 case 2:
1857 NEON_GET_REG(T1, rn, pass);
1858 break;
1859 }
1860 gen_movl_reg_T1(s, rd);
b7bcbe95
FB
1861 } else {
1862 /* arm->vfp */
9ee6e8bb
PB
1863 gen_movl_T0_reg(s, rd);
1864 if (insn & (1 << 23)) {
1865 /* VDUP */
1866 if (size == 0) {
1867 gen_op_neon_dup_u8(0);
1868 } else if (size == 1) {
1869 gen_op_neon_dup_low16();
1870 }
1871 NEON_SET_REG(T0, rn, 0);
1872 NEON_SET_REG(T0, rn, 1);
1873 } else {
1874 /* VMOV */
1875 switch (size) {
1876 case 0:
1877 NEON_GET_REG(T2, rn, pass);
1878 gen_op_movl_T1_im(0xff);
1879 gen_op_andl_T0_T1();
1880 gen_op_neon_insert_elt(offset, ~(0xff << offset));
1881 NEON_SET_REG(T2, rn, pass);
1882 break;
1883 case 1:
1884 NEON_GET_REG(T2, rn, pass);
1885 gen_op_movl_T1_im(0xffff);
1886 gen_op_andl_T0_T1();
1887 bank_mask = offset ? 0xffff : 0xffff0000;
1888 gen_op_neon_insert_elt(offset, bank_mask);
1889 NEON_SET_REG(T2, rn, pass);
1890 break;
1891 case 2:
1892 NEON_SET_REG(T0, rn, pass);
1893 break;
1894 }
1895 }
b7bcbe95 1896 }
9ee6e8bb
PB
1897 } else { /* !dp */
1898 if ((insn & 0x6f) != 0x00)
1899 return 1;
1900 rn = VFP_SREG_N(insn);
18c9b560 1901 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
1902 /* vfp->arm */
1903 if (insn & (1 << 21)) {
1904 /* system register */
40f137e1 1905 rn >>= 1;
9ee6e8bb 1906
b7bcbe95 1907 switch (rn) {
40f137e1 1908 case ARM_VFP_FPSID:
9ee6e8bb
PB
1909 /* VFP2 allows access for FSID from userspace.
1910 VFP3 restricts all id registers to privileged
1911 accesses. */
1912 if (IS_USER(s)
1913 && arm_feature(env, ARM_FEATURE_VFP3))
1914 return 1;
1915 gen_op_vfp_movl_T0_xreg(rn);
1916 break;
40f137e1 1917 case ARM_VFP_FPEXC:
9ee6e8bb
PB
1918 if (IS_USER(s))
1919 return 1;
1920 gen_op_vfp_movl_T0_xreg(rn);
1921 break;
40f137e1
PB
1922 case ARM_VFP_FPINST:
1923 case ARM_VFP_FPINST2:
9ee6e8bb
PB
1924 /* Not present in VFP3. */
1925 if (IS_USER(s)
1926 || arm_feature(env, ARM_FEATURE_VFP3))
1927 return 1;
40f137e1 1928 gen_op_vfp_movl_T0_xreg(rn);
b7bcbe95 1929 break;
40f137e1 1930 case ARM_VFP_FPSCR:
b7bcbe95
FB
1931 if (rd == 15)
1932 gen_op_vfp_movl_T0_fpscr_flags();
1933 else
1934 gen_op_vfp_movl_T0_fpscr();
1935 break;
9ee6e8bb
PB
1936 case ARM_VFP_MVFR0:
1937 case ARM_VFP_MVFR1:
1938 if (IS_USER(s)
1939 || !arm_feature(env, ARM_FEATURE_VFP3))
1940 return 1;
1941 gen_op_vfp_movl_T0_xreg(rn);
1942 break;
b7bcbe95
FB
1943 default:
1944 return 1;
1945 }
1946 } else {
1947 gen_mov_F0_vreg(0, rn);
1948 gen_op_vfp_mrs();
1949 }
1950 if (rd == 15) {
b5ff1b31
FB
1951 /* Set the 4 flag bits in the CPSR. */
1952 gen_op_movl_cpsr_T0(0xf0000000);
b7bcbe95
FB
1953 } else
1954 gen_movl_reg_T0(s, rd);
1955 } else {
1956 /* arm->vfp */
1957 gen_movl_T0_reg(s, rd);
1958 if (insn & (1 << 21)) {
40f137e1 1959 rn >>= 1;
b7bcbe95
FB
1960 /* system register */
1961 switch (rn) {
40f137e1 1962 case ARM_VFP_FPSID:
9ee6e8bb
PB
1963 case ARM_VFP_MVFR0:
1964 case ARM_VFP_MVFR1:
b7bcbe95
FB
1965 /* Writes are ignored. */
1966 break;
40f137e1 1967 case ARM_VFP_FPSCR:
b7bcbe95 1968 gen_op_vfp_movl_fpscr_T0();
b5ff1b31 1969 gen_lookup_tb(s);
b7bcbe95 1970 break;
40f137e1 1971 case ARM_VFP_FPEXC:
9ee6e8bb
PB
1972 if (IS_USER(s))
1973 return 1;
40f137e1
PB
1974 gen_op_vfp_movl_xreg_T0(rn);
1975 gen_lookup_tb(s);
1976 break;
1977 case ARM_VFP_FPINST:
1978 case ARM_VFP_FPINST2:
1979 gen_op_vfp_movl_xreg_T0(rn);
1980 break;
b7bcbe95
FB
1981 default:
1982 return 1;
1983 }
1984 } else {
1985 gen_op_vfp_msr();
1986 gen_mov_vreg_F0(0, rn);
1987 }
1988 }
1989 }
1990 } else {
1991 /* data processing */
1992 /* The opcode is in bits 23, 21, 20 and 6. */
1993 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
1994 if (dp) {
1995 if (op == 15) {
1996 /* rn is opcode */
1997 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
1998 } else {
1999 /* rn is register number */
9ee6e8bb 2000 VFP_DREG_N(rn, insn);
b7bcbe95
FB
2001 }
2002
2003 if (op == 15 && (rn == 15 || rn > 17)) {
2004 /* Integer or single precision destination. */
9ee6e8bb 2005 rd = VFP_SREG_D(insn);
b7bcbe95 2006 } else {
9ee6e8bb 2007 VFP_DREG_D(rd, insn);
b7bcbe95
FB
2008 }
2009
2010 if (op == 15 && (rn == 16 || rn == 17)) {
2011 /* Integer source. */
2012 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
2013 } else {
9ee6e8bb 2014 VFP_DREG_M(rm, insn);
b7bcbe95
FB
2015 }
2016 } else {
9ee6e8bb 2017 rn = VFP_SREG_N(insn);
b7bcbe95
FB
2018 if (op == 15 && rn == 15) {
2019 /* Double precision destination. */
9ee6e8bb
PB
2020 VFP_DREG_D(rd, insn);
2021 } else {
2022 rd = VFP_SREG_D(insn);
2023 }
2024 rm = VFP_SREG_M(insn);
b7bcbe95
FB
2025 }
2026
2027 veclen = env->vfp.vec_len;
2028 if (op == 15 && rn > 3)
2029 veclen = 0;
2030
2031 /* Shut up compiler warnings. */
2032 delta_m = 0;
2033 delta_d = 0;
2034 bank_mask = 0;
3b46e624 2035
b7bcbe95
FB
2036 if (veclen > 0) {
2037 if (dp)
2038 bank_mask = 0xc;
2039 else
2040 bank_mask = 0x18;
2041
2042 /* Figure out what type of vector operation this is. */
2043 if ((rd & bank_mask) == 0) {
2044 /* scalar */
2045 veclen = 0;
2046 } else {
2047 if (dp)
2048 delta_d = (env->vfp.vec_stride >> 1) + 1;
2049 else
2050 delta_d = env->vfp.vec_stride + 1;
2051
2052 if ((rm & bank_mask) == 0) {
2053 /* mixed scalar/vector */
2054 delta_m = 0;
2055 } else {
2056 /* vector */
2057 delta_m = delta_d;
2058 }
2059 }
2060 }
2061
2062 /* Load the initial operands. */
2063 if (op == 15) {
2064 switch (rn) {
2065 case 16:
2066 case 17:
2067 /* Integer source */
2068 gen_mov_F0_vreg(0, rm);
2069 break;
2070 case 8:
2071 case 9:
2072 /* Compare */
2073 gen_mov_F0_vreg(dp, rd);
2074 gen_mov_F1_vreg(dp, rm);
2075 break;
2076 case 10:
2077 case 11:
2078 /* Compare with zero */
2079 gen_mov_F0_vreg(dp, rd);
2080 gen_vfp_F1_ld0(dp);
2081 break;
9ee6e8bb
PB
2082 case 20:
2083 case 21:
2084 case 22:
2085 case 23:
2086 /* Source and destination the same. */
2087 gen_mov_F0_vreg(dp, rd);
2088 break;
b7bcbe95
FB
2089 default:
2090 /* One source operand. */
2091 gen_mov_F0_vreg(dp, rm);
9ee6e8bb 2092 break;
b7bcbe95
FB
2093 }
2094 } else {
2095 /* Two source operands. */
2096 gen_mov_F0_vreg(dp, rn);
2097 gen_mov_F1_vreg(dp, rm);
2098 }
2099
2100 for (;;) {
2101 /* Perform the calculation. */
2102 switch (op) {
2103 case 0: /* mac: fd + (fn * fm) */
2104 gen_vfp_mul(dp);
2105 gen_mov_F1_vreg(dp, rd);
2106 gen_vfp_add(dp);
2107 break;
2108 case 1: /* nmac: fd - (fn * fm) */
2109 gen_vfp_mul(dp);
2110 gen_vfp_neg(dp);
2111 gen_mov_F1_vreg(dp, rd);
2112 gen_vfp_add(dp);
2113 break;
2114 case 2: /* msc: -fd + (fn * fm) */
2115 gen_vfp_mul(dp);
2116 gen_mov_F1_vreg(dp, rd);
2117 gen_vfp_sub(dp);
2118 break;
2119 case 3: /* nmsc: -fd - (fn * fm) */
2120 gen_vfp_mul(dp);
2121 gen_mov_F1_vreg(dp, rd);
2122 gen_vfp_add(dp);
2123 gen_vfp_neg(dp);
2124 break;
2125 case 4: /* mul: fn * fm */
2126 gen_vfp_mul(dp);
2127 break;
2128 case 5: /* nmul: -(fn * fm) */
2129 gen_vfp_mul(dp);
2130 gen_vfp_neg(dp);
2131 break;
2132 case 6: /* add: fn + fm */
2133 gen_vfp_add(dp);
2134 break;
2135 case 7: /* sub: fn - fm */
2136 gen_vfp_sub(dp);
2137 break;
2138 case 8: /* div: fn / fm */
2139 gen_vfp_div(dp);
2140 break;
9ee6e8bb
PB
2141 case 14: /* fconst */
2142 if (!arm_feature(env, ARM_FEATURE_VFP3))
2143 return 1;
2144
2145 n = (insn << 12) & 0x80000000;
2146 i = ((insn >> 12) & 0x70) | (insn & 0xf);
2147 if (dp) {
2148 if (i & 0x40)
2149 i |= 0x3f80;
2150 else
2151 i |= 0x4000;
2152 n |= i << 16;
2153 } else {
2154 if (i & 0x40)
2155 i |= 0x780;
2156 else
2157 i |= 0x800;
2158 n |= i << 19;
2159 }
2160 gen_vfp_fconst(dp, n);
2161 break;
b7bcbe95
FB
2162 case 15: /* extension space */
2163 switch (rn) {
2164 case 0: /* cpy */
2165 /* no-op */
2166 break;
2167 case 1: /* abs */
2168 gen_vfp_abs(dp);
2169 break;
2170 case 2: /* neg */
2171 gen_vfp_neg(dp);
2172 break;
2173 case 3: /* sqrt */
2174 gen_vfp_sqrt(dp);
2175 break;
2176 case 8: /* cmp */
2177 gen_vfp_cmp(dp);
2178 break;
2179 case 9: /* cmpe */
2180 gen_vfp_cmpe(dp);
2181 break;
2182 case 10: /* cmpz */
2183 gen_vfp_cmp(dp);
2184 break;
2185 case 11: /* cmpez */
2186 gen_vfp_F1_ld0(dp);
2187 gen_vfp_cmpe(dp);
2188 break;
2189 case 15: /* single<->double conversion */
2190 if (dp)
2191 gen_op_vfp_fcvtsd();
2192 else
2193 gen_op_vfp_fcvtds();
2194 break;
2195 case 16: /* fuito */
2196 gen_vfp_uito(dp);
2197 break;
2198 case 17: /* fsito */
2199 gen_vfp_sito(dp);
2200 break;
9ee6e8bb
PB
2201 case 20: /* fshto */
2202 if (!arm_feature(env, ARM_FEATURE_VFP3))
2203 return 1;
2204 gen_vfp_shto(dp, rm);
2205 break;
2206 case 21: /* fslto */
2207 if (!arm_feature(env, ARM_FEATURE_VFP3))
2208 return 1;
2209 gen_vfp_slto(dp, rm);
2210 break;
2211 case 22: /* fuhto */
2212 if (!arm_feature(env, ARM_FEATURE_VFP3))
2213 return 1;
2214 gen_vfp_uhto(dp, rm);
2215 break;
2216 case 23: /* fulto */
2217 if (!arm_feature(env, ARM_FEATURE_VFP3))
2218 return 1;
2219 gen_vfp_ulto(dp, rm);
2220 break;
b7bcbe95
FB
2221 case 24: /* ftoui */
2222 gen_vfp_toui(dp);
2223 break;
2224 case 25: /* ftouiz */
2225 gen_vfp_touiz(dp);
2226 break;
2227 case 26: /* ftosi */
2228 gen_vfp_tosi(dp);
2229 break;
2230 case 27: /* ftosiz */
2231 gen_vfp_tosiz(dp);
2232 break;
9ee6e8bb
PB
2233 case 28: /* ftosh */
2234 if (!arm_feature(env, ARM_FEATURE_VFP3))
2235 return 1;
2236 gen_vfp_tosh(dp, rm);
2237 break;
2238 case 29: /* ftosl */
2239 if (!arm_feature(env, ARM_FEATURE_VFP3))
2240 return 1;
2241 gen_vfp_tosl(dp, rm);
2242 break;
2243 case 30: /* ftouh */
2244 if (!arm_feature(env, ARM_FEATURE_VFP3))
2245 return 1;
2246 gen_vfp_touh(dp, rm);
2247 break;
2248 case 31: /* ftoul */
2249 if (!arm_feature(env, ARM_FEATURE_VFP3))
2250 return 1;
2251 gen_vfp_toul(dp, rm);
2252 break;
b7bcbe95
FB
2253 default: /* undefined */
2254 printf ("rn:%d\n", rn);
2255 return 1;
2256 }
2257 break;
2258 default: /* undefined */
2259 printf ("op:%d\n", op);
2260 return 1;
2261 }
2262
2263 /* Write back the result. */
2264 if (op == 15 && (rn >= 8 && rn <= 11))
2265 ; /* Comparison, do nothing. */
2266 else if (op == 15 && rn > 17)
2267 /* Integer result. */
2268 gen_mov_vreg_F0(0, rd);
2269 else if (op == 15 && rn == 15)
2270 /* conversion */
2271 gen_mov_vreg_F0(!dp, rd);
2272 else
2273 gen_mov_vreg_F0(dp, rd);
2274
2275 /* break out of the loop if we have finished */
2276 if (veclen == 0)
2277 break;
2278
2279 if (op == 15 && delta_m == 0) {
2280 /* single source one-many */
2281 while (veclen--) {
2282 rd = ((rd + delta_d) & (bank_mask - 1))
2283 | (rd & bank_mask);
2284 gen_mov_vreg_F0(dp, rd);
2285 }
2286 break;
2287 }
2288 /* Setup the next operands. */
2289 veclen--;
2290 rd = ((rd + delta_d) & (bank_mask - 1))
2291 | (rd & bank_mask);
2292
2293 if (op == 15) {
2294 /* One source operand. */
2295 rm = ((rm + delta_m) & (bank_mask - 1))
2296 | (rm & bank_mask);
2297 gen_mov_F0_vreg(dp, rm);
2298 } else {
2299 /* Two source operands. */
2300 rn = ((rn + delta_d) & (bank_mask - 1))
2301 | (rn & bank_mask);
2302 gen_mov_F0_vreg(dp, rn);
2303 if (delta_m) {
2304 rm = ((rm + delta_m) & (bank_mask - 1))
2305 | (rm & bank_mask);
2306 gen_mov_F1_vreg(dp, rm);
2307 }
2308 }
2309 }
2310 }
2311 break;
2312 case 0xc:
2313 case 0xd:
9ee6e8bb 2314 if (dp && (insn & 0x03e00000) == 0x00400000) {
b7bcbe95
FB
2315 /* two-register transfer */
2316 rn = (insn >> 16) & 0xf;
2317 rd = (insn >> 12) & 0xf;
2318 if (dp) {
9ee6e8bb
PB
2319 VFP_DREG_M(rm, insn);
2320 } else {
2321 rm = VFP_SREG_M(insn);
2322 }
b7bcbe95 2323
18c9b560 2324 if (insn & ARM_CP_RW_BIT) {
b7bcbe95
FB
2325 /* vfp->arm */
2326 if (dp) {
2327 gen_mov_F0_vreg(1, rm);
2328 gen_op_vfp_mrrd();
2329 gen_movl_reg_T0(s, rd);
2330 gen_movl_reg_T1(s, rn);
2331 } else {
2332 gen_mov_F0_vreg(0, rm);
2333 gen_op_vfp_mrs();
2334 gen_movl_reg_T0(s, rn);
2335 gen_mov_F0_vreg(0, rm + 1);
2336 gen_op_vfp_mrs();
2337 gen_movl_reg_T0(s, rd);
2338 }
2339 } else {
2340 /* arm->vfp */
2341 if (dp) {
2342 gen_movl_T0_reg(s, rd);
2343 gen_movl_T1_reg(s, rn);
2344 gen_op_vfp_mdrr();
2345 gen_mov_vreg_F0(1, rm);
2346 } else {
2347 gen_movl_T0_reg(s, rn);
2348 gen_op_vfp_msr();
2349 gen_mov_vreg_F0(0, rm);
2350 gen_movl_T0_reg(s, rd);
2351 gen_op_vfp_msr();
2352 gen_mov_vreg_F0(0, rm + 1);
2353 }
2354 }
2355 } else {
2356 /* Load/store */
2357 rn = (insn >> 16) & 0xf;
2358 if (dp)
9ee6e8bb 2359 VFP_DREG_D(rd, insn);
b7bcbe95 2360 else
9ee6e8bb
PB
2361 rd = VFP_SREG_D(insn);
2362 if (s->thumb && rn == 15) {
2363 gen_op_movl_T1_im(s->pc & ~2);
2364 } else {
2365 gen_movl_T1_reg(s, rn);
2366 }
b7bcbe95
FB
2367 if ((insn & 0x01200000) == 0x01000000) {
2368 /* Single load/store */
2369 offset = (insn & 0xff) << 2;
2370 if ((insn & (1 << 23)) == 0)
2371 offset = -offset;
2372 gen_op_addl_T1_im(offset);
2373 if (insn & (1 << 20)) {
b5ff1b31 2374 gen_vfp_ld(s, dp);
b7bcbe95
FB
2375 gen_mov_vreg_F0(dp, rd);
2376 } else {
2377 gen_mov_F0_vreg(dp, rd);
b5ff1b31 2378 gen_vfp_st(s, dp);
b7bcbe95
FB
2379 }
2380 } else {
2381 /* load/store multiple */
2382 if (dp)
2383 n = (insn >> 1) & 0x7f;
2384 else
2385 n = insn & 0xff;
2386
2387 if (insn & (1 << 24)) /* pre-decrement */
2388 gen_op_addl_T1_im(-((insn & 0xff) << 2));
2389
2390 if (dp)
2391 offset = 8;
2392 else
2393 offset = 4;
2394 for (i = 0; i < n; i++) {
18c9b560 2395 if (insn & ARM_CP_RW_BIT) {
b7bcbe95 2396 /* load */
b5ff1b31 2397 gen_vfp_ld(s, dp);
b7bcbe95
FB
2398 gen_mov_vreg_F0(dp, rd + i);
2399 } else {
2400 /* store */
2401 gen_mov_F0_vreg(dp, rd + i);
b5ff1b31 2402 gen_vfp_st(s, dp);
b7bcbe95
FB
2403 }
2404 gen_op_addl_T1_im(offset);
2405 }
2406 if (insn & (1 << 21)) {
2407 /* writeback */
2408 if (insn & (1 << 24))
2409 offset = -offset * n;
2410 else if (dp && (insn & 1))
2411 offset = 4;
2412 else
2413 offset = 0;
2414
2415 if (offset != 0)
2416 gen_op_addl_T1_im(offset);
2417 gen_movl_reg_T1(s, rn);
2418 }
2419 }
2420 }
2421 break;
2422 default:
2423 /* Should never happen. */
2424 return 1;
2425 }
2426 return 0;
2427}
2428
6e256c93 2429static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
c53be334 2430{
6e256c93
FB
2431 TranslationBlock *tb;
2432
2433 tb = s->tb;
2434 if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
2435 if (n == 0)
2436 gen_op_goto_tb0(TBPARAM(tb));
2437 else
2438 gen_op_goto_tb1(TBPARAM(tb));
2439 gen_op_movl_T0_im(dest);
2440 gen_op_movl_r15_T0();
2441 gen_op_movl_T0_im((long)tb + n);
2442 gen_op_exit_tb();
2443 } else {
2444 gen_op_movl_T0_im(dest);
2445 gen_op_movl_r15_T0();
2446 gen_op_movl_T0_0();
2447 gen_op_exit_tb();
2448 }
c53be334
FB
2449}
2450
8aaca4c0
FB
2451static inline void gen_jmp (DisasContext *s, uint32_t dest)
2452{
2453 if (__builtin_expect(s->singlestep_enabled, 0)) {
2454 /* An indirect jump so that we still trigger the debug exception. */
5899f386
FB
2455 if (s->thumb)
2456 dest |= 1;
8aaca4c0
FB
2457 gen_op_movl_T0_im(dest);
2458 gen_bx(s);
2459 } else {
6e256c93 2460 gen_goto_tb(s, 0, dest);
8aaca4c0
FB
2461 s->is_jmp = DISAS_TB_JUMP;
2462 }
2463}
2464
b5ff1b31
FB
/* Emit a 16x16 signed multiply of selected halfwords: x and y choose
   the top (nonzero) or bottom (zero) halfword of T0 and T1
   respectively; the product is left in T0.  */
static inline void gen_mulxy(int x, int y)
{
    if (!x)
        gen_op_sxth_T0();
    else
        gen_op_sarl_T0_im(16);
    if (!y)
        gen_op_sxth_T1();
    else
        gen_op_sarl_T1_im(16);
    gen_op_mul_T0_T1();
}
2477
2478/* Return the mask of PSR bits set by a MSR instruction. */
9ee6e8bb 2479static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
b5ff1b31 2480 uint32_t mask;
9ee6e8bb 2481 uint32_t reserved;
b5ff1b31
FB
2482
2483 mask = 0;
2484 if (flags & (1 << 0))
2485 mask |= 0xff;
2486 if (flags & (1 << 1))
2487 mask |= 0xff00;
2488 if (flags & (1 << 2))
2489 mask |= 0xff0000;
2490 if (flags & (1 << 3))
2491 mask |= 0xff000000;
9ee6e8bb 2492
2ae23e75 2493 /* Mask out undefined bits. */
9ee6e8bb
PB
2494 mask &= ~CPSR_RESERVED;
2495 if (!arm_feature(env, ARM_FEATURE_V6))
2496 reserved &= ~(CPSR_E | CPSR_GE);
2497 if (!arm_feature(env, ARM_FEATURE_THUMB2))
2498 reserved &= ~CPSR_IT;
2499 /* Mask out execution state bits. */
2ae23e75 2500 if (!spsr)
9ee6e8bb 2501 reserved &= ~CPSR_EXEC;
b5ff1b31
FB
2502 /* Mask out privileged bits. */
2503 if (IS_USER(s))
9ee6e8bb 2504 mask &= CPSR_USER;
b5ff1b31
FB
2505 return mask;
2506}
2507
2508/* Returns nonzero if access to the PSR is not permitted. */
2509static int gen_set_psr_T0(DisasContext *s, uint32_t mask, int spsr)
2510{
2511 if (spsr) {
2512 /* ??? This is also undefined in system mode. */
2513 if (IS_USER(s))
2514 return 1;
2515 gen_op_movl_spsr_T0(mask);
2516 } else {
2517 gen_op_movl_cpsr_T0(mask);
2518 }
2519 gen_lookup_tb(s);
2520 return 0;
2521}
2522
/* Generate an old-style exception return.
   Expects the return address in T0: it is copied to the PC, then the
   CPSR is restored from the current mode's SPSR.  */
static void gen_exception_return(DisasContext *s)
{
    gen_op_movl_reg_TN[0][15]();       /* PC = T0 */
    gen_op_movl_T0_spsr();
    gen_op_movl_cpsr_T0(0xffffffff);   /* restore every CPSR bit */
    /* Mode/interrupt state changed: force this TB to end.  */
    s->is_jmp = DISAS_UPDATE;
}
2531
9ee6e8bb
PB
/* Generate a v6 exception return.
   Expects the new CPSR value in T0 and the return address in T2.  */
static void gen_rfe(DisasContext *s)
{
    gen_op_movl_cpsr_T0(0xffffffff);   /* write every CPSR bit */
    gen_op_movl_T0_T2();
    gen_op_movl_reg_TN[0][15]();       /* PC = T0 (the saved address) */
    /* CPU state changed: force the translator to end this TB.  */
    s->is_jmp = DISAS_UPDATE;
}
3b46e624 2540
9ee6e8bb
PB
2541static inline void
2542gen_set_condexec (DisasContext *s)
2543{
2544 if (s->condexec_mask) {
2545 gen_op_set_condexec((s->condexec_cond << 4) | (s->condexec_mask >> 1));
2546 }
2547}
3b46e624 2548
9ee6e8bb
PB
2549static void gen_nop_hint(DisasContext *s, int val)
2550{
2551 switch (val) {
2552 case 3: /* wfi */
2553 gen_op_movl_T0_im((long)s->pc);
2554 gen_op_movl_reg_TN[0][15]();
2555 s->is_jmp = DISAS_WFI;
2556 break;
2557 case 2: /* wfe */
2558 case 4: /* sev */
2559 /* TODO: Implement SEV and WFE. May help SMP performance. */
2560 default: /* nop */
2561 break;
2562 }
2563}
99c475ab 2564
9ee6e8bb
PB
/* Neon shift by constant.  The actual ops are the same as used for variable
   shifts.  [OP][U][SIZE]
   Right shifts (VSHR/VSRA/VRSHR/VRSRA) are implemented by the caller as
   left shifts with a negative count, which is why those rows reuse the
   shl/rshl helpers.  A NULL entry marks an invalid encoding.
   NOTE(review): for ops 0-3 the [0] group holds the _u helpers and [1]
   the _s helpers; elsewhere in this file u==1 selects unsigned -- verify
   this index ordering against the caller's u decode.  */
static GenOpFunc *gen_neon_shift_im[8][2][4] = {
    { /* 0 */ /* VSHR */
        {
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64
        }, {
            gen_op_neon_shl_s8,
            gen_op_neon_shl_s16,
            gen_op_neon_shl_s32,
            gen_op_neon_shl_s64
        }
    }, { /* 1 */ /* VSRA */
        {
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64
        }, {
            gen_op_neon_shl_s8,
            gen_op_neon_shl_s16,
            gen_op_neon_shl_s32,
            gen_op_neon_shl_s64
        }
    }, { /* 2 */ /* VRSHR */
        {
            gen_op_neon_rshl_u8,
            gen_op_neon_rshl_u16,
            gen_op_neon_rshl_u32,
            gen_op_neon_rshl_u64
        }, {
            gen_op_neon_rshl_s8,
            gen_op_neon_rshl_s16,
            gen_op_neon_rshl_s32,
            gen_op_neon_rshl_s64
        }
    }, { /* 3 */ /* VRSRA */
        {
            gen_op_neon_rshl_u8,
            gen_op_neon_rshl_u16,
            gen_op_neon_rshl_u32,
            gen_op_neon_rshl_u64
        }, {
            gen_op_neon_rshl_s8,
            gen_op_neon_rshl_s16,
            gen_op_neon_rshl_s32,
            gen_op_neon_rshl_s64
        }
    }, { /* 4 */
        {
            /* VSRI only exists with u == 1.  */
            NULL, NULL, NULL, NULL
        }, { /* VSRI */
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64,
        }
    }, { /* 5 */
        { /* VSHL */
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64,
        }, { /* VSLI */
            gen_op_neon_shl_u8,
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64,
        }
    }, { /* 6 */ /* VQSHL */
        {
            gen_op_neon_qshl_u8,
            gen_op_neon_qshl_u16,
            gen_op_neon_qshl_u32,
            gen_op_neon_qshl_u64
        }, {
            gen_op_neon_qshl_s8,
            gen_op_neon_qshl_s16,
            gen_op_neon_qshl_s32,
            gen_op_neon_qshl_s64
        }
    }, { /* 7 */ /* VQSHLU */
        {
            /* VQSHLU always saturates to an unsigned result, so both
               slots use the unsigned helpers.  */
            gen_op_neon_qshl_u8,
            gen_op_neon_qshl_u16,
            gen_op_neon_qshl_u32,
            gen_op_neon_qshl_u64
        }, {
            gen_op_neon_qshl_u8,
            gen_op_neon_qshl_u16,
            gen_op_neon_qshl_u32,
            gen_op_neon_qshl_u64
        }
    }
};
2663
/* [R][U][size - 1]
   Shift helpers for the shift-immediate-and-narrow ops
   (VSHRN/VRSHRN/VQSHRN/VQRSHRN): R selects the rounding variants.  The
   size index is offset by one because narrowing halves the element
   size, so an 8-bit source element cannot occur.  */
static GenOpFunc *gen_neon_shift_im_narrow[2][2][3] = {
    {
        {
            gen_op_neon_shl_u16,
            gen_op_neon_shl_u32,
            gen_op_neon_shl_u64
        }, {
            gen_op_neon_shl_s16,
            gen_op_neon_shl_s32,
            gen_op_neon_shl_s64
        }
    }, {
        {
            gen_op_neon_rshl_u16,
            gen_op_neon_rshl_u32,
            gen_op_neon_rshl_u64
        }, {
            gen_op_neon_rshl_s16,
            gen_op_neon_rshl_s32,
            gen_op_neon_rshl_s64
        }
    }
};
99c475ab 2688
9ee6e8bb
PB
/* Narrow a 32-bit element to 32 bits: the value is already in place,
   so nothing to emit.  Declared with a proper (void) prototype instead
   of an old-style empty parameter list.  */
static inline void
gen_op_neon_narrow_u32 (void)
{
    /* No-op.  */
}
2694
/* Narrowing helpers, indexed by destination element size (log2).  */
static GenOpFunc *gen_neon_narrow[3] = {
    gen_op_neon_narrow_u8,
    gen_op_neon_narrow_u16,
    gen_op_neon_narrow_u32
};

/* Narrow with unsigned saturation.  */
static GenOpFunc *gen_neon_narrow_satu[3] = {
    gen_op_neon_narrow_sat_u8,
    gen_op_neon_narrow_sat_u16,
    gen_op_neon_narrow_sat_u32
};

/* Narrow with signed saturation.  */
static GenOpFunc *gen_neon_narrow_sats[3] = {
    gen_op_neon_narrow_sat_s8,
    gen_op_neon_narrow_sat_s16,
    gen_op_neon_narrow_sat_s32
};
2712
/* Emit a vector add of T0 and T1 for the given element size.
   Returns nonzero for an invalid size (64-bit elements).  */
static inline int gen_neon_add(int size)
{
    if (size == 0) {
        gen_op_neon_add_u8();
    } else if (size == 1) {
        gen_op_neon_add_u16();
    } else if (size == 2) {
        gen_op_addl_T0_T1();
    } else {
        return 1;
    }
    return 0;
}
2723
/* 32-bit pairwise ops end up the same as the elementwise versions. */
#define gen_op_neon_pmax_s32 gen_op_neon_max_s32
#define gen_op_neon_pmax_u32 gen_op_neon_max_u32
#define gen_op_neon_pmin_s32 gen_op_neon_min_s32
#define gen_op_neon_pmin_u32 gen_op_neon_min_u32

/* Dispatch the signed/unsigned 8/16/32-bit variant of a NEON integer op
   based on the `size` and `u` decode variables in the enclosing scope.
   For invalid encodings (size == 3) it returns 1 FROM THE ENCLOSING
   FUNCTION, so it may only be used where that is the right behavior.  */
#define GEN_NEON_INTEGER_OP(name) do { \
    switch ((size << 1) | u) { \
    case 0: gen_op_neon_##name##_s8(); break; \
    case 1: gen_op_neon_##name##_u8(); break; \
    case 2: gen_op_neon_##name##_s16(); break; \
    case 3: gen_op_neon_##name##_u16(); break; \
    case 4: gen_op_neon_##name##_s32(); break; \
    case 5: gen_op_neon_##name##_u32(); break; \
    default: return 1; \
    }} while (0)
2740
2741static inline void
2742gen_neon_movl_scratch_T0(int scratch)
2743{
2744 uint32_t offset;
2745
2746 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2747 gen_op_neon_setreg_T0(offset);
2748}
2749
2750static inline void
2751gen_neon_movl_scratch_T1(int scratch)
2752{
2753 uint32_t offset;
2754
2755 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2756 gen_op_neon_setreg_T1(offset);
2757}
2758
2759static inline void
2760gen_neon_movl_T0_scratch(int scratch)
2761{
2762 uint32_t offset;
2763
2764 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2765 gen_op_neon_getreg_T0(offset);
2766}
2767
2768static inline void
2769gen_neon_movl_T1_scratch(int scratch)
2770{
2771 uint32_t offset;
2772
2773 offset = offsetof(CPUARMState, vfp.scratch[scratch]);
2774 gen_op_neon_getreg_T1(offset);
2775}
2776
/* Widen an unsigned 32-bit element: the low half is already in T0, so
   only the high half (T1) needs zeroing.  */
static inline void gen_op_neon_widen_u32(void)
{
    gen_op_movl_T1_im(0);
}
2781
2782static inline void gen_neon_get_scalar(int size, int reg)
2783{
2784 if (size == 1) {
2785 NEON_GET_REG(T0, reg >> 1, reg & 1);
2786 } else {
2787 NEON_GET_REG(T0, reg >> 2, (reg >> 1) & 1);
2788 if (reg & 1)
2789 gen_op_neon_dup_low16();
2790 else
2791 gen_op_neon_dup_high16();
2792 }
2793}
2794
2795static void gen_neon_unzip(int reg, int q, int tmp, int size)
2796{
2797 int n;
2798
2799 for (n = 0; n < q + 1; n += 2) {
2800 NEON_GET_REG(T0, reg, n);
2801 NEON_GET_REG(T0, reg, n + n);
2802 switch (size) {
2803 case 0: gen_op_neon_unzip_u8(); break;
2804 case 1: gen_op_neon_zip_u16(); break; /* zip and unzip are the same. */
2805 case 2: /* no-op */; break;
2806 default: abort();
2807 }
2808 gen_neon_movl_scratch_T0(tmp + n);
2809 gen_neon_movl_scratch_T1(tmp + n + 1);
2810 }
2811}
2812
/* Layout of the VLD/VST "multiple structures" forms, indexed by the
   op field (bits [11:8] of the instruction): how many registers are
   transferred, the element interleave factor, and the D-register
   spacing.  Declared const: this decode table is read-only.  */
static const struct {
    int nregs;       /* registers transferred */
    int interleave;  /* element interleave factor */
    int spacing;     /* D-register spacing (1 or 2) */
} neon_ls_element_type[11] = {
    {4, 4, 1},
    {4, 4, 2},
    {4, 1, 1},
    {4, 2, 1},
    {3, 3, 1},
    {3, 3, 2},
    {3, 1, 1},
    {1, 1, 1},
    {2, 2, 1},
    {2, 2, 2},
    {2, 1, 1}
};
2830
/* Translate a NEON load/store element instruction.  Return nonzero if the
   instruction is invalid.
   T1 is used throughout as the memory address; elements narrower than
   32 bits are assembled into / extracted from a 32-bit word in T2.  */
static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
{
    int rd, rn, rm;
    int op;
    int nregs;       /* registers transferred per structure */
    int interleave;  /* element interleave factor */
    int stride;      /* address step, later reused as writeback amount */
    int size;        /* log2 of the element size in bytes */
    int reg;
    int pass;        /* 32-bit word within a D register (0 or 1) */
    int load;        /* nonzero = load, zero = store */
    int shift;
    uint32_t mask;
    int n;

    /* The instruction is UNDEFINED while VFP/NEON is disabled.  */
    if (!vfp_enabled(env))
      return 1;
    VFP_DREG_D(rd, insn);
    rn = (insn >> 16) & 0xf;   /* base address register */
    rm = insn & 0xf;           /* index register / writeback selector */
    load = (insn & (1 << 21)) != 0;
    if ((insn & (1 << 23)) == 0) {
        /* Load store all elements (VLDn/VSTn, multiple structures).  */
        op = (insn >> 8) & 0xf;
        size = (insn >> 6) & 3;
        if (op > 10 || size == 3)
            return 1;
        nregs = neon_ls_element_type[op].nregs;
        interleave = neon_ls_element_type[op].interleave;
        gen_movl_T1_reg(s, rn);    /* T1 = base address */
        stride = (1 << size) * interleave;
        for (reg = 0; reg < nregs; reg++) {
            /* For interleaved layouts each register's elements start at
               a different offset from the base address.  */
            if (interleave > 2 || (interleave == 2 && nregs == 2)) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im((1 << size) * reg);
            } else if (interleave == 2 && nregs == 4 && reg == 2) {
                gen_movl_T1_reg(s, rn);
                gen_op_addl_T1_im(1 << size);
            }
            for (pass = 0; pass < 2; pass++) {
                if (size == 2) {
                    /* 32-bit elements: one access per pass.  */
                    if (load) {
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T0, rd, pass);
                        gen_ldst(stl, s);
                    }
                    gen_op_addl_T1_im(stride);
                } else if (size == 1) {
                    /* 16-bit elements: two accesses merged into one
                       32-bit word held in T2.  */
                    if (load) {
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_movl_T2_T0();
                        gen_ldst(lduw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_insert_elt(16, 0xffff);
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_movl_T0_T2();
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                        gen_op_neon_extract_elt(16, 0xffff0000);
                        gen_ldst(stw, s);
                        gen_op_addl_T1_im(stride);
                    }
                } else /* size == 0 */ {
                    /* 8-bit elements: four accesses per 32-bit word,
                       using a moving byte mask.  */
                    if (load) {
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            gen_ldst(ldub, s);
                            gen_op_addl_T1_im(stride);
                            if (n == 0) {
                                gen_op_movl_T2_T0();
                            } else {
                                gen_op_neon_insert_elt(n * 8, ~mask);
                            }
                            mask <<= 8;
                        }
                        NEON_SET_REG(T2, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        mask = 0xff;
                        for (n = 0; n < 4; n++) {
                            if (n == 0) {
                                gen_op_movl_T0_T2();
                            } else {
                                gen_op_neon_extract_elt(n * 8, mask);
                            }
                            gen_ldst(stb, s);
                            gen_op_addl_T1_im(stride);
                            mask <<= 8;
                        }
                    }
                }
            }
            rd += neon_ls_element_type[op].spacing;
        }
        stride = nregs * 8;   /* total bytes, for base writeback below */
    } else {
        size = (insn >> 10) & 3;
        if (size == 3) {
            /* Load single element to all lanes (VLDn dup).  */
            if (!load)
                return 1;   /* no store form exists */
            size = (insn >> 6) & 3;
            nregs = ((insn >> 8) & 3) + 1;
            stride = (insn & (1 << 5)) ? 2 : 1;   /* D-register spacing */
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                switch (size) {
                case 0:
                    gen_ldst(ldub, s);
                    gen_op_neon_dup_u8(0);
                    break;
                case 1:
                    gen_ldst(lduw, s);
                    gen_op_neon_dup_low16();
                    break;
                case 2:
                    /* 32-bit element already fills the word.  */
                    gen_ldst(ldl, s);
                    break;
                case 3:
                    return 1;
                }
                gen_op_addl_T1_im(1 << size);
                /* Replicate into both words of the D register.  */
                NEON_SET_REG(T0, rd, 0);
                NEON_SET_REG(T0, rd, 1);
                rd += stride;
            }
            stride = (1 << size) * nregs;
        } else {
            /* Single element to one lane.  */
            pass = (insn >> 7) & 1;
            /* Compute the lane's bit position and mask within the
               32-bit word, and the D-register spacing.  */
            switch (size) {
            case 0:
                shift = ((insn >> 5) & 3) * 8;
                mask = 0xff << shift;
                stride = 1;
                break;
            case 1:
                shift = ((insn >> 6) & 1) * 16;
                mask = shift ? 0xffff0000 : 0xffff;
                stride = (insn & (1 << 5)) ? 2 : 1;
                break;
            case 2:
                shift = 0;
                mask = 0xffffffff;
                stride = (insn & (1 << 6)) ? 2 : 1;
                break;
            default:
                abort();
            }
            nregs = ((insn >> 8) & 3) + 1;
            gen_movl_T1_reg(s, rn);
            for (reg = 0; reg < nregs; reg++) {
                if (load) {
                    if (size != 2) {
                        /* Read the register so unaffected lanes are kept.  */
                        NEON_GET_REG(T2, rd, pass);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(ldub, s);
                        break;
                    case 1:
                        gen_ldst(lduw, s);
                        break;
                    case 2:
                        gen_ldst(ldl, s);
                        NEON_SET_REG(T0, rd, pass);
                        break;
                    }
                    if (size != 2) {
                        /* Merge the loaded element into the word.  */
                        gen_op_neon_insert_elt(shift, ~mask);
                        NEON_SET_REG(T0, rd, pass);
                    }
                } else { /* Store */
                    if (size == 2) {
                        NEON_GET_REG(T0, rd, pass);
                    } else {
                        NEON_GET_REG(T2, rd, pass);
                        gen_op_neon_extract_elt(shift, mask);
                    }
                    switch (size) {
                    case 0:
                        gen_ldst(stb, s);
                        break;
                    case 1:
                        gen_ldst(stw, s);
                        break;
                    case 2:
                        gen_ldst(stl, s);
                        break;
                    }
                }
                rd += stride;
                gen_op_addl_T1_im(1 << size);
            }
            stride = nregs * (1 << size);
        }
    }
    if (rm != 15) {
        /* Base register writeback: rm == 15 means none, rm == 13 means
           post-increment by the transfer size, otherwise post-increment
           by register rm.  */
        gen_movl_T1_reg(s, rn);
        if (rm == 13) {
            gen_op_addl_T1_im(stride);
        } else {
            gen_movl_T2_reg(s, rm);
            gen_op_addl_T1_T2();
        }
        gen_movl_reg_T1(s, rn);
    }
    return 0;
}
3b46e624 3047
9ee6e8bb
PB
3048/* Translate a NEON data processing instruction. Return nonzero if the
3049 instruction is invalid.
3050 In general we process vectors in 32-bit chunks. This means we can reuse
3051 some of the scalar ops, and hopefully the code generated for 32-bit
3052 hosts won't be too awful. The downside is that the few 64-bit operations
3053 (mainly shifts) get complicated. */
2c0262af 3054
9ee6e8bb
PB
3055static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
3056{
3057 int op;
3058 int q;
3059 int rd, rn, rm;
3060 int size;
3061 int shift;
3062 int pass;
3063 int count;
3064 int pairwise;
3065 int u;
3066 int n;
3067 uint32_t imm;
3068
3069 if (!vfp_enabled(env))
3070 return 1;
3071 q = (insn & (1 << 6)) != 0;
3072 u = (insn >> 24) & 1;
3073 VFP_DREG_D(rd, insn);
3074 VFP_DREG_N(rn, insn);
3075 VFP_DREG_M(rm, insn);
3076 size = (insn >> 20) & 3;
3077 if ((insn & (1 << 23)) == 0) {
3078 /* Three register same length. */
3079 op = ((insn >> 7) & 0x1e) | ((insn >> 4) & 1);
3080 if (size == 3 && (op == 1 || op == 5 || op == 16)) {
3081 for (pass = 0; pass < (q ? 2 : 1); pass++) {
3082 NEON_GET_REG(T0, rm, pass * 2);
3083 NEON_GET_REG(T1, rm, pass * 2 + 1);
3084 gen_neon_movl_scratch_T0(0);
3085 gen_neon_movl_scratch_T1(1);
3086 NEON_GET_REG(T0, rn, pass * 2);
3087 NEON_GET_REG(T1, rn, pass * 2 + 1);
3088 switch (op) {
3089 case 1: /* VQADD */
3090 if (u) {
3091 gen_op_neon_addl_saturate_u64();
2c0262af 3092 } else {
9ee6e8bb 3093 gen_op_neon_addl_saturate_s64();
2c0262af 3094 }
9ee6e8bb
PB
3095 break;
3096 case 5: /* VQSUB */
3097 if (u) {
3098 gen_op_neon_subl_saturate_u64();
1e8d4eec 3099 } else {
9ee6e8bb 3100 gen_op_neon_subl_saturate_s64();
1e8d4eec 3101 }
9ee6e8bb
PB
3102 break;
3103 case 16:
3104 if (u) {
3105 gen_op_neon_subl_u64();
3106 } else {
3107 gen_op_neon_addl_u64();
3108 }
3109 break;
3110 default:
3111 abort();
2c0262af 3112 }
9ee6e8bb
PB
3113 NEON_SET_REG(T0, rd, pass * 2);
3114 NEON_SET_REG(T1, rd, pass * 2 + 1);
2c0262af 3115 }
9ee6e8bb 3116 return 0;
2c0262af 3117 }
9ee6e8bb
PB
3118 switch (op) {
3119 case 8: /* VSHL */
3120 case 9: /* VQSHL */
3121 case 10: /* VRSHL */
3122 case 11: /* VQSHL */
3123 /* Shift operations have Rn and Rm reversed. */
3124 {
3125 int tmp;
3126 tmp = rn;
3127 rn = rm;
3128 rm = tmp;
3129 pairwise = 0;
3130 }
2c0262af 3131 break;
9ee6e8bb
PB
3132 case 20: /* VPMAX */
3133 case 21: /* VPMIN */
3134 case 23: /* VPADD */
3135 pairwise = 1;
2c0262af 3136 break;
9ee6e8bb
PB
3137 case 26: /* VPADD (float) */
3138 pairwise = (u && size < 2);
2c0262af 3139 break;
9ee6e8bb
PB
3140 case 30: /* VPMIN/VPMAX (float) */
3141 pairwise = u;
2c0262af 3142 break;
9ee6e8bb
PB
3143 default:
3144 pairwise = 0;
2c0262af 3145 break;
9ee6e8bb
PB
3146 }
3147 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3148
3149 if (pairwise) {
3150 /* Pairwise. */
3151 if (q)
3152 n = (pass & 1) * 2;
2c0262af 3153 else
9ee6e8bb
PB
3154 n = 0;
3155 if (pass < q + 1) {
3156 NEON_GET_REG(T0, rn, n);
3157 NEON_GET_REG(T1, rn, n + 1);
3158 } else {
3159 NEON_GET_REG(T0, rm, n);
3160 NEON_GET_REG(T1, rm, n + 1);
3161 }
3162 } else {
3163 /* Elementwise. */
3164 NEON_GET_REG(T0, rn, pass);
3165 NEON_GET_REG(T1, rm, pass);
3166 }
3167 switch (op) {
3168 case 0: /* VHADD */
3169 GEN_NEON_INTEGER_OP(hadd);
3170 break;
3171 case 1: /* VQADD */
3172 switch (size << 1| u) {
3173 case 0: gen_op_neon_qadd_s8(); break;
3174 case 1: gen_op_neon_qadd_u8(); break;
3175 case 2: gen_op_neon_qadd_s16(); break;
3176 case 3: gen_op_neon_qadd_u16(); break;
3177 case 4: gen_op_addl_T0_T1_saturate(); break;
3178 case 5: gen_op_addl_T0_T1_usaturate(); break;
3179 default: abort();
3180 }
2c0262af 3181 break;
9ee6e8bb
PB
3182 case 2: /* VRHADD */
3183 GEN_NEON_INTEGER_OP(rhadd);
2c0262af 3184 break;
9ee6e8bb
PB
3185 case 3: /* Logic ops. */
3186 switch ((u << 2) | size) {
3187 case 0: /* VAND */
2c0262af 3188 gen_op_andl_T0_T1();
9ee6e8bb
PB
3189 break;
3190 case 1: /* BIC */
3191 gen_op_bicl_T0_T1();
3192 break;
3193 case 2: /* VORR */
3194 gen_op_orl_T0_T1();
3195 break;
3196 case 3: /* VORN */
3197 gen_op_notl_T1();
3198 gen_op_orl_T0_T1();
3199 break;
3200 case 4: /* VEOR */
3201 gen_op_xorl_T0_T1();
3202 break;
3203 case 5: /* VBSL */
3204 NEON_GET_REG(T2, rd, pass);
3205 gen_op_neon_bsl();
3206 break;
3207 case 6: /* VBIT */
3208 NEON_GET_REG(T2, rd, pass);
3209 gen_op_neon_bit();
3210 break;
3211 case 7: /* VBIF */
3212 NEON_GET_REG(T2, rd, pass);
3213 gen_op_neon_bif();
3214 break;
2c0262af
FB
3215 }
3216 break;
9ee6e8bb
PB
3217 case 4: /* VHSUB */
3218 GEN_NEON_INTEGER_OP(hsub);
3219 break;
3220 case 5: /* VQSUB */
3221 switch ((size << 1) | u) {
3222 case 0: gen_op_neon_qsub_s8(); break;
3223 case 1: gen_op_neon_qsub_u8(); break;
3224 case 2: gen_op_neon_qsub_s16(); break;
3225 case 3: gen_op_neon_qsub_u16(); break;
3226 case 4: gen_op_subl_T0_T1_saturate(); break;
3227 case 5: gen_op_subl_T0_T1_usaturate(); break;
3228 default: abort();
2c0262af
FB
3229 }
3230 break;
9ee6e8bb
PB
3231 case 6: /* VCGT */
3232 GEN_NEON_INTEGER_OP(cgt);
3233 break;
3234 case 7: /* VCGE */
3235 GEN_NEON_INTEGER_OP(cge);
3236 break;
3237 case 8: /* VSHL */
3238 switch ((size << 1) | u) {
3239 case 0: gen_op_neon_shl_s8(); break;
3240 case 1: gen_op_neon_shl_u8(); break;
3241 case 2: gen_op_neon_shl_s16(); break;
3242 case 3: gen_op_neon_shl_u16(); break;
3243 case 4: gen_op_neon_shl_s32(); break;
3244 case 5: gen_op_neon_shl_u32(); break;
3245#if 0
3246 /* ??? Implementing these is tricky because the vector ops work
3247 on 32-bit pieces. */
3248 case 6: gen_op_neon_shl_s64(); break;
3249 case 7: gen_op_neon_shl_u64(); break;
3250#else
3251 case 6: case 7: cpu_abort(env, "VSHL.64 not implemented");
3252#endif
2c0262af
FB
3253 }
3254 break;
9ee6e8bb
PB
3255 case 9: /* VQSHL */
3256 switch ((size << 1) | u) {
3257 case 0: gen_op_neon_qshl_s8(); break;
3258 case 1: gen_op_neon_qshl_u8(); break;
3259 case 2: gen_op_neon_qshl_s16(); break;
3260 case 3: gen_op_neon_qshl_u16(); break;
3261 case 4: gen_op_neon_qshl_s32(); break;
3262 case 5: gen_op_neon_qshl_u32(); break;
3263#if 0
3264 /* ??? Implementing these is tricky because the vector ops work
3265 on 32-bit pieces. */
3266 case 6: gen_op_neon_qshl_s64(); break;
3267 case 7: gen_op_neon_qshl_u64(); break;
3268#else
3269 case 6: case 7: cpu_abort(env, "VQSHL.64 not implemented");
3270#endif
2c0262af
FB
3271 }
3272 break;
9ee6e8bb
PB
3273 case 10: /* VRSHL */
3274 switch ((size << 1) | u) {
3275 case 0: gen_op_neon_rshl_s8(); break;
3276 case 1: gen_op_neon_rshl_u8(); break;
3277 case 2: gen_op_neon_rshl_s16(); break;
3278 case 3: gen_op_neon_rshl_u16(); break;
3279 case 4: gen_op_neon_rshl_s32(); break;
3280 case 5: gen_op_neon_rshl_u32(); break;
3281#if 0
3282 /* ??? Implementing these is tricky because the vector ops work
3283 on 32-bit pieces. */
3284 case 6: gen_op_neon_rshl_s64(); break;
3285 case 7: gen_op_neon_rshl_u64(); break;
3286#else
3287 case 6: case 7: cpu_abort(env, "VRSHL.64 not implemented");
3288#endif
3289 }
2c0262af 3290 break;
9ee6e8bb
PB
3291 case 11: /* VQRSHL */
3292 switch ((size << 1) | u) {
3293 case 0: gen_op_neon_qrshl_s8(); break;
3294 case 1: gen_op_neon_qrshl_u8(); break;
3295 case 2: gen_op_neon_qrshl_s16(); break;
3296 case 3: gen_op_neon_qrshl_u16(); break;
3297 case 4: gen_op_neon_qrshl_s32(); break;
3298 case 5: gen_op_neon_qrshl_u32(); break;
3299#if 0
3300 /* ??? Implementing these is tricky because the vector ops work
3301 on 32-bit pieces. */
3302 case 6: gen_op_neon_qrshl_s64(); break;
3303 case 7: gen_op_neon_qrshl_u64(); break;
3304#else
3305 case 6: case 7: cpu_abort(env, "VQRSHL.64 not implemented");
3306#endif
3307 }
3308 break;
3309 case 12: /* VMAX */
3310 GEN_NEON_INTEGER_OP(max);
3311 break;
3312 case 13: /* VMIN */
3313 GEN_NEON_INTEGER_OP(min);
3314 break;
3315 case 14: /* VABD */
3316 GEN_NEON_INTEGER_OP(abd);
3317 break;
3318 case 15: /* VABA */
3319 GEN_NEON_INTEGER_OP(abd);
3320 NEON_GET_REG(T1, rd, pass);
3321 gen_neon_add(size);
3322 break;
3323 case 16:
3324 if (!u) { /* VADD */
3325 if (gen_neon_add(size))
3326 return 1;
3327 } else { /* VSUB */
3328 switch (size) {
3329 case 0: gen_op_neon_sub_u8(); break;
3330 case 1: gen_op_neon_sub_u16(); break;
3331 case 2: gen_op_subl_T0_T1(); break;
3332 default: return 1;
3333 }
3334 }
3335 break;
3336 case 17:
3337 if (!u) { /* VTST */
3338 switch (size) {
3339 case 0: gen_op_neon_tst_u8(); break;
3340 case 1: gen_op_neon_tst_u16(); break;
3341 case 2: gen_op_neon_tst_u32(); break;
3342 default: return 1;
3343 }
3344 } else { /* VCEQ */
3345 switch (size) {
3346 case 0: gen_op_neon_ceq_u8(); break;
3347 case 1: gen_op_neon_ceq_u16(); break;
3348 case 2: gen_op_neon_ceq_u32(); break;
3349 default: return 1;
3350 }
3351 }
3352 break;
3353 case 18: /* Multiply. */
3354 switch (size) {
3355 case 0: gen_op_neon_mul_u8(); break;
3356 case 1: gen_op_neon_mul_u16(); break;
3357 case 2: gen_op_mul_T0_T1(); break;
3358 default: return 1;
3359 }
3360 NEON_GET_REG(T1, rd, pass);
3361 if (u) { /* VMLS */
3362 switch (size) {
3363 case 0: gen_op_neon_rsb_u8(); break;
3364 case 1: gen_op_neon_rsb_u16(); break;
3365 case 2: gen_op_rsbl_T0_T1(); break;
3366 default: return 1;
3367 }
3368 } else { /* VMLA */
3369 gen_neon_add(size);
3370 }
3371 break;
3372 case 19: /* VMUL */
3373 if (u) { /* polynomial */
3374 gen_op_neon_mul_p8();
3375 } else { /* Integer */
3376 switch (size) {
3377 case 0: gen_op_neon_mul_u8(); break;
3378 case 1: gen_op_neon_mul_u16(); break;
3379 case 2: gen_op_mul_T0_T1(); break;
3380 default: return 1;
3381 }
3382 }
3383 break;
3384 case 20: /* VPMAX */
3385 GEN_NEON_INTEGER_OP(pmax);
3386 break;
3387 case 21: /* VPMIN */
3388 GEN_NEON_INTEGER_OP(pmin);
3389 break;
3390 case 22: /* Hultiply high. */
3391 if (!u) { /* VQDMULH */
3392 switch (size) {
3393 case 1: gen_op_neon_qdmulh_s16(); break;
3394 case 2: gen_op_neon_qdmulh_s32(); break;
3395 default: return 1;
3396 }
3397 } else { /* VQRDHMUL */
3398 switch (size) {
3399 case 1: gen_op_neon_qrdmulh_s16(); break;
3400 case 2: gen_op_neon_qrdmulh_s32(); break;
3401 default: return 1;
3402 }
3403 }
3404 break;
3405 case 23: /* VPADD */
3406 if (u)
3407 return 1;
3408 switch (size) {
3409 case 0: gen_op_neon_padd_u8(); break;
3410 case 1: gen_op_neon_padd_u16(); break;
3411 case 2: gen_op_addl_T0_T1(); break;
3412 default: return 1;
3413 }
3414 break;
3415 case 26: /* Floating point arithnetic. */
3416 switch ((u << 2) | size) {
3417 case 0: /* VADD */
3418 gen_op_neon_add_f32();
3419 break;
3420 case 2: /* VSUB */
3421 gen_op_neon_sub_f32();
3422 break;
3423 case 4: /* VPADD */
3424 gen_op_neon_add_f32();
3425 break;
3426 case 6: /* VABD */
3427 gen_op_neon_abd_f32();
3428 break;
3429 default:
3430 return 1;
3431 }
3432 break;
3433 case 27: /* Float multiply. */
3434 gen_op_neon_mul_f32();
3435 if (!u) {
3436 NEON_GET_REG(T1, rd, pass);
3437 if (size == 0) {
3438 gen_op_neon_add_f32();
3439 } else {
3440 gen_op_neon_rsb_f32();
3441 }
3442 }
3443 break;
3444 case 28: /* Float compare. */
3445 if (!u) {
3446 gen_op_neon_ceq_f32();
b5ff1b31 3447 } else {
9ee6e8bb
PB
3448 if (size == 0)
3449 gen_op_neon_cge_f32();
3450 else
3451 gen_op_neon_cgt_f32();
b5ff1b31 3452 }
2c0262af 3453 break;
9ee6e8bb
PB
3454 case 29: /* Float compare absolute. */
3455 if (!u)
3456 return 1;
3457 if (size == 0)
3458 gen_op_neon_acge_f32();
3459 else
3460 gen_op_neon_acgt_f32();
2c0262af 3461 break;
9ee6e8bb
PB
3462 case 30: /* Float min/max. */
3463 if (size == 0)
3464 gen_op_neon_max_f32();
3465 else
3466 gen_op_neon_min_f32();
3467 break;
3468 case 31:
3469 if (size == 0)
3470 gen_op_neon_recps_f32();
3471 else
3472 gen_op_neon_rsqrts_f32();
2c0262af 3473 break;
9ee6e8bb
PB
3474 default:
3475 abort();
2c0262af 3476 }
9ee6e8bb
PB
3477 /* Save the result. For elementwise operations we can put it
3478 straight into the destination register. For pairwise operations
3479 we have to be careful to avoid clobbering the source operands. */
3480 if (pairwise && rd == rm) {
3481 gen_neon_movl_scratch_T0(pass);
3482 } else {
3483 NEON_SET_REG(T0, rd, pass);
3484 }
3485
3486 } /* for pass */
3487 if (pairwise && rd == rm) {
3488 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3489 gen_neon_movl_T0_scratch(pass);
3490 NEON_SET_REG(T0, rd, pass);
3491 }
3492 }
3493 } else if (insn & (1 << 4)) {
3494 if ((insn & 0x00380080) != 0) {
3495 /* Two registers and shift. */
3496 op = (insn >> 8) & 0xf;
3497 if (insn & (1 << 7)) {
3498 /* 64-bit shift. */
3499 size = 3;
3500 } else {
3501 size = 2;
3502 while ((insn & (1 << (size + 19))) == 0)
3503 size--;
3504 }
3505 shift = (insn >> 16) & ((1 << (3 + size)) - 1);
3506 /* To avoid excessive dumplication of ops we implement shift
3507 by immediate using the variable shift operations. */
3508 if (op < 8) {
3509 /* Shift by immediate:
3510 VSHR, VSRA, VRSHR, VRSRA, VSRI, VSHL, VQSHL, VQSHLU. */
3511 /* Right shifts are encoded as N - shift, where N is the
3512 element size in bits. */
3513 if (op <= 4)
3514 shift = shift - (1 << (size + 3));
3515 else
3516 shift++;
3517 if (size == 3) {
3518 count = q + 1;
3519 } else {
3520 count = q ? 4: 2;
3521 }
3522 switch (size) {
3523 case 0:
3524 imm = (uint8_t) shift;
3525 imm |= imm << 8;
3526 imm |= imm << 16;
3527 break;
3528 case 1:
3529 imm = (uint16_t) shift;
3530 imm |= imm << 16;
3531 break;
3532 case 2:
3533 case 3:
3534 imm = shift;
3535 break;
3536 default:
3537 abort();
3538 }
3539
3540 for (pass = 0; pass < count; pass++) {
3541 if (size < 3) {
3542 /* Operands in T0 and T1. */
3543 gen_op_movl_T1_im(imm);
3544 NEON_GET_REG(T0, rm, pass);
2c0262af 3545 } else {
9ee6e8bb
PB
3546 /* Operands in {T0, T1} and env->vfp.scratch. */
3547 gen_op_movl_T0_im(imm);
3548 gen_neon_movl_scratch_T0(0);
3549 gen_op_movl_T0_im((int32_t)imm >> 31);
3550 gen_neon_movl_scratch_T0(1);
3551 NEON_GET_REG(T0, rm, pass * 2);
3552 NEON_GET_REG(T1, rm, pass * 2 + 1);
3553 }
3554
3555 if (gen_neon_shift_im[op][u][size] == NULL)
3556 return 1;
3557 gen_neon_shift_im[op][u][size]();
3558
3559 if (op == 1 || op == 3) {
3560 /* Accumulate. */
3561 if (size == 3) {
3562 gen_neon_movl_scratch_T0(0);
3563 gen_neon_movl_scratch_T1(1);
3564 NEON_GET_REG(T0, rd, pass * 2);
3565 NEON_GET_REG(T1, rd, pass * 2 + 1);
3566 gen_op_neon_addl_u64();
3567 } else {
3568 NEON_GET_REG(T1, rd, pass);
3569 gen_neon_add(size);
99c475ab 3570 }
9ee6e8bb
PB
3571 } else if (op == 4 || (op == 5 && u)) {
3572 /* Insert */
3573 if (size == 3) {
3574 cpu_abort(env, "VS[LR]I.64 not implemented");
3575 }
3576 switch (size) {
3577 case 0:
3578 if (op == 4)
3579 imm = 0xff >> -shift;
3580 else
3581 imm = (uint8_t)(0xff << shift);
3582 imm |= imm << 8;
3583 imm |= imm << 16;
3584 break;
3585 case 1:
3586 if (op == 4)
3587 imm = 0xffff >> -shift;
3588 else
3589 imm = (uint16_t)(0xffff << shift);
3590 imm |= imm << 16;
3591 break;
3592 case 2:
3593 if (op == 4)
3594 imm = 0xffffffffu >> -shift;
3595 else
3596 imm = 0xffffffffu << shift;
3597 break;
3598 default:
3599 abort();
3600 }
3601 NEON_GET_REG(T1, rd, pass);
3602 gen_op_movl_T2_im(imm);
3603 gen_op_neon_bsl();
2c0262af 3604 }
9ee6e8bb
PB
3605 if (size == 3) {
3606 NEON_SET_REG(T0, rd, pass * 2);
3607 NEON_SET_REG(T1, rd, pass * 2 + 1);
3608 } else {
3609 NEON_SET_REG(T0, rd, pass);
3610 }
3611 } /* for pass */
3612 } else if (op < 10) {
3613 /* Shift by immedaiate and narrow:
3614 VSHRN, VRSHRN, VQSHRN, VQRSHRN. */
3615 shift = shift - (1 << (size + 3));
3616 size++;
3617 if (size == 3) {
3618 count = q + 1;
2c0262af 3619 } else {
9ee6e8bb
PB
3620 count = q ? 4: 2;
3621 }
3622 switch (size) {
3623 case 1:
3624 imm = (uint16_t) shift;
3625 imm |= imm << 16;
3626 break;
3627 case 2:
3628 case 3:
3629 imm = shift;
3630 break;
3631 default:
3632 abort();
3633 }
3634
3635 /* Processing MSB first means we need to do less shuffling at
3636 the end. */
3637 for (pass = count - 1; pass >= 0; pass--) {
3638 /* Avoid clobbering the second operand before it has been
3639 written. */
3640 n = pass;
3641 if (rd == rm)
3642 n ^= (count - 1);
3643 else
3644 n = pass;
3645
3646 if (size < 3) {
3647 /* Operands in T0 and T1. */
3648 gen_op_movl_T1_im(imm);
3649 NEON_GET_REG(T0, rm, n);
2c0262af 3650 } else {
9ee6e8bb
PB
3651 /* Operands in {T0, T1} and env->vfp.scratch. */
3652 gen_op_movl_T0_im(imm);
3653 gen_neon_movl_scratch_T0(0);
3654 gen_op_movl_T0_im((int32_t)imm >> 31);
3655 gen_neon_movl_scratch_T0(1);
3656 NEON_GET_REG(T0, rm, n * 2);
3657 NEON_GET_REG(T0, rm, n * 2 + 1);
3658 }
3b46e624 3659
9ee6e8bb
PB
3660 gen_neon_shift_im_narrow[q][u][size - 1]();
3661
3662 if (size < 3 && (pass & 1) == 0) {
3663 gen_neon_movl_scratch_T0(0);
3664 } else {
3665 uint32_t offset;
3666
3667 if (size < 3)
3668 gen_neon_movl_T1_scratch(0);
3669
3670 if (op == 8 && !u) {
3671 gen_neon_narrow[size - 1]();
99c475ab 3672 } else {
9ee6e8bb
PB
3673 if (op == 8)
3674 gen_neon_narrow_sats[size - 2]();
3675 else
3676 gen_neon_narrow_satu[size - 1]();
99c475ab 3677 }
9ee6e8bb
PB
3678 if (size == 3)
3679 offset = neon_reg_offset(rd, n);
3680 else
3681 offset = neon_reg_offset(rd, n >> 1);
3682 gen_op_neon_setreg_T0(offset);
3683 }
3684 } /* for pass */
3685 } else if (op == 10) {
3686 /* VSHLL */
3687 if (q)
3688 return 1;
3689 for (pass = 0; pass < 2; pass++) {
3690 /* Avoid clobbering the input operand. */
3691 if (rd == rm)
3692 n = 1 - pass;
3693 else
3694 n = pass;
3695
3696 NEON_GET_REG(T0, rm, n);
3697 GEN_NEON_INTEGER_OP(widen);
3698 if (shift != 0) {
3699 /* The shift is less than the width of the source
3700 type, so in some cases we can just
3701 shift the whole register. */
3702 if (size == 1 || (size == 0 && u)) {
3703 gen_op_shll_T0_im(shift);
3704 gen_op_shll_T1_im(shift);
3705 } else {
3706 switch (size) {
3707 case 0: gen_op_neon_shll_u16(shift); break;
3708 case 2: gen_op_neon_shll_u64(shift); break;
3709 default: abort();
3710 }
3711 }
3712 }
3713 NEON_SET_REG(T0, rd, n * 2);
3714 NEON_SET_REG(T1, rd, n * 2 + 1);
3715 }
3716 } else if (op == 15 || op == 16) {
3717 /* VCVT fixed-point. */
3718 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3719 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
3720 if (op & 1) {
3721 if (u)
3722 gen_op_vfp_ultos(shift);
3723 else
3724 gen_op_vfp_sltos(shift);
3725 } else {
3726 if (u)
3727 gen_op_vfp_touls(shift);
3728 else
3729 gen_op_vfp_tosls(shift);
2c0262af 3730 }
9ee6e8bb 3731 gen_op_vfp_setreg_F0s(neon_reg_offset(rd, pass));
2c0262af
FB
3732 }
3733 } else {
9ee6e8bb
PB
3734 return 1;
3735 }
3736 } else { /* (insn & 0x00380080) == 0 */
3737 int invert;
3738
3739 op = (insn >> 8) & 0xf;
3740 /* One register and immediate. */
3741 imm = (u << 7) | ((insn >> 12) & 0x70) | (insn & 0xf);
3742 invert = (insn & (1 << 5)) != 0;
3743 switch (op) {
3744 case 0: case 1:
3745 /* no-op */
3746 break;
3747 case 2: case 3:
3748 imm <<= 8;
3749 break;
3750 case 4: case 5:
3751 imm <<= 16;
3752 break;
3753 case 6: case 7:
3754 imm <<= 24;
3755 break;
3756 case 8: case 9:
3757 imm |= imm << 16;
3758 break;
3759 case 10: case 11:
3760 imm = (imm << 8) | (imm << 24);
3761 break;
3762 case 12:
3763 imm = (imm < 8) | 0xff;
3764 break;
3765 case 13:
3766 imm = (imm << 16) | 0xffff;
3767 break;
3768 case 14:
3769 imm |= (imm << 8) | (imm << 16) | (imm << 24);
3770 if (invert)
3771 imm = ~imm;
3772 break;
3773 case 15:
3774 imm = ((imm & 0x80) << 24) | ((imm & 0x3f) << 19)
3775 | ((imm & 0x40) ? (0x1f << 25) : (1 << 30));
3776 break;
3777 }
3778 if (invert)
3779 imm = ~imm;
3780
3781 if (op != 14 || !invert)
3782 gen_op_movl_T1_im(imm);
3783
3784 for (pass = 0; pass < (q ? 4 : 2); pass++) {
3785 if (op & 1 && op < 12) {
3786 NEON_GET_REG(T0, rd, pass);
3787 if (invert) {
3788 /* The immediate value has already been inverted, so
3789 BIC becomes AND. */
3790 gen_op_andl_T0_T1();
3791 } else {
3792 gen_op_orl_T0_T1();
3793 }
3794 NEON_SET_REG(T0, rd, pass);
3795 } else {
3796 if (op == 14 && invert) {
3797 uint32_t tmp;
3798 tmp = 0;
3799 for (n = 0; n < 4; n++) {
3800 if (imm & (1 << (n + (pass & 1) * 4)))
3801 tmp |= 0xff << (n * 8);
3802 }
3803 gen_op_movl_T1_im(tmp);
3804 }
3805 /* VMOV, VMVN. */
3806 NEON_SET_REG(T1, rd, pass);
3807 }
3808 }
3809 }
3810 } else { /* (insn & 0x00800010 == 0x00800010) */
3811 if (size != 3) {
3812 op = (insn >> 8) & 0xf;
3813 if ((insn & (1 << 6)) == 0) {
3814 /* Three registers of different lengths. */
3815 int src1_wide;
3816 int src2_wide;
3817 int prewiden;
3818 /* prewiden, src1_wide, src2_wide */
3819 static const int neon_3reg_wide[16][3] = {
3820 {1, 0, 0}, /* VADDL */
3821 {1, 1, 0}, /* VADDW */
3822 {1, 0, 0}, /* VSUBL */
3823 {1, 1, 0}, /* VSUBW */
3824 {0, 1, 1}, /* VADDHN */
3825 {0, 0, 0}, /* VABAL */
3826 {0, 1, 1}, /* VSUBHN */
3827 {0, 0, 0}, /* VABDL */
3828 {0, 0, 0}, /* VMLAL */
3829 {0, 0, 0}, /* VQDMLAL */
3830 {0, 0, 0}, /* VMLSL */
3831 {0, 0, 0}, /* VQDMLSL */
3832 {0, 0, 0}, /* Integer VMULL */
3833 {0, 0, 0}, /* VQDMULL */
3834 {0, 0, 0} /* Polynomial VMULL */
3835 };
3836
3837 prewiden = neon_3reg_wide[op][0];
3838 src1_wide = neon_3reg_wide[op][1];
3839 src2_wide = neon_3reg_wide[op][2];
3840
3841 /* Avoid overlapping operands. Wide source operands are
3842 always aligned so will never overlap with wide
3843 destinations in problematic ways. */
3844 if (rd == rm) {
3845 NEON_GET_REG(T2, rm, 1);
3846 } else if (rd == rn) {
3847 NEON_GET_REG(T2, rn, 1);
3848 }
3849 for (pass = 0; pass < 2; pass++) {
3850 /* Load the second operand into env->vfp.scratch.
3851 Also widen narrow operands. */
3852 if (pass == 1 && rd == rm) {
3853 if (prewiden) {
3854 gen_op_movl_T0_T2();
3855 } else {
3856 gen_op_movl_T1_T2();
3857 }
3858 } else {
3859 if (src2_wide) {
3860 NEON_GET_REG(T0, rm, pass * 2);
3861 NEON_GET_REG(T1, rm, pass * 2 + 1);
3862 } else {
3863 if (prewiden) {
3864 NEON_GET_REG(T0, rm, pass);
3865 } else {
3866 NEON_GET_REG(T1, rm, pass);
3867 }
3868 }
3869 }
3870 if (prewiden && !src2_wide) {
3871 GEN_NEON_INTEGER_OP(widen);
3872 }
3873 if (prewiden || src2_wide) {
3874 gen_neon_movl_scratch_T0(0);
3875 gen_neon_movl_scratch_T1(1);
3876 }
3877
3878 /* Load the first operand. */
3879 if (pass == 1 && rd == rn) {
3880 gen_op_movl_T0_T2();
3881 } else {
3882 if (src1_wide) {
3883 NEON_GET_REG(T0, rn, pass * 2);
3884 NEON_GET_REG(T1, rn, pass * 2 + 1);
3885 } else {
3886 NEON_GET_REG(T0, rn, pass);
3887 }
3888 }
3889 if (prewiden && !src1_wide) {
3890 GEN_NEON_INTEGER_OP(widen);
3891 }
3892 switch (op) {
3893 case 0: case 1: case 4: /* VADDL, VADDW, VADDHN, VRADDHN */
3894 switch (size) {
3895 case 0: gen_op_neon_addl_u16(); break;
3896 case 1: gen_op_neon_addl_u32(); break;
3897 case 2: gen_op_neon_addl_u64(); break;
3898 default: abort();
3899 }
3900 break;
3901 case 2: case 3: case 6: /* VSUBL, VSUBW, VSUBHL, VRSUBHL */
3902 switch (size) {
3903 case 0: gen_op_neon_subl_u16(); break;
3904 case 1: gen_op_neon_subl_u32(); break;
3905 case 2: gen_op_neon_subl_u64(); break;
3906 default: abort();
3907 }
3908 break;
3909 case 5: case 7: /* VABAL, VABDL */
3910 switch ((size << 1) | u) {
3911 case 0: gen_op_neon_abdl_s16(); break;
3912 case 1: gen_op_neon_abdl_u16(); break;
3913 case 2: gen_op_neon_abdl_s32(); break;
3914 case 3: gen_op_neon_abdl_u32(); break;
3915 case 4: gen_op_neon_abdl_s64(); break;
3916 case 5: gen_op_neon_abdl_u64(); break;
3917 default: abort();
3918 }
3919 break;
3920 case 8: case 9: case 10: case 11: case 12: case 13:
3921 /* VMLAL, VQDMLAL, VMLSL, VQDMLSL, VMULL, VQDMULL */
3922 switch ((size << 1) | u) {
3923 case 0: gen_op_neon_mull_s8(); break;
3924 case 1: gen_op_neon_mull_u8(); break;
3925 case 2: gen_op_neon_mull_s16(); break;
3926 case 3: gen_op_neon_mull_u16(); break;
3927 case 4: gen_op_imull_T0_T1(); break;
3928 case 5: gen_op_mull_T0_T1(); break;
3929 default: abort();
3930 }
3931 break;
3932 case 14: /* Polynomial VMULL */
3933 cpu_abort(env, "Polynomial VMULL not implemented");
3934
3935 default: /* 15 is RESERVED. */
3936 return 1;
3937 }
3938 if (op == 5 || op == 13 || (op >= 8 && op <= 11)) {
3939 /* Accumulate. */
3940 if (op == 10 || op == 11) {
3941 switch (size) {
3942 case 0: gen_op_neon_negl_u16(); break;
3943 case 1: gen_op_neon_negl_u32(); break;
3944 case 2: gen_op_neon_negl_u64(); break;
3945 default: abort();
3946 }
3947 }
3948
3949 gen_neon_movl_scratch_T0(0);
3950 gen_neon_movl_scratch_T1(1);
3951
3952 if (op != 13) {
3953 NEON_GET_REG(T0, rd, pass * 2);
3954 NEON_GET_REG(T1, rd, pass * 2 + 1);
3955 }
3956
3957 switch (op) {
3958 case 5: case 8: case 10: /* VABAL, VMLAL, VMLSL */
3959 switch (size) {
3960 case 0: gen_op_neon_addl_u16(); break;
3961 case 1: gen_op_neon_addl_u32(); break;
3962 case 2: gen_op_neon_addl_u64(); break;
3963 default: abort();
3964 }
3965 break;
3966 case 9: case 11: /* VQDMLAL, VQDMLSL */
3967 switch (size) {
3968 case 1: gen_op_neon_addl_saturate_s32(); break;
3969 case 2: gen_op_neon_addl_saturate_s64(); break;
3970 default: abort();
3971 }
3972 /* Fall through. */
3973 case 13: /* VQDMULL */
3974 switch (size) {
3975 case 1: gen_op_neon_addl_saturate_s32(); break;
3976 case 2: gen_op_neon_addl_saturate_s64(); break;
3977 default: abort();
3978 }
3979 break;
3980 default:
3981 abort();
3982 }
3983 NEON_SET_REG(T0, rd, pass * 2);
3984 NEON_SET_REG(T1, rd, pass * 2 + 1);
3985 } else if (op == 4 || op == 6) {
3986 /* Narrowing operation. */
3987 if (u) {
3988 switch (size) {
3989 case 0: gen_op_neon_narrow_high_u8(); break;
3990 case 1: gen_op_neon_narrow_high_u16(); break;
3991 case 2: gen_op_movl_T0_T1(); break;
3992 default: abort();
3993 }
3994 } else {
3995 switch (size) {
3996 case 0: gen_op_neon_narrow_high_round_u8(); break;
3997 case 1: gen_op_neon_narrow_high_round_u16(); break;
3998 case 2: gen_op_neon_narrow_high_round_u32(); break;
3999 default: abort();
4000 }
4001 }
4002 NEON_SET_REG(T0, rd, pass);
4003 } else {
4004 /* Write back the result. */
4005 NEON_SET_REG(T0, rd, pass * 2);
4006 NEON_SET_REG(T1, rd, pass * 2 + 1);
4007 }
4008 }
4009 } else {
4010 /* Two registers and a scalar. */
4011 switch (op) {
4012 case 0: /* Integer VMLA scalar */
4013 case 1: /* Float VMLA scalar */
4014 case 4: /* Integer VMLS scalar */
4015 case 5: /* Floating point VMLS scalar */
4016 case 8: /* Integer VMUL scalar */
4017 case 9: /* Floating point VMUL scalar */
4018 case 12: /* VQDMULH scalar */
4019 case 13: /* VQRDMULH scalar */
4020 gen_neon_get_scalar(size, rm);
4021 gen_op_movl_T2_T0();
4022 for (pass = 0; pass < (u ? 4 : 2); pass++) {
4023 if (pass != 0)
4024 gen_op_movl_T0_T2();
4025 NEON_GET_REG(T1, rn, pass);
4026 if (op == 12) {
4027 if (size == 1) {
4028 gen_op_neon_qdmulh_s16();
4029 } else {
4030 gen_op_neon_qdmulh_s32();
4031 }
4032 } else if (op == 13) {
4033 if (size == 1) {
4034 gen_op_neon_qrdmulh_s16();
4035 } else {
4036 gen_op_neon_qrdmulh_s32();
4037 }
4038 } else if (op & 1) {
4039 gen_op_neon_mul_f32();
4040 } else {
4041 switch (size) {
4042 case 0: gen_op_neon_mul_u8(); break;
4043 case 1: gen_op_neon_mul_u16(); break;
4044 case 2: gen_op_mul_T0_T1(); break;
4045 default: return 1;
4046 }
4047 }
4048 if (op < 8) {
4049 /* Accumulate. */
4050 NEON_GET_REG(T1, rd, pass);
4051 switch (op) {
4052 case 0:
4053 gen_neon_add(size);
4054 break;
4055 case 1:
4056 gen_op_neon_add_f32();
4057 break;
4058 case 4:
4059 switch (size) {
4060 case 0: gen_op_neon_rsb_u8(); break;
4061 case 1: gen_op_neon_rsb_u16(); break;
4062 case 2: gen_op_rsbl_T0_T1(); break;
4063 default: return 1;
4064 }
4065 break;
4066 case 5:
4067 gen_op_neon_rsb_f32();
4068 break;
4069 default:
4070 abort();
4071 }
4072 }
4073 NEON_SET_REG(T0, rd, pass);
4074 }
4075 break;
4076 case 2: /* VMLAL sclar */
4077 case 3: /* VQDMLAL scalar */
4078 case 6: /* VMLSL scalar */
4079 case 7: /* VQDMLSL scalar */
4080 case 10: /* VMULL scalar */
4081 case 11: /* VQDMULL scalar */
4082 if (rd == rn) {
4083 /* Save overlapping operands before they are
4084 clobbered. */
4085 NEON_GET_REG(T0, rn, 1);
4086 gen_neon_movl_scratch_T0(2);
4087 }
4088 gen_neon_get_scalar(size, rm);
4089 gen_op_movl_T2_T0();
4090 for (pass = 0; pass < 2; pass++) {
4091 if (pass != 0) {
4092 gen_op_movl_T0_T2();
4093 }
4094 if (pass != 0 && rd == rn) {
4095 gen_neon_movl_T1_scratch(2);
4096 } else {
4097 NEON_GET_REG(T1, rn, pass);
4098 }
4099 switch ((size << 1) | u) {
4100 case 0: gen_op_neon_mull_s8(); break;
4101 case 1: gen_op_neon_mull_u8(); break;
4102 case 2: gen_op_neon_mull_s16(); break;
4103 case 3: gen_op_neon_mull_u16(); break;
4104 case 4: gen_op_imull_T0_T1(); break;
4105 case 5: gen_op_mull_T0_T1(); break;
4106 default: abort();
4107 }
4108 if (op == 6 || op == 7) {
4109 switch (size) {
4110 case 0: gen_op_neon_negl_u16(); break;
4111 case 1: gen_op_neon_negl_u32(); break;
4112 case 2: gen_op_neon_negl_u64(); break;
4113 default: abort();
4114 }
4115 }
4116 gen_neon_movl_scratch_T0(0);
4117 gen_neon_movl_scratch_T1(1);
4118 NEON_GET_REG(T0, rd, pass * 2);
4119 NEON_GET_REG(T1, rd, pass * 2 + 1);
4120 switch (op) {
4121 case 2: case 6:
4122 switch (size) {
4123 case 0: gen_op_neon_addl_u16(); break;
4124 case 1: gen_op_neon_addl_u32(); break;
4125 case 2: gen_op_neon_addl_u64(); break;
4126 default: abort();
4127 }
4128 break;
4129 case 3: case 7:
4130 switch (size) {
4131 case 1:
4132 gen_op_neon_addl_saturate_s32();
4133 gen_op_neon_addl_saturate_s32();
4134 break;
4135 case 2:
4136 gen_op_neon_addl_saturate_s64();
4137 gen_op_neon_addl_saturate_s64();
4138 break;
4139 default: abort();
4140 }
4141 break;
4142 case 10:
4143 /* no-op */
4144 break;
4145 case 11:
4146 switch (size) {
4147 case 1: gen_op_neon_addl_saturate_s32(); break;
4148 case 2: gen_op_neon_addl_saturate_s64(); break;
4149 default: abort();
4150 }
4151 break;
4152 default:
4153 abort();
4154 }
4155 NEON_SET_REG(T0, rd, pass * 2);
4156 NEON_SET_REG(T1, rd, pass * 2 + 1);
4157 }
4158 break;
4159 default: /* 14 and 15 are RESERVED */
4160 return 1;
4161 }
4162 }
4163 } else { /* size == 3 */
4164 if (!u) {
4165 /* Extract. */
4166 int reg;
4167 imm = (insn >> 8) & 0xf;
4168 reg = rn;
4169 count = q ? 4 : 2;
4170 n = imm >> 2;
4171 NEON_GET_REG(T0, reg, n);
4172 for (pass = 0; pass < count; pass++) {
4173 n++;
4174 if (n > count) {
4175 reg = rm;
4176 n -= count;
4177 }
4178 if (imm & 3) {
4179 NEON_GET_REG(T1, reg, n);
4180 gen_op_neon_extract((insn << 3) & 0x1f);
4181 }
4182 /* ??? This is broken if rd and rm overlap */
4183 NEON_SET_REG(T0, rd, pass);
4184 if (imm & 3) {
4185 gen_op_movl_T0_T1();
4186 } else {
4187 NEON_GET_REG(T0, reg, n);
4188 }
4189 }
4190 } else if ((insn & (1 << 11)) == 0) {
4191 /* Two register misc. */
4192 op = ((insn >> 12) & 0x30) | ((insn >> 7) & 0xf);
4193 size = (insn >> 18) & 3;
4194 switch (op) {
4195 case 0: /* VREV64 */
4196 if (size == 3)
4197 return 1;
4198 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4199 NEON_GET_REG(T0, rm, pass * 2);
4200 NEON_GET_REG(T1, rm, pass * 2 + 1);
4201 switch (size) {
4202 case 0: gen_op_rev_T0(); break;
4203 case 1: gen_op_revh_T0(); break;
4204 case 2: /* no-op */ break;
4205 default: abort();
4206 }
4207 NEON_SET_REG(T0, rd, pass * 2 + 1);
4208 if (size == 2) {
4209 NEON_SET_REG(T1, rd, pass * 2);
4210 } else {
4211 gen_op_movl_T0_T1();
4212 switch (size) {
4213 case 0: gen_op_rev_T0(); break;
4214 case 1: gen_op_revh_T0(); break;
4215 default: abort();
4216 }
4217 NEON_SET_REG(T0, rd, pass * 2);
4218 }
4219 }
4220 break;
4221 case 4: case 5: /* VPADDL */
4222 case 12: case 13: /* VPADAL */
4223 if (size < 2)
4224 goto elementwise;
4225 if (size == 3)
4226 return 1;
4227 for (pass = 0; pass < (q ? 2 : 1); pass++) {
4228 NEON_GET_REG(T0, rm, pass * 2);
4229 NEON_GET_REG(T1, rm, pass * 2 + 1);
4230 if (op & 1)
4231 gen_op_neon_paddl_u32();
4232 else
4233 gen_op_neon_paddl_s32();
4234 if (op >= 12) {
4235 /* Accumulate. */
4236 gen_neon_movl_scratch_T0(0);
4237 gen_neon_movl_scratch_T1(1);
4238
4239 NEON_GET_REG(T0, rd, pass * 2);
4240 NEON_GET_REG(T1, rd, pass * 2 + 1);
4241 gen_op_neon_addl_u64();
4242 }
4243 NEON_SET_REG(T0, rd, pass * 2);
4244 NEON_SET_REG(T1, rd, pass * 2 + 1);
4245 }
4246 break;
4247 case 33: /* VTRN */
4248 if (size == 2) {
4249 for (n = 0; n < (q ? 4 : 2); n += 2) {
4250 NEON_GET_REG(T0, rm, n);
4251 NEON_GET_REG(T1, rd, n + 1);
4252 NEON_SET_REG(T1, rm, n);
4253 NEON_SET_REG(T0, rd, n + 1);
4254 }
4255 } else {
4256 goto elementwise;
4257 }
4258 break;
4259 case 34: /* VUZP */
4260 /* Reg Before After
4261 Rd A3 A2 A1 A0 B2 B0 A2 A0
4262 Rm B3 B2 B1 B0 B3 B1 A3 A1
4263 */
4264 if (size == 3)
4265 return 1;
4266 gen_neon_unzip(rd, q, 0, size);
4267 gen_neon_unzip(rm, q, 4, size);
4268 if (q) {
4269 static int unzip_order_q[8] =
4270 {0, 2, 4, 6, 1, 3, 5, 7};
4271 for (n = 0; n < 8; n++) {
4272 int reg = (n < 4) ? rd : rm;
4273 gen_neon_movl_T0_scratch(unzip_order_q[n]);
4274 NEON_SET_REG(T0, reg, n % 4);
4275 }
4276 } else {
4277 static int unzip_order[4] =
4278 {0, 4, 1, 5};
4279 for (n = 0; n < 4; n++) {
4280 int reg = (n < 2) ? rd : rm;
4281 gen_neon_movl_T0_scratch(unzip_order[n]);
4282 NEON_SET_REG(T0, reg, n % 2);
4283 }
4284 }
4285 break;
4286 case 35: /* VZIP */
4287 /* Reg Before After
4288 Rd A3 A2 A1 A0 B1 A1 B0 A0
4289 Rm B3 B2 B1 B0 B3 A3 B2 A2
4290 */
4291 if (size == 3)
4292 return 1;
4293 count = (q ? 4 : 2);
4294 for (n = 0; n < count; n++) {
4295 NEON_GET_REG(T0, rd, n);
4296 NEON_GET_REG(T1, rd, n);
4297 switch (size) {
4298 case 0: gen_op_neon_zip_u8(); break;
4299 case 1: gen_op_neon_zip_u16(); break;
4300 case 2: /* no-op */; break;
4301 default: abort();
4302 }
4303 gen_neon_movl_scratch_T0(n * 2);
4304 gen_neon_movl_scratch_T1(n * 2 + 1);
4305 }
4306 for (n = 0; n < count * 2; n++) {
4307 int reg = (n < count) ? rd : rm;
4308 gen_neon_movl_T0_scratch(n);
4309 NEON_SET_REG(T0, reg, n % count);
4310 }
4311 break;
4312 case 36: case 37: /* VMOVN, VQMOVUN, VQMOVN */
4313 for (pass = 0; pass < 2; pass++) {
4314 if (rd == rm + 1) {
4315 n = 1 - pass;
4316 } else {
4317 n = pass;
4318 }
4319 NEON_GET_REG(T0, rm, n * 2);
4320 NEON_GET_REG(T1, rm, n * 2 + 1);
4321 if (op == 36 && q == 0) {
4322 switch (size) {
4323 case 0: gen_op_neon_narrow_u8(); break;
4324 case 1: gen_op_neon_narrow_u16(); break;
4325 case 2: /* no-op */ break;
4326 default: return 1;
4327 }
4328 } else if (q) {
4329 switch (size) {
4330 case 0: gen_op_neon_narrow_sat_u8(); break;
4331 case 1: gen_op_neon_narrow_sat_u16(); break;
4332 case 2: gen_op_neon_narrow_sat_u32(); break;
4333 default: return 1;
4334 }
4335 } else {
4336 switch (size) {
4337 case 0: gen_op_neon_narrow_sat_s8(); break;
4338 case 1: gen_op_neon_narrow_sat_s16(); break;
4339 case 2: gen_op_neon_narrow_sat_s32(); break;
4340 default: return 1;
4341 }
4342 }
4343 NEON_SET_REG(T0, rd, n);
4344 }
4345 break;
4346 case 38: /* VSHLL */
4347 if (q)
4348 return 1;
4349 if (rm == rd) {
4350 NEON_GET_REG(T2, rm, 1);
4351 }
4352 for (pass = 0; pass < 2; pass++) {
4353 if (pass == 1 && rm == rd) {
4354 gen_op_movl_T0_T2();
4355 } else {
4356 NEON_GET_REG(T0, rm, pass);
4357 }
4358 switch (size) {
4359 case 0: gen_op_neon_widen_high_u8(); break;
4360 case 1: gen_op_neon_widen_high_u16(); break;
4361 case 2:
4362 gen_op_movl_T1_T0();
4363 gen_op_movl_T0_im(0);
4364 break;
4365 default: return 1;
4366 }
4367 NEON_SET_REG(T0, rd, pass * 2);
4368 NEON_SET_REG(T1, rd, pass * 2 + 1);
4369 }
4370 break;
4371 default:
4372 elementwise:
4373 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4374 if (op == 30 || op == 31 || op >= 58) {
4375 gen_op_vfp_getreg_F0s(neon_reg_offset(rm, pass));
4376 } else {
4377 NEON_GET_REG(T0, rm, pass);
4378 }
4379 switch (op) {
4380 case 1: /* VREV32 */
4381 switch (size) {
4382 case 0: gen_op_rev_T0(); break;
4383 case 1: gen_op_revh_T0(); break;
4384 default: return 1;
4385 }
4386 break;
4387 case 2: /* VREV16 */
4388 if (size != 0)
4389 return 1;
4390 gen_op_rev16_T0();
4391 break;
4392 case 4: case 5: /* VPADDL */
4393 case 12: case 13: /* VPADAL */
4394 switch ((size << 1) | (op & 1)) {
4395 case 0: gen_op_neon_paddl_s8(); break;
4396 case 1: gen_op_neon_paddl_u8(); break;
4397 case 2: gen_op_neon_paddl_s16(); break;
4398 case 3: gen_op_neon_paddl_u16(); break;
4399 default: abort();
4400 }
4401 if (op >= 12) {
4402 /* Accumulate */
4403 NEON_GET_REG(T1, rd, pass);
4404 switch (size) {
4405 case 0: gen_op_neon_add_u16(); break;
4406 case 1: gen_op_addl_T0_T1(); break;
4407 default: abort();
4408 }
4409 }
4410 break;
4411 case 8: /* CLS */
4412 switch (size) {
4413 case 0: gen_op_neon_cls_s8(); break;
4414 case 1: gen_op_neon_cls_s16(); break;
4415 case 2: gen_op_neon_cls_s32(); break;
4416 default: return 1;
4417 }
4418 break;
4419 case 9: /* CLZ */
4420 switch (size) {
4421 case 0: gen_op_neon_clz_u8(); break;
4422 case 1: gen_op_neon_clz_u16(); break;
4423 case 2: gen_op_clz_T0(); break;
4424 default: return 1;
4425 }
4426 break;
4427 case 10: /* CNT */
4428 if (size != 0)
4429 return 1;
4430 gen_op_neon_cnt_u8();
4431 break;
4432 case 11: /* VNOT */
4433 if (size != 0)
4434 return 1;
4435 gen_op_notl_T0();
4436 break;
4437 case 14: /* VQABS */
4438 switch (size) {
4439 case 0: gen_op_neon_qabs_s8(); break;
4440 case 1: gen_op_neon_qabs_s16(); break;
4441 case 2: gen_op_neon_qabs_s32(); break;
4442 default: return 1;
4443 }
4444 break;
4445 case 15: /* VQNEG */
4446 switch (size) {
4447 case 0: gen_op_neon_qneg_s8(); break;
4448 case 1: gen_op_neon_qneg_s16(); break;
4449 case 2: gen_op_neon_qneg_s32(); break;
4450 default: return 1;
4451 }
4452 break;
4453 case 16: case 19: /* VCGT #0, VCLE #0 */
4454 gen_op_movl_T1_im(0);
4455 switch(size) {
4456 case 0: gen_op_neon_cgt_s8(); break;
4457 case 1: gen_op_neon_cgt_s16(); break;
4458 case 2: gen_op_neon_cgt_s32(); break;
4459 default: return 1;
4460 }
4461 if (op == 19)
4462 gen_op_notl_T0();
4463 break;
4464 case 17: case 20: /* VCGE #0, VCLT #0 */
4465 gen_op_movl_T1_im(0);
4466 switch(size) {
4467 case 0: gen_op_neon_cge_s8(); break;
4468 case 1: gen_op_neon_cge_s16(); break;
4469 case 2: gen_op_neon_cge_s32(); break;
4470 default: return 1;
4471 }
4472 if (op == 20)
4473 gen_op_notl_T0();
4474 break;
4475 case 18: /* VCEQ #0 */
4476 gen_op_movl_T1_im(0);
4477 switch(size) {
4478 case 0: gen_op_neon_ceq_u8(); break;
4479 case 1: gen_op_neon_ceq_u16(); break;
4480 case 2: gen_op_neon_ceq_u32(); break;
4481 default: return 1;
4482 }
4483 break;
4484 case 22: /* VABS */
4485 switch(size) {
4486 case 0: gen_op_neon_abs_s8(); break;
4487 case 1: gen_op_neon_abs_s16(); break;
4488 case 2: gen_op_neon_abs_s32(); break;
4489 default: return 1;
4490 }
4491 break;
4492 case 23: /* VNEG */
4493 gen_op_movl_T1_im(0);
4494 switch(size) {
4495 case 0: gen_op_neon_rsb_u8(); break;
4496 case 1: gen_op_neon_rsb_u16(); break;
4497 case 2: gen_op_rsbl_T0_T1(); break;
4498 default: return 1;
4499 }
4500 break;
4501 case 24: case 27: /* Float VCGT #0, Float VCLE #0 */
4502 gen_op_movl_T1_im(0);
4503 gen_op_neon_cgt_f32();
4504 if (op == 27)
4505 gen_op_notl_T0();
4506 break;
4507 case 25: case 28: /* Float VCGE #0, Float VCLT #0 */
4508 gen_op_movl_T1_im(0);
4509 gen_op_neon_cge_f32();
4510 if (op == 28)
4511 gen_op_notl_T0();
4512 break;
4513 case 26: /* Float VCEQ #0 */
4514 gen_op_movl_T1_im(0);
4515 gen_op_neon_ceq_f32();
4516 break;
4517 case 30: /* Float VABS */
4518 gen_op_vfp_abss();
4519 break;
4520 case 31: /* Float VNEG */
4521 gen_op_vfp_negs();
4522 break;
4523 case 32: /* VSWP */
4524 NEON_GET_REG(T1, rd, pass);
4525 NEON_SET_REG(T1, rm, pass);
4526 break;
4527 case 33: /* VTRN */
4528 NEON_GET_REG(T1, rd, pass);
4529 switch (size) {
4530 case 0: gen_op_neon_trn_u8(); break;
4531 case 1: gen_op_neon_trn_u16(); break;
4532 case 2: abort();
4533 default: return 1;
4534 }
4535 NEON_SET_REG(T1, rm, pass);
4536 break;
4537 case 56: /* Integer VRECPE */
4538 gen_op_neon_recpe_u32();
4539 break;
4540 case 57: /* Integer VRSQRTE */
4541 gen_op_neon_rsqrte_u32();
4542 break;
4543 case 58: /* Float VRECPE */
4544 gen_op_neon_recpe_f32();
4545 break;
4546 case 59: /* Float VRSQRTE */
4547 gen_op_neon_rsqrte_f32();
4548 break;
4549 case 60: /* VCVT.F32.S32 */
4550 gen_op_vfp_tosizs();
4551 break;
4552 case 61: /* VCVT.F32.U32 */
4553 gen_op_vfp_touizs();
4554 break;
4555 case 62: /* VCVT.S32.F32 */
4556 gen_op_vfp_sitos();
4557 break;
4558 case 63: /* VCVT.U32.F32 */
4559 gen_op_vfp_uitos();
4560 break;
4561 default:
4562 /* Reserved: 21, 29, 39-56 */
4563 return 1;
4564 }
4565 if (op == 30 || op == 31 || op >= 58) {
4566 gen_op_vfp_setreg_F0s(neon_reg_offset(rm, pass));
4567 } else {
4568 NEON_SET_REG(T0, rd, pass);
4569 }
4570 }
4571 break;
4572 }
4573 } else if ((insn & (1 << 10)) == 0) {
4574 /* VTBL, VTBX. */
4575 n = (insn >> 5) & 0x18;
4576 NEON_GET_REG(T1, rm, 0);
4577 if (insn & (1 << 6)) {
4578 NEON_GET_REG(T0, rd, 0);
4579 } else {
4580 gen_op_movl_T0_im(0);
4581 }
4582 gen_op_neon_tbl(rn, n);
4583 gen_op_movl_T2_T0();
4584 NEON_GET_REG(T1, rm, 1);
4585 if (insn & (1 << 6)) {
4586 NEON_GET_REG(T0, rd, 0);
4587 } else {
4588 gen_op_movl_T0_im(0);
4589 }
4590 gen_op_neon_tbl(rn, n);
4591 NEON_SET_REG(T2, rd, 0);
4592 NEON_SET_REG(T0, rd, 1);
4593 } else if ((insn & 0x380) == 0) {
4594 /* VDUP */
4595 if (insn & (1 << 19)) {
4596 NEON_SET_REG(T0, rm, 1);
4597 } else {
4598 NEON_SET_REG(T0, rm, 0);
4599 }
4600 if (insn & (1 << 16)) {
4601 gen_op_neon_dup_u8(((insn >> 17) & 3) * 8);
4602 } else if (insn & (1 << 17)) {
4603 if ((insn >> 18) & 1)
4604 gen_op_neon_dup_high16();
4605 else
4606 gen_op_neon_dup_low16();
4607 }
4608 for (pass = 0; pass < (q ? 4 : 2); pass++) {
4609 NEON_SET_REG(T0, rd, pass);
4610 }
4611 } else {
4612 return 1;
4613 }
4614 }
4615 }
4616 return 0;
4617}
4618
4619static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
4620{
4621 int cpnum;
4622
4623 cpnum = (insn >> 8) & 0xf;
4624 if (arm_feature(env, ARM_FEATURE_XSCALE)
4625 && ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
4626 return 1;
4627
4628 switch (cpnum) {
4629 case 0:
4630 case 1:
4631 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4632 return disas_iwmmxt_insn(env, s, insn);
4633 } else if (arm_feature(env, ARM_FEATURE_XSCALE)) {
4634 return disas_dsp_insn(env, s, insn);
4635 }
4636 return 1;
4637 case 10:
4638 case 11:
4639 return disas_vfp_insn (env, s, insn);
4640 case 15:
4641 return disas_cp15_insn (env, s, insn);
4642 default:
4643 /* Unknown coprocessor. See if the board has hooked it. */
4644 return disas_cp_insn (env, s, insn);
4645 }
4646}
4647
4648static void disas_arm_insn(CPUState * env, DisasContext *s)
4649{
4650 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
4651
4652 insn = ldl_code(s->pc);
4653 s->pc += 4;
4654
4655 /* M variants do not implement ARM mode. */
4656 if (IS_M(env))
4657 goto illegal_op;
4658 cond = insn >> 28;
4659 if (cond == 0xf){
4660 /* Unconditional instructions. */
4661 if (((insn >> 25) & 7) == 1) {
4662 /* NEON Data processing. */
4663 if (!arm_feature(env, ARM_FEATURE_NEON))
4664 goto illegal_op;
4665
4666 if (disas_neon_data_insn(env, s, insn))
4667 goto illegal_op;
4668 return;
4669 }
4670 if ((insn & 0x0f100000) == 0x04000000) {
4671 /* NEON load/store. */
4672 if (!arm_feature(env, ARM_FEATURE_NEON))
4673 goto illegal_op;
4674
4675 if (disas_neon_ls_insn(env, s, insn))
4676 goto illegal_op;
4677 return;
4678 }
4679 if ((insn & 0x0d70f000) == 0x0550f000)
4680 return; /* PLD */
4681 else if ((insn & 0x0ffffdff) == 0x01010000) {
4682 ARCH(6);
4683 /* setend */
4684 if (insn & (1 << 9)) {
4685 /* BE8 mode not implemented. */
4686 goto illegal_op;
4687 }
4688 return;
4689 } else if ((insn & 0x0fffff00) == 0x057ff000) {
4690 switch ((insn >> 4) & 0xf) {
4691 case 1: /* clrex */
4692 ARCH(6K);
4693 gen_op_clrex();
4694 return;
4695 case 4: /* dsb */
4696 case 5: /* dmb */
4697 case 6: /* isb */
4698 ARCH(7);
4699 /* We don't emulate caches so these are a no-op. */
4700 return;
4701 default:
4702 goto illegal_op;
4703 }
4704 } else if ((insn & 0x0e5fffe0) == 0x084d0500) {
4705 /* srs */
4706 uint32_t offset;
4707 if (IS_USER(s))
4708 goto illegal_op;
4709 ARCH(6);
4710 op1 = (insn & 0x1f);
4711 if (op1 == (env->uncached_cpsr & CPSR_M)) {
4712 gen_movl_T1_reg(s, 13);
4713 } else {
4714 gen_op_movl_T1_r13_banked(op1);
4715 }
4716 i = (insn >> 23) & 3;
4717 switch (i) {
4718 case 0: offset = -4; break; /* DA */
4719 case 1: offset = -8; break; /* DB */
4720 case 2: offset = 0; break; /* IA */
4721 case 3: offset = 4; break; /* IB */
4722 default: abort();
4723 }
4724 if (offset)
4725 gen_op_addl_T1_im(offset);
4726 gen_movl_T0_reg(s, 14);
4727 gen_ldst(stl, s);
4728 gen_op_movl_T0_cpsr();
4729 gen_op_addl_T1_im(4);
4730 gen_ldst(stl, s);
4731 if (insn & (1 << 21)) {
4732 /* Base writeback. */
4733 switch (i) {
4734 case 0: offset = -8; break;
4735 case 1: offset = -4; break;
4736 case 2: offset = 4; break;
4737 case 3: offset = 0; break;
4738 default: abort();
4739 }
4740 if (offset)
4741 gen_op_addl_T1_im(offset);
4742 if (op1 == (env->uncached_cpsr & CPSR_M)) {
4743 gen_movl_reg_T1(s, 13);
4744 } else {
4745 gen_op_movl_r13_T1_banked(op1);
4746 }
4747 }
4748 } else if ((insn & 0x0e5fffe0) == 0x081d0a00) {
4749 /* rfe */
4750 uint32_t offset;
4751 if (IS_USER(s))
4752 goto illegal_op;
4753 ARCH(6);
4754 rn = (insn >> 16) & 0xf;
4755 gen_movl_T1_reg(s, rn);
4756 i = (insn >> 23) & 3;
4757 switch (i) {
4758 case 0: offset = 0; break; /* DA */
4759 case 1: offset = -4; break; /* DB */
4760 case 2: offset = 4; break; /* IA */
4761 case 3: offset = 8; break; /* IB */
4762 default: abort();
4763 }
4764 if (offset)
4765 gen_op_addl_T1_im(offset);
4766 /* Load CPSR into T2 and PC into T0. */
4767 gen_ldst(ldl, s);
4768 gen_op_movl_T2_T0();
4769 gen_op_addl_T1_im(-4);
4770 gen_ldst(ldl, s);
4771 if (insn & (1 << 21)) {
4772 /* Base writeback. */
4773 switch (i) {
4774 case 0: offset = -4; break;
4775 case 1: offset = 0; break;
4776 case 2: offset = 8; break;
4777 case 3: offset = 4; break;
4778 default: abort();
4779 }
4780 if (offset)
4781 gen_op_addl_T1_im(offset);
4782 gen_movl_reg_T1(s, rn);
4783 }
4784 gen_rfe(s);
4785 } else if ((insn & 0x0e000000) == 0x0a000000) {
4786 /* branch link and change to thumb (blx <offset>) */
4787 int32_t offset;
4788
4789 val = (uint32_t)s->pc;
4790 gen_op_movl_T0_im(val);
4791 gen_movl_reg_T0(s, 14);
4792 /* Sign-extend the 24-bit offset */
4793 offset = (((int32_t)insn) << 8) >> 8;
4794 /* offset * 4 + bit24 * 2 + (thumb bit) */
4795 val += (offset << 2) | ((insn >> 23) & 2) | 1;
4796 /* pipeline offset */
4797 val += 4;
4798 gen_op_movl_T0_im(val);
4799 gen_bx(s);
4800 return;
4801 } else if ((insn & 0x0e000f00) == 0x0c000100) {
4802 if (arm_feature(env, ARM_FEATURE_IWMMXT)) {
4803 /* iWMMXt register transfer. */
4804 if (env->cp15.c15_cpar & (1 << 1))
4805 if (!disas_iwmmxt_insn(env, s, insn))
4806 return;
4807 }
4808 } else if ((insn & 0x0fe00000) == 0x0c400000) {
4809 /* Coprocessor double register transfer. */
4810 } else if ((insn & 0x0f000010) == 0x0e000010) {
4811 /* Additional coprocessor register transfer. */
4812 } else if ((insn & 0x0ff10010) == 0x01000000) {
4813 uint32_t mask;
4814 uint32_t val;
4815 /* cps (privileged) */
4816 if (IS_USER(s))
4817 return;
4818 mask = val = 0;
4819 if (insn & (1 << 19)) {
4820 if (insn & (1 << 8))
4821 mask |= CPSR_A;
4822 if (insn & (1 << 7))
4823 mask |= CPSR_I;
4824 if (insn & (1 << 6))
4825 mask |= CPSR_F;
4826 if (insn & (1 << 18))
4827 val |= mask;
4828 }
4829 if (insn & (1 << 14)) {
4830 mask |= CPSR_M;
4831 val |= (insn & 0x1f);
4832 }
4833 if (mask) {
4834 gen_op_movl_T0_im(val);
4835 gen_set_psr_T0(s, mask, 0);
4836 }
4837 return;
4838 }
4839 goto illegal_op;
4840 }
4841 if (cond != 0xe) {
4842 /* if not always execute, we generate a conditional jump to
4843 next instruction */
4844 s->condlabel = gen_new_label();
4845 gen_test_cc[cond ^ 1](s->condlabel);
4846 s->condjmp = 1;
4847 }
4848 if ((insn & 0x0f900000) == 0x03000000) {
4849 if ((insn & (1 << 21)) == 0) {
4850 ARCH(6T2);
4851 rd = (insn >> 12) & 0xf;
4852 val = ((insn >> 4) & 0xf000) | (insn & 0xfff);
4853 if ((insn & (1 << 22)) == 0) {
4854 /* MOVW */
4855 gen_op_movl_T0_im(val);
4856 } else {
4857 /* MOVT */
4858 gen_movl_T0_reg(s, rd);
4859 gen_op_movl_T1_im(0xffff);
4860 gen_op_andl_T0_T1();
4861 gen_op_movl_T1_im(val << 16);
4862 gen_op_orl_T0_T1();
4863 }
4864 gen_movl_reg_T0(s, rd);
4865 } else {
4866 if (((insn >> 12) & 0xf) != 0xf)
4867 goto illegal_op;
4868 if (((insn >> 16) & 0xf) == 0) {
4869 gen_nop_hint(s, insn & 0xff);
4870 } else {
4871 /* CPSR = immediate */
4872 val = insn & 0xff;
4873 shift = ((insn >> 8) & 0xf) * 2;
4874 if (shift)
4875 val = (val >> shift) | (val << (32 - shift));
4876 gen_op_movl_T0_im(val);
4877 i = ((insn & (1 << 22)) != 0);
4878 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4879 goto illegal_op;
4880 }
4881 }
4882 } else if ((insn & 0x0f900000) == 0x01000000
4883 && (insn & 0x00000090) != 0x00000090) {
4884 /* miscellaneous instructions */
4885 op1 = (insn >> 21) & 3;
4886 sh = (insn >> 4) & 0xf;
4887 rm = insn & 0xf;
4888 switch (sh) {
4889 case 0x0: /* move program status register */
4890 if (op1 & 1) {
4891 /* PSR = reg */
4892 gen_movl_T0_reg(s, rm);
4893 i = ((op1 & 2) != 0);
4894 if (gen_set_psr_T0(s, msr_mask(env, s, (insn >> 16) & 0xf, i), i))
4895 goto illegal_op;
4896 } else {
4897 /* reg = PSR */
4898 rd = (insn >> 12) & 0xf;
4899 if (op1 & 2) {
4900 if (IS_USER(s))
4901 goto illegal_op;
4902 gen_op_movl_T0_spsr();
4903 } else {
4904 gen_op_movl_T0_cpsr();
4905 }
4906 gen_movl_reg_T0(s, rd);
4907 }
4908 break;
4909 case 0x1:
4910 if (op1 == 1) {
4911 /* branch/exchange thumb (bx). */
4912 gen_movl_T0_reg(s, rm);
4913 gen_bx(s);
4914 } else if (op1 == 3) {
4915 /* clz */
4916 rd = (insn >> 12) & 0xf;
4917 gen_movl_T0_reg(s, rm);
4918 gen_op_clz_T0();
4919 gen_movl_reg_T0(s, rd);
4920 } else {
4921 goto illegal_op;
4922 }
4923 break;
4924 case 0x2:
4925 if (op1 == 1) {
4926 ARCH(5J); /* bxj */
4927 /* Trivial implementation equivalent to bx. */
4928 gen_movl_T0_reg(s, rm);
4929 gen_bx(s);
4930 } else {
4931 goto illegal_op;
4932 }
4933 break;
4934 case 0x3:
4935 if (op1 != 1)
4936 goto illegal_op;
4937
4938 /* branch link/exchange thumb (blx) */
4939 val = (uint32_t)s->pc;
4940 gen_op_movl_T1_im(val);
4941 gen_movl_T0_reg(s, rm);
4942 gen_movl_reg_T1(s, 14);
4943 gen_bx(s);
4944 break;
4945 case 0x5: /* saturating add/subtract */
4946 rd = (insn >> 12) & 0xf;
4947 rn = (insn >> 16) & 0xf;
4948 gen_movl_T0_reg(s, rm);
4949 gen_movl_T1_reg(s, rn);
4950 if (op1 & 2)
4951 gen_op_double_T1_saturate();
4952 if (op1 & 1)
4953 gen_op_subl_T0_T1_saturate();
4954 else
4955 gen_op_addl_T0_T1_saturate();
4956 gen_movl_reg_T0(s, rd);
4957 break;
4958 case 7: /* bkpt */
4959 gen_set_condexec(s);
4960 gen_op_movl_T0_im((long)s->pc - 4);
4961 gen_op_movl_reg_TN[0][15]();
4962 gen_op_bkpt();
4963 s->is_jmp = DISAS_JUMP;
4964 break;
4965 case 0x8: /* signed multiply */
4966 case 0xa:
4967 case 0xc:
4968 case 0xe:
4969 rs = (insn >> 8) & 0xf;
4970 rn = (insn >> 12) & 0xf;
4971 rd = (insn >> 16) & 0xf;
4972 if (op1 == 1) {
4973 /* (32 * 16) >> 16 */
4974 gen_movl_T0_reg(s, rm);
4975 gen_movl_T1_reg(s, rs);
4976 if (sh & 4)
4977 gen_op_sarl_T1_im(16);
4978 else
4979 gen_op_sxth_T1();
4980 gen_op_imulw_T0_T1();
4981 if ((sh & 2) == 0) {
4982 gen_movl_T1_reg(s, rn);
4983 gen_op_addl_T0_T1_setq();
4984 }
4985 gen_movl_reg_T0(s, rd);
4986 } else {
4987 /* 16 * 16 */
4988 gen_movl_T0_reg(s, rm);
4989 gen_movl_T1_reg(s, rs);
4990 gen_mulxy(sh & 2, sh & 4);
4991 if (op1 == 2) {
4992 gen_op_signbit_T1_T0();
4993 gen_op_addq_T0_T1(rn, rd);
4994 gen_movl_reg_T0(s, rn);
4995 gen_movl_reg_T1(s, rd);
4996 } else {
4997 if (op1 == 0) {
4998 gen_movl_T1_reg(s, rn);
4999 gen_op_addl_T0_T1_setq();
5000 }
5001 gen_movl_reg_T0(s, rd);
5002 }
5003 }
5004 break;
5005 default:
5006 goto illegal_op;
5007 }
5008 } else if (((insn & 0x0e000000) == 0 &&
5009 (insn & 0x00000090) != 0x90) ||
5010 ((insn & 0x0e000000) == (1 << 25))) {
5011 int set_cc, logic_cc, shiftop;
5012
5013 op1 = (insn >> 21) & 0xf;
5014 set_cc = (insn >> 20) & 1;
5015 logic_cc = table_logic_cc[op1] & set_cc;
5016
5017 /* data processing instruction */
5018 if (insn & (1 << 25)) {
5019 /* immediate operand */
5020 val = insn & 0xff;
5021 shift = ((insn >> 8) & 0xf) * 2;
5022 if (shift)
5023 val = (val >> shift) | (val << (32 - shift));
5024 gen_op_movl_T1_im(val);
5025 if (logic_cc && shift)
5026 gen_op_mov_CF_T1();
5027 } else {
5028 /* register */
5029 rm = (insn) & 0xf;
5030 gen_movl_T1_reg(s, rm);
5031 shiftop = (insn >> 5) & 3;
5032 if (!(insn & (1 << 4))) {
5033 shift = (insn >> 7) & 0x1f;
5034 if (shift != 0) {
5035 if (logic_cc) {
5036 gen_shift_T1_im_cc[shiftop](shift);
5037 } else {
5038 gen_shift_T1_im[shiftop](shift);
5039 }
5040 } else if (shiftop != 0) {
5041 if (logic_cc) {
5042 gen_shift_T1_0_cc[shiftop]();
5043 } else {
5044 gen_shift_T1_0[shiftop]();
5045 }
5046 }
5047 } else {
5048 rs = (insn >> 8) & 0xf;
5049 gen_movl_T0_reg(s, rs);
5050 if (logic_cc) {
5051 gen_shift_T1_T0_cc[shiftop]();
5052 } else {
5053 gen_shift_T1_T0[shiftop]();
5054 }
5055 }
5056 }
5057 if (op1 != 0x0f && op1 != 0x0d) {
5058 rn = (insn >> 16) & 0xf;
5059 gen_movl_T0_reg(s, rn);
5060 }
5061 rd = (insn >> 12) & 0xf;
5062 switch(op1) {
5063 case 0x00:
5064 gen_op_andl_T0_T1();
5065 gen_movl_reg_T0(s, rd);
5066 if (logic_cc)
5067 gen_op_logic_T0_cc();
5068 break;
5069 case 0x01:
5070 gen_op_xorl_T0_T1();
5071 gen_movl_reg_T0(s, rd);
5072 if (logic_cc)
5073 gen_op_logic_T0_cc();
5074 break;
5075 case 0x02:
5076 if (set_cc && rd == 15) {
5077 /* SUBS r15, ... is used for exception return. */
5078 if (IS_USER(s))
5079 goto illegal_op;
5080 gen_op_subl_T0_T1_cc();
5081 gen_exception_return(s);
5082 } else {
5083 if (set_cc)
5084 gen_op_subl_T0_T1_cc();
5085 else
5086 gen_op_subl_T0_T1();
5087 gen_movl_reg_T0(s, rd);
5088 }
5089 break;
5090 case 0x03:
5091 if (set_cc)
5092 gen_op_rsbl_T0_T1_cc();
5093 else
5094 gen_op_rsbl_T0_T1();
5095 gen_movl_reg_T0(s, rd);
5096 break;
5097 case 0x04:
5098 if (set_cc)
5099 gen_op_addl_T0_T1_cc();
5100 else
5101 gen_op_addl_T0_T1();
5102 gen_movl_reg_T0(s, rd);
5103 break;
5104 case 0x05:
5105 if (set_cc)
5106 gen_op_adcl_T0_T1_cc();
5107 else
5108 gen_op_adcl_T0_T1();
5109 gen_movl_reg_T0(s, rd);
5110 break;
5111 case 0x06:
5112 if (set_cc)
5113 gen_op_sbcl_T0_T1_cc();
5114 else
5115 gen_op_sbcl_T0_T1();
5116 gen_movl_reg_T0(s, rd);
5117 break;
5118 case 0x07:
5119 if (set_cc)
5120 gen_op_rscl_T0_T1_cc();
5121 else
5122 gen_op_rscl_T0_T1();
5123 gen_movl_reg_T0(s, rd);
5124 break;
5125 case 0x08:
5126 if (set_cc) {
5127 gen_op_andl_T0_T1();
5128 gen_op_logic_T0_cc();
5129 }
5130 break;
5131 case 0x09:
5132 if (set_cc) {
5133 gen_op_xorl_T0_T1();
5134 gen_op_logic_T0_cc();
5135 }
5136 break;
5137 case 0x0a:
5138 if (set_cc) {
5139 gen_op_subl_T0_T1_cc();
5140 }
5141 break;
5142 case 0x0b:
5143 if (set_cc) {
5144 gen_op_addl_T0_T1_cc();
5145 }
5146 break;
5147 case 0x0c:
5148 gen_op_orl_T0_T1();
5149 gen_movl_reg_T0(s, rd);
5150 if (logic_cc)
5151 gen_op_logic_T0_cc();
5152 break;
5153 case 0x0d:
5154 if (logic_cc && rd == 15) {
5155 /* MOVS r15, ... is used for exception return. */
5156 if (IS_USER(s))
5157 goto illegal_op;
5158 gen_op_movl_T0_T1();
5159 gen_exception_return(s);
5160 } else {
5161 gen_movl_reg_T1(s, rd);
5162 if (logic_cc)
5163 gen_op_logic_T1_cc();
5164 }
5165 break;
5166 case 0x0e:
5167 gen_op_bicl_T0_T1();
5168 gen_movl_reg_T0(s, rd);
5169 if (logic_cc)
5170 gen_op_logic_T0_cc();
5171 break;
5172 default:
5173 case 0x0f:
5174 gen_op_notl_T1();
5175 gen_movl_reg_T1(s, rd);
5176 if (logic_cc)
5177 gen_op_logic_T1_cc();
5178 break;
5179 }
5180 } else {
5181 /* other instructions */
5182 op1 = (insn >> 24) & 0xf;
5183 switch(op1) {
5184 case 0x0:
5185 case 0x1:
5186 /* multiplies, extra load/stores */
5187 sh = (insn >> 5) & 3;
5188 if (sh == 0) {
5189 if (op1 == 0x0) {
5190 rd = (insn >> 16) & 0xf;
5191 rn = (insn >> 12) & 0xf;
5192 rs = (insn >> 8) & 0xf;
5193 rm = (insn) & 0xf;
5194 op1 = (insn >> 20) & 0xf;
5195 switch (op1) {
5196 case 0: case 1: case 2: case 3: case 6:
5197 /* 32 bit mul */
5198 gen_movl_T0_reg(s, rs);
5199 gen_movl_T1_reg(s, rm);
5200 gen_op_mul_T0_T1();
5201 if (insn & (1 << 22)) {
5202 /* Subtract (mls) */
5203 ARCH(6T2);
5204 gen_movl_T1_reg(s, rn);
5205 gen_op_rsbl_T0_T1();
5206 } else if (insn & (1 << 21)) {
5207 /* Add */
5208 gen_movl_T1_reg(s, rn);
5209 gen_op_addl_T0_T1();
5210 }
5211 if (insn & (1 << 20))
5212 gen_op_logic_T0_cc();
5213 gen_movl_reg_T0(s, rd);
5214 break;
5215 default:
5216 /* 64 bit mul */
5217 gen_movl_T0_reg(s, rs);
5218 gen_movl_T1_reg(s, rm);
5219 if (insn & (1 << 22))
5220 gen_op_imull_T0_T1();
5221 else
5222 gen_op_mull_T0_T1();
5223 if (insn & (1 << 21)) /* mult accumulate */
5224 gen_op_addq_T0_T1(rn, rd);
5225 if (!(insn & (1 << 23))) { /* double accumulate */
5226 ARCH(6);
5227 gen_op_addq_lo_T0_T1(rn);
5228 gen_op_addq_lo_T0_T1(rd);
5229 }
5230 if (insn & (1 << 20))
5231 gen_op_logicq_cc();
5232 gen_movl_reg_T0(s, rn);
5233 gen_movl_reg_T1(s, rd);
5234 break;
5235 }
5236 } else {
5237 rn = (insn >> 16) & 0xf;
5238 rd = (insn >> 12) & 0xf;
5239 if (insn & (1 << 23)) {
5240 /* load/store exclusive */
5241 gen_movl_T1_reg(s, rn);
5242 if (insn & (1 << 20)) {
5243 gen_ldst(ldlex, s);
5244 } else {
5245 rm = insn & 0xf;
5246 gen_movl_T0_reg(s, rm);
5247 gen_ldst(stlex, s);
5248 }
5249 gen_movl_reg_T0(s, rd);
5250 } else {
5251 /* SWP instruction */
5252 rm = (insn) & 0xf;
5253
5254 gen_movl_T0_reg(s, rm);
5255 gen_movl_T1_reg(s, rn);
5256 if (insn & (1 << 22)) {
5257 gen_ldst(swpb, s);
5258 } else {
5259 gen_ldst(swpl, s);
5260 }
5261 gen_movl_reg_T0(s, rd);
5262 }
5263 }
5264 } else {
5265 int address_offset;
5266 int load;
5267 /* Misc load/store */
5268 rn = (insn >> 16) & 0xf;
5269 rd = (insn >> 12) & 0xf;
5270 gen_movl_T1_reg(s, rn);
5271 if (insn & (1 << 24))
5272 gen_add_datah_offset(s, insn, 0);
5273 address_offset = 0;
5274 if (insn & (1 << 20)) {
5275 /* load */
5276 switch(sh) {
5277 case 1:
5278 gen_ldst(lduw, s);
5279 break;
5280 case 2:
5281 gen_ldst(ldsb, s);
5282 break;
5283 default:
5284 case 3:
5285 gen_ldst(ldsw, s);
5286 break;
5287 }
5288 load = 1;
5289 } else if (sh & 2) {
5290 /* doubleword */
5291 if (sh & 1) {
5292 /* store */
5293 gen_movl_T0_reg(s, rd);
5294 gen_ldst(stl, s);
5295 gen_op_addl_T1_im(4);
5296 gen_movl_T0_reg(s, rd + 1);
5297 gen_ldst(stl, s);
5298 load = 0;
5299 } else {
5300 /* load */
5301 gen_ldst(ldl, s);
5302 gen_movl_reg_T0(s, rd);
5303 gen_op_addl_T1_im(4);
5304 gen_ldst(ldl, s);
5305 rd++;
5306 load = 1;
5307 }
5308 address_offset = -4;
5309 } else {
5310 /* store */
5311 gen_movl_T0_reg(s, rd);
5312 gen_ldst(stw, s);
5313 load = 0;
5314 }
5315 /* Perform base writeback before the loaded value to
5316 ensure correct behavior with overlapping index registers.
5317 ldrd with base writeback is is undefined if the
5318 destination and index registers overlap. */
5319 if (!(insn & (1 << 24))) {
5320 gen_add_datah_offset(s, insn, address_offset);
5321 gen_movl_reg_T1(s, rn);
5322 } else if (insn & (1 << 21)) {
5323 if (address_offset)
5324 gen_op_addl_T1_im(address_offset);
5325 gen_movl_reg_T1(s, rn);
5326 }
5327 if (load) {
5328 /* Complete the load. */
5329 gen_movl_reg_T0(s, rd);
5330 }
5331 }
5332 break;
5333 case 0x4:
5334 case 0x5:
5335 goto do_ldst;
5336 case 0x6:
5337 case 0x7:
5338 if (insn & (1 << 4)) {
5339 ARCH(6);
5340 /* Armv6 Media instructions. */
5341 rm = insn & 0xf;
5342 rn = (insn >> 16) & 0xf;
2c0262af 5343 rd = (insn >> 12) & 0xf;
9ee6e8bb
PB
5344 rs = (insn >> 8) & 0xf;
5345 switch ((insn >> 23) & 3) {
5346 case 0: /* Parallel add/subtract. */
5347 op1 = (insn >> 20) & 7;
5348 gen_movl_T0_reg(s, rn);
5349 gen_movl_T1_reg(s, rm);
5350 sh = (insn >> 5) & 7;
5351 if ((op1 & 3) == 0 || sh == 5 || sh == 6)
5352 goto illegal_op;
5353 gen_arm_parallel_addsub[op1][sh]();
5354 gen_movl_reg_T0(s, rd);
5355 break;
5356 case 1:
5357 if ((insn & 0x00700020) == 0) {
5358 /* Hafword pack. */
5359 gen_movl_T0_reg(s, rn);
5360 gen_movl_T1_reg(s, rm);
5361 shift = (insn >> 7) & 0x1f;
5362 if (shift)
5363 gen_op_shll_T1_im(shift);
5364 if (insn & (1 << 6))
5365 gen_op_pkhtb_T0_T1();
5366 else
5367 gen_op_pkhbt_T0_T1();
5368 gen_movl_reg_T0(s, rd);
5369 } else if ((insn & 0x00200020) == 0x00200000) {
5370 /* [us]sat */
5371 gen_movl_T1_reg(s, rm);
5372 shift = (insn >> 7) & 0x1f;
5373 if (insn & (1 << 6)) {
5374 if (shift == 0)
5375 shift = 31;
5376 gen_op_sarl_T1_im(shift);
5377 } else {
5378 gen_op_shll_T1_im(shift);
5379 }
5380 sh = (insn >> 16) & 0x1f;
5381 if (sh != 0) {
5382 if (insn & (1 << 22))
5383 gen_op_usat_T1(sh);
5384 else
5385 gen_op_ssat_T1(sh);
5386 }
5387 gen_movl_T1_reg(s, rd);
5388 } else if ((insn & 0x00300fe0) == 0x00200f20) {
5389 /* [us]sat16 */
5390 gen_movl_T1_reg(s, rm);
5391 sh = (insn >> 16) & 0x1f;
5392 if (sh != 0) {
5393 if (insn & (1 << 22))
5394 gen_op_usat16_T1(sh);
5395 else
5396 gen_op_ssat16_T1(sh);
5397 }
5398 gen_movl_T1_reg(s, rd);
5399 } else if ((insn & 0x00700fe0) == 0x00000fa0) {
5400 /* Select bytes. */
5401 gen_movl_T0_reg(s, rn);
5402 gen_movl_T1_reg(s, rm);
5403 gen_op_sel_T0_T1();
5404 gen_movl_reg_T0(s, rd);
5405 } else if ((insn & 0x000003e0) == 0x00000060) {
5406 gen_movl_T1_reg(s, rm);
5407 shift = (insn >> 10) & 3;
5408 /* ??? In many cases it's not neccessary to do a
5409 rotate, a shift is sufficient. */
5410 if (shift != 0)
5411 gen_op_rorl_T1_im(shift * 8);
5412 op1 = (insn >> 20) & 7;
5413 switch (op1) {
5414 case 0: gen_op_sxtb16_T1(); break;
5415 case 2: gen_op_sxtb_T1(); break;
5416 case 3: gen_op_sxth_T1(); break;
5417 case 4: gen_op_uxtb16_T1(); break;
5418 case 6: gen_op_uxtb_T1(); break;
5419 case 7: gen_op_uxth_T1(); break;
5420 default: goto illegal_op;
5421 }
5422 if (rn != 15) {
5423 gen_movl_T2_reg(s, rn);
5424 if ((op1 & 3) == 0) {
5425 gen_op_add16_T1_T2();
5426 } else {
5427 gen_op_addl_T1_T2();
5428 }
5429 }
5430 gen_movl_reg_T1(s, rd);
5431 } else if ((insn & 0x003f0f60) == 0x003f0f20) {
5432 /* rev */
5433 gen_movl_T0_reg(s, rm);
5434 if (insn & (1 << 22)) {
5435 if (insn & (1 << 7)) {
5436 gen_op_revsh_T0();
5437 } else {
5438 ARCH(6T2);
5439 gen_op_rbit_T0();
5440 }
5441 } else {
5442 if (insn & (1 << 7))
5443 gen_op_rev16_T0();
5444 else
5445 gen_op_rev_T0();
5446 }
5447 gen_movl_reg_T0(s, rd);
5448 } else {
5449 goto illegal_op;
5450 }
5451 break;
5452 case 2: /* Multiplies (Type 3). */
5453 gen_movl_T0_reg(s, rm);
5454 gen_movl_T1_reg(s, rs);
5455 if (insn & (1 << 20)) {
5456 /* Signed multiply most significant [accumulate]. */
5457 gen_op_imull_T0_T1();
5458 if (insn & (1 << 5))
5459 gen_op_roundqd_T0_T1();
5460 else
5461 gen_op_movl_T0_T1();
5462 if (rn != 15) {
5463 gen_movl_T1_reg(s, rn);
5464 if (insn & (1 << 6)) {
5465 gen_op_addl_T0_T1();
5466 } else {
5467 gen_op_rsbl_T0_T1();
5468 }
5469 }
5470 gen_movl_reg_T0(s, rd);
5471 } else {
5472 if (insn & (1 << 5))
5473 gen_op_swap_half_T1();
5474 gen_op_mul_dual_T0_T1();
5475 if (insn & (1 << 22)) {
5476 if (insn & (1 << 6)) {
5477 /* smlald */
5478 gen_op_addq_T0_T1_dual(rn, rd);
5479 } else {
5480 /* smlsld */
5481 gen_op_subq_T0_T1_dual(rn, rd);
5482 }
5483 } else {
5484 /* This addition cannot overflow. */
5485 if (insn & (1 << 6)) {
5486 /* sm[ul]sd */
5487 gen_op_subl_T0_T1();
5488 } else {
5489 /* sm[ul]ad */
5490 gen_op_addl_T0_T1();
5491 }
5492 if (rn != 15)
5493 {
5494 gen_movl_T1_reg(s, rn);
5495 gen_op_addl_T0_T1_setq();
5496 }
5497 gen_movl_reg_T0(s, rd);
5498 }
5499 }
5500 break;
5501 case 3:
5502 op1 = ((insn >> 17) & 0x38) | ((insn >> 5) & 7);
5503 switch (op1) {
5504 case 0: /* Unsigned sum of absolute differences. */
5505 goto illegal_op;
5506 gen_movl_T0_reg(s, rm);
5507 gen_movl_T1_reg(s, rs);
5508 gen_op_usad8_T0_T1();
5509 if (rn != 15) {
5510 gen_movl_T1_reg(s, rn);
5511 gen_op_addl_T0_T1();
5512 }
5513 gen_movl_reg_T0(s, rd);
5514 break;
5515 case 0x20: case 0x24: case 0x28: case 0x2c:
5516 /* Bitfield insert/clear. */
5517 ARCH(6T2);
5518 shift = (insn >> 7) & 0x1f;
5519 i = (insn >> 16) & 0x1f;
5520 i = i + 1 - shift;
5521 if (rm == 15) {
5522 gen_op_movl_T1_im(0);
5523 } else {
5524 gen_movl_T1_reg(s, rm);
5525 }
5526 if (i != 32) {
5527 gen_movl_T0_reg(s, rd);
5528 gen_op_bfi_T1_T0(shift, ((1u << i) - 1) << shift);
5529 }
5530 gen_movl_reg_T1(s, rd);
5531 break;
5532 case 0x12: case 0x16: case 0x1a: case 0x1e: /* sbfx */
5533 case 0x32: case 0x36: case 0x3a: case 0x3e: /* ubfx */
5534 gen_movl_T1_reg(s, rm);
5535 shift = (insn >> 7) & 0x1f;
5536 i = ((insn >> 16) & 0x1f) + 1;
5537 if (shift + i > 32)
5538 goto illegal_op;
5539 if (i < 32) {
5540 if (op1 & 0x20) {
5541 gen_op_ubfx_T1(shift, (1u << i) - 1);
5542 } else {
5543 gen_op_sbfx_T1(shift, i);
5544 }
5545 }
5546 gen_movl_reg_T1(s, rd);
5547 break;
5548 default:
5549 goto illegal_op;
5550 }
5551 break;
5552 }
5553 break;
5554 }
5555 do_ldst:
5556 /* Check for undefined extension instructions
5557 * per the ARM Bible IE:
5558 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
5559 */
5560 sh = (0xf << 20) | (0xf << 4);
5561 if (op1 == 0x7 && ((insn & sh) == sh))
5562 {
5563 goto illegal_op;
5564 }
5565 /* load/store byte/word */
5566 rn = (insn >> 16) & 0xf;
5567 rd = (insn >> 12) & 0xf;
5568 gen_movl_T1_reg(s, rn);
5569 i = (IS_USER(s) || (insn & 0x01200000) == 0x00200000);
5570 if (insn & (1 << 24))
5571 gen_add_data_offset(s, insn);
5572 if (insn & (1 << 20)) {
5573 /* load */
5574 s->is_mem = 1;
5575#if defined(CONFIG_USER_ONLY)
5576 if (insn & (1 << 22))
5577 gen_op_ldub_raw();
5578 else
5579 gen_op_ldl_raw();
5580#else
5581 if (insn & (1 << 22)) {
5582 if (i)
5583 gen_op_ldub_user();
5584 else
5585 gen_op_ldub_kernel();
5586 } else {
5587 if (i)
5588 gen_op_ldl_user();
5589 else
5590 gen_op_ldl_kernel();
5591 }
5592#endif
5593 } else {
5594 /* store */
5595 gen_movl_T0_reg(s, rd);
5596#if defined(CONFIG_USER_ONLY)
5597 if (insn & (1 << 22))
5598 gen_op_stb_raw();
5599 else
5600 gen_op_stl_raw();
5601#else
5602 if (insn & (1 << 22)) {
5603 if (i)
5604 gen_op_stb_user();
5605 else
5606 gen_op_stb_kernel();
5607 } else {
5608 if (i)
5609 gen_op_stl_user();
5610 else
5611 gen_op_stl_kernel();
5612 }
5613#endif
5614 }
5615 if (!(insn & (1 << 24))) {
5616 gen_add_data_offset(s, insn);
5617 gen_movl_reg_T1(s, rn);
5618 } else if (insn & (1 << 21))
5619 gen_movl_reg_T1(s, rn); {
5620 }
5621 if (insn & (1 << 20)) {
5622 /* Complete the load. */
5623 if (rd == 15)
5624 gen_bx(s);
5625 else
5626 gen_movl_reg_T0(s, rd);
5627 }
5628 break;
5629 case 0x08:
5630 case 0x09:
5631 {
5632 int j, n, user, loaded_base;
5633 /* load/store multiple words */
5634 /* XXX: store correct base if write back */
5635 user = 0;
5636 if (insn & (1 << 22)) {
5637 if (IS_USER(s))
5638 goto illegal_op; /* only usable in supervisor mode */
5639
5640 if ((insn & (1 << 15)) == 0)
5641 user = 1;
5642 }
5643 rn = (insn >> 16) & 0xf;
5644 gen_movl_T1_reg(s, rn);
5645
5646 /* compute total size */
5647 loaded_base = 0;
5648 n = 0;
5649 for(i=0;i<16;i++) {
5650 if (insn & (1 << i))
5651 n++;
5652 }
5653 /* XXX: test invalid n == 0 case ? */
5654 if (insn & (1 << 23)) {
5655 if (insn & (1 << 24)) {
5656 /* pre increment */
5657 gen_op_addl_T1_im(4);
5658 } else {
5659 /* post increment */
5660 }
5661 } else {
5662 if (insn & (1 << 24)) {
5663 /* pre decrement */
5664 gen_op_addl_T1_im(-(n * 4));
5665 } else {
5666 /* post decrement */
5667 if (n != 1)
5668 gen_op_addl_T1_im(-((n - 1) * 4));
5669 }
5670 }
5671 j = 0;
5672 for(i=0;i<16;i++) {
5673 if (insn & (1 << i)) {
5674 if (insn & (1 << 20)) {
5675 /* load */
5676 gen_ldst(ldl, s);
5677 if (i == 15) {
5678 gen_bx(s);
5679 } else if (user) {
5680 gen_op_movl_user_T0(i);
5681 } else if (i == rn) {
5682 gen_op_movl_T2_T0();
5683 loaded_base = 1;
5684 } else {
5685 gen_movl_reg_T0(s, i);
5686 }
5687 } else {
5688 /* store */
5689 if (i == 15) {
5690 /* special case: r15 = PC + 8 */
5691 val = (long)s->pc + 4;
5692 gen_op_movl_TN_im[0](val);
5693 } else if (user) {
5694 gen_op_movl_T0_user(i);
5695 } else {
5696 gen_movl_T0_reg(s, i);
5697 }
5698 gen_ldst(stl, s);
5699 }
5700 j++;
5701 /* no need to add after the last transfer */
5702 if (j != n)
5703 gen_op_addl_T1_im(4);
5704 }
5705 }
5706 if (insn & (1 << 21)) {
5707 /* write back */
5708 if (insn & (1 << 23)) {
5709 if (insn & (1 << 24)) {
5710 /* pre increment */
5711 } else {
5712 /* post increment */
5713 gen_op_addl_T1_im(4);
5714 }
5715 } else {
5716 if (insn & (1 << 24)) {
5717 /* pre decrement */
5718 if (n != 1)
5719 gen_op_addl_T1_im(-((n - 1) * 4));
5720 } else {
5721 /* post decrement */
5722 gen_op_addl_T1_im(-(n * 4));
5723 }
5724 }
5725 gen_movl_reg_T1(s, rn);
5726 }
5727 if (loaded_base) {
5728 gen_op_movl_T0_T2();
5729 gen_movl_reg_T0(s, rn);
5730 }
5731 if ((insn & (1 << 22)) && !user) {
5732 /* Restore CPSR from SPSR. */
5733 gen_op_movl_T0_spsr();
5734 gen_op_movl_cpsr_T0(0xffffffff);
5735 s->is_jmp = DISAS_UPDATE;
5736 }
5737 }
5738 break;
5739 case 0xa:
5740 case 0xb:
5741 {
5742 int32_t offset;
5743
5744 /* branch (and link) */
5745 val = (int32_t)s->pc;
5746 if (insn & (1 << 24)) {
5747 gen_op_movl_T0_im(val);
5748 gen_op_movl_reg_TN[0][14]();
5749 }
5750 offset = (((int32_t)insn << 8) >> 8);
5751 val += (offset << 2) + 4;
5752 gen_jmp(s, val);
5753 }
5754 break;
5755 case 0xc:
5756 case 0xd:
5757 case 0xe:
5758 /* Coprocessor. */
5759 if (disas_coproc_insn(env, s, insn))
5760 goto illegal_op;
5761 break;
5762 case 0xf:
5763 /* swi */
5764 gen_op_movl_T0_im((long)s->pc);
5765 gen_op_movl_reg_TN[0][15]();
5766 s->is_jmp = DISAS_SWI;
5767 break;
5768 default:
5769 illegal_op:
5770 gen_set_condexec(s);
5771 gen_op_movl_T0_im((long)s->pc - 4);
5772 gen_op_movl_reg_TN[0][15]();
5773 gen_op_undef_insn();
5774 s->is_jmp = DISAS_JUMP;
5775 break;
5776 }
5777 }
5778}
5779
/* Return true if this is a Thumb-2 logical op.  */
static int
thumb2_logic_op(int op)
{
    /* Opcodes 0-7 form the logical group (and, bic, orr, orn, eor, ...);
       these take their carry flag from the shifter output rather than
       from the ALU, which is what callers use this predicate to decide.  */
    return op < 8;
}
5786
5787/* Generate code for a Thumb-2 data processing operation. If CONDS is nonzero
5788 then set condition code flags based on the result of the operation.
5789 If SHIFTER_OUT is nonzero then set the carry flag for logical operations
5790 to the high bit of T1.
5791 Returns zero if the opcode is valid. */
5792
5793static int
5794gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out)
5795{
5796 int logic_cc;
5797
5798 logic_cc = 0;
5799 switch (op) {
5800 case 0: /* and */
5801 gen_op_andl_T0_T1();
5802 logic_cc = conds;
5803 break;
5804 case 1: /* bic */
5805 gen_op_bicl_T0_T1();
5806 logic_cc = conds;
5807 break;
5808 case 2: /* orr */
5809 gen_op_orl_T0_T1();
5810 logic_cc = conds;
5811 break;
5812 case 3: /* orn */
5813 gen_op_notl_T1();
5814 gen_op_orl_T0_T1();
5815 logic_cc = conds;
5816 break;
5817 case 4: /* eor */
5818 gen_op_xorl_T0_T1();
5819 logic_cc = conds;
5820 break;
5821 case 8: /* add */
5822 if (conds)
5823 gen_op_addl_T0_T1_cc();
5824 else
5825 gen_op_addl_T0_T1();
5826 break;
5827 case 10: /* adc */
5828 if (conds)
5829 gen_op_adcl_T0_T1_cc();
5830 else
5831 gen_op_adcl_T0_T1();
5832 break;
5833 case 11: /* sbc */
5834 if (conds)
5835 gen_op_sbcl_T0_T1_cc();
5836 else
5837 gen_op_sbcl_T0_T1();
5838 break;
5839 case 13: /* sub */
5840 if (conds)
5841 gen_op_subl_T0_T1_cc();
5842 else
5843 gen_op_subl_T0_T1();
5844 break;
5845 case 14: /* rsb */
5846 if (conds)
5847 gen_op_rsbl_T0_T1_cc();
5848 else
5849 gen_op_rsbl_T0_T1();
5850 break;
5851 default: /* 5, 6, 7, 9, 12, 15. */
5852 return 1;
5853 }
5854 if (logic_cc) {
5855 gen_op_logic_T0_cc();
5856 if (shifter_out)
5857 gen_op_mov_CF_T1();
5858 }
5859 return 0;
5860}
5861
5862/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
5863 is not legal. */
5864static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
5865{
5866 uint32_t insn, imm, shift, offset, addr;
5867 uint32_t rd, rn, rm, rs;
5868 int op;
5869 int shiftop;
5870 int conds;
5871 int logic_cc;
5872
5873 if (!(arm_feature(env, ARM_FEATURE_THUMB2)
5874 || arm_feature (env, ARM_FEATURE_M))) {
5875 /* Thumb-1 cores may need to tread bl and blx as a pair of
5876 16-bit instructions to get correct prefetch abort behavior. */
5877 insn = insn_hw1;
5878 if ((insn & (1 << 12)) == 0) {
5879 /* Second half of blx. */
5880 offset = ((insn & 0x7ff) << 1);
5881 gen_movl_T0_reg(s, 14);
5882 gen_op_movl_T1_im(offset);
5883 gen_op_addl_T0_T1();
5884 gen_op_movl_T1_im(0xfffffffc);
5885 gen_op_andl_T0_T1();
5886
5887 addr = (uint32_t)s->pc;
5888 gen_op_movl_T1_im(addr | 1);
5889 gen_movl_reg_T1(s, 14);
5890 gen_bx(s);
5891 return 0;
5892 }
5893 if (insn & (1 << 11)) {
5894 /* Second half of bl. */
5895 offset = ((insn & 0x7ff) << 1) | 1;
5896 gen_movl_T0_reg(s, 14);
5897 gen_op_movl_T1_im(offset);
5898 gen_op_addl_T0_T1();
5899
5900 addr = (uint32_t)s->pc;
5901 gen_op_movl_T1_im(addr | 1);
5902 gen_movl_reg_T1(s, 14);
5903 gen_bx(s);
5904 return 0;
5905 }
5906 if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
5907 /* Instruction spans a page boundary. Implement it as two
5908 16-bit instructions in case the second half causes an
5909 prefetch abort. */
5910 offset = ((int32_t)insn << 21) >> 9;
5911 addr = s->pc + 2 + offset;
5912 gen_op_movl_T0_im(addr);
5913 gen_movl_reg_T0(s, 14);
5914 return 0;
5915 }
5916 /* Fall through to 32-bit decode. */
5917 }
5918
5919 insn = lduw_code(s->pc);
5920 s->pc += 2;
5921 insn |= (uint32_t)insn_hw1 << 16;
5922
5923 if ((insn & 0xf800e800) != 0xf000e800) {
5924 ARCH(6T2);
5925 }
5926
5927 rn = (insn >> 16) & 0xf;
5928 rs = (insn >> 12) & 0xf;
5929 rd = (insn >> 8) & 0xf;
5930 rm = insn & 0xf;
5931 switch ((insn >> 25) & 0xf) {
5932 case 0: case 1: case 2: case 3:
5933 /* 16-bit instructions. Should never happen. */
5934 abort();
5935 case 4:
5936 if (insn & (1 << 22)) {
5937 /* Other load/store, table branch. */
5938 if (insn & 0x01200000) {
5939 /* Load/store doubleword. */
5940 if (rn == 15) {
5941 gen_op_movl_T1_im(s->pc & ~3);
5942 } else {
5943 gen_movl_T1_reg(s, rn);
5944 }
5945 offset = (insn & 0xff) * 4;
5946 if ((insn & (1 << 23)) == 0)
5947 offset = -offset;
5948 if (insn & (1 << 24)) {
5949 gen_op_addl_T1_im(offset);
5950 offset = 0;
5951 }
5952 if (insn & (1 << 20)) {
5953 /* ldrd */
5954 gen_ldst(ldl, s);
5955 gen_movl_reg_T0(s, rs);
5956 gen_op_addl_T1_im(4);
5957 gen_ldst(ldl, s);
5958 gen_movl_reg_T0(s, rd);
5959 } else {
5960 /* strd */
5961 gen_movl_T0_reg(s, rs);
5962 gen_ldst(stl, s);
5963 gen_op_addl_T1_im(4);
5964 gen_movl_T0_reg(s, rd);
5965 gen_ldst(stl, s);
5966 }
5967 if (insn & (1 << 21)) {
5968 /* Base writeback. */
5969 if (rn == 15)
5970 goto illegal_op;
5971 gen_op_addl_T1_im(offset - 4);
5972 gen_movl_reg_T1(s, rn);
5973 }
5974 } else if ((insn & (1 << 23)) == 0) {
5975 /* Load/store exclusive word. */
5976 gen_movl_T0_reg(s, rd);
2c0262af 5977 gen_movl_T1_reg(s, rn);
2c0262af 5978 if (insn & (1 << 20)) {
9ee6e8bb
PB
5979 gen_ldst(ldlex, s);
5980 } else {
5981 gen_ldst(stlex, s);
5982 }
5983 gen_movl_reg_T0(s, rd);
5984 } else if ((insn & (1 << 6)) == 0) {
5985 /* Table Branch. */
5986 if (rn == 15) {
5987 gen_op_movl_T1_im(s->pc);
5988 } else {
5989 gen_movl_T1_reg(s, rn);
5990 }
5991 gen_movl_T2_reg(s, rm);
5992 gen_op_addl_T1_T2();
5993 if (insn & (1 << 4)) {
5994 /* tbh */
5995 gen_op_addl_T1_T2();
5996 gen_ldst(lduw, s);
5997 } else { /* tbb */
5998 gen_ldst(ldub, s);
5999 }
6000 gen_op_jmp_T0_im(s->pc);
6001 s->is_jmp = DISAS_JUMP;
6002 } else {
6003 /* Load/store exclusive byte/halfword/doubleword. */
6004 op = (insn >> 4) & 0x3;
6005 gen_movl_T1_reg(s, rn);
6006 if (insn & (1 << 20)) {
6007 switch (op) {
6008 case 0:
6009 gen_ldst(ldbex, s);
6010 break;
2c0262af 6011 case 1:
9ee6e8bb 6012 gen_ldst(ldwex, s);
2c0262af 6013 break;
9ee6e8bb
PB
6014 case 3:
6015 gen_ldst(ldqex, s);
6016 gen_movl_reg_T1(s, rd);
2c0262af
FB
6017 break;
6018 default:
9ee6e8bb
PB
6019 goto illegal_op;
6020 }
6021 gen_movl_reg_T0(s, rs);
6022 } else {
6023 gen_movl_T0_reg(s, rs);
6024 switch (op) {
6025 case 0:
6026 gen_ldst(stbex, s);
6027 break;
6028 case 1:
6029 gen_ldst(stwex, s);
6030 break;
2c0262af 6031 case 3:
9ee6e8bb
PB
6032 gen_movl_T2_reg(s, rd);
6033 gen_ldst(stqex, s);
2c0262af 6034 break;
9ee6e8bb
PB
6035 default:
6036 goto illegal_op;
2c0262af 6037 }
9ee6e8bb
PB
6038 gen_movl_reg_T0(s, rm);
6039 }
6040 }
6041 } else {
6042 /* Load/store multiple, RFE, SRS. */
6043 if (((insn >> 23) & 1) == ((insn >> 24) & 1)) {
6044 /* Not available in user mode. */
6045 if (!IS_USER(s))
6046 goto illegal_op;
6047 if (insn & (1 << 20)) {
6048 /* rfe */
6049 gen_movl_T1_reg(s, rn);
6050 if (insn & (1 << 24)) {
99c475ab 6051 gen_op_addl_T1_im(4);
9ee6e8bb
PB
6052 } else {
6053 gen_op_addl_T1_im(-4);
6054 }
6055 /* Load CPSR into T2 and PC into T0. */
6056 gen_ldst(ldl, s);
6057 gen_op_movl_T2_T0();
6058 gen_op_addl_T1_im(-4);
6059 gen_ldst(ldl, s);
6060 if (insn & (1 << 21)) {
6061 /* Base writeback. */
6062 if (insn & (1 << 24))
6063 gen_op_addl_T1_im(8);
6064 gen_movl_reg_T1(s, rn);
6065 }
6066 gen_rfe(s);
6067 } else {
6068 /* srs */
6069 op = (insn & 0x1f);
6070 if (op == (env->uncached_cpsr & CPSR_M)) {
6071 gen_movl_T1_reg(s, 13);
6072 } else {
6073 gen_op_movl_T1_r13_banked(op);
6074 }
6075 if ((insn & (1 << 24)) == 0) {
6076 gen_op_addl_T1_im(-8);
6077 }
6078 gen_movl_T0_reg(s, 14);
6079 gen_ldst(stl, s);
6080 gen_op_movl_T0_cpsr();
6081 gen_op_addl_T1_im(4);
6082 gen_ldst(stl, s);
6083 if (insn & (1 << 21)) {
6084 if ((insn & (1 << 24)) == 0) {
6085 gen_op_addl_T1_im(-4);
6086 } else {
6087 gen_op_addl_T1_im(4);
6088 }
6089 if (op == (env->uncached_cpsr & CPSR_M)) {
6090 gen_movl_reg_T1(s, 13);
6091 } else {
6092 gen_op_movl_r13_T1_banked(op);
6093 }
6094 }
6095 }
6096 } else {
6097 int i;
6098 /* Load/store multiple. */
6099 gen_movl_T1_reg(s, rn);
6100 offset = 0;
6101 for (i = 0; i < 16; i++) {
6102 if (insn & (1 << i))
6103 offset += 4;
6104 }
6105 if (insn & (1 << 24)) {
6106 gen_op_addl_T1_im(-offset);
6107 }
6108
6109 for (i = 0; i < 16; i++) {
6110 if ((insn & (1 << i)) == 0)
6111 continue;
6112 if (insn & (1 << 20)) {
6113 /* Load. */
6114 gen_ldst(ldl, s);
6115 if (i == 15) {
6116 gen_bx(s);
6117 } else {
6118 gen_movl_reg_T0(s, i);
6119 }
6120 } else {
6121 /* Store. */
6122 gen_movl_T0_reg(s, i);
b5ff1b31 6123 gen_ldst(stl, s);
9ee6e8bb
PB
6124 }
6125 gen_op_addl_T1_im(4);
6126 }
6127 if (insn & (1 << 21)) {
6128 /* Base register writeback. */
6129 if (insn & (1 << 24)) {
6130 gen_op_addl_T1_im(-offset);
6131 }
6132 /* Fault if writeback register is in register list. */
6133 if (insn & (1 << rn))
6134 goto illegal_op;
6135 gen_movl_reg_T1(s, rn);
6136 }
6137 }
6138 }
6139 break;
6140 case 5: /* Data processing register constant shift. */
6141 if (rn == 15)
6142 gen_op_movl_T0_im(0);
6143 else
6144 gen_movl_T0_reg(s, rn);
6145 gen_movl_T1_reg(s, rm);
6146 op = (insn >> 21) & 0xf;
6147 shiftop = (insn >> 4) & 3;
6148 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6149 conds = (insn & (1 << 20)) != 0;
6150 logic_cc = (conds && thumb2_logic_op(op));
6151 if (shift != 0) {
6152 if (logic_cc) {
6153 gen_shift_T1_im_cc[shiftop](shift);
6154 } else {
6155 gen_shift_T1_im[shiftop](shift);
6156 }
6157 } else if (shiftop != 0) {
6158 if (logic_cc) {
6159 gen_shift_T1_0_cc[shiftop]();
6160 } else {
6161 gen_shift_T1_0[shiftop]();
6162 }
6163 }
6164 if (gen_thumb2_data_op(s, op, conds, 0))
6165 goto illegal_op;
6166 if (rd != 15)
6167 gen_movl_reg_T0(s, rd);
6168 break;
6169 case 13: /* Misc data processing. */
6170 op = ((insn >> 22) & 6) | ((insn >> 7) & 1);
6171 if (op < 4 && (insn & 0xf000) != 0xf000)
6172 goto illegal_op;
6173 switch (op) {
6174 case 0: /* Register controlled shift. */
6175 gen_movl_T0_reg(s, rm);
6176 gen_movl_T1_reg(s, rn);
6177 if ((insn & 0x70) != 0)
6178 goto illegal_op;
6179 op = (insn >> 21) & 3;
6180 if (insn & (1 << 20)) {
6181 gen_shift_T1_T0_cc[op]();
6182 gen_op_logic_T1_cc();
6183 } else {
6184 gen_shift_T1_T0[op]();
6185 }
6186 gen_movl_reg_T1(s, rd);
6187 break;
6188 case 1: /* Sign/zero extend. */
6189 gen_movl_T1_reg(s, rm);
6190 shift = (insn >> 4) & 3;
6191 /* ??? In many cases it's not neccessary to do a
6192 rotate, a shift is sufficient. */
6193 if (shift != 0)
6194 gen_op_rorl_T1_im(shift * 8);
6195 op = (insn >> 20) & 7;
6196 switch (op) {
6197 case 0: gen_op_sxth_T1(); break;
6198 case 1: gen_op_uxth_T1(); break;
6199 case 2: gen_op_sxtb16_T1(); break;
6200 case 3: gen_op_uxtb16_T1(); break;
6201 case 4: gen_op_sxtb_T1(); break;
6202 case 5: gen_op_uxtb_T1(); break;
6203 default: goto illegal_op;
6204 }
6205 if (rn != 15) {
6206 gen_movl_T2_reg(s, rn);
6207 if ((op >> 1) == 1) {
6208 gen_op_add16_T1_T2();
6209 } else {
6210 gen_op_addl_T1_T2();
6211 }
6212 }
6213 gen_movl_reg_T1(s, rd);
6214 break;
6215 case 2: /* SIMD add/subtract. */
6216 op = (insn >> 20) & 7;
6217 shift = (insn >> 4) & 7;
6218 if ((op & 3) == 3 || (shift & 3) == 3)
6219 goto illegal_op;
6220 gen_movl_T0_reg(s, rn);
6221 gen_movl_T1_reg(s, rm);
6222 gen_thumb2_parallel_addsub[op][shift]();
6223 gen_movl_reg_T0(s, rd);
6224 break;
6225 case 3: /* Other data processing. */
6226 op = ((insn >> 17) & 0x38) | ((insn >> 4) & 7);
6227 if (op < 4) {
6228 /* Saturating add/subtract. */
6229 gen_movl_T0_reg(s, rm);
6230 gen_movl_T1_reg(s, rn);
6231 if (op & 2)
6232 gen_op_double_T1_saturate();
6233 if (op & 1)
6234 gen_op_subl_T0_T1_saturate();
6235 else
6236 gen_op_addl_T0_T1_saturate();
6237 } else {
6238 gen_movl_T0_reg(s, rn);
6239 switch (op) {
6240 case 0x0a: /* rbit */
6241 gen_op_rbit_T0();
6242 break;
6243 case 0x08: /* rev */
6244 gen_op_rev_T0();
6245 break;
6246 case 0x09: /* rev16 */
6247 gen_op_rev16_T0();
6248 break;
6249 case 0x0b: /* revsh */
6250 gen_op_revsh_T0();
6251 break;
6252 case 0x10: /* sel */
6253 gen_movl_T1_reg(s, rm);
6254 gen_op_sel_T0_T1();
6255 break;
6256 case 0x18: /* clz */
6257 gen_op_clz_T0();
6258 break;
6259 default:
6260 goto illegal_op;
6261 }
6262 }
6263 gen_movl_reg_T0(s, rd);
6264 break;
6265 case 4: case 5: /* 32-bit multiply. Sum of absolute differences. */
6266 op = (insn >> 4) & 0xf;
6267 gen_movl_T0_reg(s, rn);
6268 gen_movl_T1_reg(s, rm);
6269 switch ((insn >> 20) & 7) {
6270 case 0: /* 32 x 32 -> 32 */
6271 gen_op_mul_T0_T1();
6272 if (rs != 15) {
6273 gen_movl_T1_reg(s, rs);
6274 if (op)
6275 gen_op_rsbl_T0_T1();
6276 else
6277 gen_op_addl_T0_T1();
6278 }
6279 gen_movl_reg_T0(s, rd);
6280 break;
6281 case 1: /* 16 x 16 -> 32 */
6282 gen_mulxy(op & 2, op & 1);
6283 if (rs != 15) {
6284 gen_movl_T1_reg(s, rs);
6285 gen_op_addl_T0_T1_setq();
6286 }
6287 gen_movl_reg_T0(s, rd);
6288 break;
6289 case 2: /* Dual multiply add. */
6290 case 4: /* Dual multiply subtract. */
6291 if (op)
6292 gen_op_swap_half_T1();
6293 gen_op_mul_dual_T0_T1();
6294 /* This addition cannot overflow. */
6295 if (insn & (1 << 22)) {
6296 gen_op_subl_T0_T1();
6297 } else {
6298 gen_op_addl_T0_T1();
6299 }
6300 if (rs != 15)
6301 {
6302 gen_movl_T1_reg(s, rs);
6303 gen_op_addl_T0_T1_setq();
6304 }
6305 gen_movl_reg_T0(s, rd);
6306 break;
6307 case 3: /* 32 * 16 -> 32msb */
6308 if (op)
6309 gen_op_sarl_T1_im(16);
6310 else
6311 gen_op_sxth_T1();
6312 gen_op_imulw_T0_T1();
6313 if (rs != 15)
6314 {
6315 gen_movl_T1_reg(s, rs);
6316 gen_op_addl_T0_T1_setq();
6317 }
6318 gen_movl_reg_T0(s, rd);
6319 break;
6320 case 5: case 6: /* 32 * 32 -> 32msb */
6321 gen_op_imull_T0_T1();
6322 if (insn & (1 << 5))
6323 gen_op_roundqd_T0_T1();
6324 else
6325 gen_op_movl_T0_T1();
6326 if (rs != 15) {
6327 gen_movl_T1_reg(s, rs);
6328 if (insn & (1 << 21)) {
6329 gen_op_addl_T0_T1();
99c475ab 6330 } else {
9ee6e8bb 6331 gen_op_rsbl_T0_T1();
99c475ab 6332 }
2c0262af 6333 }
9ee6e8bb
PB
6334 gen_movl_reg_T0(s, rd);
6335 break;
6336 case 7: /* Unsigned sum of absolute differences. */
6337 gen_op_usad8_T0_T1();
6338 if (rs != 15) {
6339 gen_movl_T1_reg(s, rs);
6340 gen_op_addl_T0_T1();
5fd46862 6341 }
9ee6e8bb
PB
6342 gen_movl_reg_T0(s, rd);
6343 break;
2c0262af
FB
6344 }
6345 break;
9ee6e8bb
PB
6346 case 6: case 7: /* 64-bit multiply, Divide. */
6347 op = ((insn >> 4) & 0xf) | ((insn >> 16) & 0x70);
6348 gen_movl_T0_reg(s, rn);
6349 gen_movl_T1_reg(s, rm);
6350 if ((op & 0x50) == 0x10) {
6351 /* sdiv, udiv */
6352 if (!arm_feature(env, ARM_FEATURE_DIV))
6353 goto illegal_op;
6354 if (op & 0x20)
6355 gen_op_udivl_T0_T1();
2c0262af 6356 else
9ee6e8bb
PB
6357 gen_op_sdivl_T0_T1();
6358 gen_movl_reg_T0(s, rd);
6359 } else if ((op & 0xe) == 0xc) {
6360 /* Dual multiply accumulate long. */
6361 if (op & 1)
6362 gen_op_swap_half_T1();
6363 gen_op_mul_dual_T0_T1();
6364 if (op & 0x10) {
6365 gen_op_subl_T0_T1();
b5ff1b31 6366 } else {
9ee6e8bb 6367 gen_op_addl_T0_T1();
b5ff1b31 6368 }
9ee6e8bb
PB
6369 gen_op_signbit_T1_T0();
6370 gen_op_addq_T0_T1(rs, rd);
6371 gen_movl_reg_T0(s, rs);
6372 gen_movl_reg_T1(s, rd);
2c0262af 6373 } else {
9ee6e8bb
PB
6374 if (op & 0x20) {
6375 /* Unsigned 64-bit multiply */
6376 gen_op_mull_T0_T1();
b5ff1b31 6377 } else {
9ee6e8bb
PB
6378 if (op & 8) {
6379 /* smlalxy */
6380 gen_mulxy(op & 2, op & 1);
6381 gen_op_signbit_T1_T0();
6382 } else {
6383 /* Signed 64-bit multiply */
6384 gen_op_imull_T0_T1();
6385 }
b5ff1b31 6386 }
9ee6e8bb
PB
6387 if (op & 4) {
6388 /* umaal */
6389 gen_op_addq_lo_T0_T1(rs);
6390 gen_op_addq_lo_T0_T1(rd);
6391 } else if (op & 0x40) {
6392 /* 64-bit accumulate. */
6393 gen_op_addq_T0_T1(rs, rd);
6394 }
6395 gen_movl_reg_T0(s, rs);
6396 gen_movl_reg_T1(s, rd);
5fd46862 6397 }
2c0262af 6398 break;
9ee6e8bb
PB
6399 }
6400 break;
6401 case 6: case 7: case 14: case 15:
6402 /* Coprocessor. */
6403 if (((insn >> 24) & 3) == 3) {
6404 /* Translate into the equivalent ARM encoding. */
6405 insn = (insn & 0xe2ffffff) | ((insn & (1 << 28)) >> 4);
6406 if (disas_neon_data_insn(env, s, insn))
6407 goto illegal_op;
6408 } else {
6409 if (insn & (1 << 28))
6410 goto illegal_op;
6411 if (disas_coproc_insn (env, s, insn))
6412 goto illegal_op;
6413 }
6414 break;
6415 case 8: case 9: case 10: case 11:
6416 if (insn & (1 << 15)) {
6417 /* Branches, misc control. */
6418 if (insn & 0x5000) {
6419 /* Unconditional branch. */
6420 /* signextend(hw1[10:0]) -> offset[:12]. */
6421 offset = ((int32_t)insn << 5) >> 9 & ~(int32_t)0xfff;
6422 /* hw1[10:0] -> offset[11:1]. */
6423 offset |= (insn & 0x7ff) << 1;
6424 /* (~hw2[13, 11] ^ offset[24]) -> offset[23,22]
6425 offset[24:22] already have the same value because of the
6426 sign extension above. */
6427 offset ^= ((~insn) & (1 << 13)) << 10;
6428 offset ^= ((~insn) & (1 << 11)) << 11;
6429
6430 addr = s->pc;
6431 if (insn & (1 << 14)) {
6432 /* Branch and link. */
6433 gen_op_movl_T1_im(addr | 1);
6434 gen_movl_reg_T1(s, 14);
b5ff1b31 6435 }
3b46e624 6436
9ee6e8bb
PB
6437 addr += offset;
6438 if (insn & (1 << 12)) {
6439 /* b/bl */
6440 gen_jmp(s, addr);
6441 } else {
6442 /* blx */
6443 addr &= ~(uint32_t)2;
6444 gen_op_movl_T0_im(addr);
6445 gen_bx(s);
2c0262af 6446 }
9ee6e8bb
PB
6447 } else if (((insn >> 23) & 7) == 7) {
6448 /* Misc control */
6449 if (insn & (1 << 13))
6450 goto illegal_op;
6451
6452 if (insn & (1 << 26)) {
6453 /* Secure monitor call (v6Z) */
6454 goto illegal_op; /* not implemented. */
2c0262af 6455 } else {
9ee6e8bb
PB
6456 op = (insn >> 20) & 7;
6457 switch (op) {
6458 case 0: /* msr cpsr. */
6459 if (IS_M(env)) {
6460 gen_op_v7m_msr_T0(insn & 0xff);
6461 gen_movl_reg_T0(s, rn);
6462 gen_lookup_tb(s);
6463 break;
6464 }
6465 /* fall through */
6466 case 1: /* msr spsr. */
6467 if (IS_M(env))
6468 goto illegal_op;
6469 gen_movl_T0_reg(s, rn);
6470 if (gen_set_psr_T0(s,
6471 msr_mask(env, s, (insn >> 8) & 0xf, op == 1),
6472 op == 1))
6473 goto illegal_op;
6474 break;
6475 case 2: /* cps, nop-hint. */
6476 if (((insn >> 8) & 7) == 0) {
6477 gen_nop_hint(s, insn & 0xff);
6478 }
6479 /* Implemented as NOP in user mode. */
6480 if (IS_USER(s))
6481 break;
6482 offset = 0;
6483 imm = 0;
6484 if (insn & (1 << 10)) {
6485 if (insn & (1 << 7))
6486 offset |= CPSR_A;
6487 if (insn & (1 << 6))
6488 offset |= CPSR_I;
6489 if (insn & (1 << 5))
6490 offset |= CPSR_F;
6491 if (insn & (1 << 9))
6492 imm = CPSR_A | CPSR_I | CPSR_F;
6493 }
6494 if (insn & (1 << 8)) {
6495 offset |= 0x1f;
6496 imm |= (insn & 0x1f);
6497 }
6498 if (offset) {
6499 gen_op_movl_T0_im(imm);
6500 gen_set_psr_T0(s, offset, 0);
6501 }
6502 break;
6503 case 3: /* Special control operations. */
6504 op = (insn >> 4) & 0xf;
6505 switch (op) {
6506 case 2: /* clrex */
6507 gen_op_clrex();
6508 break;
6509 case 4: /* dsb */
6510 case 5: /* dmb */
6511 case 6: /* isb */
6512 /* These execute as NOPs. */
6513 ARCH(7);
6514 break;
6515 default:
6516 goto illegal_op;
6517 }
6518 break;
6519 case 4: /* bxj */
6520 /* Trivial implementation equivalent to bx. */
6521 gen_movl_T0_reg(s, rn);
6522 gen_bx(s);
6523 break;
6524 case 5: /* Exception return. */
6525 /* Unpredictable in user mode. */
6526 goto illegal_op;
6527 case 6: /* mrs cpsr. */
6528 if (IS_M(env)) {
6529 gen_op_v7m_mrs_T0(insn & 0xff);
6530 } else {
6531 gen_op_movl_T0_cpsr();
6532 }
6533 gen_movl_reg_T0(s, rd);
6534 break;
6535 case 7: /* mrs spsr. */
6536 /* Not accessible in user mode. */
6537 if (IS_USER(s) || IS_M(env))
6538 goto illegal_op;
6539 gen_op_movl_T0_spsr();
6540 gen_movl_reg_T0(s, rd);
6541 break;
2c0262af
FB
6542 }
6543 }
9ee6e8bb
PB
6544 } else {
6545 /* Conditional branch. */
6546 op = (insn >> 22) & 0xf;
6547 /* Generate a conditional jump to next instruction. */
6548 s->condlabel = gen_new_label();
6549 gen_test_cc[op ^ 1](s->condlabel);
6550 s->condjmp = 1;
6551
6552 /* offset[11:1] = insn[10:0] */
6553 offset = (insn & 0x7ff) << 1;
6554 /* offset[17:12] = insn[21:16]. */
6555 offset |= (insn & 0x003f0000) >> 4;
6556 /* offset[31:20] = insn[26]. */
6557 offset |= ((int32_t)((insn << 5) & 0x80000000)) >> 11;
6558 /* offset[18] = insn[13]. */
6559 offset |= (insn & (1 << 13)) << 5;
6560 /* offset[19] = insn[11]. */
6561 offset |= (insn & (1 << 11)) << 8;
6562
6563 /* jump to the offset */
6564 addr = s->pc + offset;
6565 gen_jmp(s, addr);
6566 }
6567 } else {
6568 /* Data processing immediate. */
6569 if (insn & (1 << 25)) {
6570 if (insn & (1 << 24)) {
6571 if (insn & (1 << 20))
6572 goto illegal_op;
6573 /* Bitfield/Saturate. */
6574 op = (insn >> 21) & 7;
6575 imm = insn & 0x1f;
6576 shift = ((insn >> 6) & 3) | ((insn >> 10) & 0x1c);
6577 if (rn == 15)
6578 gen_op_movl_T1_im(0);
6579 else
6580 gen_movl_T1_reg(s, rn);
6581 switch (op) {
6582 case 2: /* Signed bitfield extract. */
6583 imm++;
6584 if (shift + imm > 32)
6585 goto illegal_op;
6586 if (imm < 32)
6587 gen_op_sbfx_T1(shift, imm);
6588 break;
6589 case 6: /* Unsigned bitfield extract. */
6590 imm++;
6591 if (shift + imm > 32)
6592 goto illegal_op;
6593 if (imm < 32)
6594 gen_op_ubfx_T1(shift, (1u << imm) - 1);
6595 break;
6596 case 3: /* Bitfield insert/clear. */
6597 if (imm < shift)
6598 goto illegal_op;
6599 imm = imm + 1 - shift;
6600 if (imm != 32) {
6601 gen_movl_T0_reg(s, rd);
6602 gen_op_bfi_T1_T0(shift, ((1u << imm) - 1) << shift);
6603 }
6604 break;
6605 case 7:
6606 goto illegal_op;
6607 default: /* Saturate. */
6608 gen_movl_T1_reg(s, rn);
6609 if (shift) {
6610 if (op & 1)
6611 gen_op_sarl_T1_im(shift);
6612 else
6613 gen_op_shll_T1_im(shift);
6614 }
6615 if (op & 4) {
6616 /* Unsigned. */
6617 gen_op_ssat_T1(imm);
6618 if ((op & 1) && shift == 0)
6619 gen_op_usat16_T1(imm);
6620 else
6621 gen_op_usat_T1(imm);
2c0262af 6622 } else {
9ee6e8bb
PB
6623 /* Signed. */
6624 gen_op_ssat_T1(imm);
6625 if ((op & 1) && shift == 0)
6626 gen_op_ssat16_T1(imm);
6627 else
6628 gen_op_ssat_T1(imm);
2c0262af 6629 }
9ee6e8bb 6630 break;
2c0262af 6631 }
9ee6e8bb
PB
6632 gen_movl_reg_T1(s, rd);
6633 } else {
6634 imm = ((insn & 0x04000000) >> 15)
6635 | ((insn & 0x7000) >> 4) | (insn & 0xff);
6636 if (insn & (1 << 22)) {
6637 /* 16-bit immediate. */
6638 imm |= (insn >> 4) & 0xf000;
6639 if (insn & (1 << 23)) {
6640 /* movt */
6641 gen_movl_T0_reg(s, rd);
6642 gen_op_movtop_T0_im(imm << 16);
2c0262af 6643 } else {
9ee6e8bb
PB
6644 /* movw */
6645 gen_op_movl_T0_im(imm);
2c0262af
FB
6646 }
6647 } else {
9ee6e8bb
PB
6648 /* Add/sub 12-bit immediate. */
6649 if (rn == 15) {
6650 addr = s->pc & ~(uint32_t)3;
6651 if (insn & (1 << 23))
6652 addr -= imm;
6653 else
6654 addr += imm;
6655 gen_op_movl_T0_im(addr);
2c0262af 6656 } else {
9ee6e8bb
PB
6657 gen_movl_T0_reg(s, rn);
6658 gen_op_movl_T1_im(imm);
6659 if (insn & (1 << 23))
6660 gen_op_subl_T0_T1();
6661 else
6662 gen_op_addl_T0_T1();
2c0262af 6663 }
9ee6e8bb
PB
6664 }
6665 gen_movl_reg_T0(s, rd);
191abaa2 6666 }
9ee6e8bb
PB
6667 } else {
6668 int shifter_out = 0;
6669 /* modified 12-bit immediate. */
6670 shift = ((insn & 0x04000000) >> 23) | ((insn & 0x7000) >> 12);
6671 imm = (insn & 0xff);
6672 switch (shift) {
6673 case 0: /* XY */
6674 /* Nothing to do. */
6675 break;
6676 case 1: /* 00XY00XY */
6677 imm |= imm << 16;
6678 break;
6679 case 2: /* XY00XY00 */
6680 imm |= imm << 16;
6681 imm <<= 8;
6682 break;
6683 case 3: /* XYXYXYXY */
6684 imm |= imm << 16;
6685 imm |= imm << 8;
6686 break;
6687 default: /* Rotated constant. */
6688 shift = (shift << 1) | (imm >> 7);
6689 imm |= 0x80;
6690 imm = imm << (32 - shift);
6691 shifter_out = 1;
6692 break;
b5ff1b31 6693 }
9ee6e8bb
PB
6694 gen_op_movl_T1_im(imm);
6695 rn = (insn >> 16) & 0xf;
6696 if (rn == 15)
6697 gen_op_movl_T0_im(0);
6698 else
6699 gen_movl_T0_reg(s, rn);
6700 op = (insn >> 21) & 0xf;
6701 if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
6702 shifter_out))
6703 goto illegal_op;
6704 rd = (insn >> 8) & 0xf;
6705 if (rd != 15) {
6706 gen_movl_reg_T0(s, rd);
2c0262af 6707 }
2c0262af 6708 }
9ee6e8bb
PB
6709 }
6710 break;
6711 case 12: /* Load/store single data item. */
6712 {
6713 int postinc = 0;
6714 int writeback = 0;
6715 if ((insn & 0x01100000) == 0x01000000) {
6716 if (disas_neon_ls_insn(env, s, insn))
c1713132 6717 goto illegal_op;
9ee6e8bb
PB
6718 break;
6719 }
6720 if (rn == 15) {
6721 /* PC relative. */
6722 /* s->pc has already been incremented by 4. */
6723 imm = s->pc & 0xfffffffc;
6724 if (insn & (1 << 23))
6725 imm += insn & 0xfff;
6726 else
6727 imm -= insn & 0xfff;
6728 gen_op_movl_T1_im(imm);
6729 } else {
6730 gen_movl_T1_reg(s, rn);
6731 if (insn & (1 << 23)) {
6732 /* Positive offset. */
6733 imm = insn & 0xfff;
6734 gen_op_addl_T1_im(imm);
6735 } else {
6736 op = (insn >> 8) & 7;
6737 imm = insn & 0xff;
6738 switch (op) {
6739 case 0: case 8: /* Shifted Register. */
6740 shift = (insn >> 4) & 0xf;
6741 if (shift > 3)
18c9b560 6742 goto illegal_op;
9ee6e8bb
PB
6743 gen_movl_T2_reg(s, rm);
6744 if (shift)
6745 gen_op_shll_T2_im(shift);
6746 gen_op_addl_T1_T2();
6747 break;
6748 case 4: /* Negative offset. */
6749 gen_op_addl_T1_im(-imm);
6750 break;
6751 case 6: /* User privilege. */
6752 gen_op_addl_T1_im(imm);
6753 break;
6754 case 1: /* Post-decrement. */
6755 imm = -imm;
6756 /* Fall through. */
6757 case 3: /* Post-increment. */
6758 gen_op_movl_T2_im(imm);
6759 postinc = 1;
6760 writeback = 1;
6761 break;
6762 case 5: /* Pre-decrement. */
6763 imm = -imm;
6764 /* Fall through. */
6765 case 7: /* Pre-increment. */
6766 gen_op_addl_T1_im(imm);
6767 writeback = 1;
6768 break;
6769 default:
b7bcbe95 6770 goto illegal_op;
9ee6e8bb
PB
6771 }
6772 }
6773 }
6774 op = ((insn >> 21) & 3) | ((insn >> 22) & 4);
6775 if (insn & (1 << 20)) {
6776 /* Load. */
6777 if (rs == 15 && op != 2) {
6778 if (op & 2)
b5ff1b31 6779 goto illegal_op;
9ee6e8bb
PB
6780 /* Memory hint. Implemented as NOP. */
6781 } else {
6782 switch (op) {
6783 case 0: gen_ldst(ldub, s); break;
6784 case 4: gen_ldst(ldsb, s); break;
6785 case 1: gen_ldst(lduw, s); break;
6786 case 5: gen_ldst(ldsw, s); break;
6787 case 2: gen_ldst(ldl, s); break;
6788 default: goto illegal_op;
6789 }
6790 if (rs == 15) {
6791 gen_bx(s);
6792 } else {
6793 gen_movl_reg_T0(s, rs);
6794 }
6795 }
6796 } else {
6797 /* Store. */
6798 if (rs == 15)
b7bcbe95 6799 goto illegal_op;
9ee6e8bb
PB
6800 gen_movl_T0_reg(s, rs);
6801 switch (op) {
6802 case 0: gen_ldst(stb, s); break;
6803 case 1: gen_ldst(stw, s); break;
6804 case 2: gen_ldst(stl, s); break;
6805 default: goto illegal_op;
b7bcbe95 6806 }
2c0262af 6807 }
9ee6e8bb
PB
6808 if (postinc)
6809 gen_op_addl_T1_im(imm);
6810 if (writeback)
6811 gen_movl_reg_T1(s, rn);
6812 }
6813 break;
6814 default:
6815 goto illegal_op;
2c0262af 6816 }
9ee6e8bb
PB
6817 return 0;
6818illegal_op:
6819 return 1;
2c0262af
FB
6820}
6821
9ee6e8bb 6822static void disas_thumb_insn(CPUState *env, DisasContext *s)
99c475ab
FB
6823{
6824 uint32_t val, insn, op, rm, rn, rd, shift, cond;
6825 int32_t offset;
6826 int i;
6827
9ee6e8bb
PB
6828 if (s->condexec_mask) {
6829 cond = s->condexec_cond;
6830 s->condlabel = gen_new_label();
6831 gen_test_cc[cond ^ 1](s->condlabel);
6832 s->condjmp = 1;
6833 }
6834
b5ff1b31 6835 insn = lduw_code(s->pc);
99c475ab 6836 s->pc += 2;
b5ff1b31 6837
99c475ab
FB
6838 switch (insn >> 12) {
6839 case 0: case 1:
6840 rd = insn & 7;
6841 op = (insn >> 11) & 3;
6842 if (op == 3) {
6843 /* add/subtract */
6844 rn = (insn >> 3) & 7;
6845 gen_movl_T0_reg(s, rn);
6846 if (insn & (1 << 10)) {
6847 /* immediate */
6848 gen_op_movl_T1_im((insn >> 6) & 7);
6849 } else {
6850 /* reg */
6851 rm = (insn >> 6) & 7;
6852 gen_movl_T1_reg(s, rm);
6853 }
9ee6e8bb
PB
6854 if (insn & (1 << 9)) {
6855 if (s->condexec_mask)
6856 gen_op_subl_T0_T1();
6857 else
6858 gen_op_subl_T0_T1_cc();
6859 } else {
6860 if (s->condexec_mask)
6861 gen_op_addl_T0_T1();
6862 else
6863 gen_op_addl_T0_T1_cc();
6864 }
99c475ab
FB
6865 gen_movl_reg_T0(s, rd);
6866 } else {
6867 /* shift immediate */
6868 rm = (insn >> 3) & 7;
6869 shift = (insn >> 6) & 0x1f;
6870 gen_movl_T0_reg(s, rm);
9ee6e8bb
PB
6871 if (s->condexec_mask)
6872 gen_shift_T0_im_thumb[op](shift);
6873 else
6874 gen_shift_T0_im_thumb_cc[op](shift);
99c475ab
FB
6875 gen_movl_reg_T0(s, rd);
6876 }
6877 break;
6878 case 2: case 3:
6879 /* arithmetic large immediate */
6880 op = (insn >> 11) & 3;
6881 rd = (insn >> 8) & 0x7;
6882 if (op == 0) {
6883 gen_op_movl_T0_im(insn & 0xff);
6884 } else {
6885 gen_movl_T0_reg(s, rd);
6886 gen_op_movl_T1_im(insn & 0xff);
6887 }
6888 switch (op) {
6889 case 0: /* mov */
9ee6e8bb
PB
6890 if (!s->condexec_mask)
6891 gen_op_logic_T0_cc();
99c475ab
FB
6892 break;
6893 case 1: /* cmp */
6894 gen_op_subl_T0_T1_cc();
6895 break;
6896 case 2: /* add */
9ee6e8bb
PB
6897 if (s->condexec_mask)
6898 gen_op_addl_T0_T1();
6899 else
6900 gen_op_addl_T0_T1_cc();
99c475ab
FB
6901 break;
6902 case 3: /* sub */
9ee6e8bb
PB
6903 if (s->condexec_mask)
6904 gen_op_subl_T0_T1();
6905 else
6906 gen_op_subl_T0_T1_cc();
99c475ab
FB
6907 break;
6908 }
6909 if (op != 1)
6910 gen_movl_reg_T0(s, rd);
6911 break;
6912 case 4:
6913 if (insn & (1 << 11)) {
6914 rd = (insn >> 8) & 7;
5899f386
FB
6915 /* load pc-relative. Bit 1 of PC is ignored. */
6916 val = s->pc + 2 + ((insn & 0xff) * 4);
6917 val &= ~(uint32_t)2;
99c475ab 6918 gen_op_movl_T1_im(val);
b5ff1b31 6919 gen_ldst(ldl, s);
99c475ab
FB
6920 gen_movl_reg_T0(s, rd);
6921 break;
6922 }
6923 if (insn & (1 << 10)) {
6924 /* data processing extended or blx */
6925 rd = (insn & 7) | ((insn >> 4) & 8);
6926 rm = (insn >> 3) & 0xf;
6927 op = (insn >> 8) & 3;
6928 switch (op) {
6929 case 0: /* add */
6930 gen_movl_T0_reg(s, rd);
6931 gen_movl_T1_reg(s, rm);
6932 gen_op_addl_T0_T1();
6933 gen_movl_reg_T0(s, rd);
6934 break;
6935 case 1: /* cmp */
6936 gen_movl_T0_reg(s, rd);
6937 gen_movl_T1_reg(s, rm);
6938 gen_op_subl_T0_T1_cc();
6939 break;
6940 case 2: /* mov/cpy */
6941 gen_movl_T0_reg(s, rm);
6942 gen_movl_reg_T0(s, rd);
6943 break;
6944 case 3:/* branch [and link] exchange thumb register */
6945 if (insn & (1 << 7)) {
6946 val = (uint32_t)s->pc | 1;
6947 gen_op_movl_T1_im(val);
6948 gen_movl_reg_T1(s, 14);
6949 }
6950 gen_movl_T0_reg(s, rm);
6951 gen_bx(s);
6952 break;
6953 }
6954 break;
6955 }
6956
6957 /* data processing register */
6958 rd = insn & 7;
6959 rm = (insn >> 3) & 7;
6960 op = (insn >> 6) & 0xf;
6961 if (op == 2 || op == 3 || op == 4 || op == 7) {
6962 /* the shift/rotate ops want the operands backwards */
6963 val = rm;
6964 rm = rd;
6965 rd = val;
6966 val = 1;
6967 } else {
6968 val = 0;
6969 }
6970
6971 if (op == 9) /* neg */
6972 gen_op_movl_T0_im(0);
6973 else if (op != 0xf) /* mvn doesn't read its first operand */
6974 gen_movl_T0_reg(s, rd);
6975
6976 gen_movl_T1_reg(s, rm);
5899f386 6977 switch (op) {
99c475ab
FB
6978 case 0x0: /* and */
6979 gen_op_andl_T0_T1();
9ee6e8bb
PB
6980 if (!s->condexec_mask)
6981 gen_op_logic_T0_cc();
99c475ab
FB
6982 break;
6983 case 0x1: /* eor */
6984 gen_op_xorl_T0_T1();
9ee6e8bb
PB
6985 if (!s->condexec_mask)
6986 gen_op_logic_T0_cc();
99c475ab
FB
6987 break;
6988 case 0x2: /* lsl */
9ee6e8bb
PB
6989 if (s->condexec_mask) {
6990 gen_op_shll_T1_T0();
6991 } else {
6992 gen_op_shll_T1_T0_cc();
6993 gen_op_logic_T1_cc();
6994 }
99c475ab
FB
6995 break;
6996 case 0x3: /* lsr */
9ee6e8bb
PB
6997 if (s->condexec_mask) {
6998 gen_op_shrl_T1_T0();
6999 } else {
7000 gen_op_shrl_T1_T0_cc();
7001 gen_op_logic_T1_cc();
7002 }
99c475ab
FB
7003 break;
7004 case 0x4: /* asr */
9ee6e8bb
PB
7005 if (s->condexec_mask) {
7006 gen_op_sarl_T1_T0();
7007 } else {
7008 gen_op_sarl_T1_T0_cc();
7009 gen_op_logic_T1_cc();
7010 }
99c475ab
FB
7011 break;
7012 case 0x5: /* adc */
9ee6e8bb
PB
7013 if (s->condexec_mask)
7014 gen_op_adcl_T0_T1();
7015 else
7016 gen_op_adcl_T0_T1_cc();
99c475ab
FB
7017 break;
7018 case 0x6: /* sbc */
9ee6e8bb
PB
7019 if (s->condexec_mask)
7020 gen_op_sbcl_T0_T1();
7021 else
7022 gen_op_sbcl_T0_T1_cc();
99c475ab
FB
7023 break;
7024 case 0x7: /* ror */
9ee6e8bb
PB
7025 if (s->condexec_mask) {
7026 gen_op_rorl_T1_T0();
7027 } else {
7028 gen_op_rorl_T1_T0_cc();
7029 gen_op_logic_T1_cc();
7030 }
99c475ab
FB
7031 break;
7032 case 0x8: /* tst */
7033 gen_op_andl_T0_T1();
7034 gen_op_logic_T0_cc();
7035 rd = 16;
5899f386 7036 break;
99c475ab 7037 case 0x9: /* neg */
9ee6e8bb
PB
7038 if (s->condexec_mask)
7039 gen_op_subl_T0_T1();
7040 else
7041 gen_op_subl_T0_T1_cc();
99c475ab
FB
7042 break;
7043 case 0xa: /* cmp */
7044 gen_op_subl_T0_T1_cc();
7045 rd = 16;
7046 break;
7047 case 0xb: /* cmn */
7048 gen_op_addl_T0_T1_cc();
7049 rd = 16;
7050 break;
7051 case 0xc: /* orr */
7052 gen_op_orl_T0_T1();
9ee6e8bb
PB
7053 if (!s->condexec_mask)
7054 gen_op_logic_T0_cc();
99c475ab
FB
7055 break;
7056 case 0xd: /* mul */
7057 gen_op_mull_T0_T1();
9ee6e8bb
PB
7058 if (!s->condexec_mask)
7059 gen_op_logic_T0_cc();
99c475ab
FB
7060 break;
7061 case 0xe: /* bic */
7062 gen_op_bicl_T0_T1();
9ee6e8bb
PB
7063 if (!s->condexec_mask)
7064 gen_op_logic_T0_cc();
99c475ab
FB
7065 break;
7066 case 0xf: /* mvn */
7067 gen_op_notl_T1();
9ee6e8bb
PB
7068 if (!s->condexec_mask)
7069 gen_op_logic_T1_cc();
99c475ab 7070 val = 1;
5899f386 7071 rm = rd;
99c475ab
FB
7072 break;
7073 }
7074 if (rd != 16) {
7075 if (val)
5899f386 7076 gen_movl_reg_T1(s, rm);
99c475ab
FB
7077 else
7078 gen_movl_reg_T0(s, rd);
7079 }
7080 break;
7081
7082 case 5:
7083 /* load/store register offset. */
7084 rd = insn & 7;
7085 rn = (insn >> 3) & 7;
7086 rm = (insn >> 6) & 7;
7087 op = (insn >> 9) & 7;
7088 gen_movl_T1_reg(s, rn);
7089 gen_movl_T2_reg(s, rm);
7090 gen_op_addl_T1_T2();
7091
7092 if (op < 3) /* store */
7093 gen_movl_T0_reg(s, rd);
7094
7095 switch (op) {
7096 case 0: /* str */
b5ff1b31 7097 gen_ldst(stl, s);
99c475ab
FB
7098 break;
7099 case 1: /* strh */
b5ff1b31 7100 gen_ldst(stw, s);
99c475ab
FB
7101 break;
7102 case 2: /* strb */
b5ff1b31 7103 gen_ldst(stb, s);
99c475ab
FB
7104 break;
7105 case 3: /* ldrsb */
b5ff1b31 7106 gen_ldst(ldsb, s);
99c475ab
FB
7107 break;
7108 case 4: /* ldr */
b5ff1b31 7109 gen_ldst(ldl, s);
99c475ab
FB
7110 break;
7111 case 5: /* ldrh */
b5ff1b31 7112 gen_ldst(lduw, s);
99c475ab
FB
7113 break;
7114 case 6: /* ldrb */
b5ff1b31 7115 gen_ldst(ldub, s);
99c475ab
FB
7116 break;
7117 case 7: /* ldrsh */
b5ff1b31 7118 gen_ldst(ldsw, s);
99c475ab
FB
7119 break;
7120 }
7121 if (op >= 3) /* load */
7122 gen_movl_reg_T0(s, rd);
7123 break;
7124
7125 case 6:
7126 /* load/store word immediate offset */
7127 rd = insn & 7;
7128 rn = (insn >> 3) & 7;
7129 gen_movl_T1_reg(s, rn);
7130 val = (insn >> 4) & 0x7c;
7131 gen_op_movl_T2_im(val);
7132 gen_op_addl_T1_T2();
7133
7134 if (insn & (1 << 11)) {
7135 /* load */
b5ff1b31 7136 gen_ldst(ldl, s);
99c475ab
FB
7137 gen_movl_reg_T0(s, rd);
7138 } else {
7139 /* store */
7140 gen_movl_T0_reg(s, rd);
b5ff1b31 7141 gen_ldst(stl, s);
99c475ab
FB
7142 }
7143 break;
7144
7145 case 7:
7146 /* load/store byte immediate offset */
7147 rd = insn & 7;
7148 rn = (insn >> 3) & 7;
7149 gen_movl_T1_reg(s, rn);
7150 val = (insn >> 6) & 0x1f;
7151 gen_op_movl_T2_im(val);
7152 gen_op_addl_T1_T2();
7153
7154 if (insn & (1 << 11)) {
7155 /* load */
b5ff1b31 7156 gen_ldst(ldub, s);
99c475ab
FB
7157 gen_movl_reg_T0(s, rd);
7158 } else {
7159 /* store */
7160 gen_movl_T0_reg(s, rd);
b5ff1b31 7161 gen_ldst(stb, s);
99c475ab
FB
7162 }
7163 break;
7164
7165 case 8:
7166 /* load/store halfword immediate offset */
7167 rd = insn & 7;
7168 rn = (insn >> 3) & 7;
7169 gen_movl_T1_reg(s, rn);
7170 val = (insn >> 5) & 0x3e;
7171 gen_op_movl_T2_im(val);
7172 gen_op_addl_T1_T2();
7173
7174 if (insn & (1 << 11)) {
7175 /* load */
b5ff1b31 7176 gen_ldst(lduw, s);
99c475ab
FB
7177 gen_movl_reg_T0(s, rd);
7178 } else {
7179 /* store */
7180 gen_movl_T0_reg(s, rd);
b5ff1b31 7181 gen_ldst(stw, s);
99c475ab
FB
7182 }
7183 break;
7184
7185 case 9:
7186 /* load/store from stack */
7187 rd = (insn >> 8) & 7;
7188 gen_movl_T1_reg(s, 13);
7189 val = (insn & 0xff) * 4;
7190 gen_op_movl_T2_im(val);
7191 gen_op_addl_T1_T2();
7192
7193 if (insn & (1 << 11)) {
7194 /* load */
b5ff1b31 7195 gen_ldst(ldl, s);
99c475ab
FB
7196 gen_movl_reg_T0(s, rd);
7197 } else {
7198 /* store */
7199 gen_movl_T0_reg(s, rd);
b5ff1b31 7200 gen_ldst(stl, s);
99c475ab
FB
7201 }
7202 break;
7203
7204 case 10:
7205 /* add to high reg */
7206 rd = (insn >> 8) & 7;
5899f386
FB
7207 if (insn & (1 << 11)) {
7208 /* SP */
7209 gen_movl_T0_reg(s, 13);
7210 } else {
7211 /* PC. bit 1 is ignored. */
7212 gen_op_movl_T0_im((s->pc + 2) & ~(uint32_t)2);
7213 }
99c475ab
FB
7214 val = (insn & 0xff) * 4;
7215 gen_op_movl_T1_im(val);
7216 gen_op_addl_T0_T1();
7217 gen_movl_reg_T0(s, rd);
7218 break;
7219
7220 case 11:
7221 /* misc */
7222 op = (insn >> 8) & 0xf;
7223 switch (op) {
7224 case 0:
7225 /* adjust stack pointer */
7226 gen_movl_T1_reg(s, 13);
7227 val = (insn & 0x7f) * 4;
7228 if (insn & (1 << 7))
7229 val = -(int32_t)val;
7230 gen_op_movl_T2_im(val);
7231 gen_op_addl_T1_T2();
7232 gen_movl_reg_T1(s, 13);
7233 break;
7234
9ee6e8bb
PB
7235 case 2: /* sign/zero extend. */
7236 ARCH(6);
7237 rd = insn & 7;
7238 rm = (insn >> 3) & 7;
7239 gen_movl_T1_reg(s, rm);
7240 switch ((insn >> 6) & 3) {
7241 case 0: gen_op_sxth_T1(); break;
7242 case 1: gen_op_sxtb_T1(); break;
7243 case 2: gen_op_uxth_T1(); break;
7244 case 3: gen_op_uxtb_T1(); break;
7245 }
7246 gen_movl_reg_T1(s, rd);
7247 break;
99c475ab
FB
7248 case 4: case 5: case 0xc: case 0xd:
7249 /* push/pop */
7250 gen_movl_T1_reg(s, 13);
5899f386
FB
7251 if (insn & (1 << 8))
7252 offset = 4;
99c475ab 7253 else
5899f386
FB
7254 offset = 0;
7255 for (i = 0; i < 8; i++) {
7256 if (insn & (1 << i))
7257 offset += 4;
7258 }
7259 if ((insn & (1 << 11)) == 0) {
7260 gen_op_movl_T2_im(-offset);
7261 gen_op_addl_T1_T2();
7262 }
7263 gen_op_movl_T2_im(4);
99c475ab
FB
7264 for (i = 0; i < 8; i++) {
7265 if (insn & (1 << i)) {
7266 if (insn & (1 << 11)) {
7267 /* pop */
b5ff1b31 7268 gen_ldst(ldl, s);
99c475ab
FB
7269 gen_movl_reg_T0(s, i);
7270 } else {
7271 /* push */
7272 gen_movl_T0_reg(s, i);
b5ff1b31 7273 gen_ldst(stl, s);
99c475ab 7274 }
5899f386 7275 /* advance to the next address. */
99c475ab
FB
7276 gen_op_addl_T1_T2();
7277 }
7278 }
7279 if (insn & (1 << 8)) {
7280 if (insn & (1 << 11)) {
7281 /* pop pc */
b5ff1b31 7282 gen_ldst(ldl, s);
99c475ab
FB
7283 /* don't set the pc until the rest of the instruction
7284 has completed */
7285 } else {
7286 /* push lr */
7287 gen_movl_T0_reg(s, 14);
b5ff1b31 7288 gen_ldst(stl, s);
99c475ab
FB
7289 }
7290 gen_op_addl_T1_T2();
7291 }
5899f386
FB
7292 if ((insn & (1 << 11)) == 0) {
7293 gen_op_movl_T2_im(-offset);
7294 gen_op_addl_T1_T2();
7295 }
99c475ab
FB
7296 /* write back the new stack pointer */
7297 gen_movl_reg_T1(s, 13);
7298 /* set the new PC value */
7299 if ((insn & 0x0900) == 0x0900)
7300 gen_bx(s);
7301 break;
7302
9ee6e8bb
PB
7303 case 1: case 3: case 9: case 11: /* czb */
7304 rm = insn & 7;
7305 gen_movl_T0_reg(s, rm);
7306 s->condlabel = gen_new_label();
7307 s->condjmp = 1;
7308 if (insn & (1 << 11))
7309 gen_op_testn_T0(s->condlabel);
7310 else
7311 gen_op_test_T0(s->condlabel);
7312
7313 offset = ((insn & 0xf8) >> 2) | (insn & 0x200) >> 3;
7314 val = (uint32_t)s->pc + 2;
7315 val += offset;
7316 gen_jmp(s, val);
7317 break;
7318
7319 case 15: /* IT, nop-hint. */
7320 if ((insn & 0xf) == 0) {
7321 gen_nop_hint(s, (insn >> 4) & 0xf);
7322 break;
7323 }
7324 /* If Then. */
7325 s->condexec_cond = (insn >> 4) & 0xe;
7326 s->condexec_mask = insn & 0x1f;
7327 /* No actual code generated for this insn, just setup state. */
7328 break;
7329
06c949e6 7330 case 0xe: /* bkpt */
9ee6e8bb 7331 gen_set_condexec(s);
06c949e6
PB
7332 gen_op_movl_T0_im((long)s->pc - 2);
7333 gen_op_movl_reg_TN[0][15]();
7334 gen_op_bkpt();
7335 s->is_jmp = DISAS_JUMP;
7336 break;
7337
9ee6e8bb
PB
7338 case 0xa: /* rev */
7339 ARCH(6);
7340 rn = (insn >> 3) & 0x7;
7341 rd = insn & 0x7;
7342 gen_movl_T0_reg(s, rn);
7343 switch ((insn >> 6) & 3) {
7344 case 0: gen_op_rev_T0(); break;
7345 case 1: gen_op_rev16_T0(); break;
7346 case 3: gen_op_revsh_T0(); break;
7347 default: goto illegal_op;
7348 }
7349 gen_movl_reg_T0(s, rd);
7350 break;
7351
7352 case 6: /* cps */
7353 ARCH(6);
7354 if (IS_USER(s))
7355 break;
7356 if (IS_M(env)) {
7357 val = (insn & (1 << 4)) != 0;
7358 gen_op_movl_T0_im(val);
7359 /* PRIMASK */
7360 if (insn & 1)
7361 gen_op_v7m_msr_T0(16);
7362 /* FAULTMASK */
7363 if (insn & 2)
7364 gen_op_v7m_msr_T0(17);
7365
7366 gen_lookup_tb(s);
7367 } else {
7368 if (insn & (1 << 4))
7369 shift = CPSR_A | CPSR_I | CPSR_F;
7370 else
7371 shift = 0;
7372
7373 val = ((insn & 7) << 6) & shift;
7374 gen_op_movl_T0_im(val);
7375 gen_set_psr_T0(s, shift, 0);
7376 }
7377 break;
7378
99c475ab
FB
7379 default:
7380 goto undef;
7381 }
7382 break;
7383
7384 case 12:
7385 /* load/store multiple */
7386 rn = (insn >> 8) & 0x7;
7387 gen_movl_T1_reg(s, rn);
7388 gen_op_movl_T2_im(4);
99c475ab
FB
7389 for (i = 0; i < 8; i++) {
7390 if (insn & (1 << i)) {
99c475ab
FB
7391 if (insn & (1 << 11)) {
7392 /* load */
b5ff1b31 7393 gen_ldst(ldl, s);
99c475ab
FB
7394 gen_movl_reg_T0(s, i);
7395 } else {
7396 /* store */
7397 gen_movl_T0_reg(s, i);
b5ff1b31 7398 gen_ldst(stl, s);
99c475ab 7399 }
5899f386
FB
7400 /* advance to the next address */
7401 gen_op_addl_T1_T2();
99c475ab
FB
7402 }
7403 }
5899f386 7404 /* Base register writeback. */
b5ff1b31
FB
7405 if ((insn & (1 << rn)) == 0)
7406 gen_movl_reg_T1(s, rn);
99c475ab
FB
7407 break;
7408
7409 case 13:
7410 /* conditional branch or swi */
7411 cond = (insn >> 8) & 0xf;
7412 if (cond == 0xe)
7413 goto undef;
7414
7415 if (cond == 0xf) {
7416 /* swi */
9ee6e8bb 7417 gen_set_condexec(s);
99c475ab
FB
7418 gen_op_movl_T0_im((long)s->pc | 1);
7419 /* Don't set r15. */
7420 gen_op_movl_reg_TN[0][15]();
9ee6e8bb 7421 s->is_jmp = DISAS_SWI;
99c475ab
FB
7422 break;
7423 }
7424 /* generate a conditional jump to next instruction */
e50e6a20
FB
7425 s->condlabel = gen_new_label();
7426 gen_test_cc[cond ^ 1](s->condlabel);
7427 s->condjmp = 1;
99c475ab
FB
7428 gen_movl_T1_reg(s, 15);
7429
7430 /* jump to the offset */
5899f386 7431 val = (uint32_t)s->pc + 2;
99c475ab 7432 offset = ((int32_t)insn << 24) >> 24;
5899f386 7433 val += offset << 1;
8aaca4c0 7434 gen_jmp(s, val);
99c475ab
FB
7435 break;
7436
7437 case 14:
358bf29e 7438 if (insn & (1 << 11)) {
9ee6e8bb
PB
7439 if (disas_thumb2_insn(env, s, insn))
7440 goto undef32;
358bf29e
PB
7441 break;
7442 }
9ee6e8bb 7443 /* unconditional branch */
99c475ab
FB
7444 val = (uint32_t)s->pc;
7445 offset = ((int32_t)insn << 21) >> 21;
7446 val += (offset << 1) + 2;
8aaca4c0 7447 gen_jmp(s, val);
99c475ab
FB
7448 break;
7449
7450 case 15:
9ee6e8bb
PB
7451 if (disas_thumb2_insn(env, s, insn))
7452 goto undef32;
7453 break;
99c475ab
FB
7454 }
7455 return;
9ee6e8bb
PB
7456undef32:
7457 gen_set_condexec(s);
7458 gen_op_movl_T0_im((long)s->pc - 4);
7459 gen_op_movl_reg_TN[0][15]();
7460 gen_op_undef_insn();
7461 s->is_jmp = DISAS_JUMP;
7462 return;
7463illegal_op:
99c475ab 7464undef:
9ee6e8bb 7465 gen_set_condexec(s);
5899f386 7466 gen_op_movl_T0_im((long)s->pc - 2);
99c475ab
FB
7467 gen_op_movl_reg_TN[0][15]();
7468 gen_op_undef_insn();
7469 s->is_jmp = DISAS_JUMP;
7470}
7471
2c0262af
FB
7472/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7473 basic block 'tb'. If search_pc is TRUE, also generate PC
7474 information for each intermediate instruction. */
5fafdf24
TS
7475static inline int gen_intermediate_code_internal(CPUState *env,
7476 TranslationBlock *tb,
2c0262af
FB
7477 int search_pc)
7478{
7479 DisasContext dc1, *dc = &dc1;
7480 uint16_t *gen_opc_end;
7481 int j, lj;
0fa85d43 7482 target_ulong pc_start;
b5ff1b31 7483 uint32_t next_page_start;
3b46e624 7484
2c0262af 7485 /* generate intermediate code */
0fa85d43 7486 pc_start = tb->pc;
3b46e624 7487
2c0262af
FB
7488 dc->tb = tb;
7489
7490 gen_opc_ptr = gen_opc_buf;
7491 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7492 gen_opparam_ptr = gen_opparam_buf;
7493
7494 dc->is_jmp = DISAS_NEXT;
7495 dc->pc = pc_start;
8aaca4c0 7496 dc->singlestep_enabled = env->singlestep_enabled;
e50e6a20 7497 dc->condjmp = 0;
5899f386 7498 dc->thumb = env->thumb;
9ee6e8bb
PB
7499 dc->condexec_mask = (env->condexec_bits & 0xf) << 1;
7500 dc->condexec_cond = env->condexec_bits >> 4;
6658ffb8 7501 dc->is_mem = 0;
b5ff1b31 7502#if !defined(CONFIG_USER_ONLY)
9ee6e8bb
PB
7503 if (IS_M(env)) {
7504 dc->user = ((env->v7m.exception == 0) && (env->v7m.control & 1));
7505 } else {
7506 dc->user = (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_USR;
7507 }
b5ff1b31
FB
7508#endif
7509 next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
e50e6a20 7510 nb_gen_labels = 0;
2c0262af 7511 lj = -1;
9ee6e8bb
PB
7512 /* Reset the conditional execution bits immediately. This avoids
7513 complications trying to do it at the end of the block. */
7514 if (env->condexec_bits)
7515 gen_op_set_condexec(0);
2c0262af 7516 do {
9ee6e8bb
PB
7517#ifndef CONFIG_USER_ONLY
7518 if (dc->pc >= 0xfffffff0 && IS_M(env)) {
7519 /* We always get here via a jump, so know we are not in a
7520 conditional execution block. */
7521 gen_op_exception_exit();
7522 }
7523#endif
7524
1fddef4b
FB
7525 if (env->nb_breakpoints > 0) {
7526 for(j = 0; j < env->nb_breakpoints; j++) {
7527 if (env->breakpoints[j] == dc->pc) {
9ee6e8bb 7528 gen_set_condexec(dc);
1fddef4b
FB
7529 gen_op_movl_T0_im((long)dc->pc);
7530 gen_op_movl_reg_TN[0][15]();
7531 gen_op_debug();
7532 dc->is_jmp = DISAS_JUMP;
9ee6e8bb
PB
7533 /* Advance PC so that clearing the breakpoint will
7534 invalidate this TB. */
7535 dc->pc += 2;
7536 goto done_generating;
1fddef4b
FB
7537 break;
7538 }
7539 }
7540 }
2c0262af
FB
7541 if (search_pc) {
7542 j = gen_opc_ptr - gen_opc_buf;
7543 if (lj < j) {
7544 lj++;
7545 while (lj < j)
7546 gen_opc_instr_start[lj++] = 0;
7547 }
0fa85d43 7548 gen_opc_pc[lj] = dc->pc;
2c0262af
FB
7549 gen_opc_instr_start[lj] = 1;
7550 }
e50e6a20 7551
9ee6e8bb
PB
7552 if (env->thumb) {
7553 disas_thumb_insn(env, dc);
7554 if (dc->condexec_mask) {
7555 dc->condexec_cond = (dc->condexec_cond & 0xe)
7556 | ((dc->condexec_mask >> 4) & 1);
7557 dc->condexec_mask = (dc->condexec_mask << 1) & 0x1f;
7558 if (dc->condexec_mask == 0) {
7559 dc->condexec_cond = 0;
7560 }
7561 }
7562 } else {
7563 disas_arm_insn(env, dc);
7564 }
e50e6a20
FB
7565
7566 if (dc->condjmp && !dc->is_jmp) {
7567 gen_set_label(dc->condlabel);
7568 dc->condjmp = 0;
7569 }
6658ffb8
PB
7570 /* Terminate the TB on memory ops if watchpoints are present. */
7571 /* FIXME: This should be replacd by the deterministic execution
7572 * IRQ raising bits. */
7573 if (dc->is_mem && env->nb_watchpoints)
7574 break;
7575
e50e6a20
FB
7576 /* Translation stops when a conditional branch is enoutered.
7577 * Otherwise the subsequent code could get translated several times.
b5ff1b31
FB
7578 * Also stop translation when a page boundary is reached. This
7579 * ensures prefech aborts occur at the right place. */
1fddef4b
FB
7580 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
7581 !env->singlestep_enabled &&
b5ff1b31 7582 dc->pc < next_page_start);
9ee6e8bb 7583
b5ff1b31 7584 /* At this stage dc->condjmp will only be set when the skipped
9ee6e8bb
PB
7585 instruction was a conditional branch or trap, and the PC has
7586 already been written. */
8aaca4c0
FB
7587 if (__builtin_expect(env->singlestep_enabled, 0)) {
7588 /* Make sure the pc is updated, and raise a debug exception. */
e50e6a20 7589 if (dc->condjmp) {
9ee6e8bb
PB
7590 gen_set_condexec(dc);
7591 if (dc->is_jmp == DISAS_SWI) {
7592 gen_op_swi();
7593 } else {
7594 gen_op_debug();
7595 }
e50e6a20
FB
7596 gen_set_label(dc->condlabel);
7597 }
7598 if (dc->condjmp || !dc->is_jmp) {
8aaca4c0
FB
7599 gen_op_movl_T0_im((long)dc->pc);
7600 gen_op_movl_reg_TN[0][15]();
e50e6a20 7601 dc->condjmp = 0;
8aaca4c0 7602 }
9ee6e8bb
PB
7603 gen_set_condexec(dc);
7604 if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
7605 gen_op_swi();
7606 } else {
7607 /* FIXME: Single stepping a WFI insn will not halt
7608 the CPU. */
7609 gen_op_debug();
7610 }
8aaca4c0 7611 } else {
9ee6e8bb
PB
7612 /* While branches must always occur at the end of an IT block,
7613 there are a few other things that can cause us to terminate
7614 the TB in the middel of an IT block:
7615 - Exception generating instructions (bkpt, swi, undefined).
7616 - Page boundaries.
7617 - Hardware watchpoints.
7618 Hardware breakpoints have already been handled and skip this code.
7619 */
7620 gen_set_condexec(dc);
8aaca4c0 7621 switch(dc->is_jmp) {
8aaca4c0 7622 case DISAS_NEXT:
6e256c93 7623 gen_goto_tb(dc, 1, dc->pc);
8aaca4c0
FB
7624 break;
7625 default:
7626 case DISAS_JUMP:
7627 case DISAS_UPDATE:
7628 /* indicate that the hash table must be used to find the next TB */
7629 gen_op_movl_T0_0();
7630 gen_op_exit_tb();
7631 break;
7632 case DISAS_TB_JUMP:
7633 /* nothing more to generate */
7634 break;
9ee6e8bb
PB
7635 case DISAS_WFI:
7636 gen_op_wfi();
7637 break;
7638 case DISAS_SWI:
7639 gen_op_swi();
7640 break;
8aaca4c0 7641 }
e50e6a20
FB
7642 if (dc->condjmp) {
7643 gen_set_label(dc->condlabel);
9ee6e8bb 7644 gen_set_condexec(dc);
6e256c93 7645 gen_goto_tb(dc, 1, dc->pc);
e50e6a20
FB
7646 dc->condjmp = 0;
7647 }
2c0262af 7648 }
9ee6e8bb 7649done_generating:
2c0262af
FB
7650 *gen_opc_ptr = INDEX_op_end;
7651
7652#ifdef DEBUG_DISAS
e19e89a5 7653 if (loglevel & CPU_LOG_TB_IN_ASM) {
2c0262af
FB
7654 fprintf(logfile, "----------------\n");
7655 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
5899f386 7656 target_disas(logfile, pc_start, dc->pc - pc_start, env->thumb);
2c0262af 7657 fprintf(logfile, "\n");
e19e89a5
FB
7658 if (loglevel & (CPU_LOG_TB_OP)) {
7659 fprintf(logfile, "OP:\n");
7660 dump_ops(gen_opc_buf, gen_opparam_buf);
7661 fprintf(logfile, "\n");
7662 }
2c0262af
FB
7663 }
7664#endif
b5ff1b31
FB
7665 if (search_pc) {
7666 j = gen_opc_ptr - gen_opc_buf;
7667 lj++;
7668 while (lj <= j)
7669 gen_opc_instr_start[lj++] = 0;
b5ff1b31 7670 } else {
2c0262af 7671 tb->size = dc->pc - pc_start;
b5ff1b31 7672 }
2c0262af
FB
7673 return 0;
7674}
7675
7676int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7677{
7678 return gen_intermediate_code_internal(env, tb, 0);
7679}
7680
7681int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7682{
7683 return gen_intermediate_code_internal(env, tb, 1);
7684}
7685
b5ff1b31
FB
/* Printable names for the CPU mode field (low 4 bits of the PSR mode
   bits), used by cpu_dump_state.  Encodings with no architected mode
   are shown as "???".  */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
9ee6e8bb 7690
5fafdf24 7691void cpu_dump_state(CPUState *env, FILE *f,
7fe48483
FB
7692 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
7693 int flags)
2c0262af
FB
7694{
7695 int i;
bc380d17 7696 union {
b7bcbe95
FB
7697 uint32_t i;
7698 float s;
7699 } s0, s1;
7700 CPU_DoubleU d;
a94a6abf
PB
7701 /* ??? This assumes float64 and double have the same layout.
7702 Oh well, it's only debug dumps. */
7703 union {
7704 float64 f64;
7705 double d;
7706 } d0;
b5ff1b31 7707 uint32_t psr;
2c0262af
FB
7708
7709 for(i=0;i<16;i++) {
7fe48483 7710 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2c0262af 7711 if ((i % 4) == 3)
7fe48483 7712 cpu_fprintf(f, "\n");
2c0262af 7713 else
7fe48483 7714 cpu_fprintf(f, " ");
2c0262af 7715 }
b5ff1b31 7716 psr = cpsr_read(env);
687fa640
TS
7717 cpu_fprintf(f, "PSR=%08x %c%c%c%c %c %s%d\n",
7718 psr,
b5ff1b31
FB
7719 psr & (1 << 31) ? 'N' : '-',
7720 psr & (1 << 30) ? 'Z' : '-',
7721 psr & (1 << 29) ? 'C' : '-',
7722 psr & (1 << 28) ? 'V' : '-',
5fafdf24 7723 psr & CPSR_T ? 'T' : 'A',
b5ff1b31 7724 cpu_mode_names[psr & 0xf], (psr & 0x10) ? 32 : 26);
b7bcbe95
FB
7725
7726 for (i = 0; i < 16; i++) {
8e96005d
FB
7727 d.d = env->vfp.regs[i];
7728 s0.i = d.l.lower;
7729 s1.i = d.l.upper;
a94a6abf
PB
7730 d0.f64 = d.d;
7731 cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
b7bcbe95 7732 i * 2, (int)s0.i, s0.s,
a94a6abf 7733 i * 2 + 1, (int)s1.i, s1.s,
b7bcbe95 7734 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
a94a6abf 7735 d0.d);
b7bcbe95 7736 }
40f137e1 7737 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.xregs[ARM_VFP_FPSCR]);
2c0262af 7738}
a6b025d3 7739