/*
 * ARM instruction translation (target-arm/translate.c)
 * Source: mirror_qemu.git, commit 590959c2f65277d00d398c0085bec9dc3292cb30
 */
/*
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005 CodeSourcery, LLC
 * Copyright (c) 2007 OpenedHand, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* Feature-gating switches for optional ARM architecture revisions.
 * A value of 0 makes the corresponding ARCH() check reject the insn. */
#define ENABLE_ARCH_5J  0
#define ENABLE_ARCH_6   1
#define ENABLE_ARCH_6T2 1

/* Jump to the translator's illegal_op label when the feature is disabled. */
#define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
38 /* internal defines */
39 typedef struct DisasContext
{
42 /* Nonzero if this instruction has been conditionally skipped. */
44 /* The label that will be jumped to when the instruction is skipped. */
46 struct TranslationBlock
*tb
;
47 int singlestep_enabled
;
50 #if !defined(CONFIG_USER_ONLY)
55 #if defined(CONFIG_USER_ONLY)
58 #define IS_USER(s) (s->user)
61 #define DISAS_JUMP_NEXT 4
63 #ifdef USE_DIRECT_JUMP
66 #define TBPARAM(x) (long)(x)
69 /* XXX: move that elsewhere */
70 static uint16_t *gen_opc_ptr
;
71 static uint32_t *gen_opparam_ptr
;
76 #define DEF(s, n, copy_size) INDEX_op_ ## s,
84 static GenOpFunc1
*gen_test_cc
[14] = {
101 const uint8_t table_logic_cc
[16] = {
120 static GenOpFunc1
*gen_shift_T1_im
[4] = {
127 static GenOpFunc
*gen_shift_T1_0
[4] = {
134 static GenOpFunc1
*gen_shift_T2_im
[4] = {
141 static GenOpFunc
*gen_shift_T2_0
[4] = {
148 static GenOpFunc1
*gen_shift_T1_im_cc
[4] = {
149 gen_op_shll_T1_im_cc
,
150 gen_op_shrl_T1_im_cc
,
151 gen_op_sarl_T1_im_cc
,
152 gen_op_rorl_T1_im_cc
,
155 static GenOpFunc
*gen_shift_T1_0_cc
[4] = {
162 static GenOpFunc
*gen_shift_T1_T0
[4] = {
169 static GenOpFunc
*gen_shift_T1_T0_cc
[4] = {
170 gen_op_shll_T1_T0_cc
,
171 gen_op_shrl_T1_T0_cc
,
172 gen_op_sarl_T1_T0_cc
,
173 gen_op_rorl_T1_T0_cc
,
176 static GenOpFunc
*gen_op_movl_TN_reg
[3][16] = {
233 static GenOpFunc
*gen_op_movl_reg_TN
[2][16] = {
272 static GenOpFunc1
*gen_op_movl_TN_im
[3] = {
278 static GenOpFunc1
*gen_shift_T0_im_thumb
[3] = {
279 gen_op_shll_T0_im_thumb
,
280 gen_op_shrl_T0_im_thumb
,
281 gen_op_sarl_T0_im_thumb
,
284 static inline void gen_bx(DisasContext
*s
)
286 s
->is_jmp
= DISAS_UPDATE
;
291 #if defined(CONFIG_USER_ONLY)
292 #define gen_ldst(name, s) gen_op_##name##_raw()
294 #define gen_ldst(name, s) do { \
297 gen_op_##name##_user(); \
299 gen_op_##name##_kernel(); \
303 static inline void gen_movl_TN_reg(DisasContext
*s
, int reg
, int t
)
308 /* normaly, since we updated PC, we need only to add one insn */
310 val
= (long)s
->pc
+ 2;
312 val
= (long)s
->pc
+ 4;
313 gen_op_movl_TN_im
[t
](val
);
315 gen_op_movl_TN_reg
[t
][reg
]();
319 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
321 gen_movl_TN_reg(s
, reg
, 0);
324 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
326 gen_movl_TN_reg(s
, reg
, 1);
329 static inline void gen_movl_T2_reg(DisasContext
*s
, int reg
)
331 gen_movl_TN_reg(s
, reg
, 2);
334 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
336 gen_op_movl_reg_TN
[t
][reg
]();
338 s
->is_jmp
= DISAS_JUMP
;
342 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
344 gen_movl_reg_TN(s
, reg
, 0);
347 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
349 gen_movl_reg_TN(s
, reg
, 1);
352 /* Force a TB lookup after an instruction that changes the CPU state. */
353 static inline void gen_lookup_tb(DisasContext
*s
)
355 gen_op_movl_T0_im(s
->pc
);
356 gen_movl_reg_T0(s
, 15);
357 s
->is_jmp
= DISAS_UPDATE
;
360 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
)
362 int val
, rm
, shift
, shiftop
;
364 if (!(insn
& (1 << 25))) {
367 if (!(insn
& (1 << 23)))
370 gen_op_addl_T1_im(val
);
374 shift
= (insn
>> 7) & 0x1f;
375 gen_movl_T2_reg(s
, rm
);
376 shiftop
= (insn
>> 5) & 3;
378 gen_shift_T2_im
[shiftop
](shift
);
379 } else if (shiftop
!= 0) {
380 gen_shift_T2_0
[shiftop
]();
382 if (!(insn
& (1 << 23)))
389 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
394 if (insn
& (1 << 22)) {
396 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
397 if (!(insn
& (1 << 23)))
401 gen_op_addl_T1_im(val
);
405 gen_op_addl_T1_im(extra
);
407 gen_movl_T2_reg(s
, rm
);
408 if (!(insn
& (1 << 23)))
415 #define VFP_OP(name) \
416 static inline void gen_vfp_##name(int dp) \
419 gen_op_vfp_##name##d(); \
421 gen_op_vfp_##name##s(); \
443 static inline void gen_vfp_ld(DisasContext
*s
, int dp
)
446 gen_ldst(vfp_ldd
, s
);
448 gen_ldst(vfp_lds
, s
);
451 static inline void gen_vfp_st(DisasContext
*s
, int dp
)
454 gen_ldst(vfp_std
, s
);
456 gen_ldst(vfp_sts
, s
);
460 vfp_reg_offset (int dp
, int reg
)
463 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
465 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
466 + offsetof(CPU_DoubleU
, l
.upper
);
468 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
469 + offsetof(CPU_DoubleU
, l
.lower
);
472 static inline void gen_mov_F0_vreg(int dp
, int reg
)
475 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp
, reg
));
477 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp
, reg
));
480 static inline void gen_mov_F1_vreg(int dp
, int reg
)
483 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp
, reg
));
485 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp
, reg
));
488 static inline void gen_mov_vreg_F0(int dp
, int reg
)
491 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp
, reg
));
493 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp
, reg
));
/* Bit 20 of a coprocessor insn: set for the read/load direction
 * (cf. the TMRRC and MRA paths in this file). */
#define ARM_CP_RW_BIT (1 << 20)
498 static inline int gen_iwmmxt_address(DisasContext
*s
, uint32_t insn
)
503 rd
= (insn
>> 16) & 0xf;
504 gen_movl_T1_reg(s
, rd
);
506 offset
= (insn
& 0xff) << ((insn
>> 7) & 2);
507 if (insn
& (1 << 24)) {
509 if (insn
& (1 << 23))
510 gen_op_addl_T1_im(offset
);
512 gen_op_addl_T1_im(-offset
);
514 if (insn
& (1 << 21))
515 gen_movl_reg_T1(s
, rd
);
516 } else if (insn
& (1 << 21)) {
518 if (insn
& (1 << 23))
519 gen_op_movl_T0_im(offset
);
521 gen_op_movl_T0_im(- offset
);
523 gen_movl_reg_T0(s
, rd
);
524 } else if (!(insn
& (1 << 23)))
529 static inline int gen_iwmmxt_shift(uint32_t insn
, uint32_t mask
)
531 int rd
= (insn
>> 0) & 0xf;
534 if (rd
< ARM_IWMMXT_wCGR0
|| rd
> ARM_IWMMXT_wCGR3
)
537 gen_op_iwmmxt_movl_T0_wCx(rd
);
539 gen_op_iwmmxt_movl_T0_T1_wRn(rd
);
541 gen_op_movl_T1_im(mask
);
546 /* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
547 (ie. an undefined instruction). */
548 static int disas_iwmmxt_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
551 int rdhi
, rdlo
, rd0
, rd1
, i
;
553 if ((insn
& 0x0e000e00) == 0x0c000000) {
554 if ((insn
& 0x0fe00ff0) == 0x0c400000) {
556 rdlo
= (insn
>> 12) & 0xf;
557 rdhi
= (insn
>> 16) & 0xf;
558 if (insn
& ARM_CP_RW_BIT
) { /* TMRRC */
559 gen_op_iwmmxt_movl_T0_T1_wRn(wrd
);
560 gen_movl_reg_T0(s
, rdlo
);
561 gen_movl_reg_T1(s
, rdhi
);
563 gen_movl_T0_reg(s
, rdlo
);
564 gen_movl_T1_reg(s
, rdhi
);
565 gen_op_iwmmxt_movl_wRn_T0_T1(wrd
);
566 gen_op_iwmmxt_set_mup();
571 wrd
= (insn
>> 12) & 0xf;
572 if (gen_iwmmxt_address(s
, insn
))
574 if (insn
& ARM_CP_RW_BIT
) {
575 if ((insn
>> 28) == 0xf) { /* WLDRW wCx */
577 gen_op_iwmmxt_movl_wCx_T0(wrd
);
580 if (insn
& (1 << 22)) /* WLDRD */
581 gen_ldst(iwmmxt_ldq
, s
);
583 gen_ldst(iwmmxt_ldl
, s
);
585 if (insn
& (1 << 22)) /* WLDRH */
586 gen_ldst(iwmmxt_ldw
, s
);
588 gen_ldst(iwmmxt_ldb
, s
);
589 gen_op_iwmmxt_movq_wRn_M0(wrd
);
592 if ((insn
>> 28) == 0xf) { /* WSTRW wCx */
593 gen_op_iwmmxt_movl_T0_wCx(wrd
);
596 gen_op_iwmmxt_movq_M0_wRn(wrd
);
598 if (insn
& (1 << 22)) /* WSTRD */
599 gen_ldst(iwmmxt_stq
, s
);
601 gen_ldst(iwmmxt_stl
, s
);
603 if (insn
& (1 << 22)) /* WSTRH */
604 gen_ldst(iwmmxt_ldw
, s
);
606 gen_ldst(iwmmxt_stb
, s
);
612 if ((insn
& 0x0f000000) != 0x0e000000)
615 switch (((insn
>> 12) & 0xf00) | ((insn
>> 4) & 0xff)) {
616 case 0x000: /* WOR */
617 wrd
= (insn
>> 12) & 0xf;
618 rd0
= (insn
>> 0) & 0xf;
619 rd1
= (insn
>> 16) & 0xf;
620 gen_op_iwmmxt_movq_M0_wRn(rd0
);
621 gen_op_iwmmxt_orq_M0_wRn(rd1
);
622 gen_op_iwmmxt_setpsr_nz();
623 gen_op_iwmmxt_movq_wRn_M0(wrd
);
624 gen_op_iwmmxt_set_mup();
625 gen_op_iwmmxt_set_cup();
627 case 0x011: /* TMCR */
630 rd
= (insn
>> 12) & 0xf;
631 wrd
= (insn
>> 16) & 0xf;
633 case ARM_IWMMXT_wCID
:
634 case ARM_IWMMXT_wCASF
:
636 case ARM_IWMMXT_wCon
:
637 gen_op_iwmmxt_set_cup();
639 case ARM_IWMMXT_wCSSF
:
640 gen_op_iwmmxt_movl_T0_wCx(wrd
);
641 gen_movl_T1_reg(s
, rd
);
643 gen_op_iwmmxt_movl_wCx_T0(wrd
);
645 case ARM_IWMMXT_wCGR0
:
646 case ARM_IWMMXT_wCGR1
:
647 case ARM_IWMMXT_wCGR2
:
648 case ARM_IWMMXT_wCGR3
:
649 gen_op_iwmmxt_set_cup();
650 gen_movl_reg_T0(s
, rd
);
651 gen_op_iwmmxt_movl_wCx_T0(wrd
);
657 case 0x100: /* WXOR */
658 wrd
= (insn
>> 12) & 0xf;
659 rd0
= (insn
>> 0) & 0xf;
660 rd1
= (insn
>> 16) & 0xf;
661 gen_op_iwmmxt_movq_M0_wRn(rd0
);
662 gen_op_iwmmxt_xorq_M0_wRn(rd1
);
663 gen_op_iwmmxt_setpsr_nz();
664 gen_op_iwmmxt_movq_wRn_M0(wrd
);
665 gen_op_iwmmxt_set_mup();
666 gen_op_iwmmxt_set_cup();
668 case 0x111: /* TMRC */
671 rd
= (insn
>> 12) & 0xf;
672 wrd
= (insn
>> 16) & 0xf;
673 gen_op_iwmmxt_movl_T0_wCx(wrd
);
674 gen_movl_reg_T0(s
, rd
);
676 case 0x300: /* WANDN */
677 wrd
= (insn
>> 12) & 0xf;
678 rd0
= (insn
>> 0) & 0xf;
679 rd1
= (insn
>> 16) & 0xf;
680 gen_op_iwmmxt_movq_M0_wRn(rd0
);
681 gen_op_iwmmxt_negq_M0();
682 gen_op_iwmmxt_andq_M0_wRn(rd1
);
683 gen_op_iwmmxt_setpsr_nz();
684 gen_op_iwmmxt_movq_wRn_M0(wrd
);
685 gen_op_iwmmxt_set_mup();
686 gen_op_iwmmxt_set_cup();
688 case 0x200: /* WAND */
689 wrd
= (insn
>> 12) & 0xf;
690 rd0
= (insn
>> 0) & 0xf;
691 rd1
= (insn
>> 16) & 0xf;
692 gen_op_iwmmxt_movq_M0_wRn(rd0
);
693 gen_op_iwmmxt_andq_M0_wRn(rd1
);
694 gen_op_iwmmxt_setpsr_nz();
695 gen_op_iwmmxt_movq_wRn_M0(wrd
);
696 gen_op_iwmmxt_set_mup();
697 gen_op_iwmmxt_set_cup();
699 case 0x810: case 0xa10: /* WMADD */
700 wrd
= (insn
>> 12) & 0xf;
701 rd0
= (insn
>> 0) & 0xf;
702 rd1
= (insn
>> 16) & 0xf;
703 gen_op_iwmmxt_movq_M0_wRn(rd0
);
704 if (insn
& (1 << 21))
705 gen_op_iwmmxt_maddsq_M0_wRn(rd1
);
707 gen_op_iwmmxt_madduq_M0_wRn(rd1
);
708 gen_op_iwmmxt_movq_wRn_M0(wrd
);
709 gen_op_iwmmxt_set_mup();
711 case 0x10e: case 0x50e: case 0x90e: case 0xd0e: /* WUNPCKIL */
712 wrd
= (insn
>> 12) & 0xf;
713 rd0
= (insn
>> 16) & 0xf;
714 rd1
= (insn
>> 0) & 0xf;
715 gen_op_iwmmxt_movq_M0_wRn(rd0
);
716 switch ((insn
>> 22) & 3) {
718 gen_op_iwmmxt_unpacklb_M0_wRn(rd1
);
721 gen_op_iwmmxt_unpacklw_M0_wRn(rd1
);
724 gen_op_iwmmxt_unpackll_M0_wRn(rd1
);
729 gen_op_iwmmxt_movq_wRn_M0(wrd
);
730 gen_op_iwmmxt_set_mup();
731 gen_op_iwmmxt_set_cup();
733 case 0x10c: case 0x50c: case 0x90c: case 0xd0c: /* WUNPCKIH */
734 wrd
= (insn
>> 12) & 0xf;
735 rd0
= (insn
>> 16) & 0xf;
736 rd1
= (insn
>> 0) & 0xf;
737 gen_op_iwmmxt_movq_M0_wRn(rd0
);
738 switch ((insn
>> 22) & 3) {
740 gen_op_iwmmxt_unpackhb_M0_wRn(rd1
);
743 gen_op_iwmmxt_unpackhw_M0_wRn(rd1
);
746 gen_op_iwmmxt_unpackhl_M0_wRn(rd1
);
751 gen_op_iwmmxt_movq_wRn_M0(wrd
);
752 gen_op_iwmmxt_set_mup();
753 gen_op_iwmmxt_set_cup();
755 case 0x012: case 0x112: case 0x412: case 0x512: /* WSAD */
756 wrd
= (insn
>> 12) & 0xf;
757 rd0
= (insn
>> 16) & 0xf;
758 rd1
= (insn
>> 0) & 0xf;
759 gen_op_iwmmxt_movq_M0_wRn(rd0
);
760 if (insn
& (1 << 22))
761 gen_op_iwmmxt_sadw_M0_wRn(rd1
);
763 gen_op_iwmmxt_sadb_M0_wRn(rd1
);
764 if (!(insn
& (1 << 20)))
765 gen_op_iwmmxt_addl_M0_wRn(wrd
);
766 gen_op_iwmmxt_movq_wRn_M0(wrd
);
767 gen_op_iwmmxt_set_mup();
769 case 0x010: case 0x110: case 0x210: case 0x310: /* WMUL */
770 wrd
= (insn
>> 12) & 0xf;
771 rd0
= (insn
>> 16) & 0xf;
772 rd1
= (insn
>> 0) & 0xf;
773 gen_op_iwmmxt_movq_M0_wRn(rd0
);
774 if (insn
& (1 << 21))
775 gen_op_iwmmxt_mulsw_M0_wRn(rd1
, (insn
& (1 << 20)) ? 16 : 0);
777 gen_op_iwmmxt_muluw_M0_wRn(rd1
, (insn
& (1 << 20)) ? 16 : 0);
778 gen_op_iwmmxt_movq_wRn_M0(wrd
);
779 gen_op_iwmmxt_set_mup();
781 case 0x410: case 0x510: case 0x610: case 0x710: /* WMAC */
782 wrd
= (insn
>> 12) & 0xf;
783 rd0
= (insn
>> 16) & 0xf;
784 rd1
= (insn
>> 0) & 0xf;
785 gen_op_iwmmxt_movq_M0_wRn(rd0
);
786 if (insn
& (1 << 21))
787 gen_op_iwmmxt_macsw_M0_wRn(rd1
);
789 gen_op_iwmmxt_macuw_M0_wRn(rd1
);
790 if (!(insn
& (1 << 20))) {
791 if (insn
& (1 << 21))
792 gen_op_iwmmxt_addsq_M0_wRn(wrd
);
794 gen_op_iwmmxt_adduq_M0_wRn(wrd
);
796 gen_op_iwmmxt_movq_wRn_M0(wrd
);
797 gen_op_iwmmxt_set_mup();
799 case 0x006: case 0x406: case 0x806: case 0xc06: /* WCMPEQ */
800 wrd
= (insn
>> 12) & 0xf;
801 rd0
= (insn
>> 16) & 0xf;
802 rd1
= (insn
>> 0) & 0xf;
803 gen_op_iwmmxt_movq_M0_wRn(rd0
);
804 switch ((insn
>> 22) & 3) {
806 gen_op_iwmmxt_cmpeqb_M0_wRn(rd1
);
809 gen_op_iwmmxt_cmpeqw_M0_wRn(rd1
);
812 gen_op_iwmmxt_cmpeql_M0_wRn(rd1
);
817 gen_op_iwmmxt_movq_wRn_M0(wrd
);
818 gen_op_iwmmxt_set_mup();
819 gen_op_iwmmxt_set_cup();
821 case 0x800: case 0x900: case 0xc00: case 0xd00: /* WAVG2 */
822 wrd
= (insn
>> 12) & 0xf;
823 rd0
= (insn
>> 16) & 0xf;
824 rd1
= (insn
>> 0) & 0xf;
825 gen_op_iwmmxt_movq_M0_wRn(rd0
);
826 if (insn
& (1 << 22))
827 gen_op_iwmmxt_avgw_M0_wRn(rd1
, (insn
>> 20) & 1);
829 gen_op_iwmmxt_avgb_M0_wRn(rd1
, (insn
>> 20) & 1);
830 gen_op_iwmmxt_movq_wRn_M0(wrd
);
831 gen_op_iwmmxt_set_mup();
832 gen_op_iwmmxt_set_cup();
834 case 0x802: case 0x902: case 0xa02: case 0xb02: /* WALIGNR */
835 wrd
= (insn
>> 12) & 0xf;
836 rd0
= (insn
>> 16) & 0xf;
837 rd1
= (insn
>> 0) & 0xf;
838 gen_op_iwmmxt_movq_M0_wRn(rd0
);
839 gen_op_iwmmxt_movl_T0_wCx(ARM_IWMMXT_wCGR0
+ ((insn
>> 20) & 3));
840 gen_op_movl_T1_im(7);
842 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
843 gen_op_iwmmxt_movq_wRn_M0(wrd
);
844 gen_op_iwmmxt_set_mup();
846 case 0x601: case 0x605: case 0x609: case 0x60d: /* TINSR */
847 rd
= (insn
>> 12) & 0xf;
848 wrd
= (insn
>> 16) & 0xf;
849 gen_movl_T0_reg(s
, rd
);
850 gen_op_iwmmxt_movq_M0_wRn(wrd
);
851 switch ((insn
>> 6) & 3) {
853 gen_op_movl_T1_im(0xff);
854 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 7) << 3);
857 gen_op_movl_T1_im(0xffff);
858 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 3) << 4);
861 gen_op_movl_T1_im(0xffffffff);
862 gen_op_iwmmxt_insr_M0_T0_T1((insn
& 1) << 5);
867 gen_op_iwmmxt_movq_wRn_M0(wrd
);
868 gen_op_iwmmxt_set_mup();
870 case 0x107: case 0x507: case 0x907: case 0xd07: /* TEXTRM */
871 rd
= (insn
>> 12) & 0xf;
872 wrd
= (insn
>> 16) & 0xf;
875 gen_op_iwmmxt_movq_M0_wRn(wrd
);
876 switch ((insn
>> 22) & 3) {
879 gen_op_iwmmxt_extrsb_T0_M0((insn
& 7) << 3);
881 gen_op_movl_T1_im(0xff);
882 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 7) << 3);
887 gen_op_iwmmxt_extrsw_T0_M0((insn
& 3) << 4);
889 gen_op_movl_T1_im(0xffff);
890 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 3) << 4);
894 gen_op_movl_T1_im(0xffffffff);
895 gen_op_iwmmxt_extru_T0_M0_T1((insn
& 1) << 5);
900 gen_op_movl_reg_TN
[0][rd
]();
902 case 0x117: case 0x517: case 0x917: case 0xd17: /* TEXTRC */
903 if ((insn
& 0x000ff008) != 0x0003f000)
905 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
906 switch ((insn
>> 22) & 3) {
908 gen_op_shrl_T1_im(((insn
& 7) << 2) + 0);
911 gen_op_shrl_T1_im(((insn
& 3) << 3) + 4);
914 gen_op_shrl_T1_im(((insn
& 1) << 4) + 12);
919 gen_op_shll_T1_im(28);
921 gen_op_movl_cpsr_T0(0xf0000000);
923 case 0x401: case 0x405: case 0x409: case 0x40d: /* TBCST */
924 rd
= (insn
>> 12) & 0xf;
925 wrd
= (insn
>> 16) & 0xf;
926 gen_movl_T0_reg(s
, rd
);
927 switch ((insn
>> 6) & 3) {
929 gen_op_iwmmxt_bcstb_M0_T0();
932 gen_op_iwmmxt_bcstw_M0_T0();
935 gen_op_iwmmxt_bcstl_M0_T0();
940 gen_op_iwmmxt_movq_wRn_M0(wrd
);
941 gen_op_iwmmxt_set_mup();
943 case 0x113: case 0x513: case 0x913: case 0xd13: /* TANDC */
944 if ((insn
& 0x000ff00f) != 0x0003f000)
946 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
947 switch ((insn
>> 22) & 3) {
949 for (i
= 0; i
< 7; i
++) {
950 gen_op_shll_T1_im(4);
955 for (i
= 0; i
< 3; i
++) {
956 gen_op_shll_T1_im(8);
961 gen_op_shll_T1_im(16);
967 gen_op_movl_cpsr_T0(0xf0000000);
969 case 0x01c: case 0x41c: case 0x81c: case 0xc1c: /* WACC */
970 wrd
= (insn
>> 12) & 0xf;
971 rd0
= (insn
>> 16) & 0xf;
972 gen_op_iwmmxt_movq_M0_wRn(rd0
);
973 switch ((insn
>> 22) & 3) {
975 gen_op_iwmmxt_addcb_M0();
978 gen_op_iwmmxt_addcw_M0();
981 gen_op_iwmmxt_addcl_M0();
986 gen_op_iwmmxt_movq_wRn_M0(wrd
);
987 gen_op_iwmmxt_set_mup();
989 case 0x115: case 0x515: case 0x915: case 0xd15: /* TORC */
990 if ((insn
& 0x000ff00f) != 0x0003f000)
992 gen_op_iwmmxt_movl_T1_wCx(ARM_IWMMXT_wCASF
);
993 switch ((insn
>> 22) & 3) {
995 for (i
= 0; i
< 7; i
++) {
996 gen_op_shll_T1_im(4);
1001 for (i
= 0; i
< 3; i
++) {
1002 gen_op_shll_T1_im(8);
1007 gen_op_shll_T1_im(16);
1013 gen_op_movl_T1_im(0xf0000000);
1014 gen_op_andl_T0_T1();
1015 gen_op_movl_cpsr_T0(0xf0000000);
1017 case 0x103: case 0x503: case 0x903: case 0xd03: /* TMOVMSK */
1018 rd
= (insn
>> 12) & 0xf;
1019 rd0
= (insn
>> 16) & 0xf;
1020 if ((insn
& 0xf) != 0)
1022 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1023 switch ((insn
>> 22) & 3) {
1025 gen_op_iwmmxt_msbb_T0_M0();
1028 gen_op_iwmmxt_msbw_T0_M0();
1031 gen_op_iwmmxt_msbl_T0_M0();
1036 gen_movl_reg_T0(s
, rd
);
1038 case 0x106: case 0x306: case 0x506: case 0x706: /* WCMPGT */
1039 case 0x906: case 0xb06: case 0xd06: case 0xf06:
1040 wrd
= (insn
>> 12) & 0xf;
1041 rd0
= (insn
>> 16) & 0xf;
1042 rd1
= (insn
>> 0) & 0xf;
1043 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1044 switch ((insn
>> 22) & 3) {
1046 if (insn
& (1 << 21))
1047 gen_op_iwmmxt_cmpgtsb_M0_wRn(rd1
);
1049 gen_op_iwmmxt_cmpgtub_M0_wRn(rd1
);
1052 if (insn
& (1 << 21))
1053 gen_op_iwmmxt_cmpgtsw_M0_wRn(rd1
);
1055 gen_op_iwmmxt_cmpgtuw_M0_wRn(rd1
);
1058 if (insn
& (1 << 21))
1059 gen_op_iwmmxt_cmpgtsl_M0_wRn(rd1
);
1061 gen_op_iwmmxt_cmpgtul_M0_wRn(rd1
);
1066 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1067 gen_op_iwmmxt_set_mup();
1068 gen_op_iwmmxt_set_cup();
1070 case 0x00e: case 0x20e: case 0x40e: case 0x60e: /* WUNPCKEL */
1071 case 0x80e: case 0xa0e: case 0xc0e: case 0xe0e:
1072 wrd
= (insn
>> 12) & 0xf;
1073 rd0
= (insn
>> 16) & 0xf;
1074 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1075 switch ((insn
>> 22) & 3) {
1077 if (insn
& (1 << 21))
1078 gen_op_iwmmxt_unpacklsb_M0();
1080 gen_op_iwmmxt_unpacklub_M0();
1083 if (insn
& (1 << 21))
1084 gen_op_iwmmxt_unpacklsw_M0();
1086 gen_op_iwmmxt_unpackluw_M0();
1089 if (insn
& (1 << 21))
1090 gen_op_iwmmxt_unpacklsl_M0();
1092 gen_op_iwmmxt_unpacklul_M0();
1097 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1098 gen_op_iwmmxt_set_mup();
1099 gen_op_iwmmxt_set_cup();
1101 case 0x00c: case 0x20c: case 0x40c: case 0x60c: /* WUNPCKEH */
1102 case 0x80c: case 0xa0c: case 0xc0c: case 0xe0c:
1103 wrd
= (insn
>> 12) & 0xf;
1104 rd0
= (insn
>> 16) & 0xf;
1105 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1106 switch ((insn
>> 22) & 3) {
1108 if (insn
& (1 << 21))
1109 gen_op_iwmmxt_unpackhsb_M0();
1111 gen_op_iwmmxt_unpackhub_M0();
1114 if (insn
& (1 << 21))
1115 gen_op_iwmmxt_unpackhsw_M0();
1117 gen_op_iwmmxt_unpackhuw_M0();
1120 if (insn
& (1 << 21))
1121 gen_op_iwmmxt_unpackhsl_M0();
1123 gen_op_iwmmxt_unpackhul_M0();
1128 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1129 gen_op_iwmmxt_set_mup();
1130 gen_op_iwmmxt_set_cup();
1132 case 0x204: case 0x604: case 0xa04: case 0xe04: /* WSRL */
1133 case 0x214: case 0x614: case 0xa14: case 0xe14:
1134 wrd
= (insn
>> 12) & 0xf;
1135 rd0
= (insn
>> 16) & 0xf;
1136 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1137 if (gen_iwmmxt_shift(insn
, 0xff))
1139 switch ((insn
>> 22) & 3) {
1143 gen_op_iwmmxt_srlw_M0_T0();
1146 gen_op_iwmmxt_srll_M0_T0();
1149 gen_op_iwmmxt_srlq_M0_T0();
1152 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1153 gen_op_iwmmxt_set_mup();
1154 gen_op_iwmmxt_set_cup();
1156 case 0x004: case 0x404: case 0x804: case 0xc04: /* WSRA */
1157 case 0x014: case 0x414: case 0x814: case 0xc14:
1158 wrd
= (insn
>> 12) & 0xf;
1159 rd0
= (insn
>> 16) & 0xf;
1160 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1161 if (gen_iwmmxt_shift(insn
, 0xff))
1163 switch ((insn
>> 22) & 3) {
1167 gen_op_iwmmxt_sraw_M0_T0();
1170 gen_op_iwmmxt_sral_M0_T0();
1173 gen_op_iwmmxt_sraq_M0_T0();
1176 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1177 gen_op_iwmmxt_set_mup();
1178 gen_op_iwmmxt_set_cup();
1180 case 0x104: case 0x504: case 0x904: case 0xd04: /* WSLL */
1181 case 0x114: case 0x514: case 0x914: case 0xd14:
1182 wrd
= (insn
>> 12) & 0xf;
1183 rd0
= (insn
>> 16) & 0xf;
1184 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1185 if (gen_iwmmxt_shift(insn
, 0xff))
1187 switch ((insn
>> 22) & 3) {
1191 gen_op_iwmmxt_sllw_M0_T0();
1194 gen_op_iwmmxt_slll_M0_T0();
1197 gen_op_iwmmxt_sllq_M0_T0();
1200 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1201 gen_op_iwmmxt_set_mup();
1202 gen_op_iwmmxt_set_cup();
1204 case 0x304: case 0x704: case 0xb04: case 0xf04: /* WROR */
1205 case 0x314: case 0x714: case 0xb14: case 0xf14:
1206 wrd
= (insn
>> 12) & 0xf;
1207 rd0
= (insn
>> 16) & 0xf;
1208 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1209 switch ((insn
>> 22) & 3) {
1213 if (gen_iwmmxt_shift(insn
, 0xf))
1215 gen_op_iwmmxt_rorw_M0_T0();
1218 if (gen_iwmmxt_shift(insn
, 0x1f))
1220 gen_op_iwmmxt_rorl_M0_T0();
1223 if (gen_iwmmxt_shift(insn
, 0x3f))
1225 gen_op_iwmmxt_rorq_M0_T0();
1228 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1229 gen_op_iwmmxt_set_mup();
1230 gen_op_iwmmxt_set_cup();
1232 case 0x116: case 0x316: case 0x516: case 0x716: /* WMIN */
1233 case 0x916: case 0xb16: case 0xd16: case 0xf16:
1234 wrd
= (insn
>> 12) & 0xf;
1235 rd0
= (insn
>> 16) & 0xf;
1236 rd1
= (insn
>> 0) & 0xf;
1237 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1238 switch ((insn
>> 22) & 3) {
1240 if (insn
& (1 << 21))
1241 gen_op_iwmmxt_minsb_M0_wRn(rd1
);
1243 gen_op_iwmmxt_minub_M0_wRn(rd1
);
1246 if (insn
& (1 << 21))
1247 gen_op_iwmmxt_minsw_M0_wRn(rd1
);
1249 gen_op_iwmmxt_minuw_M0_wRn(rd1
);
1252 if (insn
& (1 << 21))
1253 gen_op_iwmmxt_minsl_M0_wRn(rd1
);
1255 gen_op_iwmmxt_minul_M0_wRn(rd1
);
1260 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1261 gen_op_iwmmxt_set_mup();
1263 case 0x016: case 0x216: case 0x416: case 0x616: /* WMAX */
1264 case 0x816: case 0xa16: case 0xc16: case 0xe16:
1265 wrd
= (insn
>> 12) & 0xf;
1266 rd0
= (insn
>> 16) & 0xf;
1267 rd1
= (insn
>> 0) & 0xf;
1268 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1269 switch ((insn
>> 22) & 3) {
1271 if (insn
& (1 << 21))
1272 gen_op_iwmmxt_maxsb_M0_wRn(rd1
);
1274 gen_op_iwmmxt_maxub_M0_wRn(rd1
);
1277 if (insn
& (1 << 21))
1278 gen_op_iwmmxt_maxsw_M0_wRn(rd1
);
1280 gen_op_iwmmxt_maxuw_M0_wRn(rd1
);
1283 if (insn
& (1 << 21))
1284 gen_op_iwmmxt_maxsl_M0_wRn(rd1
);
1286 gen_op_iwmmxt_maxul_M0_wRn(rd1
);
1291 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1292 gen_op_iwmmxt_set_mup();
1294 case 0x002: case 0x102: case 0x202: case 0x302: /* WALIGNI */
1295 case 0x402: case 0x502: case 0x602: case 0x702:
1296 wrd
= (insn
>> 12) & 0xf;
1297 rd0
= (insn
>> 16) & 0xf;
1298 rd1
= (insn
>> 0) & 0xf;
1299 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1300 gen_op_movl_T0_im((insn
>> 20) & 3);
1301 gen_op_iwmmxt_align_M0_T0_wRn(rd1
);
1302 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1303 gen_op_iwmmxt_set_mup();
1305 case 0x01a: case 0x11a: case 0x21a: case 0x31a: /* WSUB */
1306 case 0x41a: case 0x51a: case 0x61a: case 0x71a:
1307 case 0x81a: case 0x91a: case 0xa1a: case 0xb1a:
1308 case 0xc1a: case 0xd1a: case 0xe1a: case 0xf1a:
1309 wrd
= (insn
>> 12) & 0xf;
1310 rd0
= (insn
>> 16) & 0xf;
1311 rd1
= (insn
>> 0) & 0xf;
1312 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1313 switch ((insn
>> 20) & 0xf) {
1315 gen_op_iwmmxt_subnb_M0_wRn(rd1
);
1318 gen_op_iwmmxt_subub_M0_wRn(rd1
);
1321 gen_op_iwmmxt_subsb_M0_wRn(rd1
);
1324 gen_op_iwmmxt_subnw_M0_wRn(rd1
);
1327 gen_op_iwmmxt_subuw_M0_wRn(rd1
);
1330 gen_op_iwmmxt_subsw_M0_wRn(rd1
);
1333 gen_op_iwmmxt_subnl_M0_wRn(rd1
);
1336 gen_op_iwmmxt_subul_M0_wRn(rd1
);
1339 gen_op_iwmmxt_subsl_M0_wRn(rd1
);
1344 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1345 gen_op_iwmmxt_set_mup();
1346 gen_op_iwmmxt_set_cup();
1348 case 0x01e: case 0x11e: case 0x21e: case 0x31e: /* WSHUFH */
1349 case 0x41e: case 0x51e: case 0x61e: case 0x71e:
1350 case 0x81e: case 0x91e: case 0xa1e: case 0xb1e:
1351 case 0xc1e: case 0xd1e: case 0xe1e: case 0xf1e:
1352 wrd
= (insn
>> 12) & 0xf;
1353 rd0
= (insn
>> 16) & 0xf;
1354 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1355 gen_op_movl_T0_im(((insn
>> 16) & 0xf0) | (insn
& 0x0f));
1356 gen_op_iwmmxt_shufh_M0_T0();
1357 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1358 gen_op_iwmmxt_set_mup();
1359 gen_op_iwmmxt_set_cup();
1361 case 0x018: case 0x118: case 0x218: case 0x318: /* WADD */
1362 case 0x418: case 0x518: case 0x618: case 0x718:
1363 case 0x818: case 0x918: case 0xa18: case 0xb18:
1364 case 0xc18: case 0xd18: case 0xe18: case 0xf18:
1365 wrd
= (insn
>> 12) & 0xf;
1366 rd0
= (insn
>> 16) & 0xf;
1367 rd1
= (insn
>> 0) & 0xf;
1368 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1369 switch ((insn
>> 20) & 0xf) {
1371 gen_op_iwmmxt_addnb_M0_wRn(rd1
);
1374 gen_op_iwmmxt_addub_M0_wRn(rd1
);
1377 gen_op_iwmmxt_addsb_M0_wRn(rd1
);
1380 gen_op_iwmmxt_addnw_M0_wRn(rd1
);
1383 gen_op_iwmmxt_adduw_M0_wRn(rd1
);
1386 gen_op_iwmmxt_addsw_M0_wRn(rd1
);
1389 gen_op_iwmmxt_addnl_M0_wRn(rd1
);
1392 gen_op_iwmmxt_addul_M0_wRn(rd1
);
1395 gen_op_iwmmxt_addsl_M0_wRn(rd1
);
1400 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1401 gen_op_iwmmxt_set_mup();
1402 gen_op_iwmmxt_set_cup();
1404 case 0x008: case 0x108: case 0x208: case 0x308: /* WPACK */
1405 case 0x408: case 0x508: case 0x608: case 0x708:
1406 case 0x808: case 0x908: case 0xa08: case 0xb08:
1407 case 0xc08: case 0xd08: case 0xe08: case 0xf08:
1408 wrd
= (insn
>> 12) & 0xf;
1409 rd0
= (insn
>> 16) & 0xf;
1410 rd1
= (insn
>> 0) & 0xf;
1411 gen_op_iwmmxt_movq_M0_wRn(rd0
);
1412 if (!(insn
& (1 << 20)))
1414 switch ((insn
>> 22) & 3) {
1418 if (insn
& (1 << 21))
1419 gen_op_iwmmxt_packsw_M0_wRn(rd1
);
1421 gen_op_iwmmxt_packuw_M0_wRn(rd1
);
1424 if (insn
& (1 << 21))
1425 gen_op_iwmmxt_packsl_M0_wRn(rd1
);
1427 gen_op_iwmmxt_packul_M0_wRn(rd1
);
1430 if (insn
& (1 << 21))
1431 gen_op_iwmmxt_packsq_M0_wRn(rd1
);
1433 gen_op_iwmmxt_packuq_M0_wRn(rd1
);
1436 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1437 gen_op_iwmmxt_set_mup();
1438 gen_op_iwmmxt_set_cup();
1440 case 0x201: case 0x203: case 0x205: case 0x207:
1441 case 0x209: case 0x20b: case 0x20d: case 0x20f:
1442 case 0x211: case 0x213: case 0x215: case 0x217:
1443 case 0x219: case 0x21b: case 0x21d: case 0x21f:
1444 wrd
= (insn
>> 5) & 0xf;
1445 rd0
= (insn
>> 12) & 0xf;
1446 rd1
= (insn
>> 0) & 0xf;
1447 if (rd0
== 0xf || rd1
== 0xf)
1449 gen_op_iwmmxt_movq_M0_wRn(wrd
);
1450 switch ((insn
>> 16) & 0xf) {
1451 case 0x0: /* TMIA */
1452 gen_op_movl_TN_reg
[0][rd0
]();
1453 gen_op_movl_TN_reg
[1][rd1
]();
1454 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1456 case 0x8: /* TMIAPH */
1457 gen_op_movl_TN_reg
[0][rd0
]();
1458 gen_op_movl_TN_reg
[1][rd1
]();
1459 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1461 case 0xc: case 0xd: case 0xe: case 0xf: /* TMIAxy */
1462 gen_op_movl_TN_reg
[1][rd0
]();
1463 if (insn
& (1 << 16))
1464 gen_op_shrl_T1_im(16);
1465 gen_op_movl_T0_T1();
1466 gen_op_movl_TN_reg
[1][rd1
]();
1467 if (insn
& (1 << 17))
1468 gen_op_shrl_T1_im(16);
1469 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1474 gen_op_iwmmxt_movq_wRn_M0(wrd
);
1475 gen_op_iwmmxt_set_mup();
1484 /* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
1485 (ie. an undefined instruction). */
1486 static int disas_dsp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1488 int acc
, rd0
, rd1
, rdhi
, rdlo
;
1490 if ((insn
& 0x0ff00f10) == 0x0e200010) {
1491 /* Multiply with Internal Accumulate Format */
1492 rd0
= (insn
>> 12) & 0xf;
1494 acc
= (insn
>> 5) & 7;
1499 switch ((insn
>> 16) & 0xf) {
1501 gen_op_movl_TN_reg
[0][rd0
]();
1502 gen_op_movl_TN_reg
[1][rd1
]();
1503 gen_op_iwmmxt_muladdsl_M0_T0_T1();
1505 case 0x8: /* MIAPH */
1506 gen_op_movl_TN_reg
[0][rd0
]();
1507 gen_op_movl_TN_reg
[1][rd1
]();
1508 gen_op_iwmmxt_muladdsw_M0_T0_T1();
1510 case 0xc: /* MIABB */
1511 case 0xd: /* MIABT */
1512 case 0xe: /* MIATB */
1513 case 0xf: /* MIATT */
1514 gen_op_movl_TN_reg
[1][rd0
]();
1515 if (insn
& (1 << 16))
1516 gen_op_shrl_T1_im(16);
1517 gen_op_movl_T0_T1();
1518 gen_op_movl_TN_reg
[1][rd1
]();
1519 if (insn
& (1 << 17))
1520 gen_op_shrl_T1_im(16);
1521 gen_op_iwmmxt_muladdswl_M0_T0_T1();
1527 gen_op_iwmmxt_movq_wRn_M0(acc
);
1531 if ((insn
& 0x0fe00ff8) == 0x0c400000) {
1532 /* Internal Accumulator Access Format */
1533 rdhi
= (insn
>> 16) & 0xf;
1534 rdlo
= (insn
>> 12) & 0xf;
1540 if (insn
& ARM_CP_RW_BIT
) { /* MRA */
1541 gen_op_iwmmxt_movl_T0_T1_wRn(acc
);
1542 gen_op_movl_reg_TN
[0][rdlo
]();
1543 gen_op_movl_T0_im((1 << (40 - 32)) - 1);
1544 gen_op_andl_T0_T1();
1545 gen_op_movl_reg_TN
[0][rdhi
]();
1547 gen_op_movl_TN_reg
[0][rdlo
]();
1548 gen_op_movl_TN_reg
[1][rdhi
]();
1549 gen_op_iwmmxt_movl_wRn_T0_T1(acc
);
1557 /* Disassemble system coprocessor instruction. Return nonzero if
1558 instruction is not defined. */
1559 static int disas_cp_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1561 uint32_t rd
= (insn
>> 12) & 0xf;
1562 uint32_t cp
= (insn
>> 8) & 0xf;
1567 if (insn
& ARM_CP_RW_BIT
) {
1568 if (!env
->cp
[cp
].cp_read
)
1570 gen_op_movl_T0_im((uint32_t) s
->pc
);
1571 gen_op_movl_reg_TN
[0][15]();
1572 gen_op_movl_T0_cp(insn
);
1573 gen_movl_reg_T0(s
, rd
);
1575 if (!env
->cp
[cp
].cp_write
)
1577 gen_op_movl_T0_im((uint32_t) s
->pc
);
1578 gen_op_movl_reg_TN
[0][15]();
1579 gen_movl_T0_reg(s
, rd
);
1580 gen_op_movl_cp_T0(insn
);
1585 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
1586 instruction is not defined. */
1587 static int disas_cp15_insn(CPUState
*env
, DisasContext
*s
, uint32_t insn
)
1591 /* ??? Some cp15 registers are accessible from userspace. */
1595 if ((insn
& 0x0fff0fff) == 0x0e070f90
1596 || (insn
& 0x0fff0fff) == 0x0e070f58) {
1597 /* Wait for interrupt. */
1598 gen_op_movl_T0_im((long)s
->pc
);
1599 gen_op_movl_reg_TN
[0][15]();
1601 s
->is_jmp
= DISAS_JUMP
;
1604 rd
= (insn
>> 12) & 0xf;
1605 if (insn
& ARM_CP_RW_BIT
) {
1606 gen_op_movl_T0_cp15(insn
);
1607 /* If the destination register is r15 then sets condition codes. */
1609 gen_movl_reg_T0(s
, rd
);
1611 gen_movl_T0_reg(s
, rd
);
1612 gen_op_movl_cp15_T0(insn
);
1613 /* Normally we would always end the TB here, but Linux
1614 * arch/arm/mach-pxa/sleep.S expects two instructions following
1615 * an MMU enable to execute from cache. Imitate this behaviour. */
1616 if (!arm_feature(env
, ARM_FEATURE_XSCALE
) ||
1617 (insn
& 0x0fff0fff) != 0x0e010f10)
1623 /* Disassemble a VFP instruction. Returns nonzero if an error occured
1624 (ie. an undefined instruction). */
1625 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
1627 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
1630 if (!arm_feature(env
, ARM_FEATURE_VFP
))
1633 if ((env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30)) == 0) {
1634 /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
1635 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
1637 rn
= (insn
>> 16) & 0xf;
1638 if (rn
!= 0 && rn
!= 8)
1641 dp
= ((insn
& 0xf00) == 0xb00);
1642 switch ((insn
>> 24) & 0xf) {
1644 if (insn
& (1 << 4)) {
1645 /* single register transfer */
1646 if ((insn
& 0x6f) != 0x00)
1648 rd
= (insn
>> 12) & 0xf;
1652 rn
= (insn
>> 16) & 0xf;
1653 /* Get the existing value even for arm->vfp moves because
1654 we only set half the register. */
1655 gen_mov_F0_vreg(1, rn
);
1657 if (insn
& ARM_CP_RW_BIT
) {
1659 if (insn
& (1 << 21))
1660 gen_movl_reg_T1(s
, rd
);
1662 gen_movl_reg_T0(s
, rd
);
1665 if (insn
& (1 << 21))
1666 gen_movl_T1_reg(s
, rd
);
1668 gen_movl_T0_reg(s
, rd
);
1670 gen_mov_vreg_F0(dp
, rn
);
1673 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
1674 if (insn
& ARM_CP_RW_BIT
) {
1676 if (insn
& (1 << 21)) {
1677 /* system register */
1682 case ARM_VFP_FPINST
:
1683 case ARM_VFP_FPINST2
:
1684 gen_op_vfp_movl_T0_xreg(rn
);
1688 gen_op_vfp_movl_T0_fpscr_flags();
1690 gen_op_vfp_movl_T0_fpscr();
1696 gen_mov_F0_vreg(0, rn
);
1700 /* Set the 4 flag bits in the CPSR. */
1701 gen_op_movl_cpsr_T0(0xf0000000);
1703 gen_movl_reg_T0(s
, rd
);
1706 gen_movl_T0_reg(s
, rd
);
1707 if (insn
& (1 << 21)) {
1709 /* system register */
1712 /* Writes are ignored. */
1715 gen_op_vfp_movl_fpscr_T0();
1719 gen_op_vfp_movl_xreg_T0(rn
);
1722 case ARM_VFP_FPINST
:
1723 case ARM_VFP_FPINST2
:
1724 gen_op_vfp_movl_xreg_T0(rn
);
1731 gen_mov_vreg_F0(0, rn
);
1736 /* data processing */
1737 /* The opcode is in bits 23, 21, 20 and 6. */
1738 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
1742 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
1744 /* rn is register number */
1745 if (insn
& (1 << 7))
1747 rn
= (insn
>> 16) & 0xf;
1750 if (op
== 15 && (rn
== 15 || rn
> 17)) {
1751 /* Integer or single precision destination. */
1752 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
1754 if (insn
& (1 << 22))
1756 rd
= (insn
>> 12) & 0xf;
1759 if (op
== 15 && (rn
== 16 || rn
== 17)) {
1760 /* Integer source. */
1761 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
1763 if (insn
& (1 << 5))
1768 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
1769 if (op
== 15 && rn
== 15) {
1770 /* Double precision destination. */
1771 if (insn
& (1 << 22))
1773 rd
= (insn
>> 12) & 0xf;
1775 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
1776 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
1779 veclen
= env
->vfp
.vec_len
;
1780 if (op
== 15 && rn
> 3)
1783 /* Shut up compiler warnings. */
1794 /* Figure out what type of vector operation this is. */
1795 if ((rd
& bank_mask
) == 0) {
1800 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
1802 delta_d
= env
->vfp
.vec_stride
+ 1;
1804 if ((rm
& bank_mask
) == 0) {
1805 /* mixed scalar/vector */
1814 /* Load the initial operands. */
1819 /* Integer source */
1820 gen_mov_F0_vreg(0, rm
);
1825 gen_mov_F0_vreg(dp
, rd
);
1826 gen_mov_F1_vreg(dp
, rm
);
1830 /* Compare with zero */
1831 gen_mov_F0_vreg(dp
, rd
);
1835 /* One source operand. */
1836 gen_mov_F0_vreg(dp
, rm
);
1839 /* Two source operands. */
1840 gen_mov_F0_vreg(dp
, rn
);
1841 gen_mov_F1_vreg(dp
, rm
);
1845 /* Perform the calculation. */
1847 case 0: /* mac: fd + (fn * fm) */
1849 gen_mov_F1_vreg(dp
, rd
);
1852 case 1: /* nmac: fd - (fn * fm) */
1855 gen_mov_F1_vreg(dp
, rd
);
1858 case 2: /* msc: -fd + (fn * fm) */
1860 gen_mov_F1_vreg(dp
, rd
);
1863 case 3: /* nmsc: -fd - (fn * fm) */
1865 gen_mov_F1_vreg(dp
, rd
);
1869 case 4: /* mul: fn * fm */
1872 case 5: /* nmul: -(fn * fm) */
1876 case 6: /* add: fn + fm */
1879 case 7: /* sub: fn - fm */
1882 case 8: /* div: fn / fm */
1885 case 15: /* extension space */
1908 case 11: /* cmpez */
1912 case 15: /* single<->double conversion */
1914 gen_op_vfp_fcvtsd();
1916 gen_op_vfp_fcvtds();
1918 case 16: /* fuito */
1921 case 17: /* fsito */
1924 case 24: /* ftoui */
1927 case 25: /* ftouiz */
1930 case 26: /* ftosi */
1933 case 27: /* ftosiz */
1936 default: /* undefined */
1937 printf ("rn:%d\n", rn
);
1941 default: /* undefined */
1942 printf ("op:%d\n", op
);
1946 /* Write back the result. */
1947 if (op
== 15 && (rn
>= 8 && rn
<= 11))
1948 ; /* Comparison, do nothing. */
1949 else if (op
== 15 && rn
> 17)
1950 /* Integer result. */
1951 gen_mov_vreg_F0(0, rd
);
1952 else if (op
== 15 && rn
== 15)
1954 gen_mov_vreg_F0(!dp
, rd
);
1956 gen_mov_vreg_F0(dp
, rd
);
1958 /* break out of the loop if we have finished */
1962 if (op
== 15 && delta_m
== 0) {
1963 /* single source one-many */
1965 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
1967 gen_mov_vreg_F0(dp
, rd
);
1971 /* Setup the next operands. */
1973 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
1977 /* One source operand. */
1978 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
1980 gen_mov_F0_vreg(dp
, rm
);
1982 /* Two source operands. */
1983 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
1985 gen_mov_F0_vreg(dp
, rn
);
1987 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
1989 gen_mov_F1_vreg(dp
, rm
);
1997 if (dp
&& (insn
& (1 << 22))) {
1998 /* two-register transfer */
1999 rn
= (insn
>> 16) & 0xf;
2000 rd
= (insn
>> 12) & 0xf;
2002 if (insn
& (1 << 5))
2006 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
2008 if (insn
& ARM_CP_RW_BIT
) {
2011 gen_mov_F0_vreg(1, rm
);
2013 gen_movl_reg_T0(s
, rd
);
2014 gen_movl_reg_T1(s
, rn
);
2016 gen_mov_F0_vreg(0, rm
);
2018 gen_movl_reg_T0(s
, rn
);
2019 gen_mov_F0_vreg(0, rm
+ 1);
2021 gen_movl_reg_T0(s
, rd
);
2026 gen_movl_T0_reg(s
, rd
);
2027 gen_movl_T1_reg(s
, rn
);
2029 gen_mov_vreg_F0(1, rm
);
2031 gen_movl_T0_reg(s
, rn
);
2033 gen_mov_vreg_F0(0, rm
);
2034 gen_movl_T0_reg(s
, rd
);
2036 gen_mov_vreg_F0(0, rm
+ 1);
2041 rn
= (insn
>> 16) & 0xf;
2043 rd
= (insn
>> 12) & 0xf;
2045 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
2046 gen_movl_T1_reg(s
, rn
);
2047 if ((insn
& 0x01200000) == 0x01000000) {
2048 /* Single load/store */
2049 offset
= (insn
& 0xff) << 2;
2050 if ((insn
& (1 << 23)) == 0)
2052 gen_op_addl_T1_im(offset
);
2053 if (insn
& (1 << 20)) {
2055 gen_mov_vreg_F0(dp
, rd
);
2057 gen_mov_F0_vreg(dp
, rd
);
2061 /* load/store multiple */
2063 n
= (insn
>> 1) & 0x7f;
2067 if (insn
& (1 << 24)) /* pre-decrement */
2068 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
2074 for (i
= 0; i
< n
; i
++) {
2075 if (insn
& ARM_CP_RW_BIT
) {
2078 gen_mov_vreg_F0(dp
, rd
+ i
);
2081 gen_mov_F0_vreg(dp
, rd
+ i
);
2084 gen_op_addl_T1_im(offset
);
2086 if (insn
& (1 << 21)) {
2088 if (insn
& (1 << 24))
2089 offset
= -offset
* n
;
2090 else if (dp
&& (insn
& 1))
2096 gen_op_addl_T1_im(offset
);
2097 gen_movl_reg_T1(s
, rn
);
2103 /* Should never happen. */
2109 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
2111 TranslationBlock
*tb
;
2114 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
2116 gen_op_goto_tb0(TBPARAM(tb
));
2118 gen_op_goto_tb1(TBPARAM(tb
));
2119 gen_op_movl_T0_im(dest
);
2120 gen_op_movl_r15_T0();
2121 gen_op_movl_T0_im((long)tb
+ n
);
2124 gen_op_movl_T0_im(dest
);
2125 gen_op_movl_r15_T0();
2131 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
2133 if (__builtin_expect(s
->singlestep_enabled
, 0)) {
2134 /* An indirect jump so that we still trigger the debug exception. */
2137 gen_op_movl_T0_im(dest
);
2140 gen_goto_tb(s
, 0, dest
);
2141 s
->is_jmp
= DISAS_TB_JUMP
;
2145 static inline void gen_mulxy(int x
, int y
)
2148 gen_op_sarl_T0_im(16);
2152 gen_op_sarl_T1_im(16);
2158 /* Return the mask of PSR bits set by a MSR instruction. */
2159 static uint32_t msr_mask(DisasContext
*s
, int flags
, int spsr
) {
2163 if (flags
& (1 << 0))
2165 if (flags
& (1 << 1))
2167 if (flags
& (1 << 2))
2169 if (flags
& (1 << 3))
2171 /* Mask out undefined bits. */
2173 /* Mask out state bits. */
2175 mask
&= ~0x01000020;
2176 /* Mask out privileged bits. */
2182 /* Returns nonzero if access to the PSR is not permitted. */
2183 static int gen_set_psr_T0(DisasContext
*s
, uint32_t mask
, int spsr
)
2186 /* ??? This is also undefined in system mode. */
2189 gen_op_movl_spsr_T0(mask
);
2191 gen_op_movl_cpsr_T0(mask
);
2197 static void gen_exception_return(DisasContext
*s
)
2199 gen_op_movl_reg_TN
[0][15]();
2200 gen_op_movl_T0_spsr();
2201 gen_op_movl_cpsr_T0(0xffffffff);
2202 s
->is_jmp
= DISAS_UPDATE
;
2205 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
2207 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
2209 insn
= ldl_code(s
->pc
);
2214 /* Unconditional instructions. */
2215 if ((insn
& 0x0d70f000) == 0x0550f000)
2217 else if ((insn
& 0x0e000000) == 0x0a000000) {
2218 /* branch link and change to thumb (blx <offset>) */
2221 val
= (uint32_t)s
->pc
;
2222 gen_op_movl_T0_im(val
);
2223 gen_movl_reg_T0(s
, 14);
2224 /* Sign-extend the 24-bit offset */
2225 offset
= (((int32_t)insn
) << 8) >> 8;
2226 /* offset * 4 + bit24 * 2 + (thumb bit) */
2227 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
2228 /* pipeline offset */
2230 gen_op_movl_T0_im(val
);
2233 } else if ((insn
& 0x0e000f00) == 0x0c000100) {
2234 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
2235 /* iWMMXt register transfer. */
2236 if (env
->cp15
.c15_cpar
& (1 << 1))
2237 if (!disas_iwmmxt_insn(env
, s
, insn
))
2240 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
2241 /* Coprocessor double register transfer. */
2242 } else if ((insn
& 0x0f000010) == 0x0e000010) {
2243 /* Additional coprocessor register transfer. */
2244 } else if ((insn
& 0x0ff10010) == 0x01000000) {
2245 /* cps (privileged) */
2246 } else if ((insn
& 0x0ffffdff) == 0x01010000) {
2248 if (insn
& (1 << 9)) {
2249 /* BE8 mode not implemented. */
2257 /* if not always execute, we generate a conditional jump to
2259 s
->condlabel
= gen_new_label();
2260 gen_test_cc
[cond
^ 1](s
->condlabel
);
2262 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2263 //s->is_jmp = DISAS_JUMP_NEXT;
2265 if ((insn
& 0x0f900000) == 0x03000000) {
2266 if ((insn
& 0x0fb0f000) != 0x0320f000)
2268 /* CPSR = immediate */
2270 shift
= ((insn
>> 8) & 0xf) * 2;
2272 val
= (val
>> shift
) | (val
<< (32 - shift
));
2273 gen_op_movl_T0_im(val
);
2274 i
= ((insn
& (1 << 22)) != 0);
2275 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
2277 } else if ((insn
& 0x0f900000) == 0x01000000
2278 && (insn
& 0x00000090) != 0x00000090) {
2279 /* miscellaneous instructions */
2280 op1
= (insn
>> 21) & 3;
2281 sh
= (insn
>> 4) & 0xf;
2284 case 0x0: /* move program status register */
2287 gen_movl_T0_reg(s
, rm
);
2288 i
= ((op1
& 2) != 0);
2289 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
2293 rd
= (insn
>> 12) & 0xf;
2297 gen_op_movl_T0_spsr();
2299 gen_op_movl_T0_cpsr();
2301 gen_movl_reg_T0(s
, rd
);
2306 /* branch/exchange thumb (bx). */
2307 gen_movl_T0_reg(s
, rm
);
2309 } else if (op1
== 3) {
2311 rd
= (insn
>> 12) & 0xf;
2312 gen_movl_T0_reg(s
, rm
);
2314 gen_movl_reg_T0(s
, rd
);
2322 /* Trivial implementation equivalent to bx. */
2323 gen_movl_T0_reg(s
, rm
);
2333 /* branch link/exchange thumb (blx) */
2334 val
= (uint32_t)s
->pc
;
2335 gen_op_movl_T1_im(val
);
2336 gen_movl_T0_reg(s
, rm
);
2337 gen_movl_reg_T1(s
, 14);
2340 case 0x5: /* saturating add/subtract */
2341 rd
= (insn
>> 12) & 0xf;
2342 rn
= (insn
>> 16) & 0xf;
2343 gen_movl_T0_reg(s
, rm
);
2344 gen_movl_T1_reg(s
, rn
);
2346 gen_op_double_T1_saturate();
2348 gen_op_subl_T0_T1_saturate();
2350 gen_op_addl_T0_T1_saturate();
2351 gen_movl_reg_T0(s
, rd
);
2354 gen_op_movl_T0_im((long)s
->pc
- 4);
2355 gen_op_movl_reg_TN
[0][15]();
2357 s
->is_jmp
= DISAS_JUMP
;
2359 case 0x8: /* signed multiply */
2363 rs
= (insn
>> 8) & 0xf;
2364 rn
= (insn
>> 12) & 0xf;
2365 rd
= (insn
>> 16) & 0xf;
2367 /* (32 * 16) >> 16 */
2368 gen_movl_T0_reg(s
, rm
);
2369 gen_movl_T1_reg(s
, rs
);
2371 gen_op_sarl_T1_im(16);
2374 gen_op_imulw_T0_T1();
2375 if ((sh
& 2) == 0) {
2376 gen_movl_T1_reg(s
, rn
);
2377 gen_op_addl_T0_T1_setq();
2379 gen_movl_reg_T0(s
, rd
);
2382 gen_movl_T0_reg(s
, rm
);
2383 gen_movl_T1_reg(s
, rs
);
2384 gen_mulxy(sh
& 2, sh
& 4);
2386 gen_op_signbit_T1_T0();
2387 gen_op_addq_T0_T1(rn
, rd
);
2388 gen_movl_reg_T0(s
, rn
);
2389 gen_movl_reg_T1(s
, rd
);
2392 gen_movl_T1_reg(s
, rn
);
2393 gen_op_addl_T0_T1_setq();
2395 gen_movl_reg_T0(s
, rd
);
2402 } else if (((insn
& 0x0e000000) == 0 &&
2403 (insn
& 0x00000090) != 0x90) ||
2404 ((insn
& 0x0e000000) == (1 << 25))) {
2405 int set_cc
, logic_cc
, shiftop
;
2407 op1
= (insn
>> 21) & 0xf;
2408 set_cc
= (insn
>> 20) & 1;
2409 logic_cc
= table_logic_cc
[op1
] & set_cc
;
2411 /* data processing instruction */
2412 if (insn
& (1 << 25)) {
2413 /* immediate operand */
2415 shift
= ((insn
>> 8) & 0xf) * 2;
2417 val
= (val
>> shift
) | (val
<< (32 - shift
));
2418 gen_op_movl_T1_im(val
);
2419 if (logic_cc
&& shift
)
2424 gen_movl_T1_reg(s
, rm
);
2425 shiftop
= (insn
>> 5) & 3;
2426 if (!(insn
& (1 << 4))) {
2427 shift
= (insn
>> 7) & 0x1f;
2430 gen_shift_T1_im_cc
[shiftop
](shift
);
2432 gen_shift_T1_im
[shiftop
](shift
);
2434 } else if (shiftop
!= 0) {
2436 gen_shift_T1_0_cc
[shiftop
]();
2438 gen_shift_T1_0
[shiftop
]();
2442 rs
= (insn
>> 8) & 0xf;
2443 gen_movl_T0_reg(s
, rs
);
2445 gen_shift_T1_T0_cc
[shiftop
]();
2447 gen_shift_T1_T0
[shiftop
]();
2451 if (op1
!= 0x0f && op1
!= 0x0d) {
2452 rn
= (insn
>> 16) & 0xf;
2453 gen_movl_T0_reg(s
, rn
);
2455 rd
= (insn
>> 12) & 0xf;
2458 gen_op_andl_T0_T1();
2459 gen_movl_reg_T0(s
, rd
);
2461 gen_op_logic_T0_cc();
2464 gen_op_xorl_T0_T1();
2465 gen_movl_reg_T0(s
, rd
);
2467 gen_op_logic_T0_cc();
2470 if (set_cc
&& rd
== 15) {
2471 /* SUBS r15, ... is used for exception return. */
2474 gen_op_subl_T0_T1_cc();
2475 gen_exception_return(s
);
2478 gen_op_subl_T0_T1_cc();
2480 gen_op_subl_T0_T1();
2481 gen_movl_reg_T0(s
, rd
);
2486 gen_op_rsbl_T0_T1_cc();
2488 gen_op_rsbl_T0_T1();
2489 gen_movl_reg_T0(s
, rd
);
2493 gen_op_addl_T0_T1_cc();
2495 gen_op_addl_T0_T1();
2496 gen_movl_reg_T0(s
, rd
);
2500 gen_op_adcl_T0_T1_cc();
2502 gen_op_adcl_T0_T1();
2503 gen_movl_reg_T0(s
, rd
);
2507 gen_op_sbcl_T0_T1_cc();
2509 gen_op_sbcl_T0_T1();
2510 gen_movl_reg_T0(s
, rd
);
2514 gen_op_rscl_T0_T1_cc();
2516 gen_op_rscl_T0_T1();
2517 gen_movl_reg_T0(s
, rd
);
2521 gen_op_andl_T0_T1();
2522 gen_op_logic_T0_cc();
2527 gen_op_xorl_T0_T1();
2528 gen_op_logic_T0_cc();
2533 gen_op_subl_T0_T1_cc();
2538 gen_op_addl_T0_T1_cc();
2543 gen_movl_reg_T0(s
, rd
);
2545 gen_op_logic_T0_cc();
2548 if (logic_cc
&& rd
== 15) {
2549 /* MOVS r15, ... is used for exception return. */
2552 gen_op_movl_T0_T1();
2553 gen_exception_return(s
);
2555 gen_movl_reg_T1(s
, rd
);
2557 gen_op_logic_T1_cc();
2561 gen_op_bicl_T0_T1();
2562 gen_movl_reg_T0(s
, rd
);
2564 gen_op_logic_T0_cc();
2569 gen_movl_reg_T1(s
, rd
);
2571 gen_op_logic_T1_cc();
2575 /* other instructions */
2576 op1
= (insn
>> 24) & 0xf;
2580 /* multiplies, extra load/stores */
2581 sh
= (insn
>> 5) & 3;
2584 rd
= (insn
>> 16) & 0xf;
2585 rn
= (insn
>> 12) & 0xf;
2586 rs
= (insn
>> 8) & 0xf;
2588 if (((insn
>> 22) & 3) == 0) {
2590 gen_movl_T0_reg(s
, rs
);
2591 gen_movl_T1_reg(s
, rm
);
2593 if (insn
& (1 << 21)) {
2594 gen_movl_T1_reg(s
, rn
);
2595 gen_op_addl_T0_T1();
2597 if (insn
& (1 << 20))
2598 gen_op_logic_T0_cc();
2599 gen_movl_reg_T0(s
, rd
);
2602 gen_movl_T0_reg(s
, rs
);
2603 gen_movl_T1_reg(s
, rm
);
2604 if (insn
& (1 << 22))
2605 gen_op_imull_T0_T1();
2607 gen_op_mull_T0_T1();
2608 if (insn
& (1 << 21)) /* mult accumulate */
2609 gen_op_addq_T0_T1(rn
, rd
);
2610 if (!(insn
& (1 << 23))) { /* double accumulate */
2612 gen_op_addq_lo_T0_T1(rn
);
2613 gen_op_addq_lo_T0_T1(rd
);
2615 if (insn
& (1 << 20))
2617 gen_movl_reg_T0(s
, rn
);
2618 gen_movl_reg_T1(s
, rd
);
2621 rn
= (insn
>> 16) & 0xf;
2622 rd
= (insn
>> 12) & 0xf;
2623 if (insn
& (1 << 23)) {
2624 /* load/store exclusive */
2627 /* SWP instruction */
2630 gen_movl_T0_reg(s
, rm
);
2631 gen_movl_T1_reg(s
, rn
);
2632 if (insn
& (1 << 22)) {
2637 gen_movl_reg_T0(s
, rd
);
2643 /* Misc load/store */
2644 rn
= (insn
>> 16) & 0xf;
2645 rd
= (insn
>> 12) & 0xf;
2646 gen_movl_T1_reg(s
, rn
);
2647 if (insn
& (1 << 24))
2648 gen_add_datah_offset(s
, insn
, 0);
2650 if (insn
& (1 << 20)) {
2665 } else if (sh
& 2) {
2669 gen_movl_T0_reg(s
, rd
);
2671 gen_op_addl_T1_im(4);
2672 gen_movl_T0_reg(s
, rd
+ 1);
2678 gen_movl_reg_T0(s
, rd
);
2679 gen_op_addl_T1_im(4);
2684 address_offset
= -4;
2687 gen_movl_T0_reg(s
, rd
);
2691 /* Perform base writeback before the loaded value to
2692 ensure correct behavior with overlapping index registers.
2693 ldrd with base writeback is is undefined if the
2694 destination and index registers overlap. */
2695 if (!(insn
& (1 << 24))) {
2696 gen_add_datah_offset(s
, insn
, address_offset
);
2697 gen_movl_reg_T1(s
, rn
);
2698 } else if (insn
& (1 << 21)) {
2700 gen_op_addl_T1_im(address_offset
);
2701 gen_movl_reg_T1(s
, rn
);
2704 /* Complete the load. */
2705 gen_movl_reg_T0(s
, rd
);
2713 /* Check for undefined extension instructions
2714 * per the ARM Bible IE:
2715 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
2717 sh
= (0xf << 20) | (0xf << 4);
2718 if (op1
== 0x7 && ((insn
& sh
) == sh
))
2722 /* load/store byte/word */
2723 rn
= (insn
>> 16) & 0xf;
2724 rd
= (insn
>> 12) & 0xf;
2725 gen_movl_T1_reg(s
, rn
);
2726 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
2727 if (insn
& (1 << 24))
2728 gen_add_data_offset(s
, insn
);
2729 if (insn
& (1 << 20)) {
2732 #if defined(CONFIG_USER_ONLY)
2733 if (insn
& (1 << 22))
2738 if (insn
& (1 << 22)) {
2742 gen_op_ldub_kernel();
2747 gen_op_ldl_kernel();
2752 gen_movl_T0_reg(s
, rd
);
2753 #if defined(CONFIG_USER_ONLY)
2754 if (insn
& (1 << 22))
2759 if (insn
& (1 << 22)) {
2763 gen_op_stb_kernel();
2768 gen_op_stl_kernel();
2772 if (!(insn
& (1 << 24))) {
2773 gen_add_data_offset(s
, insn
);
2774 gen_movl_reg_T1(s
, rn
);
2775 } else if (insn
& (1 << 21))
2776 gen_movl_reg_T1(s
, rn
); {
2778 if (insn
& (1 << 20)) {
2779 /* Complete the load. */
2783 gen_movl_reg_T0(s
, rd
);
2789 int j
, n
, user
, loaded_base
;
2790 /* load/store multiple words */
2791 /* XXX: store correct base if write back */
2793 if (insn
& (1 << 22)) {
2795 goto illegal_op
; /* only usable in supervisor mode */
2797 if ((insn
& (1 << 15)) == 0)
2800 rn
= (insn
>> 16) & 0xf;
2801 gen_movl_T1_reg(s
, rn
);
2803 /* compute total size */
2807 if (insn
& (1 << i
))
2810 /* XXX: test invalid n == 0 case ? */
2811 if (insn
& (1 << 23)) {
2812 if (insn
& (1 << 24)) {
2814 gen_op_addl_T1_im(4);
2816 /* post increment */
2819 if (insn
& (1 << 24)) {
2821 gen_op_addl_T1_im(-(n
* 4));
2823 /* post decrement */
2825 gen_op_addl_T1_im(-((n
- 1) * 4));
2830 if (insn
& (1 << i
)) {
2831 if (insn
& (1 << 20)) {
2837 gen_op_movl_user_T0(i
);
2838 } else if (i
== rn
) {
2839 gen_op_movl_T2_T0();
2842 gen_movl_reg_T0(s
, i
);
2847 /* special case: r15 = PC + 8 */
2848 val
= (long)s
->pc
+ 4;
2849 gen_op_movl_TN_im
[0](val
);
2851 gen_op_movl_T0_user(i
);
2853 gen_movl_T0_reg(s
, i
);
2858 /* no need to add after the last transfer */
2860 gen_op_addl_T1_im(4);
2863 if (insn
& (1 << 21)) {
2865 if (insn
& (1 << 23)) {
2866 if (insn
& (1 << 24)) {
2869 /* post increment */
2870 gen_op_addl_T1_im(4);
2873 if (insn
& (1 << 24)) {
2876 gen_op_addl_T1_im(-((n
- 1) * 4));
2878 /* post decrement */
2879 gen_op_addl_T1_im(-(n
* 4));
2882 gen_movl_reg_T1(s
, rn
);
2885 gen_op_movl_T0_T2();
2886 gen_movl_reg_T0(s
, rn
);
2888 if ((insn
& (1 << 22)) && !user
) {
2889 /* Restore CPSR from SPSR. */
2890 gen_op_movl_T0_spsr();
2891 gen_op_movl_cpsr_T0(0xffffffff);
2892 s
->is_jmp
= DISAS_UPDATE
;
2901 /* branch (and link) */
2902 val
= (int32_t)s
->pc
;
2903 if (insn
& (1 << 24)) {
2904 gen_op_movl_T0_im(val
);
2905 gen_op_movl_reg_TN
[0][14]();
2907 offset
= (((int32_t)insn
<< 8) >> 8);
2908 val
+= (offset
<< 2) + 4;
2916 op1
= (insn
>> 8) & 0xf;
2917 if (arm_feature(env
, ARM_FEATURE_XSCALE
) &&
2918 ((env
->cp15
.c15_cpar
^ 0x3fff) & (1 << op1
)))
2922 if (arm_feature(env
, ARM_FEATURE_IWMMXT
)) {
2923 if (disas_iwmmxt_insn(env
, s
, insn
))
2925 } else if (arm_feature(env
, ARM_FEATURE_XSCALE
)) {
2926 if (disas_dsp_insn(env
, s
, insn
))
2933 if (disas_cp_insn (env
, s
, insn
))
2938 if (disas_vfp_insn (env
, s
, insn
))
2942 if (disas_cp15_insn (env
, s
, insn
))
2946 /* unknown coprocessor. */
2952 gen_op_movl_T0_im((long)s
->pc
);
2953 gen_op_movl_reg_TN
[0][15]();
2955 s
->is_jmp
= DISAS_JUMP
;
2959 gen_op_movl_T0_im((long)s
->pc
- 4);
2960 gen_op_movl_reg_TN
[0][15]();
2961 gen_op_undef_insn();
2962 s
->is_jmp
= DISAS_JUMP
;
2968 static void disas_thumb_insn(DisasContext
*s
)
2970 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
2974 insn
= lduw_code(s
->pc
);
2977 switch (insn
>> 12) {
2980 op
= (insn
>> 11) & 3;
2983 rn
= (insn
>> 3) & 7;
2984 gen_movl_T0_reg(s
, rn
);
2985 if (insn
& (1 << 10)) {
2987 gen_op_movl_T1_im((insn
>> 6) & 7);
2990 rm
= (insn
>> 6) & 7;
2991 gen_movl_T1_reg(s
, rm
);
2993 if (insn
& (1 << 9))
2994 gen_op_subl_T0_T1_cc();
2996 gen_op_addl_T0_T1_cc();
2997 gen_movl_reg_T0(s
, rd
);
2999 /* shift immediate */
3000 rm
= (insn
>> 3) & 7;
3001 shift
= (insn
>> 6) & 0x1f;
3002 gen_movl_T0_reg(s
, rm
);
3003 gen_shift_T0_im_thumb
[op
](shift
);
3004 gen_movl_reg_T0(s
, rd
);
3008 /* arithmetic large immediate */
3009 op
= (insn
>> 11) & 3;
3010 rd
= (insn
>> 8) & 0x7;
3012 gen_op_movl_T0_im(insn
& 0xff);
3014 gen_movl_T0_reg(s
, rd
);
3015 gen_op_movl_T1_im(insn
& 0xff);
3019 gen_op_logic_T0_cc();
3022 gen_op_subl_T0_T1_cc();
3025 gen_op_addl_T0_T1_cc();
3028 gen_op_subl_T0_T1_cc();
3032 gen_movl_reg_T0(s
, rd
);
3035 if (insn
& (1 << 11)) {
3036 rd
= (insn
>> 8) & 7;
3037 /* load pc-relative. Bit 1 of PC is ignored. */
3038 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
3039 val
&= ~(uint32_t)2;
3040 gen_op_movl_T1_im(val
);
3042 gen_movl_reg_T0(s
, rd
);
3045 if (insn
& (1 << 10)) {
3046 /* data processing extended or blx */
3047 rd
= (insn
& 7) | ((insn
>> 4) & 8);
3048 rm
= (insn
>> 3) & 0xf;
3049 op
= (insn
>> 8) & 3;
3052 gen_movl_T0_reg(s
, rd
);
3053 gen_movl_T1_reg(s
, rm
);
3054 gen_op_addl_T0_T1();
3055 gen_movl_reg_T0(s
, rd
);
3058 gen_movl_T0_reg(s
, rd
);
3059 gen_movl_T1_reg(s
, rm
);
3060 gen_op_subl_T0_T1_cc();
3062 case 2: /* mov/cpy */
3063 gen_movl_T0_reg(s
, rm
);
3064 gen_movl_reg_T0(s
, rd
);
3066 case 3:/* branch [and link] exchange thumb register */
3067 if (insn
& (1 << 7)) {
3068 val
= (uint32_t)s
->pc
| 1;
3069 gen_op_movl_T1_im(val
);
3070 gen_movl_reg_T1(s
, 14);
3072 gen_movl_T0_reg(s
, rm
);
3079 /* data processing register */
3081 rm
= (insn
>> 3) & 7;
3082 op
= (insn
>> 6) & 0xf;
3083 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
3084 /* the shift/rotate ops want the operands backwards */
3093 if (op
== 9) /* neg */
3094 gen_op_movl_T0_im(0);
3095 else if (op
!= 0xf) /* mvn doesn't read its first operand */
3096 gen_movl_T0_reg(s
, rd
);
3098 gen_movl_T1_reg(s
, rm
);
3101 gen_op_andl_T0_T1();
3102 gen_op_logic_T0_cc();
3105 gen_op_xorl_T0_T1();
3106 gen_op_logic_T0_cc();
3109 gen_op_shll_T1_T0_cc();
3110 gen_op_logic_T1_cc();
3113 gen_op_shrl_T1_T0_cc();
3114 gen_op_logic_T1_cc();
3117 gen_op_sarl_T1_T0_cc();
3118 gen_op_logic_T1_cc();
3121 gen_op_adcl_T0_T1_cc();
3124 gen_op_sbcl_T0_T1_cc();
3127 gen_op_rorl_T1_T0_cc();
3128 gen_op_logic_T1_cc();
3131 gen_op_andl_T0_T1();
3132 gen_op_logic_T0_cc();
3136 gen_op_subl_T0_T1_cc();
3139 gen_op_subl_T0_T1_cc();
3143 gen_op_addl_T0_T1_cc();
3148 gen_op_logic_T0_cc();
3151 gen_op_mull_T0_T1();
3152 gen_op_logic_T0_cc();
3155 gen_op_bicl_T0_T1();
3156 gen_op_logic_T0_cc();
3160 gen_op_logic_T1_cc();
3167 gen_movl_reg_T1(s
, rm
);
3169 gen_movl_reg_T0(s
, rd
);
3174 /* load/store register offset. */
3176 rn
= (insn
>> 3) & 7;
3177 rm
= (insn
>> 6) & 7;
3178 op
= (insn
>> 9) & 7;
3179 gen_movl_T1_reg(s
, rn
);
3180 gen_movl_T2_reg(s
, rm
);
3181 gen_op_addl_T1_T2();
3183 if (op
< 3) /* store */
3184 gen_movl_T0_reg(s
, rd
);
3212 if (op
>= 3) /* load */
3213 gen_movl_reg_T0(s
, rd
);
3217 /* load/store word immediate offset */
3219 rn
= (insn
>> 3) & 7;
3220 gen_movl_T1_reg(s
, rn
);
3221 val
= (insn
>> 4) & 0x7c;
3222 gen_op_movl_T2_im(val
);
3223 gen_op_addl_T1_T2();
3225 if (insn
& (1 << 11)) {
3228 gen_movl_reg_T0(s
, rd
);
3231 gen_movl_T0_reg(s
, rd
);
3237 /* load/store byte immediate offset */
3239 rn
= (insn
>> 3) & 7;
3240 gen_movl_T1_reg(s
, rn
);
3241 val
= (insn
>> 6) & 0x1f;
3242 gen_op_movl_T2_im(val
);
3243 gen_op_addl_T1_T2();
3245 if (insn
& (1 << 11)) {
3248 gen_movl_reg_T0(s
, rd
);
3251 gen_movl_T0_reg(s
, rd
);
3257 /* load/store halfword immediate offset */
3259 rn
= (insn
>> 3) & 7;
3260 gen_movl_T1_reg(s
, rn
);
3261 val
= (insn
>> 5) & 0x3e;
3262 gen_op_movl_T2_im(val
);
3263 gen_op_addl_T1_T2();
3265 if (insn
& (1 << 11)) {
3268 gen_movl_reg_T0(s
, rd
);
3271 gen_movl_T0_reg(s
, rd
);
3277 /* load/store from stack */
3278 rd
= (insn
>> 8) & 7;
3279 gen_movl_T1_reg(s
, 13);
3280 val
= (insn
& 0xff) * 4;
3281 gen_op_movl_T2_im(val
);
3282 gen_op_addl_T1_T2();
3284 if (insn
& (1 << 11)) {
3287 gen_movl_reg_T0(s
, rd
);
3290 gen_movl_T0_reg(s
, rd
);
3296 /* add to high reg */
3297 rd
= (insn
>> 8) & 7;
3298 if (insn
& (1 << 11)) {
3300 gen_movl_T0_reg(s
, 13);
3302 /* PC. bit 1 is ignored. */
3303 gen_op_movl_T0_im((s
->pc
+ 2) & ~(uint32_t)2);
3305 val
= (insn
& 0xff) * 4;
3306 gen_op_movl_T1_im(val
);
3307 gen_op_addl_T0_T1();
3308 gen_movl_reg_T0(s
, rd
);
3313 op
= (insn
>> 8) & 0xf;
3316 /* adjust stack pointer */
3317 gen_movl_T1_reg(s
, 13);
3318 val
= (insn
& 0x7f) * 4;
3319 if (insn
& (1 << 7))
3320 val
= -(int32_t)val
;
3321 gen_op_movl_T2_im(val
);
3322 gen_op_addl_T1_T2();
3323 gen_movl_reg_T1(s
, 13);
3326 case 4: case 5: case 0xc: case 0xd:
3328 gen_movl_T1_reg(s
, 13);
3329 if (insn
& (1 << 8))
3333 for (i
= 0; i
< 8; i
++) {
3334 if (insn
& (1 << i
))
3337 if ((insn
& (1 << 11)) == 0) {
3338 gen_op_movl_T2_im(-offset
);
3339 gen_op_addl_T1_T2();
3341 gen_op_movl_T2_im(4);
3342 for (i
= 0; i
< 8; i
++) {
3343 if (insn
& (1 << i
)) {
3344 if (insn
& (1 << 11)) {
3347 gen_movl_reg_T0(s
, i
);
3350 gen_movl_T0_reg(s
, i
);
3353 /* advance to the next address. */
3354 gen_op_addl_T1_T2();
3357 if (insn
& (1 << 8)) {
3358 if (insn
& (1 << 11)) {
3361 /* don't set the pc until the rest of the instruction
3365 gen_movl_T0_reg(s
, 14);
3368 gen_op_addl_T1_T2();
3370 if ((insn
& (1 << 11)) == 0) {
3371 gen_op_movl_T2_im(-offset
);
3372 gen_op_addl_T1_T2();
3374 /* write back the new stack pointer */
3375 gen_movl_reg_T1(s
, 13);
3376 /* set the new PC value */
3377 if ((insn
& 0x0900) == 0x0900)
3381 case 0xe: /* bkpt */
3382 gen_op_movl_T0_im((long)s
->pc
- 2);
3383 gen_op_movl_reg_TN
[0][15]();
3385 s
->is_jmp
= DISAS_JUMP
;
3394 /* load/store multiple */
3395 rn
= (insn
>> 8) & 0x7;
3396 gen_movl_T1_reg(s
, rn
);
3397 gen_op_movl_T2_im(4);
3398 for (i
= 0; i
< 8; i
++) {
3399 if (insn
& (1 << i
)) {
3400 if (insn
& (1 << 11)) {
3403 gen_movl_reg_T0(s
, i
);
3406 gen_movl_T0_reg(s
, i
);
3409 /* advance to the next address */
3410 gen_op_addl_T1_T2();
3413 /* Base register writeback. */
3414 if ((insn
& (1 << rn
)) == 0)
3415 gen_movl_reg_T1(s
, rn
);
3419 /* conditional branch or swi */
3420 cond
= (insn
>> 8) & 0xf;
3426 gen_op_movl_T0_im((long)s
->pc
| 1);
3427 /* Don't set r15. */
3428 gen_op_movl_reg_TN
[0][15]();
3430 s
->is_jmp
= DISAS_JUMP
;
3433 /* generate a conditional jump to next instruction */
3434 s
->condlabel
= gen_new_label();
3435 gen_test_cc
[cond
^ 1](s
->condlabel
);
3437 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
3438 //s->is_jmp = DISAS_JUMP_NEXT;
3439 gen_movl_T1_reg(s
, 15);
3441 /* jump to the offset */
3442 val
= (uint32_t)s
->pc
+ 2;
3443 offset
= ((int32_t)insn
<< 24) >> 24;
3449 /* unconditional branch */
3450 if (insn
& (1 << 11)) {
3451 /* Second half of blx. */
3452 offset
= ((insn
& 0x7ff) << 1);
3453 gen_movl_T0_reg(s
, 14);
3454 gen_op_movl_T1_im(offset
);
3455 gen_op_addl_T0_T1();
3456 gen_op_movl_T1_im(0xfffffffc);
3457 gen_op_andl_T0_T1();
3459 val
= (uint32_t)s
->pc
;
3460 gen_op_movl_T1_im(val
| 1);
3461 gen_movl_reg_T1(s
, 14);
3465 val
= (uint32_t)s
->pc
;
3466 offset
= ((int32_t)insn
<< 21) >> 21;
3467 val
+= (offset
<< 1) + 2;
3472 /* branch and link [and switch to arm] */
3473 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
3474 /* Instruction spans a page boundary. Implement it as two
3475 16-bit instructions in case the second half causes an
3477 offset
= ((int32_t)insn
<< 21) >> 9;
3478 val
= s
->pc
+ 2 + offset
;
3479 gen_op_movl_T0_im(val
);
3480 gen_movl_reg_T0(s
, 14);
3483 if (insn
& (1 << 11)) {
3484 /* Second half of bl. */
3485 offset
= ((insn
& 0x7ff) << 1) | 1;
3486 gen_movl_T0_reg(s
, 14);
3487 gen_op_movl_T1_im(offset
);
3488 gen_op_addl_T0_T1();
3490 val
= (uint32_t)s
->pc
;
3491 gen_op_movl_T1_im(val
| 1);
3492 gen_movl_reg_T1(s
, 14);
3496 offset
= ((int32_t)insn
<< 21) >> 10;
3497 insn
= lduw_code(s
->pc
);
3498 offset
|= insn
& 0x7ff;
3500 val
= (uint32_t)s
->pc
+ 2;
3501 gen_op_movl_T1_im(val
| 1);
3502 gen_movl_reg_T1(s
, 14);
3505 if (insn
& (1 << 12)) {
3510 val
&= ~(uint32_t)2;
3511 gen_op_movl_T0_im(val
);
3517 gen_op_movl_T0_im((long)s
->pc
- 2);
3518 gen_op_movl_reg_TN
[0][15]();
3519 gen_op_undef_insn();
3520 s
->is_jmp
= DISAS_JUMP
;
3523 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
3524 basic block 'tb'. If search_pc is TRUE, also generate PC
3525 information for each intermediate instruction. */
3526 static inline int gen_intermediate_code_internal(CPUState
*env
,
3527 TranslationBlock
*tb
,
3530 DisasContext dc1
, *dc
= &dc1
;
3531 uint16_t *gen_opc_end
;
3533 target_ulong pc_start
;
3534 uint32_t next_page_start
;
3536 /* generate intermediate code */
3541 gen_opc_ptr
= gen_opc_buf
;
3542 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
3543 gen_opparam_ptr
= gen_opparam_buf
;
3545 dc
->is_jmp
= DISAS_NEXT
;
3547 dc
->singlestep_enabled
= env
->singlestep_enabled
;
3549 dc
->thumb
= env
->thumb
;
3551 #if !defined(CONFIG_USER_ONLY)
3552 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
3554 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
3558 if (env
->nb_breakpoints
> 0) {
3559 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
3560 if (env
->breakpoints
[j
] == dc
->pc
) {
3561 gen_op_movl_T0_im((long)dc
->pc
);
3562 gen_op_movl_reg_TN
[0][15]();
3564 dc
->is_jmp
= DISAS_JUMP
;
3570 j
= gen_opc_ptr
- gen_opc_buf
;
3574 gen_opc_instr_start
[lj
++] = 0;
3576 gen_opc_pc
[lj
] = dc
->pc
;
3577 gen_opc_instr_start
[lj
] = 1;
3581 disas_thumb_insn(dc
);
3583 disas_arm_insn(env
, dc
);
3585 if (dc
->condjmp
&& !dc
->is_jmp
) {
3586 gen_set_label(dc
->condlabel
);
3589 /* Terminate the TB on memory ops if watchpoints are present. */
3590 /* FIXME: This should be replacd by the deterministic execution
3591 * IRQ raising bits. */
3592 if (dc
->is_mem
&& env
->nb_watchpoints
)
3595 /* Translation stops when a conditional branch is enoutered.
3596 * Otherwise the subsequent code could get translated several times.
3597 * Also stop translation when a page boundary is reached. This
3598 * ensures prefech aborts occur at the right place. */
3599 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
3600 !env
->singlestep_enabled
&&
3601 dc
->pc
< next_page_start
);
3602 /* At this stage dc->condjmp will only be set when the skipped
3603 * instruction was a conditional branch, and the PC has already been
3605 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
3606 /* Make sure the pc is updated, and raise a debug exception. */
3609 gen_set_label(dc
->condlabel
);
3611 if (dc
->condjmp
|| !dc
->is_jmp
) {
3612 gen_op_movl_T0_im((long)dc
->pc
);
3613 gen_op_movl_reg_TN
[0][15]();
3618 switch(dc
->is_jmp
) {
3620 gen_goto_tb(dc
, 1, dc
->pc
);
3625 /* indicate that the hash table must be used to find the next TB */
3630 /* nothing more to generate */
3634 gen_set_label(dc
->condlabel
);
3635 gen_goto_tb(dc
, 1, dc
->pc
);
3639 *gen_opc_ptr
= INDEX_op_end
;
3642 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
3643 fprintf(logfile
, "----------------\n");
3644 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
3645 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, env
->thumb
);
3646 fprintf(logfile
, "\n");
3647 if (loglevel
& (CPU_LOG_TB_OP
)) {
3648 fprintf(logfile
, "OP:\n");
3649 dump_ops(gen_opc_buf
, gen_opparam_buf
);
3650 fprintf(logfile
, "\n");
3655 j
= gen_opc_ptr
- gen_opc_buf
;
3658 gen_opc_instr_start
[lj
++] = 0;
3660 tb
->size
= dc
->pc
- pc_start
;
3665 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
3667 return gen_intermediate_code_internal(env
, tb
, 0);
3670 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
3672 return gen_intermediate_code_internal(env
, tb
, 1);
3675 static const char *cpu_mode_names
[16] = {
3676 "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
3677 "???", "???", "???", "und", "???", "???", "???", "sys"
3679 void cpu_dump_state(CPUState
*env
, FILE *f
,
3680 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
3689 /* ??? This assumes float64 and double have the same layout.
3690 Oh well, it's only debug dumps. */
3698 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
3700 cpu_fprintf(f
, "\n");
3702 cpu_fprintf(f
, " ");
3704 psr
= cpsr_read(env
);
3705 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d\n",
3707 psr
& (1 << 31) ? 'N' : '-',
3708 psr
& (1 << 30) ? 'Z' : '-',
3709 psr
& (1 << 29) ? 'C' : '-',
3710 psr
& (1 << 28) ? 'V' : '-',
3711 psr
& CPSR_T
? 'T' : 'A',
3712 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
3714 for (i
= 0; i
< 16; i
++) {
3715 d
.d
= env
->vfp
.regs
[i
];
3719 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
3720 i
* 2, (int)s0
.i
, s0
.s
,
3721 i
* 2 + 1, (int)s1
.i
, s1
.s
,
3722 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
3725 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);