]>
git.proxmox.com Git - qemu.git/blob - target-arm/translate.c
4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 #define ENABLE_ARCH_5J 0
32 #define ENABLE_ARCH_6 1
33 #define ENABLE_ARCH_6T2 1
35 #define ARCH(x) if (!ENABLE_ARCH_##x) goto illegal_op;
37 /* internal defines */
38 typedef struct DisasContext
{
41 /* Nonzero if this instruction has been conditionally skipped. */
43 /* The label that will be jumped to when the instruction is skipped. */
45 struct TranslationBlock
*tb
;
46 int singlestep_enabled
;
49 #if !defined(CONFIG_USER_ONLY)
54 #if defined(CONFIG_USER_ONLY)
57 #define IS_USER(s) (s->user)
60 #define DISAS_JUMP_NEXT 4
62 #ifdef USE_DIRECT_JUMP
65 #define TBPARAM(x) (long)(x)
68 /* XXX: move that elsewhere */
69 static uint16_t *gen_opc_ptr
;
70 static uint32_t *gen_opparam_ptr
;
75 #define DEF(s, n, copy_size) INDEX_op_ ## s,
83 static GenOpFunc1
*gen_test_cc
[14] = {
100 const uint8_t table_logic_cc
[16] = {
119 static GenOpFunc1
*gen_shift_T1_im
[4] = {
126 static GenOpFunc
*gen_shift_T1_0
[4] = {
133 static GenOpFunc1
*gen_shift_T2_im
[4] = {
140 static GenOpFunc
*gen_shift_T2_0
[4] = {
147 static GenOpFunc1
*gen_shift_T1_im_cc
[4] = {
148 gen_op_shll_T1_im_cc
,
149 gen_op_shrl_T1_im_cc
,
150 gen_op_sarl_T1_im_cc
,
151 gen_op_rorl_T1_im_cc
,
154 static GenOpFunc
*gen_shift_T1_0_cc
[4] = {
161 static GenOpFunc
*gen_shift_T1_T0
[4] = {
168 static GenOpFunc
*gen_shift_T1_T0_cc
[4] = {
169 gen_op_shll_T1_T0_cc
,
170 gen_op_shrl_T1_T0_cc
,
171 gen_op_sarl_T1_T0_cc
,
172 gen_op_rorl_T1_T0_cc
,
175 static GenOpFunc
*gen_op_movl_TN_reg
[3][16] = {
232 static GenOpFunc
*gen_op_movl_reg_TN
[2][16] = {
271 static GenOpFunc1
*gen_op_movl_TN_im
[3] = {
277 static GenOpFunc1
*gen_shift_T0_im_thumb
[3] = {
278 gen_op_shll_T0_im_thumb
,
279 gen_op_shrl_T0_im_thumb
,
280 gen_op_sarl_T0_im_thumb
,
283 static inline void gen_bx(DisasContext
*s
)
285 s
->is_jmp
= DISAS_UPDATE
;
290 #if defined(CONFIG_USER_ONLY)
291 #define gen_ldst(name, s) gen_op_##name##_raw()
293 #define gen_ldst(name, s) do { \
296 gen_op_##name##_user(); \
298 gen_op_##name##_kernel(); \
302 static inline void gen_movl_TN_reg(DisasContext
*s
, int reg
, int t
)
307 /* normaly, since we updated PC, we need only to add one insn */
309 val
= (long)s
->pc
+ 2;
311 val
= (long)s
->pc
+ 4;
312 gen_op_movl_TN_im
[t
](val
);
314 gen_op_movl_TN_reg
[t
][reg
]();
318 static inline void gen_movl_T0_reg(DisasContext
*s
, int reg
)
320 gen_movl_TN_reg(s
, reg
, 0);
323 static inline void gen_movl_T1_reg(DisasContext
*s
, int reg
)
325 gen_movl_TN_reg(s
, reg
, 1);
328 static inline void gen_movl_T2_reg(DisasContext
*s
, int reg
)
330 gen_movl_TN_reg(s
, reg
, 2);
333 static inline void gen_movl_reg_TN(DisasContext
*s
, int reg
, int t
)
335 gen_op_movl_reg_TN
[t
][reg
]();
337 s
->is_jmp
= DISAS_JUMP
;
341 static inline void gen_movl_reg_T0(DisasContext
*s
, int reg
)
343 gen_movl_reg_TN(s
, reg
, 0);
346 static inline void gen_movl_reg_T1(DisasContext
*s
, int reg
)
348 gen_movl_reg_TN(s
, reg
, 1);
351 /* Force a TB lookup after an instruction that changes the CPU state. */
352 static inline void gen_lookup_tb(DisasContext
*s
)
354 gen_op_movl_T0_im(s
->pc
);
355 gen_movl_reg_T0(s
, 15);
356 s
->is_jmp
= DISAS_UPDATE
;
359 static inline void gen_add_data_offset(DisasContext
*s
, unsigned int insn
)
361 int val
, rm
, shift
, shiftop
;
363 if (!(insn
& (1 << 25))) {
366 if (!(insn
& (1 << 23)))
369 gen_op_addl_T1_im(val
);
373 shift
= (insn
>> 7) & 0x1f;
374 gen_movl_T2_reg(s
, rm
);
375 shiftop
= (insn
>> 5) & 3;
377 gen_shift_T2_im
[shiftop
](shift
);
378 } else if (shiftop
!= 0) {
379 gen_shift_T2_0
[shiftop
]();
381 if (!(insn
& (1 << 23)))
388 static inline void gen_add_datah_offset(DisasContext
*s
, unsigned int insn
,
393 if (insn
& (1 << 22)) {
395 val
= (insn
& 0xf) | ((insn
>> 4) & 0xf0);
396 if (!(insn
& (1 << 23)))
400 gen_op_addl_T1_im(val
);
404 gen_op_addl_T1_im(extra
);
406 gen_movl_T2_reg(s
, rm
);
407 if (!(insn
& (1 << 23)))
414 #define VFP_OP(name) \
415 static inline void gen_vfp_##name(int dp) \
418 gen_op_vfp_##name##d(); \
420 gen_op_vfp_##name##s(); \
442 static inline void gen_vfp_ld(DisasContext
*s
, int dp
)
445 gen_ldst(vfp_ldd
, s
);
447 gen_ldst(vfp_lds
, s
);
450 static inline void gen_vfp_st(DisasContext
*s
, int dp
)
453 gen_ldst(vfp_std
, s
);
455 gen_ldst(vfp_sts
, s
);
459 vfp_reg_offset (int dp
, int reg
)
462 return offsetof(CPUARMState
, vfp
.regs
[reg
]);
464 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
465 + offsetof(CPU_DoubleU
, l
.upper
);
467 return offsetof(CPUARMState
, vfp
.regs
[reg
>> 1])
468 + offsetof(CPU_DoubleU
, l
.lower
);
471 static inline void gen_mov_F0_vreg(int dp
, int reg
)
474 gen_op_vfp_getreg_F0d(vfp_reg_offset(dp
, reg
));
476 gen_op_vfp_getreg_F0s(vfp_reg_offset(dp
, reg
));
479 static inline void gen_mov_F1_vreg(int dp
, int reg
)
482 gen_op_vfp_getreg_F1d(vfp_reg_offset(dp
, reg
));
484 gen_op_vfp_getreg_F1s(vfp_reg_offset(dp
, reg
));
487 static inline void gen_mov_vreg_F0(int dp
, int reg
)
490 gen_op_vfp_setreg_F0d(vfp_reg_offset(dp
, reg
));
492 gen_op_vfp_setreg_F0s(vfp_reg_offset(dp
, reg
));
495 /* Disassemble system coprocessor (cp15) instruction. Return nonzero if
496 instruction is not defined. */
497 static int disas_cp15_insn(DisasContext
*s
, uint32_t insn
)
501 /* ??? Some cp15 registers are accessible from userspace. */
505 if ((insn
& 0x0fff0fff) == 0x0e070f90
506 || (insn
& 0x0fff0fff) == 0x0e070f58) {
507 /* Wait for interrupt. */
508 gen_op_movl_T0_im((long)s
->pc
);
509 gen_op_movl_reg_TN
[0][15]();
511 s
->is_jmp
= DISAS_JUMP
;
514 rd
= (insn
>> 12) & 0xf;
515 if (insn
& (1 << 20)) {
516 gen_op_movl_T0_cp15(insn
);
517 /* If the destination register is r15 then sets condition codes. */
519 gen_movl_reg_T0(s
, rd
);
521 gen_movl_T0_reg(s
, rd
);
522 gen_op_movl_cp15_T0(insn
);
528 /* Disassemble a VFP instruction. Returns nonzero if an error occured
529 (ie. an undefined instruction). */
530 static int disas_vfp_insn(CPUState
* env
, DisasContext
*s
, uint32_t insn
)
532 uint32_t rd
, rn
, rm
, op
, i
, n
, offset
, delta_d
, delta_m
, bank_mask
;
535 if (!arm_feature(env
, ARM_FEATURE_VFP
))
538 if ((env
->vfp
.xregs
[ARM_VFP_FPEXC
] & (1 << 30)) == 0) {
539 /* VFP disabled. Only allow fmxr/fmrx to/from fpexc and fpsid. */
540 if ((insn
& 0x0fe00fff) != 0x0ee00a10)
542 rn
= (insn
>> 16) & 0xf;
543 if (rn
!= 0 && rn
!= 8)
546 dp
= ((insn
& 0xf00) == 0xb00);
547 switch ((insn
>> 24) & 0xf) {
549 if (insn
& (1 << 4)) {
550 /* single register transfer */
551 if ((insn
& 0x6f) != 0x00)
553 rd
= (insn
>> 12) & 0xf;
557 rn
= (insn
>> 16) & 0xf;
558 /* Get the existing value even for arm->vfp moves because
559 we only set half the register. */
560 gen_mov_F0_vreg(1, rn
);
562 if (insn
& (1 << 20)) {
564 if (insn
& (1 << 21))
565 gen_movl_reg_T1(s
, rd
);
567 gen_movl_reg_T0(s
, rd
);
570 if (insn
& (1 << 21))
571 gen_movl_T1_reg(s
, rd
);
573 gen_movl_T0_reg(s
, rd
);
575 gen_mov_vreg_F0(dp
, rn
);
578 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
579 if (insn
& (1 << 20)) {
581 if (insn
& (1 << 21)) {
582 /* system register */
588 case ARM_VFP_FPINST2
:
589 gen_op_vfp_movl_T0_xreg(rn
);
593 gen_op_vfp_movl_T0_fpscr_flags();
595 gen_op_vfp_movl_T0_fpscr();
601 gen_mov_F0_vreg(0, rn
);
605 /* Set the 4 flag bits in the CPSR. */
606 gen_op_movl_cpsr_T0(0xf0000000);
608 gen_movl_reg_T0(s
, rd
);
611 gen_movl_T0_reg(s
, rd
);
612 if (insn
& (1 << 21)) {
614 /* system register */
617 /* Writes are ignored. */
620 gen_op_vfp_movl_fpscr_T0();
624 gen_op_vfp_movl_xreg_T0(rn
);
628 case ARM_VFP_FPINST2
:
629 gen_op_vfp_movl_xreg_T0(rn
);
636 gen_mov_vreg_F0(0, rn
);
641 /* data processing */
642 /* The opcode is in bits 23, 21, 20 and 6. */
643 op
= ((insn
>> 20) & 8) | ((insn
>> 19) & 6) | ((insn
>> 6) & 1);
647 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
649 /* rn is register number */
652 rn
= (insn
>> 16) & 0xf;
655 if (op
== 15 && (rn
== 15 || rn
> 17)) {
656 /* Integer or single precision destination. */
657 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
659 if (insn
& (1 << 22))
661 rd
= (insn
>> 12) & 0xf;
664 if (op
== 15 && (rn
== 16 || rn
== 17)) {
665 /* Integer source. */
666 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
673 rn
= ((insn
>> 15) & 0x1e) | ((insn
>> 7) & 1);
674 if (op
== 15 && rn
== 15) {
675 /* Double precision destination. */
676 if (insn
& (1 << 22))
678 rd
= (insn
>> 12) & 0xf;
680 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
681 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
684 veclen
= env
->vfp
.vec_len
;
685 if (op
== 15 && rn
> 3)
688 /* Shut up compiler warnings. */
699 /* Figure out what type of vector operation this is. */
700 if ((rd
& bank_mask
) == 0) {
705 delta_d
= (env
->vfp
.vec_stride
>> 1) + 1;
707 delta_d
= env
->vfp
.vec_stride
+ 1;
709 if ((rm
& bank_mask
) == 0) {
710 /* mixed scalar/vector */
719 /* Load the initial operands. */
725 gen_mov_F0_vreg(0, rm
);
730 gen_mov_F0_vreg(dp
, rd
);
731 gen_mov_F1_vreg(dp
, rm
);
735 /* Compare with zero */
736 gen_mov_F0_vreg(dp
, rd
);
740 /* One source operand. */
741 gen_mov_F0_vreg(dp
, rm
);
744 /* Two source operands. */
745 gen_mov_F0_vreg(dp
, rn
);
746 gen_mov_F1_vreg(dp
, rm
);
750 /* Perform the calculation. */
752 case 0: /* mac: fd + (fn * fm) */
754 gen_mov_F1_vreg(dp
, rd
);
757 case 1: /* nmac: fd - (fn * fm) */
760 gen_mov_F1_vreg(dp
, rd
);
763 case 2: /* msc: -fd + (fn * fm) */
765 gen_mov_F1_vreg(dp
, rd
);
768 case 3: /* nmsc: -fd - (fn * fm) */
770 gen_mov_F1_vreg(dp
, rd
);
774 case 4: /* mul: fn * fm */
777 case 5: /* nmul: -(fn * fm) */
781 case 6: /* add: fn + fm */
784 case 7: /* sub: fn - fm */
787 case 8: /* div: fn / fm */
790 case 15: /* extension space */
817 case 15: /* single<->double conversion */
832 case 25: /* ftouiz */
838 case 27: /* ftosiz */
841 default: /* undefined */
842 printf ("rn:%d\n", rn
);
846 default: /* undefined */
847 printf ("op:%d\n", op
);
851 /* Write back the result. */
852 if (op
== 15 && (rn
>= 8 && rn
<= 11))
853 ; /* Comparison, do nothing. */
854 else if (op
== 15 && rn
> 17)
855 /* Integer result. */
856 gen_mov_vreg_F0(0, rd
);
857 else if (op
== 15 && rn
== 15)
859 gen_mov_vreg_F0(!dp
, rd
);
861 gen_mov_vreg_F0(dp
, rd
);
863 /* break out of the loop if we have finished */
867 if (op
== 15 && delta_m
== 0) {
868 /* single source one-many */
870 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
872 gen_mov_vreg_F0(dp
, rd
);
876 /* Setup the next operands. */
878 rd
= ((rd
+ delta_d
) & (bank_mask
- 1))
882 /* One source operand. */
883 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
885 gen_mov_F0_vreg(dp
, rm
);
887 /* Two source operands. */
888 rn
= ((rn
+ delta_d
) & (bank_mask
- 1))
890 gen_mov_F0_vreg(dp
, rn
);
892 rm
= ((rm
+ delta_m
) & (bank_mask
- 1))
894 gen_mov_F1_vreg(dp
, rm
);
902 if (dp
&& (insn
& (1 << 22))) {
903 /* two-register transfer */
904 rn
= (insn
>> 16) & 0xf;
905 rd
= (insn
>> 12) & 0xf;
911 rm
= ((insn
<< 1) & 0x1e) | ((insn
>> 5) & 1);
913 if (insn
& (1 << 20)) {
916 gen_mov_F0_vreg(1, rm
);
918 gen_movl_reg_T0(s
, rd
);
919 gen_movl_reg_T1(s
, rn
);
921 gen_mov_F0_vreg(0, rm
);
923 gen_movl_reg_T0(s
, rn
);
924 gen_mov_F0_vreg(0, rm
+ 1);
926 gen_movl_reg_T0(s
, rd
);
931 gen_movl_T0_reg(s
, rd
);
932 gen_movl_T1_reg(s
, rn
);
934 gen_mov_vreg_F0(1, rm
);
936 gen_movl_T0_reg(s
, rn
);
938 gen_mov_vreg_F0(0, rm
);
939 gen_movl_T0_reg(s
, rd
);
941 gen_mov_vreg_F0(0, rm
+ 1);
946 rn
= (insn
>> 16) & 0xf;
948 rd
= (insn
>> 12) & 0xf;
950 rd
= ((insn
>> 11) & 0x1e) | ((insn
>> 22) & 1);
951 gen_movl_T1_reg(s
, rn
);
952 if ((insn
& 0x01200000) == 0x01000000) {
953 /* Single load/store */
954 offset
= (insn
& 0xff) << 2;
955 if ((insn
& (1 << 23)) == 0)
957 gen_op_addl_T1_im(offset
);
958 if (insn
& (1 << 20)) {
960 gen_mov_vreg_F0(dp
, rd
);
962 gen_mov_F0_vreg(dp
, rd
);
966 /* load/store multiple */
968 n
= (insn
>> 1) & 0x7f;
972 if (insn
& (1 << 24)) /* pre-decrement */
973 gen_op_addl_T1_im(-((insn
& 0xff) << 2));
979 for (i
= 0; i
< n
; i
++) {
980 if (insn
& (1 << 20)) {
983 gen_mov_vreg_F0(dp
, rd
+ i
);
986 gen_mov_F0_vreg(dp
, rd
+ i
);
989 gen_op_addl_T1_im(offset
);
991 if (insn
& (1 << 21)) {
993 if (insn
& (1 << 24))
994 offset
= -offset
* n
;
995 else if (dp
&& (insn
& 1))
1001 gen_op_addl_T1_im(offset
);
1002 gen_movl_reg_T1(s
, rn
);
1008 /* Should never happen. */
1014 static inline void gen_goto_tb(DisasContext
*s
, int n
, uint32_t dest
)
1016 TranslationBlock
*tb
;
1019 if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
1021 gen_op_goto_tb0(TBPARAM(tb
));
1023 gen_op_goto_tb1(TBPARAM(tb
));
1024 gen_op_movl_T0_im(dest
);
1025 gen_op_movl_r15_T0();
1026 gen_op_movl_T0_im((long)tb
+ n
);
1029 gen_op_movl_T0_im(dest
);
1030 gen_op_movl_r15_T0();
1036 static inline void gen_jmp (DisasContext
*s
, uint32_t dest
)
1038 if (__builtin_expect(s
->singlestep_enabled
, 0)) {
1039 /* An indirect jump so that we still trigger the debug exception. */
1042 gen_op_movl_T0_im(dest
);
1045 gen_goto_tb(s
, 0, dest
);
1046 s
->is_jmp
= DISAS_TB_JUMP
;
1050 static inline void gen_mulxy(int x
, int y
)
1053 gen_op_sarl_T0_im(16);
1057 gen_op_sarl_T1_im(16);
1063 /* Return the mask of PSR bits set by a MSR instruction. */
1064 static uint32_t msr_mask(DisasContext
*s
, int flags
, int spsr
) {
1068 if (flags
& (1 << 0))
1070 if (flags
& (1 << 1))
1072 if (flags
& (1 << 2))
1074 if (flags
& (1 << 3))
1076 /* Mask out undefined bits. */
1078 /* Mask out state bits. */
1080 mask
&= ~0x01000020;
1081 /* Mask out privileged bits. */
1087 /* Returns nonzero if access to the PSR is not permitted. */
1088 static int gen_set_psr_T0(DisasContext
*s
, uint32_t mask
, int spsr
)
1091 /* ??? This is also undefined in system mode. */
1094 gen_op_movl_spsr_T0(mask
);
1096 gen_op_movl_cpsr_T0(mask
);
1102 static void gen_exception_return(DisasContext
*s
)
1104 gen_op_movl_reg_TN
[0][15]();
1105 gen_op_movl_T0_spsr();
1106 gen_op_movl_cpsr_T0(0xffffffff);
1107 s
->is_jmp
= DISAS_UPDATE
;
1110 static void disas_arm_insn(CPUState
* env
, DisasContext
*s
)
1112 unsigned int cond
, insn
, val
, op1
, i
, shift
, rm
, rs
, rn
, rd
, sh
;
1114 insn
= ldl_code(s
->pc
);
1119 /* Unconditional instructions. */
1120 if ((insn
& 0x0d70f000) == 0x0550f000)
1122 else if ((insn
& 0x0e000000) == 0x0a000000) {
1123 /* branch link and change to thumb (blx <offset>) */
1126 val
= (uint32_t)s
->pc
;
1127 gen_op_movl_T0_im(val
);
1128 gen_movl_reg_T0(s
, 14);
1129 /* Sign-extend the 24-bit offset */
1130 offset
= (((int32_t)insn
) << 8) >> 8;
1131 /* offset * 4 + bit24 * 2 + (thumb bit) */
1132 val
+= (offset
<< 2) | ((insn
>> 23) & 2) | 1;
1133 /* pipeline offset */
1135 gen_op_movl_T0_im(val
);
1138 } else if ((insn
& 0x0fe00000) == 0x0c400000) {
1139 /* Coprocessor double register transfer. */
1140 } else if ((insn
& 0x0f000010) == 0x0e000010) {
1141 /* Additional coprocessor register transfer. */
1142 } else if ((insn
& 0x0ff10010) == 0x01000000) {
1143 /* cps (privileged) */
1144 } else if ((insn
& 0x0ffffdff) == 0x01010000) {
1146 if (insn
& (1 << 9)) {
1147 /* BE8 mode not implemented. */
1155 /* if not always execute, we generate a conditional jump to
1157 s
->condlabel
= gen_new_label();
1158 gen_test_cc
[cond
^ 1](s
->condlabel
);
1160 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1161 //s->is_jmp = DISAS_JUMP_NEXT;
1163 if ((insn
& 0x0f900000) == 0x03000000) {
1164 if ((insn
& 0x0fb0f000) != 0x0320f000)
1166 /* CPSR = immediate */
1168 shift
= ((insn
>> 8) & 0xf) * 2;
1170 val
= (val
>> shift
) | (val
<< (32 - shift
));
1171 gen_op_movl_T0_im(val
);
1172 i
= ((insn
& (1 << 22)) != 0);
1173 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
1175 } else if ((insn
& 0x0f900000) == 0x01000000
1176 && (insn
& 0x00000090) != 0x00000090) {
1177 /* miscellaneous instructions */
1178 op1
= (insn
>> 21) & 3;
1179 sh
= (insn
>> 4) & 0xf;
1182 case 0x0: /* move program status register */
1185 gen_movl_T0_reg(s
, rm
);
1186 i
= ((op1
& 2) != 0);
1187 if (gen_set_psr_T0(s
, msr_mask(s
, (insn
>> 16) & 0xf, i
), i
))
1191 rd
= (insn
>> 12) & 0xf;
1195 gen_op_movl_T0_spsr();
1197 gen_op_movl_T0_cpsr();
1199 gen_movl_reg_T0(s
, rd
);
1204 /* branch/exchange thumb (bx). */
1205 gen_movl_T0_reg(s
, rm
);
1207 } else if (op1
== 3) {
1209 rd
= (insn
>> 12) & 0xf;
1210 gen_movl_T0_reg(s
, rm
);
1212 gen_movl_reg_T0(s
, rd
);
1220 /* Trivial implementation equivalent to bx. */
1221 gen_movl_T0_reg(s
, rm
);
1231 /* branch link/exchange thumb (blx) */
1232 val
= (uint32_t)s
->pc
;
1233 gen_op_movl_T0_im(val
);
1234 gen_movl_reg_T0(s
, 14);
1235 gen_movl_T0_reg(s
, rm
);
1238 case 0x5: /* saturating add/subtract */
1239 rd
= (insn
>> 12) & 0xf;
1240 rn
= (insn
>> 16) & 0xf;
1241 gen_movl_T0_reg(s
, rm
);
1242 gen_movl_T1_reg(s
, rn
);
1244 gen_op_double_T1_saturate();
1246 gen_op_subl_T0_T1_saturate();
1248 gen_op_addl_T0_T1_saturate();
1249 gen_movl_reg_T0(s
, rd
);
1252 gen_op_movl_T0_im((long)s
->pc
- 4);
1253 gen_op_movl_reg_TN
[0][15]();
1255 s
->is_jmp
= DISAS_JUMP
;
1257 case 0x8: /* signed multiply */
1261 rs
= (insn
>> 8) & 0xf;
1262 rn
= (insn
>> 12) & 0xf;
1263 rd
= (insn
>> 16) & 0xf;
1265 /* (32 * 16) >> 16 */
1266 gen_movl_T0_reg(s
, rm
);
1267 gen_movl_T1_reg(s
, rs
);
1269 gen_op_sarl_T1_im(16);
1272 gen_op_imulw_T0_T1();
1273 if ((sh
& 2) == 0) {
1274 gen_movl_T1_reg(s
, rn
);
1275 gen_op_addl_T0_T1_setq();
1277 gen_movl_reg_T0(s
, rd
);
1280 gen_movl_T0_reg(s
, rm
);
1281 gen_movl_T1_reg(s
, rs
);
1282 gen_mulxy(sh
& 2, sh
& 4);
1284 gen_op_signbit_T1_T0();
1285 gen_op_addq_T0_T1(rn
, rd
);
1286 gen_movl_reg_T0(s
, rn
);
1287 gen_movl_reg_T1(s
, rd
);
1290 gen_movl_T1_reg(s
, rn
);
1291 gen_op_addl_T0_T1_setq();
1293 gen_movl_reg_T0(s
, rd
);
1300 } else if (((insn
& 0x0e000000) == 0 &&
1301 (insn
& 0x00000090) != 0x90) ||
1302 ((insn
& 0x0e000000) == (1 << 25))) {
1303 int set_cc
, logic_cc
, shiftop
;
1305 op1
= (insn
>> 21) & 0xf;
1306 set_cc
= (insn
>> 20) & 1;
1307 logic_cc
= table_logic_cc
[op1
] & set_cc
;
1309 /* data processing instruction */
1310 if (insn
& (1 << 25)) {
1311 /* immediate operand */
1313 shift
= ((insn
>> 8) & 0xf) * 2;
1315 val
= (val
>> shift
) | (val
<< (32 - shift
));
1316 gen_op_movl_T1_im(val
);
1317 if (logic_cc
&& shift
)
1322 gen_movl_T1_reg(s
, rm
);
1323 shiftop
= (insn
>> 5) & 3;
1324 if (!(insn
& (1 << 4))) {
1325 shift
= (insn
>> 7) & 0x1f;
1328 gen_shift_T1_im_cc
[shiftop
](shift
);
1330 gen_shift_T1_im
[shiftop
](shift
);
1332 } else if (shiftop
!= 0) {
1334 gen_shift_T1_0_cc
[shiftop
]();
1336 gen_shift_T1_0
[shiftop
]();
1340 rs
= (insn
>> 8) & 0xf;
1341 gen_movl_T0_reg(s
, rs
);
1343 gen_shift_T1_T0_cc
[shiftop
]();
1345 gen_shift_T1_T0
[shiftop
]();
1349 if (op1
!= 0x0f && op1
!= 0x0d) {
1350 rn
= (insn
>> 16) & 0xf;
1351 gen_movl_T0_reg(s
, rn
);
1353 rd
= (insn
>> 12) & 0xf;
1356 gen_op_andl_T0_T1();
1357 gen_movl_reg_T0(s
, rd
);
1359 gen_op_logic_T0_cc();
1362 gen_op_xorl_T0_T1();
1363 gen_movl_reg_T0(s
, rd
);
1365 gen_op_logic_T0_cc();
1368 if (set_cc
&& rd
== 15) {
1369 /* SUBS r15, ... is used for exception return. */
1372 gen_op_subl_T0_T1_cc();
1373 gen_exception_return(s
);
1376 gen_op_subl_T0_T1_cc();
1378 gen_op_subl_T0_T1();
1379 gen_movl_reg_T0(s
, rd
);
1384 gen_op_rsbl_T0_T1_cc();
1386 gen_op_rsbl_T0_T1();
1387 gen_movl_reg_T0(s
, rd
);
1391 gen_op_addl_T0_T1_cc();
1393 gen_op_addl_T0_T1();
1394 gen_movl_reg_T0(s
, rd
);
1398 gen_op_adcl_T0_T1_cc();
1400 gen_op_adcl_T0_T1();
1401 gen_movl_reg_T0(s
, rd
);
1405 gen_op_sbcl_T0_T1_cc();
1407 gen_op_sbcl_T0_T1();
1408 gen_movl_reg_T0(s
, rd
);
1412 gen_op_rscl_T0_T1_cc();
1414 gen_op_rscl_T0_T1();
1415 gen_movl_reg_T0(s
, rd
);
1419 gen_op_andl_T0_T1();
1420 gen_op_logic_T0_cc();
1425 gen_op_xorl_T0_T1();
1426 gen_op_logic_T0_cc();
1431 gen_op_subl_T0_T1_cc();
1436 gen_op_addl_T0_T1_cc();
1441 gen_movl_reg_T0(s
, rd
);
1443 gen_op_logic_T0_cc();
1446 if (logic_cc
&& rd
== 15) {
1447 /* MOVS r15, ... is used for exception return. */
1450 gen_op_movl_T0_T1();
1451 gen_exception_return(s
);
1453 gen_movl_reg_T1(s
, rd
);
1455 gen_op_logic_T1_cc();
1459 gen_op_bicl_T0_T1();
1460 gen_movl_reg_T0(s
, rd
);
1462 gen_op_logic_T0_cc();
1467 gen_movl_reg_T1(s
, rd
);
1469 gen_op_logic_T1_cc();
1473 /* other instructions */
1474 op1
= (insn
>> 24) & 0xf;
1478 /* multiplies, extra load/stores */
1479 sh
= (insn
>> 5) & 3;
1482 rd
= (insn
>> 16) & 0xf;
1483 rn
= (insn
>> 12) & 0xf;
1484 rs
= (insn
>> 8) & 0xf;
1486 if (((insn
>> 22) & 3) == 0) {
1488 gen_movl_T0_reg(s
, rs
);
1489 gen_movl_T1_reg(s
, rm
);
1491 if (insn
& (1 << 21)) {
1492 gen_movl_T1_reg(s
, rn
);
1493 gen_op_addl_T0_T1();
1495 if (insn
& (1 << 20))
1496 gen_op_logic_T0_cc();
1497 gen_movl_reg_T0(s
, rd
);
1500 gen_movl_T0_reg(s
, rs
);
1501 gen_movl_T1_reg(s
, rm
);
1502 if (insn
& (1 << 22))
1503 gen_op_imull_T0_T1();
1505 gen_op_mull_T0_T1();
1506 if (insn
& (1 << 21)) /* mult accumulate */
1507 gen_op_addq_T0_T1(rn
, rd
);
1508 if (!(insn
& (1 << 23))) { /* double accumulate */
1510 gen_op_addq_lo_T0_T1(rn
);
1511 gen_op_addq_lo_T0_T1(rd
);
1513 if (insn
& (1 << 20))
1515 gen_movl_reg_T0(s
, rn
);
1516 gen_movl_reg_T1(s
, rd
);
1519 rn
= (insn
>> 16) & 0xf;
1520 rd
= (insn
>> 12) & 0xf;
1521 if (insn
& (1 << 23)) {
1522 /* load/store exclusive */
1525 /* SWP instruction */
1528 gen_movl_T0_reg(s
, rm
);
1529 gen_movl_T1_reg(s
, rn
);
1530 if (insn
& (1 << 22)) {
1535 gen_movl_reg_T0(s
, rd
);
1540 /* Misc load/store */
1541 rn
= (insn
>> 16) & 0xf;
1542 rd
= (insn
>> 12) & 0xf;
1543 gen_movl_T1_reg(s
, rn
);
1544 if (insn
& (1 << 24))
1545 gen_add_datah_offset(s
, insn
, 0);
1547 if (insn
& (1 << 20)) {
1561 gen_movl_reg_T0(s
, rd
);
1562 } else if (sh
& 2) {
1566 gen_movl_T0_reg(s
, rd
);
1568 gen_op_addl_T1_im(4);
1569 gen_movl_T0_reg(s
, rd
+ 1);
1574 gen_movl_reg_T0(s
, rd
);
1575 gen_op_addl_T1_im(4);
1577 gen_movl_reg_T0(s
, rd
+ 1);
1579 address_offset
= -4;
1582 gen_movl_T0_reg(s
, rd
);
1585 if (!(insn
& (1 << 24))) {
1586 gen_add_datah_offset(s
, insn
, address_offset
);
1587 gen_movl_reg_T1(s
, rn
);
1588 } else if (insn
& (1 << 21)) {
1590 gen_op_addl_T1_im(address_offset
);
1591 gen_movl_reg_T1(s
, rn
);
1599 /* Check for undefined extension instructions
1600 * per the ARM Bible IE:
1601 * xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
1603 sh
= (0xf << 20) | (0xf << 4);
1604 if (op1
== 0x7 && ((insn
& sh
) == sh
))
1608 /* load/store byte/word */
1609 rn
= (insn
>> 16) & 0xf;
1610 rd
= (insn
>> 12) & 0xf;
1611 gen_movl_T1_reg(s
, rn
);
1612 i
= (IS_USER(s
) || (insn
& 0x01200000) == 0x00200000);
1613 if (insn
& (1 << 24))
1614 gen_add_data_offset(s
, insn
);
1615 if (insn
& (1 << 20)) {
1618 #if defined(CONFIG_USER_ONLY)
1619 if (insn
& (1 << 22))
1624 if (insn
& (1 << 22)) {
1628 gen_op_ldub_kernel();
1633 gen_op_ldl_kernel();
1639 gen_movl_reg_T0(s
, rd
);
1642 gen_movl_T0_reg(s
, rd
);
1643 #if defined(CONFIG_USER_ONLY)
1644 if (insn
& (1 << 22))
1649 if (insn
& (1 << 22)) {
1653 gen_op_stb_kernel();
1658 gen_op_stl_kernel();
1662 if (!(insn
& (1 << 24))) {
1663 gen_add_data_offset(s
, insn
);
1664 gen_movl_reg_T1(s
, rn
);
1665 } else if (insn
& (1 << 21))
1666 gen_movl_reg_T1(s
, rn
); {
1672 int j
, n
, user
, loaded_base
;
1673 /* load/store multiple words */
1674 /* XXX: store correct base if write back */
1676 if (insn
& (1 << 22)) {
1678 goto illegal_op
; /* only usable in supervisor mode */
1680 if ((insn
& (1 << 15)) == 0)
1683 rn
= (insn
>> 16) & 0xf;
1684 gen_movl_T1_reg(s
, rn
);
1686 /* compute total size */
1690 if (insn
& (1 << i
))
1693 /* XXX: test invalid n == 0 case ? */
1694 if (insn
& (1 << 23)) {
1695 if (insn
& (1 << 24)) {
1697 gen_op_addl_T1_im(4);
1699 /* post increment */
1702 if (insn
& (1 << 24)) {
1704 gen_op_addl_T1_im(-(n
* 4));
1706 /* post decrement */
1708 gen_op_addl_T1_im(-((n
- 1) * 4));
1713 if (insn
& (1 << i
)) {
1714 if (insn
& (1 << 20)) {
1720 gen_op_movl_user_T0(i
);
1721 } else if (i
== rn
) {
1722 gen_op_movl_T2_T0();
1725 gen_movl_reg_T0(s
, i
);
1730 /* special case: r15 = PC + 12 */
1731 val
= (long)s
->pc
+ 8;
1732 gen_op_movl_TN_im
[0](val
);
1734 gen_op_movl_T0_user(i
);
1736 gen_movl_T0_reg(s
, i
);
1741 /* no need to add after the last transfer */
1743 gen_op_addl_T1_im(4);
1746 if (insn
& (1 << 21)) {
1748 if (insn
& (1 << 23)) {
1749 if (insn
& (1 << 24)) {
1752 /* post increment */
1753 gen_op_addl_T1_im(4);
1756 if (insn
& (1 << 24)) {
1759 gen_op_addl_T1_im(-((n
- 1) * 4));
1761 /* post decrement */
1762 gen_op_addl_T1_im(-(n
* 4));
1765 gen_movl_reg_T1(s
, rn
);
1768 gen_op_movl_T0_T2();
1769 gen_movl_reg_T0(s
, rn
);
1771 if ((insn
& (1 << 22)) && !user
) {
1772 /* Restore CPSR from SPSR. */
1773 gen_op_movl_T0_spsr();
1774 gen_op_movl_cpsr_T0(0xffffffff);
1775 s
->is_jmp
= DISAS_UPDATE
;
1784 /* branch (and link) */
1785 val
= (int32_t)s
->pc
;
1786 if (insn
& (1 << 24)) {
1787 gen_op_movl_T0_im(val
);
1788 gen_op_movl_reg_TN
[0][14]();
1790 offset
= (((int32_t)insn
<< 8) >> 8);
1791 val
+= (offset
<< 2) + 4;
1799 op1
= (insn
>> 8) & 0xf;
1803 if (disas_vfp_insn (env
, s
, insn
))
1807 if (disas_cp15_insn (s
, insn
))
1811 /* unknown coprocessor. */
1817 gen_op_movl_T0_im((long)s
->pc
);
1818 gen_op_movl_reg_TN
[0][15]();
1820 s
->is_jmp
= DISAS_JUMP
;
1824 gen_op_movl_T0_im((long)s
->pc
- 4);
1825 gen_op_movl_reg_TN
[0][15]();
1826 gen_op_undef_insn();
1827 s
->is_jmp
= DISAS_JUMP
;
1833 static void disas_thumb_insn(DisasContext
*s
)
1835 uint32_t val
, insn
, op
, rm
, rn
, rd
, shift
, cond
;
1839 insn
= lduw_code(s
->pc
);
1842 switch (insn
>> 12) {
1845 op
= (insn
>> 11) & 3;
1848 rn
= (insn
>> 3) & 7;
1849 gen_movl_T0_reg(s
, rn
);
1850 if (insn
& (1 << 10)) {
1852 gen_op_movl_T1_im((insn
>> 6) & 7);
1855 rm
= (insn
>> 6) & 7;
1856 gen_movl_T1_reg(s
, rm
);
1858 if (insn
& (1 << 9))
1859 gen_op_subl_T0_T1_cc();
1861 gen_op_addl_T0_T1_cc();
1862 gen_movl_reg_T0(s
, rd
);
1864 /* shift immediate */
1865 rm
= (insn
>> 3) & 7;
1866 shift
= (insn
>> 6) & 0x1f;
1867 gen_movl_T0_reg(s
, rm
);
1868 gen_shift_T0_im_thumb
[op
](shift
);
1869 gen_movl_reg_T0(s
, rd
);
1873 /* arithmetic large immediate */
1874 op
= (insn
>> 11) & 3;
1875 rd
= (insn
>> 8) & 0x7;
1877 gen_op_movl_T0_im(insn
& 0xff);
1879 gen_movl_T0_reg(s
, rd
);
1880 gen_op_movl_T1_im(insn
& 0xff);
1884 gen_op_logic_T0_cc();
1887 gen_op_subl_T0_T1_cc();
1890 gen_op_addl_T0_T1_cc();
1893 gen_op_subl_T0_T1_cc();
1897 gen_movl_reg_T0(s
, rd
);
1900 if (insn
& (1 << 11)) {
1901 rd
= (insn
>> 8) & 7;
1902 /* load pc-relative. Bit 1 of PC is ignored. */
1903 val
= s
->pc
+ 2 + ((insn
& 0xff) * 4);
1904 val
&= ~(uint32_t)2;
1905 gen_op_movl_T1_im(val
);
1907 gen_movl_reg_T0(s
, rd
);
1910 if (insn
& (1 << 10)) {
1911 /* data processing extended or blx */
1912 rd
= (insn
& 7) | ((insn
>> 4) & 8);
1913 rm
= (insn
>> 3) & 0xf;
1914 op
= (insn
>> 8) & 3;
1917 gen_movl_T0_reg(s
, rd
);
1918 gen_movl_T1_reg(s
, rm
);
1919 gen_op_addl_T0_T1();
1920 gen_movl_reg_T0(s
, rd
);
1923 gen_movl_T0_reg(s
, rd
);
1924 gen_movl_T1_reg(s
, rm
);
1925 gen_op_subl_T0_T1_cc();
1927 case 2: /* mov/cpy */
1928 gen_movl_T0_reg(s
, rm
);
1929 gen_movl_reg_T0(s
, rd
);
1931 case 3:/* branch [and link] exchange thumb register */
1932 if (insn
& (1 << 7)) {
1933 val
= (uint32_t)s
->pc
| 1;
1934 gen_op_movl_T1_im(val
);
1935 gen_movl_reg_T1(s
, 14);
1937 gen_movl_T0_reg(s
, rm
);
1944 /* data processing register */
1946 rm
= (insn
>> 3) & 7;
1947 op
= (insn
>> 6) & 0xf;
1948 if (op
== 2 || op
== 3 || op
== 4 || op
== 7) {
1949 /* the shift/rotate ops want the operands backwards */
1958 if (op
== 9) /* neg */
1959 gen_op_movl_T0_im(0);
1960 else if (op
!= 0xf) /* mvn doesn't read its first operand */
1961 gen_movl_T0_reg(s
, rd
);
1963 gen_movl_T1_reg(s
, rm
);
1966 gen_op_andl_T0_T1();
1967 gen_op_logic_T0_cc();
1970 gen_op_xorl_T0_T1();
1971 gen_op_logic_T0_cc();
1974 gen_op_shll_T1_T0_cc();
1975 gen_op_logic_T1_cc();
1978 gen_op_shrl_T1_T0_cc();
1979 gen_op_logic_T1_cc();
1982 gen_op_sarl_T1_T0_cc();
1983 gen_op_logic_T1_cc();
1986 gen_op_adcl_T0_T1_cc();
1989 gen_op_sbcl_T0_T1_cc();
1992 gen_op_rorl_T1_T0_cc();
1993 gen_op_logic_T1_cc();
1996 gen_op_andl_T0_T1();
1997 gen_op_logic_T0_cc();
2001 gen_op_subl_T0_T1_cc();
2004 gen_op_subl_T0_T1_cc();
2008 gen_op_addl_T0_T1_cc();
2013 gen_op_logic_T0_cc();
2016 gen_op_mull_T0_T1();
2017 gen_op_logic_T0_cc();
2020 gen_op_bicl_T0_T1();
2021 gen_op_logic_T0_cc();
2025 gen_op_logic_T1_cc();
2032 gen_movl_reg_T1(s
, rm
);
2034 gen_movl_reg_T0(s
, rd
);
2039 /* load/store register offset. */
2041 rn
= (insn
>> 3) & 7;
2042 rm
= (insn
>> 6) & 7;
2043 op
= (insn
>> 9) & 7;
2044 gen_movl_T1_reg(s
, rn
);
2045 gen_movl_T2_reg(s
, rm
);
2046 gen_op_addl_T1_T2();
2048 if (op
< 3) /* store */
2049 gen_movl_T0_reg(s
, rd
);
2077 if (op
>= 3) /* load */
2078 gen_movl_reg_T0(s
, rd
);
2082 /* load/store word immediate offset */
2084 rn
= (insn
>> 3) & 7;
2085 gen_movl_T1_reg(s
, rn
);
2086 val
= (insn
>> 4) & 0x7c;
2087 gen_op_movl_T2_im(val
);
2088 gen_op_addl_T1_T2();
2090 if (insn
& (1 << 11)) {
2093 gen_movl_reg_T0(s
, rd
);
2096 gen_movl_T0_reg(s
, rd
);
2102 /* load/store byte immediate offset */
2104 rn
= (insn
>> 3) & 7;
2105 gen_movl_T1_reg(s
, rn
);
2106 val
= (insn
>> 6) & 0x1f;
2107 gen_op_movl_T2_im(val
);
2108 gen_op_addl_T1_T2();
2110 if (insn
& (1 << 11)) {
2113 gen_movl_reg_T0(s
, rd
);
2116 gen_movl_T0_reg(s
, rd
);
2122 /* load/store halfword immediate offset */
2124 rn
= (insn
>> 3) & 7;
2125 gen_movl_T1_reg(s
, rn
);
2126 val
= (insn
>> 5) & 0x3e;
2127 gen_op_movl_T2_im(val
);
2128 gen_op_addl_T1_T2();
2130 if (insn
& (1 << 11)) {
2133 gen_movl_reg_T0(s
, rd
);
2136 gen_movl_T0_reg(s
, rd
);
2142 /* load/store from stack */
2143 rd
= (insn
>> 8) & 7;
2144 gen_movl_T1_reg(s
, 13);
2145 val
= (insn
& 0xff) * 4;
2146 gen_op_movl_T2_im(val
);
2147 gen_op_addl_T1_T2();
2149 if (insn
& (1 << 11)) {
2152 gen_movl_reg_T0(s
, rd
);
2155 gen_movl_T0_reg(s
, rd
);
2161 /* add to high reg */
2162 rd
= (insn
>> 8) & 7;
2163 if (insn
& (1 << 11)) {
2165 gen_movl_T0_reg(s
, 13);
2167 /* PC. bit 1 is ignored. */
2168 gen_op_movl_T0_im((s
->pc
+ 2) & ~(uint32_t)2);
2170 val
= (insn
& 0xff) * 4;
2171 gen_op_movl_T1_im(val
);
2172 gen_op_addl_T0_T1();
2173 gen_movl_reg_T0(s
, rd
);
2178 op
= (insn
>> 8) & 0xf;
2181 /* adjust stack pointer */
2182 gen_movl_T1_reg(s
, 13);
2183 val
= (insn
& 0x7f) * 4;
2184 if (insn
& (1 << 7))
2185 val
= -(int32_t)val
;
2186 gen_op_movl_T2_im(val
);
2187 gen_op_addl_T1_T2();
2188 gen_movl_reg_T1(s
, 13);
2191 case 4: case 5: case 0xc: case 0xd:
2193 gen_movl_T1_reg(s
, 13);
2194 if (insn
& (1 << 8))
2198 for (i
= 0; i
< 8; i
++) {
2199 if (insn
& (1 << i
))
2202 if ((insn
& (1 << 11)) == 0) {
2203 gen_op_movl_T2_im(-offset
);
2204 gen_op_addl_T1_T2();
2206 gen_op_movl_T2_im(4);
2207 for (i
= 0; i
< 8; i
++) {
2208 if (insn
& (1 << i
)) {
2209 if (insn
& (1 << 11)) {
2212 gen_movl_reg_T0(s
, i
);
2215 gen_movl_T0_reg(s
, i
);
2218 /* advance to the next address. */
2219 gen_op_addl_T1_T2();
2222 if (insn
& (1 << 8)) {
2223 if (insn
& (1 << 11)) {
2226 /* don't set the pc until the rest of the instruction
2230 gen_movl_T0_reg(s
, 14);
2233 gen_op_addl_T1_T2();
2235 if ((insn
& (1 << 11)) == 0) {
2236 gen_op_movl_T2_im(-offset
);
2237 gen_op_addl_T1_T2();
2239 /* write back the new stack pointer */
2240 gen_movl_reg_T1(s
, 13);
2241 /* set the new PC value */
2242 if ((insn
& 0x0900) == 0x0900)
2246 case 0xe: /* bkpt */
2247 gen_op_movl_T0_im((long)s
->pc
- 2);
2248 gen_op_movl_reg_TN
[0][15]();
2250 s
->is_jmp
= DISAS_JUMP
;
2259 /* load/store multiple */
2260 rn
= (insn
>> 8) & 0x7;
2261 gen_movl_T1_reg(s
, rn
);
2262 gen_op_movl_T2_im(4);
2263 for (i
= 0; i
< 8; i
++) {
2264 if (insn
& (1 << i
)) {
2265 if (insn
& (1 << 11)) {
2268 gen_movl_reg_T0(s
, i
);
2271 gen_movl_T0_reg(s
, i
);
2274 /* advance to the next address */
2275 gen_op_addl_T1_T2();
2278 /* Base register writeback. */
2279 if ((insn
& (1 << rn
)) == 0)
2280 gen_movl_reg_T1(s
, rn
);
2284 /* conditional branch or swi */
2285 cond
= (insn
>> 8) & 0xf;
2291 gen_op_movl_T0_im((long)s
->pc
| 1);
2292 /* Don't set r15. */
2293 gen_op_movl_reg_TN
[0][15]();
2295 s
->is_jmp
= DISAS_JUMP
;
2298 /* generate a conditional jump to next instruction */
2299 s
->condlabel
= gen_new_label();
2300 gen_test_cc
[cond
^ 1](s
->condlabel
);
2302 //gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
2303 //s->is_jmp = DISAS_JUMP_NEXT;
2304 gen_movl_T1_reg(s
, 15);
2306 /* jump to the offset */
2307 val
= (uint32_t)s
->pc
+ 2;
2308 offset
= ((int32_t)insn
<< 24) >> 24;
2314 /* unconditional branch */
2315 if (insn
& (1 << 11)) {
2316 /* Second half of blx. */
2317 offset
= ((insn
& 0x7ff) << 1);
2318 gen_movl_T0_reg(s
, 14);
2319 gen_op_movl_T1_im(offset
);
2320 gen_op_addl_T0_T1();
2321 gen_op_movl_T1_im(0xfffffffc);
2322 gen_op_andl_T0_T1();
2324 val
= (uint32_t)s
->pc
;
2325 gen_op_movl_T1_im(val
| 1);
2326 gen_movl_reg_T1(s
, 14);
2330 val
= (uint32_t)s
->pc
;
2331 offset
= ((int32_t)insn
<< 21) >> 21;
2332 val
+= (offset
<< 1) + 2;
2337 /* branch and link [and switch to arm] */
2338 if ((s
->pc
& ~TARGET_PAGE_MASK
) == 0) {
2339 /* Instruction spans a page boundary. Implement it as two
2340 16-bit instructions in case the second half causes an
2342 offset
= ((int32_t)insn
<< 21) >> 9;
2343 val
= s
->pc
+ 2 + offset
;
2344 gen_op_movl_T0_im(val
);
2345 gen_movl_reg_T0(s
, 14);
2348 if (insn
& (1 << 11)) {
2349 /* Second half of bl. */
2350 offset
= ((insn
& 0x7ff) << 1) | 1;
2351 gen_movl_T0_reg(s
, 14);
2352 gen_op_movl_T1_im(offset
);
2353 gen_op_addl_T0_T1();
2355 val
= (uint32_t)s
->pc
;
2356 gen_op_movl_T1_im(val
| 1);
2357 gen_movl_reg_T1(s
, 14);
2361 offset
= ((int32_t)insn
<< 21) >> 10;
2362 insn
= lduw_code(s
->pc
);
2363 offset
|= insn
& 0x7ff;
2365 val
= (uint32_t)s
->pc
+ 2;
2366 gen_op_movl_T1_im(val
| 1);
2367 gen_movl_reg_T1(s
, 14);
2370 if (insn
& (1 << 12)) {
2375 val
&= ~(uint32_t)2;
2376 gen_op_movl_T0_im(val
);
2382 gen_op_movl_T0_im((long)s
->pc
- 2);
2383 gen_op_movl_reg_TN
[0][15]();
2384 gen_op_undef_insn();
2385 s
->is_jmp
= DISAS_JUMP
;
2388 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
2389 basic block 'tb'. If search_pc is TRUE, also generate PC
2390 information for each intermediate instruction. */
2391 static inline int gen_intermediate_code_internal(CPUState
*env
,
2392 TranslationBlock
*tb
,
2395 DisasContext dc1
, *dc
= &dc1
;
2396 uint16_t *gen_opc_end
;
2398 target_ulong pc_start
;
2399 uint32_t next_page_start
;
2401 /* generate intermediate code */
2406 gen_opc_ptr
= gen_opc_buf
;
2407 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
2408 gen_opparam_ptr
= gen_opparam_buf
;
2410 dc
->is_jmp
= DISAS_NEXT
;
2412 dc
->singlestep_enabled
= env
->singlestep_enabled
;
2414 dc
->thumb
= env
->thumb
;
2416 #if !defined(CONFIG_USER_ONLY)
2417 dc
->user
= (env
->uncached_cpsr
& 0x1f) == ARM_CPU_MODE_USR
;
2419 next_page_start
= (pc_start
& TARGET_PAGE_MASK
) + TARGET_PAGE_SIZE
;
2423 if (env
->nb_breakpoints
> 0) {
2424 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
2425 if (env
->breakpoints
[j
] == dc
->pc
) {
2426 gen_op_movl_T0_im((long)dc
->pc
);
2427 gen_op_movl_reg_TN
[0][15]();
2429 dc
->is_jmp
= DISAS_JUMP
;
2435 j
= gen_opc_ptr
- gen_opc_buf
;
2439 gen_opc_instr_start
[lj
++] = 0;
2441 gen_opc_pc
[lj
] = dc
->pc
;
2442 gen_opc_instr_start
[lj
] = 1;
2446 disas_thumb_insn(dc
);
2448 disas_arm_insn(env
, dc
);
2450 if (dc
->condjmp
&& !dc
->is_jmp
) {
2451 gen_set_label(dc
->condlabel
);
2454 /* Terminate the TB on memory ops if watchpoints are present. */
 2455 /* FIXME: This should be replaced by the deterministic execution
2456 * IRQ raising bits. */
2457 if (dc
->is_mem
&& env
->nb_watchpoints
)
 2460 /* Translation stops when a conditional branch is encountered.
2461 * Otherwise the subsequent code could get translated several times.
2462 * Also stop translation when a page boundary is reached. This
 2463 * ensures prefetch aborts occur at the right place. */
2464 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
2465 !env
->singlestep_enabled
&&
2466 dc
->pc
< next_page_start
);
2467 /* At this stage dc->condjmp will only be set when the skipped
2468 * instruction was a conditional branch, and the PC has already been
2470 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
2471 /* Make sure the pc is updated, and raise a debug exception. */
2474 gen_set_label(dc
->condlabel
);
2476 if (dc
->condjmp
|| !dc
->is_jmp
) {
2477 gen_op_movl_T0_im((long)dc
->pc
);
2478 gen_op_movl_reg_TN
[0][15]();
2483 switch(dc
->is_jmp
) {
2485 gen_goto_tb(dc
, 1, dc
->pc
);
2490 /* indicate that the hash table must be used to find the next TB */
2495 /* nothing more to generate */
2499 gen_set_label(dc
->condlabel
);
2500 gen_goto_tb(dc
, 1, dc
->pc
);
2504 *gen_opc_ptr
= INDEX_op_end
;
2507 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
2508 fprintf(logfile
, "----------------\n");
2509 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
2510 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, env
->thumb
);
2511 fprintf(logfile
, "\n");
2512 if (loglevel
& (CPU_LOG_TB_OP
)) {
2513 fprintf(logfile
, "OP:\n");
2514 dump_ops(gen_opc_buf
, gen_opparam_buf
);
2515 fprintf(logfile
, "\n");
2520 j
= gen_opc_ptr
- gen_opc_buf
;
2523 gen_opc_instr_start
[lj
++] = 0;
2526 tb
->size
= dc
->pc
- pc_start
;
2531 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
2533 return gen_intermediate_code_internal(env
, tb
, 0);
2536 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
2538 return gen_intermediate_code_internal(env
, tb
, 1);
/* Human-readable names for the ARM CPU modes, indexed by the low 4 bits
 * of the PSR mode field (used by cpu_dump_state).  Encodings with no
 * architecturally defined mode are shown as "???". */
static const char *cpu_mode_names[16] = {
    "usr", "fiq", "irq", "svc", "???", "???", "???", "abt",
    "???", "???", "???", "und", "???", "???", "???", "sys"
};
2545 void cpu_dump_state(CPUState
*env
, FILE *f
,
2546 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
2555 /* ??? This assumes float64 and double have the same layout.
2556 Oh well, it's only debug dumps. */
2564 cpu_fprintf(f
, "R%02d=%08x", i
, env
->regs
[i
]);
2566 cpu_fprintf(f
, "\n");
2568 cpu_fprintf(f
, " ");
2570 psr
= cpsr_read(env
);
2571 cpu_fprintf(f
, "PSR=%08x %c%c%c%c %c %s%d %x\n",
2573 psr
& (1 << 31) ? 'N' : '-',
2574 psr
& (1 << 30) ? 'Z' : '-',
2575 psr
& (1 << 29) ? 'C' : '-',
2576 psr
& (1 << 28) ? 'V' : '-',
2577 psr
& CPSR_T
? 'T' : 'A',
2578 cpu_mode_names
[psr
& 0xf], (psr
& 0x10) ? 32 : 26);
2580 for (i
= 0; i
< 16; i
++) {
2581 d
.d
= env
->vfp
.regs
[i
];
2585 cpu_fprintf(f
, "s%02d=%08x(%8g) s%02d=%08x(%8g) d%02d=%08x%08x(%8g)\n",
2586 i
* 2, (int)s0
.i
, s0
.s
,
2587 i
* 2 + 1, (int)s1
.i
, s1
.s
,
2588 i
, (int)(uint32_t)d
.l
.upper
, (int)(uint32_t)d
.l
.lower
,
2591 cpu_fprintf(f
, "FPSCR: %08x\n", (int)env
->vfp
.xregs
[ARM_VFP_FPSCR
]);