4 Copyright (C) 2003 Thomas M. Ogrisegg <tom@fnord.at>
5 Copyright (C) 2003 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 NPC/PC static optimisations (use JUMP_TB when possible)
27 Privileged instructions
28 Coprocessor-Instructions
29 Optimize synthetic instructions
30 Optional alignment and privileged instruction check
45 #define DYNAMIC_PC 1 /* dynamic pc value */
46 #define JUMP_PC 2 /* dynamic pc value which takes only two values
47 according to jump_pc[T2] */
49 typedef struct DisasContext
{
50 target_ulong pc
; /* current Program Counter: integer or DYNAMIC_PC */
51 target_ulong npc
; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
52 target_ulong jump_pc
[2]; /* used when JUMP_PC pc value is used */
55 struct TranslationBlock
*tb
;
58 static uint16_t *gen_opc_ptr
;
59 static uint32_t *gen_opparam_ptr
;
64 #define DEF(s,n,copy_size) INDEX_op_ ## s,
72 #define GET_FIELD(X, FROM, TO) \
73 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
75 #define IS_IMM (insn & (1<<13))
77 static void disas_sparc_insn(DisasContext
* dc
);
79 static GenOpFunc
*gen_op_movl_TN_reg
[2][32] = {
150 static GenOpFunc
*gen_op_movl_reg_TN
[3][32] = {
255 static GenOpFunc1
*gen_op_movl_TN_im
[3] = {
261 #define GEN32(func, NAME) \
262 static GenOpFunc *NAME ## _table [32] = { \
263 NAME ## 0, NAME ## 1, NAME ## 2, NAME ## 3, \
264 NAME ## 4, NAME ## 5, NAME ## 6, NAME ## 7, \
265 NAME ## 8, NAME ## 9, NAME ## 10, NAME ## 11, \
266 NAME ## 12, NAME ## 13, NAME ## 14, NAME ## 15, \
267 NAME ## 16, NAME ## 17, NAME ## 18, NAME ## 19, \
268 NAME ## 20, NAME ## 21, NAME ## 22, NAME ## 23, \
269 NAME ## 24, NAME ## 25, NAME ## 26, NAME ## 27, \
270 NAME ## 28, NAME ## 29, NAME ## 30, NAME ## 31, \
272 static inline void func(int n) \
274 NAME ## _table[n](); \
277 /* floating point registers moves */
278 GEN32(gen_op_load_fpr_FT0
, gen_op_load_fpr_FT0_fprf
);
279 GEN32(gen_op_load_fpr_FT1
, gen_op_load_fpr_FT1_fprf
);
280 GEN32(gen_op_load_fpr_FT2
, gen_op_load_fpr_FT2_fprf
);
281 GEN32(gen_op_store_FT0_fpr
, gen_op_store_FT0_fpr_fprf
);
282 GEN32(gen_op_store_FT1_fpr
, gen_op_store_FT1_fpr_fprf
);
283 GEN32(gen_op_store_FT2_fpr
, gen_op_store_FT2_fpr_fprf
);
285 GEN32(gen_op_load_fpr_DT0
, gen_op_load_fpr_DT0_fprf
);
286 GEN32(gen_op_load_fpr_DT1
, gen_op_load_fpr_DT1_fprf
);
287 GEN32(gen_op_load_fpr_DT2
, gen_op_load_fpr_DT2_fprf
);
288 GEN32(gen_op_store_DT0_fpr
, gen_op_store_DT0_fpr_fprf
);
289 GEN32(gen_op_store_DT1_fpr
, gen_op_store_DT1_fpr_fprf
);
290 GEN32(gen_op_store_DT2_fpr
, gen_op_store_DT2_fpr_fprf
);
292 #if defined(CONFIG_USER_ONLY)
293 #define gen_op_ldst(name) gen_op_##name##_raw()
294 #define OP_LD_TABLE(width) \
295 static void gen_op_##width##a(int insn, int is_ld, int size, int sign) \
298 #define supervisor(dc) 0
300 #define gen_op_ldst(name) (*gen_op_##name[dc->mem_idx])()
301 #define OP_LD_TABLE(width) \
302 static GenOpFunc *gen_op_##width[] = { \
303 &gen_op_##width##_user, \
304 &gen_op_##width##_kernel, \
307 static void gen_op_##width##a(int insn, int is_ld, int size, int sign) \
311 asi = GET_FIELD(insn, 19, 26); \
313 case 10: /* User data access */ \
314 gen_op_##width##_user(); \
316 case 11: /* Supervisor data access */ \
317 gen_op_##width##_kernel(); \
319 case 0x20 ... 0x2f: /* MMU passthrough */ \
321 gen_op_ld_asi(asi, size, sign); \
323 gen_op_st_asi(asi, size, sign); \
327 gen_op_ld_asi(asi, size, sign); \
329 gen_op_st_asi(asi, size, sign); \
334 #define supervisor(dc) (dc->mem_idx == 1)
354 static inline void gen_movl_imm_TN(int reg
, int imm
)
356 gen_op_movl_TN_im
[reg
] (imm
);
/* Load the immediate 'val' into temporary T1. */
static inline void gen_movl_imm_T1(int val)
{
    gen_movl_imm_TN(1, val);
}
/* Load the immediate 'val' into temporary T0. */
static inline void gen_movl_imm_T0(int val)
{
    gen_movl_imm_TN(0, val);
}
369 static inline void gen_movl_reg_TN(int reg
, int t
)
372 gen_op_movl_reg_TN
[t
][reg
] ();
374 gen_movl_imm_TN(t
, 0);
/* Copy window register 'reg' into T0. */
static inline void gen_movl_reg_T0(int reg)
{
    gen_movl_reg_TN(reg, 0);
}
/* Copy window register 'reg' into T1. */
static inline void gen_movl_reg_T1(int reg)
{
    gen_movl_reg_TN(reg, 1);
}
/* Copy window register 'reg' into T2. */
static inline void gen_movl_reg_T2(int reg)
{
    gen_movl_reg_TN(reg, 2);
}
392 static inline void gen_movl_TN_reg(int reg
, int t
)
395 gen_op_movl_TN_reg
[t
][reg
] ();
/* Store T0 into window register 'reg'. */
static inline void gen_movl_T0_reg(int reg)
{
    gen_movl_TN_reg(reg, 0);
}
/* Store T1 into window register 'reg'. */
static inline void gen_movl_T1_reg(int reg)
{
    gen_movl_TN_reg(reg, 1);
}
408 /* call this function before using T2 as it may have been set for a jump */
409 static inline void flush_T2(DisasContext
* dc
)
411 if (dc
->npc
== JUMP_PC
) {
412 gen_op_generic_branch(dc
->jump_pc
[0], dc
->jump_pc
[1]);
413 dc
->npc
= DYNAMIC_PC
;
417 static inline void save_npc(DisasContext
* dc
)
419 if (dc
->npc
== JUMP_PC
) {
420 gen_op_generic_branch(dc
->jump_pc
[0], dc
->jump_pc
[1]);
421 dc
->npc
= DYNAMIC_PC
;
422 } else if (dc
->npc
!= DYNAMIC_PC
) {
423 gen_op_movl_npc_im(dc
->npc
);
427 static inline void save_state(DisasContext
* dc
)
429 gen_op_jmp_im((uint32_t)dc
->pc
);
433 static void gen_cond(int cond
)
488 static void gen_fcond(int cond
)
543 static void do_branch(DisasContext
* dc
, uint32_t target
, uint32_t insn
)
545 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
546 target
+= (uint32_t) dc
->pc
;
548 /* unconditional not taken */
550 dc
->pc
= dc
->npc
+ 4;
551 dc
->npc
= dc
->pc
+ 4;
554 dc
->npc
= dc
->pc
+ 4;
556 } else if (cond
== 0x8) {
557 /* unconditional taken */
560 dc
->npc
= dc
->pc
+ 4;
569 gen_op_branch_a((long)dc
->tb
, target
, dc
->npc
);
573 dc
->jump_pc
[0] = target
;
574 dc
->jump_pc
[1] = dc
->npc
+ 4;
580 static void do_fbranch(DisasContext
* dc
, uint32_t target
, uint32_t insn
)
582 unsigned int cond
= GET_FIELD(insn
, 3, 6), a
= (insn
& (1 << 29));
583 target
+= (uint32_t) dc
->pc
;
585 /* unconditional not taken */
587 dc
->pc
= dc
->npc
+ 4;
588 dc
->npc
= dc
->pc
+ 4;
591 dc
->npc
= dc
->pc
+ 4;
593 } else if (cond
== 0x8) {
594 /* unconditional taken */
597 dc
->npc
= dc
->pc
+ 4;
606 gen_op_branch_a((long)dc
->tb
, target
, dc
->npc
);
610 dc
->jump_pc
[0] = target
;
611 dc
->jump_pc
[1] = dc
->npc
+ 4;
617 static void gen_debug(DisasContext
*s
, uint32_t pc
)
624 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
/*
 * Sign-extend the low 'len' bits of 'x' to a full int.
 *
 * 'len' is the bit width of the field, as passed by GET_FIELDs
 * (len == (b) - (a) + 1).  Out-of-range widths (<= 0 or >= 32) return
 * x unchanged.
 *
 * The code as visible shifted by 'len' itself instead of (32 - len),
 * which destroys the value rather than sign-extending it, and the
 * shift-based idiom left-shifts a signed int (undefined behavior when
 * the sign bit is set).  Rewritten with unsigned arithmetic, which is
 * fully defined.
 */
static int sign_extend(int x, int len)
{
    uint32_t u, field_mask, sign_bit;

    if (len <= 0 || len >= 32)
        return x;
    field_mask = (1u << len) - 1u;
    u = (uint32_t)x & field_mask;       /* keep only the field bits */
    sign_bit = 1u << (len - 1);
    if (u & sign_bit)
        u |= ~field_mask;               /* replicate the sign bit upward */
    return (int)u;
}
632 static void disas_sparc_insn(DisasContext
* dc
)
634 unsigned int insn
, opc
, rs1
, rs2
, rd
;
636 insn
= ldl_code((uint8_t *)dc
->pc
);
637 opc
= GET_FIELD(insn
, 0, 1);
639 rd
= GET_FIELD(insn
, 2, 6);
641 case 0: /* branches/sethi */
643 unsigned int xop
= GET_FIELD(insn
, 7, 9);
645 target
= GET_FIELD(insn
, 10, 31);
648 case 0x1: /* UNIMPL */
655 target
= sign_extend(target
, 22);
656 do_branch(dc
, target
, insn
);
659 case 0x6: /* FBN+x */
661 #if !defined(CONFIG_USER_ONLY)
662 gen_op_trap_ifnofpu();
665 target
= sign_extend(target
, 22);
666 do_fbranch(dc
, target
, insn
);
669 case 0x4: /* SETHI */
674 gen_movl_imm_T0(target
<< 10);
685 unsigned int target
= GET_FIELDs(insn
, 2, 31) << 2;
687 gen_op_movl_T0_im((long) (dc
->pc
));
689 target
= dc
->pc
+ target
;
694 case 2: /* FPU & Logical Operations */
696 unsigned int xop
= GET_FIELD(insn
, 7, 12);
697 if (xop
== 0x3a) { /* generate trap */
699 rs1
= GET_FIELD(insn
, 13, 17);
700 gen_movl_reg_T0(rs1
);
702 rs2
= GET_FIELD(insn
, 25, 31);
706 gen_movl_imm_T1(rs2
);
712 rs2
= GET_FIELD(insn
, 27, 31);
716 gen_movl_reg_T1(rs2
);
723 cond
= GET_FIELD(insn
, 3, 6);
732 } else if (xop
== 0x28) {
733 rs1
= GET_FIELD(insn
, 13, 17);
740 break; /* no effect? */
744 #if !defined(CONFIG_USER_ONLY)
745 } else if (xop
== 0x29) {
751 } else if (xop
== 0x2a) {
757 } else if (xop
== 0x2b) {
764 } else if (xop
== 0x34) { /* FPU Operations */
765 #if !defined(CONFIG_USER_ONLY)
766 gen_op_trap_ifnofpu();
768 rs1
= GET_FIELD(insn
, 13, 17);
769 rs2
= GET_FIELD(insn
, 27, 31);
770 xop
= GET_FIELD(insn
, 18, 26);
772 case 0x1: /* fmovs */
773 gen_op_load_fpr_FT0(rs2
);
774 gen_op_store_FT0_fpr(rd
);
776 case 0x5: /* fnegs */
777 gen_op_load_fpr_FT1(rs2
);
779 gen_op_store_FT0_fpr(rd
);
781 case 0x9: /* fabss */
782 gen_op_load_fpr_FT1(rs2
);
784 gen_op_store_FT0_fpr(rd
);
786 case 0x29: /* fsqrts */
787 gen_op_load_fpr_FT1(rs2
);
789 gen_op_store_FT0_fpr(rd
);
791 case 0x2a: /* fsqrtd */
792 gen_op_load_fpr_DT1(rs2
);
794 gen_op_store_DT0_fpr(rd
);
796 case 0x2b: /* fsqrtq */
799 gen_op_load_fpr_FT0(rs1
);
800 gen_op_load_fpr_FT1(rs2
);
802 gen_op_store_FT0_fpr(rd
);
805 gen_op_load_fpr_DT0(rs1
);
806 gen_op_load_fpr_DT1(rs2
);
808 gen_op_store_DT0_fpr(rd
);
810 case 0x43: /* faddq */
813 gen_op_load_fpr_FT0(rs1
);
814 gen_op_load_fpr_FT1(rs2
);
816 gen_op_store_FT0_fpr(rd
);
819 gen_op_load_fpr_DT0(rs1
);
820 gen_op_load_fpr_DT1(rs2
);
822 gen_op_store_DT0_fpr(rd
);
824 case 0x47: /* fsubq */
827 gen_op_load_fpr_FT0(rs1
);
828 gen_op_load_fpr_FT1(rs2
);
830 gen_op_store_FT0_fpr(rd
);
833 gen_op_load_fpr_DT0(rs1
);
834 gen_op_load_fpr_DT1(rs2
);
836 gen_op_store_DT0_fpr(rd
);
838 case 0x4b: /* fmulq */
841 gen_op_load_fpr_FT0(rs1
);
842 gen_op_load_fpr_FT1(rs2
);
844 gen_op_store_FT0_fpr(rd
);
847 gen_op_load_fpr_DT0(rs1
);
848 gen_op_load_fpr_DT1(rs2
);
850 gen_op_store_DT0_fpr(rd
);
852 case 0x4f: /* fdivq */
855 gen_op_load_fpr_FT0(rs1
);
856 gen_op_load_fpr_FT1(rs2
);
858 gen_op_store_DT0_fpr(rd
);
860 case 0x6e: /* fdmulq */
863 gen_op_load_fpr_FT1(rs2
);
865 gen_op_store_FT0_fpr(rd
);
868 gen_op_load_fpr_DT1(rs2
);
870 gen_op_store_FT0_fpr(rd
);
872 case 0xc7: /* fqtos */
875 gen_op_load_fpr_FT1(rs2
);
877 gen_op_store_DT0_fpr(rd
);
880 gen_op_load_fpr_FT1(rs2
);
882 gen_op_store_DT0_fpr(rd
);
884 case 0xcb: /* fqtod */
886 case 0xcc: /* fitoq */
888 case 0xcd: /* fstoq */
890 case 0xce: /* fdtoq */
893 gen_op_load_fpr_FT1(rs2
);
895 gen_op_store_FT0_fpr(rd
);
898 gen_op_load_fpr_DT1(rs2
);
900 gen_op_store_FT0_fpr(rd
);
902 case 0xd3: /* fqtoi */
907 } else if (xop
== 0x35) { /* FPU Operations */
908 #if !defined(CONFIG_USER_ONLY)
909 gen_op_trap_ifnofpu();
911 rs1
= GET_FIELD(insn
, 13, 17);
912 rs2
= GET_FIELD(insn
, 27, 31);
913 xop
= GET_FIELD(insn
, 18, 26);
916 gen_op_load_fpr_FT0(rs1
);
917 gen_op_load_fpr_FT1(rs2
);
921 gen_op_load_fpr_DT0(rs1
);
922 gen_op_load_fpr_DT1(rs2
);
925 case 0x53: /* fcmpq */
927 case 0x55: /* fcmpes */
928 gen_op_load_fpr_FT0(rs1
);
929 gen_op_load_fpr_FT1(rs2
);
930 gen_op_fcmps(); /* XXX should trap if qNaN or sNaN */
932 case 0x56: /* fcmped */
933 gen_op_load_fpr_DT0(rs1
);
934 gen_op_load_fpr_DT1(rs2
);
935 gen_op_fcmpd(); /* XXX should trap if qNaN or sNaN */
937 case 0x57: /* fcmpeq */
943 } else if (xop
== 0x2) {
946 rs1
= GET_FIELD(insn
, 13, 17);
948 // or %g0, x, y -> mov T1, x; mov y, T1
949 if (IS_IMM
) { /* immediate */
950 rs2
= GET_FIELDs(insn
, 19, 31);
951 gen_movl_imm_T1(rs2
);
952 } else { /* register */
953 rs2
= GET_FIELD(insn
, 27, 31);
954 gen_movl_reg_T1(rs2
);
958 gen_movl_reg_T0(rs1
);
959 if (IS_IMM
) { /* immediate */
960 // or x, #0, y -> mov T1, x; mov y, T1
961 rs2
= GET_FIELDs(insn
, 19, 31);
963 gen_movl_imm_T1(rs2
);
966 } else { /* register */
967 // or x, %g0, y -> mov T1, x; mov y, T1
968 rs2
= GET_FIELD(insn
, 27, 31);
970 gen_movl_reg_T1(rs2
);
977 } else if (xop
< 0x38) {
978 rs1
= GET_FIELD(insn
, 13, 17);
979 gen_movl_reg_T0(rs1
);
980 if (IS_IMM
) { /* immediate */
981 rs2
= GET_FIELDs(insn
, 19, 31);
982 gen_movl_imm_T1(rs2
);
983 } else { /* register */
984 rs2
= GET_FIELD(insn
, 27, 31);
985 gen_movl_reg_T1(rs2
);
988 switch (xop
& ~0x10) {
991 gen_op_add_T1_T0_cc();
998 gen_op_logic_T0_cc();
1003 gen_op_logic_T0_cc();
1008 gen_op_logic_T0_cc();
1012 gen_op_sub_T1_T0_cc();
1017 gen_op_andn_T1_T0();
1019 gen_op_logic_T0_cc();
1024 gen_op_logic_T0_cc();
1027 gen_op_xnor_T1_T0();
1029 gen_op_logic_T0_cc();
1032 gen_op_addx_T1_T0();
1037 gen_op_umul_T1_T0();
1039 gen_op_logic_T0_cc();
1042 gen_op_smul_T1_T0();
1044 gen_op_logic_T0_cc();
1047 gen_op_subx_T1_T0();
1052 gen_op_udiv_T1_T0();
1057 gen_op_sdiv_T1_T0();
1064 gen_movl_T0_reg(rd
);
1067 case 0x20: /* taddcc */
1068 case 0x21: /* tsubcc */
1069 case 0x22: /* taddcctv */
1070 case 0x23: /* tsubcctv */
1072 case 0x24: /* mulscc */
1073 gen_op_mulscc_T1_T0();
1074 gen_movl_T0_reg(rd
);
1076 case 0x25: /* SLL */
1078 gen_movl_T0_reg(rd
);
1082 gen_movl_T0_reg(rd
);
1086 gen_movl_T0_reg(rd
);
1100 #if !defined(CONFIG_USER_ONLY)
1103 if (!supervisor(dc
))
1111 if (!supervisor(dc
))
1119 if (!supervisor(dc
))
1131 rs1
= GET_FIELD(insn
, 13, 17);
1132 gen_movl_reg_T0(rs1
);
1133 if (IS_IMM
) { /* immediate */
1134 rs2
= GET_FIELDs(insn
, 19, 31);
1138 gen_movl_imm_T1(rs2
);
1143 } else { /* register */
1144 rs2
= GET_FIELD(insn
, 27, 31);
1148 gen_movl_reg_T1(rs2
);
1155 case 0x38: /* jmpl */
1157 gen_op_movl_npc_T0();
1159 gen_op_movl_T0_im((long) (dc
->pc
));
1160 gen_movl_T0_reg(rd
);
1163 dc
->npc
= DYNAMIC_PC
;
1166 #if !defined(CONFIG_USER_ONLY)
1167 case 0x39: /* rett */
1169 if (!supervisor(dc
))
1171 gen_op_movl_npc_T0();
1176 case 0x3b: /* flush */
1179 case 0x3c: /* save */
1182 gen_movl_T0_reg(rd
);
1184 case 0x3d: /* restore */
1187 gen_movl_T0_reg(rd
);
1195 case 3: /* load/store instructions */
1197 unsigned int xop
= GET_FIELD(insn
, 7, 12);
1198 rs1
= GET_FIELD(insn
, 13, 17);
1199 gen_movl_reg_T0(rs1
);
1200 if (IS_IMM
) { /* immediate */
1201 rs2
= GET_FIELDs(insn
, 19, 31);
1205 gen_movl_imm_T1(rs2
);
1210 } else { /* register */
1211 rs2
= GET_FIELD(insn
, 27, 31);
1215 gen_movl_reg_T1(rs2
);
1221 if (xop
< 4 || (xop
> 7 && xop
< 0x14) || \
1222 (xop
> 0x17 && xop
< 0x20)) {
1224 case 0x0: /* load word */
1227 case 0x1: /* load unsigned byte */
1230 case 0x2: /* load unsigned halfword */
1233 case 0x3: /* load double word */
1235 gen_movl_T0_reg(rd
+ 1);
1237 case 0x9: /* load signed byte */
1240 case 0xa: /* load signed halfword */
1243 case 0xd: /* ldstub -- XXX: should be atomically */
1244 gen_op_ldst(ldstub
);
1246 case 0x0f: /* swap register with memory. Also atomically */
1247 gen_movl_reg_T1(rd
);
1250 #if !defined(CONFIG_USER_ONLY)
1251 case 0x10: /* load word alternate */
1252 if (!supervisor(dc
))
1254 gen_op_lda(insn
, 1, 4, 0);
1256 case 0x11: /* load unsigned byte alternate */
1257 if (!supervisor(dc
))
1259 gen_op_lduba(insn
, 1, 1, 0);
1261 case 0x12: /* load unsigned halfword alternate */
1262 if (!supervisor(dc
))
1264 gen_op_lduha(insn
, 1, 2, 0);
1266 case 0x13: /* load double word alternate */
1267 if (!supervisor(dc
))
1269 gen_op_ldda(insn
, 1, 8, 0);
1270 gen_movl_T0_reg(rd
+ 1);
1272 case 0x19: /* load signed byte alternate */
1273 if (!supervisor(dc
))
1275 gen_op_ldsba(insn
, 1, 1, 1);
1277 case 0x1a: /* load signed halfword alternate */
1278 if (!supervisor(dc
))
1280 gen_op_ldsha(insn
, 1, 2 ,1);
1282 case 0x1d: /* ldstuba -- XXX: should be atomically */
1283 if (!supervisor(dc
))
1285 gen_op_ldstuba(insn
, 1, 1, 0);
1287 case 0x1f: /* swap reg with alt. memory. Also atomically */
1288 if (!supervisor(dc
))
1290 gen_movl_reg_T1(rd
);
1291 gen_op_swapa(insn
, 1, 4, 0);
1297 gen_movl_T1_reg(rd
);
1298 } else if (xop
>= 0x20 && xop
< 0x24) {
1299 #if !defined(CONFIG_USER_ONLY)
1300 gen_op_trap_ifnofpu();
1303 case 0x20: /* load fpreg */
1305 gen_op_store_FT0_fpr(rd
);
1307 case 0x21: /* load fsr */
1309 gen_op_store_FT0_fpr(rd
);
1311 case 0x23: /* load double fpreg */
1313 gen_op_store_DT0_fpr(rd
);
1318 } else if (xop
< 8 || (xop
>= 0x14 && xop
< 0x18)) {
1319 gen_movl_reg_T1(rd
);
1332 gen_movl_reg_T2(rd
+ 1);
1335 #if !defined(CONFIG_USER_ONLY)
1337 if (!supervisor(dc
))
1339 gen_op_sta(insn
, 0, 4, 0);
1342 if (!supervisor(dc
))
1344 gen_op_stba(insn
, 0, 1, 0);
1347 if (!supervisor(dc
))
1349 gen_op_stha(insn
, 0, 2, 0);
1352 if (!supervisor(dc
))
1355 gen_movl_reg_T2(rd
+ 1);
1356 gen_op_stda(insn
, 0, 8, 0);
1362 } else if (xop
> 0x23 && xop
< 0x28) {
1363 #if !defined(CONFIG_USER_ONLY)
1364 gen_op_trap_ifnofpu();
1368 gen_op_load_fpr_FT0(rd
);
1372 gen_op_load_fpr_FT0(rd
);
1376 gen_op_load_fpr_DT0(rd
);
1379 case 0x26: /* stdfq */
1383 } else if (xop
> 0x33 && xop
< 0x38) {
1391 /* default case for non jump instructions */
1392 if (dc
->npc
== DYNAMIC_PC
) {
1393 dc
->pc
= DYNAMIC_PC
;
1395 } else if (dc
->npc
== JUMP_PC
) {
1396 /* we can do a static jump */
1397 gen_op_branch2((long)dc
->tb
, dc
->jump_pc
[0], dc
->jump_pc
[1]);
1401 dc
->npc
= dc
->npc
+ 4;
1407 gen_op_exception(TT_ILL_INSN
);
1410 #if !defined(CONFIG_USER_ONLY)
1413 gen_op_exception(TT_PRIV_INSN
);
1419 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP
);
1423 static inline int gen_intermediate_code_internal(TranslationBlock
* tb
,
1424 int spc
, CPUSPARCState
*env
)
1426 target_ulong pc_start
, last_pc
;
1427 uint16_t *gen_opc_end
;
1428 DisasContext dc1
, *dc
= &dc1
;
1431 memset(dc
, 0, sizeof(DisasContext
));
1436 dc
->npc
= (target_ulong
) tb
->cs_base
;
1437 #if defined(CONFIG_USER_ONLY)
1440 dc
->mem_idx
= ((env
->psrs
) != 0);
1442 gen_opc_ptr
= gen_opc_buf
;
1443 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
1444 gen_opparam_ptr
= gen_opparam_buf
;
1447 if (env
->nb_breakpoints
> 0) {
1448 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
1449 if (env
->breakpoints
[j
] == dc
->pc
) {
1450 if (dc
->pc
!= pc_start
)
1462 fprintf(logfile
, "Search PC...\n");
1463 j
= gen_opc_ptr
- gen_opc_buf
;
1467 gen_opc_instr_start
[lj
++] = 0;
1468 gen_opc_pc
[lj
] = dc
->pc
;
1469 gen_opc_npc
[lj
] = dc
->npc
;
1470 gen_opc_instr_start
[lj
] = 1;
1474 disas_sparc_insn(dc
);
1477 /* if the next PC is different, we abort now */
1478 if (dc
->pc
!= (last_pc
+ 4))
1480 /* if single step mode, we generate only one instruction and
1481 generate an exception */
1482 if (env
->singlestep_enabled
) {
1483 gen_op_jmp_im(dc
->pc
);
1488 } while ((gen_opc_ptr
< gen_opc_end
) &&
1489 (dc
->pc
- pc_start
) < (TARGET_PAGE_SIZE
- 32));
1493 if (dc
->pc
!= DYNAMIC_PC
&&
1494 (dc
->npc
!= DYNAMIC_PC
&& dc
->npc
!= JUMP_PC
)) {
1495 /* static PC and NPC: we can use direct chaining */
1496 gen_op_branch((long)tb
, dc
->pc
, dc
->npc
);
1498 if (dc
->pc
!= DYNAMIC_PC
)
1499 gen_op_jmp_im(dc
->pc
);
1505 *gen_opc_ptr
= INDEX_op_end
;
1507 j
= gen_opc_ptr
- gen_opc_buf
;
1510 gen_opc_instr_start
[lj
++] = 0;
1518 tb
->size
= last_pc
+ 4 - pc_start
;
1521 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
1522 fprintf(logfile
, "--------------\n");
1523 fprintf(logfile
, "IN: %s\n", lookup_symbol((uint8_t *)pc_start
));
1524 disas(logfile
, (uint8_t *)pc_start
, last_pc
+ 4 - pc_start
, 0, 0);
1525 fprintf(logfile
, "\n");
1526 if (loglevel
& CPU_LOG_TB_OP
) {
1527 fprintf(logfile
, "OP:\n");
1528 dump_ops(gen_opc_buf
, gen_opparam_buf
);
1529 fprintf(logfile
, "\n");
1536 int gen_intermediate_code(CPUSPARCState
* env
, TranslationBlock
* tb
)
1538 return gen_intermediate_code_internal(tb
, 0, env
);
1541 int gen_intermediate_code_pc(CPUSPARCState
* env
, TranslationBlock
* tb
)
1543 return gen_intermediate_code_internal(tb
, 1, env
);
1546 extern int ram_size
;
1548 void cpu_reset(CPUSPARCState
*env
)
1550 memset(env
, 0, sizeof(*env
));
1553 env
->regwptr
= env
->regbase
+ (env
->cwp
* 16);
1554 #if defined(CONFIG_USER_ONLY)
1555 env
->user_mode_only
= 1;
1558 env
->pc
= 0xffd00000;
1559 env
->gregs
[1] = ram_size
;
1560 env
->mmuregs
[0] = (0x04 << 24); /* Impl 0, ver 4, MMU disabled */
1561 env
->npc
= env
->pc
+ 4;
1565 CPUSPARCState
*cpu_sparc_init(void)
1571 if (!(env
= malloc(sizeof(CPUSPARCState
))))
1573 cpu_single_env
= env
;
1578 #define GET_FLAG(a,b) ((env->psr & a)?b:'-')
1580 void cpu_dump_state(CPUState
*env
, FILE *f
,
1581 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
1586 cpu_fprintf(f
, "pc: 0x%08x npc: 0x%08x\n", (int) env
->pc
, (int) env
->npc
);
1587 cpu_fprintf(f
, "General Registers:\n");
1588 for (i
= 0; i
< 4; i
++)
1589 cpu_fprintf(f
, "%%g%c: 0x%08x\t", i
+ '0', env
->gregs
[i
]);
1590 cpu_fprintf(f
, "\n");
1592 cpu_fprintf(f
, "%%g%c: 0x%08x\t", i
+ '0', env
->gregs
[i
]);
1593 cpu_fprintf(f
, "\nCurrent Register Window:\n");
1594 for (x
= 0; x
< 3; x
++) {
1595 for (i
= 0; i
< 4; i
++)
1596 cpu_fprintf(f
, "%%%c%d: 0x%08x\t",
1597 (x
== 0 ? 'o' : (x
== 1 ? 'l' : 'i')), i
,
1598 env
->regwptr
[i
+ x
* 8]);
1599 cpu_fprintf(f
, "\n");
1601 cpu_fprintf(f
, "%%%c%d: 0x%08x\t",
1602 (x
== 0 ? 'o' : x
== 1 ? 'l' : 'i'), i
,
1603 env
->regwptr
[i
+ x
* 8]);
1604 cpu_fprintf(f
, "\n");
1606 cpu_fprintf(f
, "\nFloating Point Registers:\n");
1607 for (i
= 0; i
< 32; i
++) {
1609 cpu_fprintf(f
, "%%f%02d:", i
);
1610 cpu_fprintf(f
, " %016lf", env
->fpr
[i
]);
1612 cpu_fprintf(f
, "\n");
1614 cpu_fprintf(f
, "psr: 0x%08x -> %c%c%c%c %c%c%c wim: 0x%08x\n", GET_PSR(env
),
1615 GET_FLAG(PSR_ZERO
, 'Z'), GET_FLAG(PSR_OVF
, 'V'),
1616 GET_FLAG(PSR_NEG
, 'N'), GET_FLAG(PSR_CARRY
, 'C'),
1617 env
->psrs
?'S':'-', env
->psrps
?'P':'-',
1618 env
->psret
?'E':'-', env
->wim
);
1619 cpu_fprintf(f
, "fsr: 0x%08x\n", env
->fsr
);
1622 #if defined(CONFIG_USER_ONLY)
1623 target_ulong
cpu_get_phys_page_debug(CPUState
*env
, target_ulong addr
)
1629 target_ulong
cpu_get_phys_page_debug(CPUState
*env
, target_ulong addr
)
1632 int prot
, access_index
;
1634 if (get_physical_address(env
, &phys_addr
, &prot
, &access_index
, addr
, 2, 0) != 0)
1640 void helper_flush(target_ulong addr
)
1643 tb_invalidate_page_range(addr
, addr
+ 8);