]>
git.proxmox.com Git - qemu.git/blob - target-m68k/translate.c
4 * Copyright (c) 2005-2007 CodeSourcery
5 * Written by Paul Brook
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
31 #include "m68k-qreg.h"
33 //#define DEBUG_DISPATCH 1
35 static inline void qemu_assert(int cond
, const char *msg
)
38 fprintf (stderr
, "badness: %s\n", msg
);
43 /* internal defines */
44 typedef struct DisasContext
{
50 struct TranslationBlock
*tb
;
51 int singlestep_enabled
;
54 #define DISAS_JUMP_NEXT 4
56 #if defined(CONFIG_USER_ONLY)
59 #define IS_USER(s) s->user
62 /* XXX: move that elsewhere */
63 /* ??? Fix exceptions. */
64 static void *gen_throws_exception
;
65 #define gen_last_qop NULL
67 static uint16_t *gen_opc_ptr
;
68 static uint32_t *gen_opparam_ptr
;
73 #define DEF(s, n, copy_size) INDEX_op_ ## s,
81 #if defined(CONFIG_USER_ONLY)
82 #define gen_st(s, name, addr, val) gen_op_st##name##_raw(addr, val)
83 #define gen_ld(s, name, val, addr) gen_op_ld##name##_raw(val, addr)
85 #define gen_st(s, name, addr, val) do { \
87 gen_op_st##name##_user(addr, val); \
89 gen_op_st##name##_kernel(addr, val); \
91 #define gen_ld(s, name, val, addr) do { \
93 gen_op_ld##name##_user(val, addr); \
95 gen_op_ld##name##_kernel(val, addr); \
107 #define DREG(insn, pos) (((insn >> pos) & 7) + QREG_D0)
108 #define AREG(insn, pos) (((insn >> pos) & 7) + QREG_A0)
109 #define FREG(insn, pos) (((insn >> pos) & 7) + QREG_F0)
111 #define M68K_INSN_CF_A (1 << 0)
112 #define M68K_INSN_CF_B (1 << 1)
113 #define M68K_INSN_CF_C (1 << 2)
114 #define M68K_INSN_CF_MAC (1 << 3)
115 #define M68K_INSN_CF_EMAC (1 << 4)
116 #define M68K_INSN_CF_FPU (1 << 5)
123 static m68k_def_t m68k_cpu_defs
[] = {
124 {"m5206", M68K_INSN_CF_A
},
125 {"cfv4e", M68K_INSN_CF_A
| M68K_INSN_CF_B
| M68K_INSN_CF_C
126 | M68K_INSN_CF_MAC
| M68K_INSN_CF_EMAC
| M68K_INSN_CF_FPU
},
130 typedef void (*disas_proc
)(DisasContext
*, uint16_t);
132 #ifdef DEBUG_DISPATCH
133 #define DISAS_INSN(name) \
134 static void real_disas_##name (DisasContext *s, uint16_t insn); \
135 static void disas_##name (DisasContext *s, uint16_t insn) { \
136 if (logfile) fprintf(logfile, "Dispatch " #name "\n"); \
137 real_disas_##name(s, insn); } \
138 static void real_disas_##name (DisasContext *s, uint16_t insn)
140 #define DISAS_INSN(name) \
141 static void disas_##name (DisasContext *s, uint16_t insn)
144 /* Generate a load from the specified address. Narrow values are
145 sign extended to full register width. */
146 static inline int gen_load(DisasContext
* s
, int opsize
, int addr
, int sign
)
151 tmp
= gen_new_qreg(QMODE_I32
);
153 gen_ld(s
, 8s32
, tmp
, addr
);
155 gen_ld(s
, 8u32, tmp
, addr
);
158 tmp
= gen_new_qreg(QMODE_I32
);
160 gen_ld(s
, 16s32
, tmp
, addr
);
162 gen_ld(s
, 16u32, tmp
, addr
);
165 tmp
= gen_new_qreg(QMODE_I32
);
166 gen_ld(s
, 32, tmp
, addr
);
169 tmp
= gen_new_qreg(QMODE_F32
);
170 gen_ld(s
, f32
, tmp
, addr
);
173 tmp
= gen_new_qreg(QMODE_F64
);
174 gen_ld(s
, f64
, tmp
, addr
);
177 qemu_assert(0, "bad load size");
179 gen_throws_exception
= gen_last_qop
;
183 /* Generate a store. */
184 static inline void gen_store(DisasContext
*s
, int opsize
, int addr
, int val
)
188 gen_st(s
, 8, addr
, val
);
191 gen_st(s
, 16, addr
, val
);
194 gen_st(s
, 32, addr
, val
);
197 gen_st(s
, f32
, addr
, val
);
200 gen_st(s
, f64
, addr
, val
);
203 qemu_assert(0, "bad store size");
205 gen_throws_exception
= gen_last_qop
;
208 /* Generate an unsigned load if VAL is 0 a signed load if val is -1,
209 otherwise generate a store. */
210 static int gen_ldst(DisasContext
*s
, int opsize
, int addr
, int val
)
213 gen_store(s
, opsize
, addr
, val
);
216 return gen_load(s
, opsize
, addr
, val
!= 0);
220 /* Handle a base + index + displacement effective addresss. A base of
221 -1 means pc-relative. */
222 static int gen_lea_indexed(DisasContext
*s
, int opsize
, int base
)
231 ext
= lduw_code(s
->pc
);
233 tmp
= ((ext
>> 12) & 7) + ((ext
& 0x8000) ? QREG_A0
: QREG_D0
);
234 /* ??? Check W/L bit. */
235 scale
= (ext
>> 9) & 3;
239 add
= gen_new_qreg(QMODE_I32
);
240 gen_op_shl32(add
, tmp
, gen_im32(scale
));
242 tmp
= gen_new_qreg(QMODE_I32
);
244 gen_op_add32(tmp
, base
, gen_im32((int8_t)ext
));
245 gen_op_add32(tmp
, tmp
, add
);
247 gen_op_add32(tmp
, add
, gen_im32(offset
+ (int8_t)ext
));
252 /* Read a 32-bit immediate constant. */
253 static inline uint32_t read_im32(DisasContext
*s
)
256 im
= ((uint32_t)lduw_code(s
->pc
)) << 16;
258 im
|= lduw_code(s
->pc
);
264 /* Update the CPU env CC_OP state. */
265 static inline void gen_flush_cc_op(DisasContext
*s
)
267 if (s
->cc_op
!= CC_OP_DYNAMIC
)
268 gen_op_mov32(QREG_CC_OP
, gen_im32(s
->cc_op
));
271 /* Evaluate all the CC flags. */
272 static inline void gen_flush_flags(DisasContext
*s
)
274 if (s
->cc_op
== CC_OP_FLAGS
)
276 gen_op_flush_flags(s
->cc_op
);
277 s
->cc_op
= CC_OP_FLAGS
;
280 static inline int opsize_bytes(int opsize
)
283 case OS_BYTE
: return 1;
284 case OS_WORD
: return 2;
285 case OS_LONG
: return 4;
286 case OS_SINGLE
: return 4;
287 case OS_DOUBLE
: return 8;
289 qemu_assert(0, "bad operand size");
293 /* Assign value to a register. If the width is less than the register width
294 only the low part of the register is set. */
295 static void gen_partset_reg(int opsize
, int reg
, int val
)
300 gen_op_and32(reg
, reg
, gen_im32(0xffffff00));
301 tmp
= gen_new_qreg(QMODE_I32
);
302 gen_op_and32(tmp
, val
, gen_im32(0xff));
303 gen_op_or32(reg
, reg
, tmp
);
306 gen_op_and32(reg
, reg
, gen_im32(0xffff0000));
307 tmp
= gen_new_qreg(QMODE_I32
);
308 gen_op_and32(tmp
, val
, gen_im32(0xffff));
309 gen_op_or32(reg
, reg
, tmp
);
312 gen_op_mov32(reg
, val
);
315 gen_op_pack_32_f32(reg
, val
);
318 qemu_assert(0, "Bad operand size");
323 /* Sign or zero extend a value. */
324 static inline int gen_extend(int val
, int opsize
, int sign
)
330 tmp
= gen_new_qreg(QMODE_I32
);
332 gen_op_ext8s32(tmp
, val
);
334 gen_op_ext8u32(tmp
, val
);
337 tmp
= gen_new_qreg(QMODE_I32
);
339 gen_op_ext16s32(tmp
, val
);
341 gen_op_ext16u32(tmp
, val
);
347 tmp
= gen_new_qreg(QMODE_F32
);
348 gen_op_pack_f32_32(tmp
, val
);
351 qemu_assert(0, "Bad operand size");
356 /* Generate code for an "effective address". Does not adjust the base
357 register for autoincrememnt addressing modes. */
358 static int gen_lea(DisasContext
*s
, uint16_t insn
, int opsize
)
366 switch ((insn
>> 3) & 7) {
367 case 0: /* Data register direct. */
368 case 1: /* Address register direct. */
369 /* ??? generate bad addressing mode fault. */
370 qemu_assert(0, "invalid addressing mode");
371 case 2: /* Indirect register */
372 case 3: /* Indirect postincrement. */
375 case 4: /* Indirect predecrememnt. */
377 tmp
= gen_new_qreg(QMODE_I32
);
378 gen_op_sub32(tmp
, reg
, gen_im32(opsize_bytes(opsize
)));
380 case 5: /* Indirect displacement. */
382 tmp
= gen_new_qreg(QMODE_I32
);
383 ext
= lduw_code(s
->pc
);
385 gen_op_add32(tmp
, reg
, gen_im32((int16_t)ext
));
387 case 6: /* Indirect index + displacement. */
389 return gen_lea_indexed(s
, opsize
, reg
);
392 case 0: /* Absolute short. */
393 offset
= ldsw_code(s
->pc
);
395 return gen_im32(offset
);
396 case 1: /* Absolute long. */
397 offset
= read_im32(s
);
398 return gen_im32(offset
);
399 case 2: /* pc displacement */
400 tmp
= gen_new_qreg(QMODE_I32
);
402 offset
+= ldsw_code(s
->pc
);
404 return gen_im32(offset
);
405 case 3: /* pc index+displacement. */
406 return gen_lea_indexed(s
, opsize
, -1);
407 case 4: /* Immediate. */
409 /* ??? generate bad addressing mode fault. */
410 qemu_assert(0, "invalid addressing mode");
413 /* Should never happen. */
417 /* Helper function for gen_ea. Reuse the computed address between the
418 for read/write operands. */
419 static inline int gen_ea_once(DisasContext
*s
, uint16_t insn
, int opsize
,
424 if (addrp
&& val
> 0) {
427 tmp
= gen_lea(s
, insn
, opsize
);
431 return gen_ldst(s
, opsize
, tmp
, val
);
434 /* Generate code to load/store a value ito/from an EA. If VAL > 0 this is
435 a write otherwise it is a read (0 == sign extend, -1 == zero extend).
436 ADDRP is non-null for readwrite operands. */
437 static int gen_ea(DisasContext
*s
, uint16_t insn
, int opsize
, int val
,
445 switch ((insn
>> 3) & 7) {
446 case 0: /* Data register direct. */
449 gen_partset_reg(opsize
, reg
, val
);
452 return gen_extend(reg
, opsize
, val
);
454 case 1: /* Address register direct. */
457 gen_op_mov32(reg
, val
);
460 return gen_extend(reg
, opsize
, val
);
462 case 2: /* Indirect register */
464 return gen_ldst(s
, opsize
, reg
, val
);
465 case 3: /* Indirect postincrement. */
467 result
= gen_ldst(s
, opsize
, reg
, val
);
468 /* ??? This is not exception safe. The instruction may still
469 fault after this point. */
470 if (val
> 0 || !addrp
)
471 gen_op_add32(reg
, reg
, gen_im32(opsize_bytes(opsize
)));
473 case 4: /* Indirect predecrememnt. */
476 if (addrp
&& val
> 0) {
479 tmp
= gen_lea(s
, insn
, opsize
);
483 result
= gen_ldst(s
, opsize
, tmp
, val
);
484 /* ??? This is not exception safe. The instruction may still
485 fault after this point. */
486 if (val
> 0 || !addrp
) {
488 gen_op_mov32(reg
, tmp
);
492 case 5: /* Indirect displacement. */
493 case 6: /* Indirect index + displacement. */
494 return gen_ea_once(s
, insn
, opsize
, val
, addrp
);
497 case 0: /* Absolute short. */
498 case 1: /* Absolute long. */
499 case 2: /* pc displacement */
500 case 3: /* pc index+displacement. */
501 return gen_ea_once(s
, insn
, opsize
, val
, addrp
);
502 case 4: /* Immediate. */
503 /* Sign extend values for consistency. */
507 offset
= ldsb_code(s
->pc
+ 1);
509 offset
= ldub_code(s
->pc
+ 1);
514 offset
= ldsw_code(s
->pc
);
516 offset
= lduw_code(s
->pc
);
520 offset
= read_im32(s
);
523 qemu_assert(0, "Bad immediate operand");
525 return gen_im32(offset
);
527 qemu_assert(0, "invalid addressing mode");
530 /* Should never happen. */
534 static void gen_logic_cc(DisasContext
*s
, int val
)
536 gen_op_logic_cc(val
);
537 s
->cc_op
= CC_OP_LOGIC
;
540 static void gen_jmpcc(DisasContext
*s
, int cond
, int l1
)
551 case 2: /* HI (!C && !Z) */
552 tmp
= gen_new_qreg(QMODE_I32
);
553 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
| CCF_Z
));
554 gen_op_jmp_z32(tmp
, l1
);
556 case 3: /* LS (C || Z) */
557 tmp
= gen_new_qreg(QMODE_I32
);
558 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
| CCF_Z
));
559 gen_op_jmp_nz32(tmp
, l1
);
561 case 4: /* CC (!C) */
562 tmp
= gen_new_qreg(QMODE_I32
);
563 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
));
564 gen_op_jmp_z32(tmp
, l1
);
567 tmp
= gen_new_qreg(QMODE_I32
);
568 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_C
));
569 gen_op_jmp_nz32(tmp
, l1
);
571 case 6: /* NE (!Z) */
572 tmp
= gen_new_qreg(QMODE_I32
);
573 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
574 gen_op_jmp_z32(tmp
, l1
);
577 tmp
= gen_new_qreg(QMODE_I32
);
578 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
579 gen_op_jmp_nz32(tmp
, l1
);
581 case 8: /* VC (!V) */
582 tmp
= gen_new_qreg(QMODE_I32
);
583 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_V
));
584 gen_op_jmp_z32(tmp
, l1
);
587 tmp
= gen_new_qreg(QMODE_I32
);
588 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_V
));
589 gen_op_jmp_nz32(tmp
, l1
);
591 case 10: /* PL (!N) */
592 tmp
= gen_new_qreg(QMODE_I32
);
593 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_N
));
594 gen_op_jmp_z32(tmp
, l1
);
596 case 11: /* MI (N) */
597 tmp
= gen_new_qreg(QMODE_I32
);
598 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_N
));
599 gen_op_jmp_nz32(tmp
, l1
);
601 case 12: /* GE (!(N ^ V)) */
602 tmp
= gen_new_qreg(QMODE_I32
);
603 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
604 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
605 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
606 gen_op_jmp_z32(tmp
, l1
);
608 case 13: /* LT (N ^ V) */
609 tmp
= gen_new_qreg(QMODE_I32
);
610 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
611 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
612 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
613 gen_op_jmp_nz32(tmp
, l1
);
615 case 14: /* GT (!(Z || (N ^ V))) */
618 l2
= gen_new_label();
619 tmp
= gen_new_qreg(QMODE_I32
);
620 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
621 gen_op_jmp_nz32(tmp
, l2
);
622 tmp
= gen_new_qreg(QMODE_I32
);
623 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
624 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
625 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
626 gen_op_jmp_nz32(tmp
, l2
);
631 case 15: /* LE (Z || (N ^ V)) */
632 tmp
= gen_new_qreg(QMODE_I32
);
633 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_Z
));
634 gen_op_jmp_nz32(tmp
, l1
);
635 tmp
= gen_new_qreg(QMODE_I32
);
636 gen_op_shr32(tmp
, QREG_CC_DEST
, gen_im32(2));
637 gen_op_xor32(tmp
, tmp
, QREG_CC_DEST
);
638 gen_op_and32(tmp
, tmp
, gen_im32(CCF_V
));
639 gen_op_jmp_nz32(tmp
, l1
);
642 /* Should ever happen. */
653 l1
= gen_new_label();
654 cond
= (insn
>> 8) & 0xf;
656 gen_op_and32(reg
, reg
, gen_im32(0xffffff00));
657 gen_jmpcc(s
, cond
^ 1, l1
);
658 gen_op_or32(reg
, reg
, gen_im32(0xff));
662 /* Force a TB lookup after an instruction that changes the CPU state. */
663 static void gen_lookup_tb(DisasContext
*s
)
666 gen_op_mov32(QREG_PC
, gen_im32(s
->pc
));
667 s
->is_jmp
= DISAS_UPDATE
;
670 /* Generate a jump to to the address in qreg DEST. */
671 static void gen_jmp(DisasContext
*s
, int dest
)
674 gen_op_mov32(QREG_PC
, dest
);
675 s
->is_jmp
= DISAS_JUMP
;
678 static void gen_exception(DisasContext
*s
, uint32_t where
, int nr
)
681 gen_jmp(s
, gen_im32(where
));
682 gen_op_raise_exception(nr
);
685 /* Generate a jump to an immediate address. */
686 static void gen_jmp_tb(DisasContext
*s
, int n
, uint32_t dest
)
688 TranslationBlock
*tb
;
691 if (__builtin_expect (s
->singlestep_enabled
, 0)) {
692 gen_exception(s
, dest
, EXCP_DEBUG
);
693 } else if ((tb
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
) ||
694 (s
->pc
& TARGET_PAGE_MASK
) == (dest
& TARGET_PAGE_MASK
)) {
695 gen_op_goto_tb(0, n
, (long)tb
);
696 gen_op_mov32(QREG_PC
, gen_im32(dest
));
697 gen_op_mov32(QREG_T0
, gen_im32((long)tb
+ n
));
700 gen_jmp(s
, gen_im32(dest
));
701 gen_op_mov32(QREG_T0
, gen_im32(0));
704 s
->is_jmp
= DISAS_TB_JUMP
;
707 DISAS_INSN(undef_mac
)
709 gen_exception(s
, s
->pc
- 2, EXCP_LINEA
);
712 DISAS_INSN(undef_fpu
)
714 gen_exception(s
, s
->pc
- 2, EXCP_LINEF
);
719 gen_exception(s
, s
->pc
- 2, EXCP_UNSUPPORTED
);
720 cpu_abort(cpu_single_env
, "Illegal instruction: %04x @ %08x",
731 sign
= (insn
& 0x100) != 0;
733 tmp
= gen_new_qreg(QMODE_I32
);
735 gen_op_ext16s32(tmp
, reg
);
737 gen_op_ext16u32(tmp
, reg
);
738 src
= gen_ea(s
, insn
, OS_WORD
, sign
? -1 : 0, NULL
);
739 gen_op_mul32(tmp
, tmp
, src
);
740 gen_op_mov32(reg
, tmp
);
741 /* Unlike m68k, coldfire always clears the overflow bit. */
742 gen_logic_cc(s
, tmp
);
752 sign
= (insn
& 0x100) != 0;
755 gen_op_ext16s32(QREG_DIV1
, reg
);
757 gen_op_ext16u32(QREG_DIV1
, reg
);
759 src
= gen_ea(s
, insn
, OS_WORD
, sign
? -1 : 0, NULL
);
760 gen_op_mov32(QREG_DIV2
, src
);
767 tmp
= gen_new_qreg(QMODE_I32
);
768 src
= gen_new_qreg(QMODE_I32
);
769 gen_op_ext16u32(tmp
, QREG_DIV1
);
770 gen_op_shl32(src
, QREG_DIV2
, gen_im32(16));
771 gen_op_or32(reg
, tmp
, src
);
773 s
->cc_op
= CC_OP_FLAGS
;
783 ext
= lduw_code(s
->pc
);
786 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
791 gen_op_mov32(QREG_DIV1
, num
);
792 den
= gen_ea(s
, insn
, OS_LONG
, 0, NULL
);
793 gen_op_mov32(QREG_DIV2
, den
);
801 gen_op_mov32 (reg
, QREG_DIV1
);
804 gen_op_mov32 (reg
, QREG_DIV2
);
807 s
->cc_op
= CC_OP_FLAGS
;
819 add
= (insn
& 0x4000) != 0;
821 dest
= gen_new_qreg(QMODE_I32
);
823 tmp
= gen_ea(s
, insn
, OS_LONG
, 0, &addr
);
827 src
= gen_ea(s
, insn
, OS_LONG
, 0, NULL
);
830 gen_op_add32(dest
, tmp
, src
);
831 gen_op_update_xflag_lt(dest
, src
);
832 s
->cc_op
= CC_OP_ADD
;
834 gen_op_update_xflag_lt(tmp
, src
);
835 gen_op_sub32(dest
, tmp
, src
);
836 s
->cc_op
= CC_OP_SUB
;
838 gen_op_update_cc_add(dest
, src
);
840 gen_ea(s
, insn
, OS_LONG
, dest
, &addr
);
842 gen_op_mov32(reg
, dest
);
847 /* Reverse the order of the bits in REG. */
855 val
= gen_new_qreg(QMODE_I32
);
856 tmp1
= gen_new_qreg(QMODE_I32
);
857 tmp2
= gen_new_qreg(QMODE_I32
);
859 gen_op_mov32(val
, reg
);
860 /* Reverse bits within each nibble. */
861 gen_op_shl32(tmp1
, val
, gen_im32(3));
862 gen_op_and32(tmp1
, tmp1
, gen_im32(0x88888888));
863 gen_op_shl32(tmp2
, val
, gen_im32(1));
864 gen_op_and32(tmp2
, tmp2
, gen_im32(0x44444444));
865 gen_op_or32(tmp1
, tmp1
, tmp2
);
866 gen_op_shr32(tmp2
, val
, gen_im32(1));
867 gen_op_and32(tmp2
, tmp2
, gen_im32(0x22222222));
868 gen_op_or32(tmp1
, tmp1
, tmp2
);
869 gen_op_shr32(tmp2
, val
, gen_im32(3));
870 gen_op_and32(tmp2
, tmp2
, gen_im32(0x11111111));
871 gen_op_or32(tmp1
, tmp1
, tmp2
);
872 /* Reverse nibbles withing bytes. */
873 gen_op_shl32(val
, tmp1
, gen_im32(4));
874 gen_op_and32(val
, val
, gen_im32(0xf0f0f0f0));
875 gen_op_shr32(tmp2
, tmp1
, gen_im32(4));
876 gen_op_and32(tmp2
, tmp2
, gen_im32(0x0f0f0f0f));
877 gen_op_or32(val
, val
, tmp2
);
879 gen_op_bswap32(reg
, val
);
880 gen_op_mov32(reg
, val
);
883 DISAS_INSN(bitop_reg
)
893 if ((insn
& 0x38) != 0)
897 op
= (insn
>> 6) & 3;
898 src1
= gen_ea(s
, insn
, opsize
, 0, op
? &addr
: NULL
);
899 src2
= DREG(insn
, 9);
900 dest
= gen_new_qreg(QMODE_I32
);
903 tmp
= gen_new_qreg(QMODE_I32
);
904 if (opsize
== OS_BYTE
)
905 gen_op_and32(tmp
, src2
, gen_im32(7));
907 gen_op_and32(tmp
, src2
, gen_im32(31));
909 tmp
= gen_new_qreg(QMODE_I32
);
910 gen_op_shl32(tmp
, gen_im32(1), src2
);
912 gen_op_btest(src1
, tmp
);
915 gen_op_xor32(dest
, src1
, tmp
);
918 gen_op_not32(tmp
, tmp
);
919 gen_op_and32(dest
, src1
, tmp
);
922 gen_op_or32(dest
, src1
, tmp
);
928 gen_ea(s
, insn
, opsize
, dest
, &addr
);
938 tmp
= gen_new_qreg(QMODE_I32
);
940 gen_op_and32(tmp
, QREG_CC_DEST
, gen_im32(CCF_V
));
941 l1
= gen_new_label();
942 gen_op_jmp_z32(tmp
, l1
);
943 tmp
= gen_new_qreg(QMODE_I32
);
944 gen_op_shr32(tmp
, reg
, gen_im32(31));
945 gen_op_xor32(tmp
, tmp
, gen_im32(0x80000000));
946 gen_op_mov32(reg
, tmp
);
948 gen_logic_cc(s
, tmp
);
951 static void gen_push(DisasContext
*s
, int val
)
955 tmp
= gen_new_qreg(QMODE_I32
);
956 gen_op_sub32(tmp
, QREG_SP
, gen_im32(4));
957 gen_store(s
, OS_LONG
, tmp
, val
);
958 gen_op_mov32(QREG_SP
, tmp
);
970 mask
= lduw_code(s
->pc
);
972 tmp
= gen_lea(s
, insn
, OS_LONG
);
973 addr
= gen_new_qreg(QMODE_I32
);
974 gen_op_mov32(addr
, tmp
);
975 is_load
= ((insn
& 0x0400) != 0);
976 for (i
= 0; i
< 16; i
++, mask
>>= 1) {
983 tmp
= gen_load(s
, OS_LONG
, addr
, 0);
984 gen_op_mov32(reg
, tmp
);
986 gen_store(s
, OS_LONG
, addr
, reg
);
989 gen_op_add32(addr
, addr
, gen_im32(4));
1005 if ((insn
& 0x38) != 0)
1009 op
= (insn
>> 6) & 3;
1011 bitnum
= lduw_code(s
->pc
);
1013 if (bitnum
& 0xff00) {
1014 disas_undef(s
, insn
);
1018 src1
= gen_ea(s
, insn
, opsize
, 0, op
? &addr
: NULL
);
1021 tmp
= gen_new_qreg(QMODE_I32
);
1022 if (opsize
== OS_BYTE
)
1028 gen_op_btest(src1
, gen_im32(mask
));
1030 dest
= gen_new_qreg(QMODE_I32
);
1036 gen_op_xor32(dest
, src1
, gen_im32(mask
));
1039 gen_op_and32(dest
, src1
, gen_im32(~mask
));
1042 gen_op_or32(dest
, src1
, gen_im32(mask
));
1048 gen_ea(s
, insn
, opsize
, dest
, &addr
);
1051 DISAS_INSN(arith_im
)
1059 op
= (insn
>> 9) & 7;
1060 src1
= gen_ea(s
, insn
, OS_LONG
, 0, (op
== 6) ? NULL
: &addr
);
1061 src2
= gen_im32(read_im32(s
));
1062 dest
= gen_new_qreg(QMODE_I32
);
1065 gen_op_or32(dest
, src1
, src2
);
1066 gen_logic_cc(s
, dest
);
1069 gen_op_and32(dest
, src1
, src2
);
1070 gen_logic_cc(s
, dest
);
1073 gen_op_mov32(dest
, src1
);
1074 gen_op_update_xflag_lt(dest
, src2
);
1075 gen_op_sub32(dest
, dest
, src2
);
1076 gen_op_update_cc_add(dest
, src2
);
1077 s
->cc_op
= CC_OP_SUB
;
1080 gen_op_mov32(dest
, src1
);
1081 gen_op_add32(dest
, dest
, src2
);
1082 gen_op_update_cc_add(dest
, src2
);
1083 gen_op_update_xflag_lt(dest
, src2
);
1084 s
->cc_op
= CC_OP_ADD
;
1087 gen_op_xor32(dest
, src1
, src2
);
1088 gen_logic_cc(s
, dest
);
1091 gen_op_mov32(dest
, src1
);
1092 gen_op_sub32(dest
, dest
, src2
);
1093 gen_op_update_cc_add(dest
, src2
);
1094 s
->cc_op
= CC_OP_SUB
;
1100 gen_ea(s
, insn
, OS_LONG
, dest
, &addr
);
1108 reg
= DREG(insn
, 0);
1109 gen_op_bswap32(reg
, reg
);
1119 switch (insn
>> 12) {
1120 case 1: /* move.b */
1123 case 2: /* move.l */
1126 case 3: /* move.w */
1132 src
= gen_ea(s
, insn
, opsize
, -1, NULL
);
1133 op
= (insn
>> 6) & 7;
1136 /* The value will already have been sign extended. */
1137 dest
= AREG(insn
, 9);
1138 gen_op_mov32(dest
, src
);
1142 dest_ea
= ((insn
>> 9) & 7) | (op
<< 3);
1143 gen_ea(s
, dest_ea
, opsize
, src
, NULL
);
1144 /* This will be correct because loads sign extend. */
1145 gen_logic_cc(s
, src
);
1156 reg
= DREG(insn
, 0);
1157 dest
= gen_new_qreg(QMODE_I32
);
1158 gen_op_mov32 (dest
, gen_im32(0));
1159 gen_op_subx_cc(dest
, reg
);
1161 tmp
= gen_new_qreg(QMODE_I32
);
1162 gen_op_mov32 (tmp
, QREG_CC_DEST
);
1163 gen_op_update_cc_add(dest
, reg
);
1164 gen_op_mov32(reg
, dest
);
1165 s
->cc_op
= CC_OP_DYNAMIC
;
1167 gen_op_or32(tmp
, tmp
, gen_im32(~CCF_Z
));
1168 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1169 s
->cc_op
= CC_OP_FLAGS
;
1177 reg
= AREG(insn
, 9);
1178 tmp
= gen_lea(s
, insn
, OS_LONG
);
1179 gen_op_mov32(reg
, tmp
);
1186 switch ((insn
>> 6) & 3) {
1199 gen_ea (s
, insn
, opsize
, gen_im32(0), NULL
);
1200 gen_logic_cc(s
, gen_im32(0));
1203 static int gen_get_ccr(DisasContext
*s
)
1208 dest
= gen_new_qreg(QMODE_I32
);
1209 gen_op_get_xflag(dest
);
1210 gen_op_shl32(dest
, dest
, gen_im32(4));
1211 gen_op_or32(dest
, dest
, QREG_CC_DEST
);
1215 DISAS_INSN(move_from_ccr
)
1220 ccr
= gen_get_ccr(s
);
1221 reg
= DREG(insn
, 0);
1222 gen_partset_reg(OS_WORD
, reg
, ccr
);
1230 reg
= DREG(insn
, 0);
1231 src1
= gen_new_qreg(QMODE_I32
);
1232 gen_op_mov32(src1
, reg
);
1233 gen_op_neg32(reg
, src1
);
1234 s
->cc_op
= CC_OP_SUB
;
1235 gen_op_update_cc_add(reg
, src1
);
1236 gen_op_update_xflag_lt(gen_im32(0), src1
);
1237 s
->cc_op
= CC_OP_SUB
;
1240 static void gen_set_sr_im(DisasContext
*s
, uint16_t val
, int ccr_only
)
1242 gen_op_logic_cc(gen_im32(val
& 0xf));
1243 gen_op_update_xflag_tst(gen_im32((val
& 0x10) >> 4));
1245 gen_op_mov32(QREG_SR
, gen_im32(val
& 0xff00));
1249 static void gen_set_sr(DisasContext
*s
, uint16_t insn
, int ccr_only
)
1254 s
->cc_op
= CC_OP_FLAGS
;
1255 if ((insn
& 0x38) == 0)
1257 src1
= gen_new_qreg(QMODE_I32
);
1258 reg
= DREG(insn
, 0);
1259 gen_op_and32(src1
, reg
, gen_im32(0xf));
1260 gen_op_logic_cc(src1
);
1261 gen_op_shr32(src1
, reg
, gen_im32(4));
1262 gen_op_and32(src1
, src1
, gen_im32(1));
1263 gen_op_update_xflag_tst(src1
);
1265 gen_op_and32(QREG_SR
, reg
, gen_im32(0xff00));
1268 else if ((insn
& 0x3f) == 0x3c)
1271 val
= lduw_code(s
->pc
);
1273 gen_set_sr_im(s
, val
, ccr_only
);
1276 disas_undef(s
, insn
);
1279 DISAS_INSN(move_to_ccr
)
1281 gen_set_sr(s
, insn
, 1);
1288 reg
= DREG(insn
, 0);
1289 gen_op_not32(reg
, reg
);
1290 gen_logic_cc(s
, reg
);
1300 dest
= gen_new_qreg(QMODE_I32
);
1301 src1
= gen_new_qreg(QMODE_I32
);
1302 src2
= gen_new_qreg(QMODE_I32
);
1303 reg
= DREG(insn
, 0);
1304 gen_op_shl32(src1
, reg
, gen_im32(16));
1305 gen_op_shr32(src2
, reg
, gen_im32(16));
1306 gen_op_or32(dest
, src1
, src2
);
1307 gen_op_mov32(reg
, dest
);
1308 gen_logic_cc(s
, dest
);
1315 tmp
= gen_lea(s
, insn
, OS_LONG
);
1325 reg
= DREG(insn
, 0);
1326 op
= (insn
>> 6) & 7;
1327 tmp
= gen_new_qreg(QMODE_I32
);
1329 gen_op_ext16s32(tmp
, reg
);
1331 gen_op_ext8s32(tmp
, reg
);
1333 gen_partset_reg(OS_WORD
, reg
, tmp
);
1335 gen_op_mov32(reg
, tmp
);
1336 gen_logic_cc(s
, tmp
);
1344 switch ((insn
>> 6) & 3) {
1357 tmp
= gen_ea(s
, insn
, opsize
, -1, NULL
);
1358 gen_logic_cc(s
, tmp
);
1363 /* Implemented as a NOP. */
1368 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1371 /* ??? This should be atomic. */
1378 dest
= gen_new_qreg(QMODE_I32
);
1379 src1
= gen_ea(s
, insn
, OS_BYTE
, -1, &addr
);
1380 gen_logic_cc(s
, src1
);
1381 gen_op_or32(dest
, src1
, gen_im32(0x80));
1382 gen_ea(s
, insn
, OS_BYTE
, dest
, &addr
);
1392 /* The upper 32 bits of the product are discarded, so
1393 muls.l and mulu.l are functionally equivalent. */
1394 ext
= lduw_code(s
->pc
);
1397 gen_exception(s
, s
->pc
- 4, EXCP_UNSUPPORTED
);
1400 reg
= DREG(ext
, 12);
1401 src1
= gen_ea(s
, insn
, OS_LONG
, 0, NULL
);
1402 dest
= gen_new_qreg(QMODE_I32
);
1403 gen_op_mul32(dest
, src1
, reg
);
1404 gen_op_mov32(reg
, dest
);
1405 /* Unlike m68k, coldfire always clears the overflow bit. */
1406 gen_logic_cc(s
, dest
);
1415 offset
= ldsw_code(s
->pc
);
1417 reg
= AREG(insn
, 0);
1418 tmp
= gen_new_qreg(QMODE_I32
);
1419 gen_op_sub32(tmp
, QREG_SP
, gen_im32(4));
1420 gen_store(s
, OS_LONG
, tmp
, reg
);
1422 gen_op_mov32(reg
, tmp
);
1423 gen_op_add32(QREG_SP
, tmp
, gen_im32(offset
));
1432 src
= gen_new_qreg(QMODE_I32
);
1433 reg
= AREG(insn
, 0);
1434 gen_op_mov32(src
, reg
);
1435 tmp
= gen_load(s
, OS_LONG
, src
, 0);
1436 gen_op_mov32(reg
, tmp
);
1437 gen_op_add32(QREG_SP
, src
, gen_im32(4));
1448 tmp
= gen_load(s
, OS_LONG
, QREG_SP
, 0);
1449 gen_op_add32(QREG_SP
, QREG_SP
, gen_im32(4));
1457 /* Load the target address first to ensure correct exception
1459 tmp
= gen_lea(s
, insn
, OS_LONG
);
1460 if ((insn
& 0x40) == 0) {
1462 gen_push(s
, gen_im32(s
->pc
));
1475 src1
= gen_ea(s
, insn
, OS_LONG
, 0, &addr
);
1476 val
= (insn
>> 9) & 7;
1479 src2
= gen_im32(val
);
1480 dest
= gen_new_qreg(QMODE_I32
);
1481 gen_op_mov32(dest
, src1
);
1482 if ((insn
& 0x38) == 0x08) {
1483 /* Don't update condition codes if the destination is an
1484 address register. */
1485 if (insn
& 0x0100) {
1486 gen_op_sub32(dest
, dest
, src2
);
1488 gen_op_add32(dest
, dest
, src2
);
1491 if (insn
& 0x0100) {
1492 gen_op_update_xflag_lt(dest
, src2
);
1493 gen_op_sub32(dest
, dest
, src2
);
1494 s
->cc_op
= CC_OP_SUB
;
1496 gen_op_add32(dest
, dest
, src2
);
1497 gen_op_update_xflag_lt(dest
, src2
);
1498 s
->cc_op
= CC_OP_ADD
;
1500 gen_op_update_cc_add(dest
, src2
);
1502 gen_ea(s
, insn
, OS_LONG
, dest
, &addr
);
1508 case 2: /* One extension word. */
1511 case 3: /* Two extension words. */
1514 case 4: /* No extension words. */
1517 disas_undef(s
, insn
);
1529 op
= (insn
>> 8) & 0xf;
1530 offset
= (int8_t)insn
;
1532 offset
= ldsw_code(s
->pc
);
1534 } else if (offset
== -1) {
1535 offset
= read_im32(s
);
1539 gen_push(s
, gen_im32(s
->pc
));
1544 l1
= gen_new_label();
1545 gen_jmpcc(s
, ((insn
>> 8) & 0xf) ^ 1, l1
);
1546 gen_jmp_tb(s
, 1, base
+ offset
);
1548 gen_jmp_tb(s
, 0, s
->pc
);
1550 /* Unconditional branch. */
1551 gen_jmp_tb(s
, 0, base
+ offset
);
1559 tmp
= gen_im32((int8_t)insn
);
1560 gen_op_mov32(DREG(insn
, 9), tmp
);
1561 gen_logic_cc(s
, tmp
);
1574 src
= gen_ea(s
, insn
, opsize
, (insn
& 0x80) ? 0 : -1, NULL
);
1575 reg
= DREG(insn
, 9);
1576 gen_op_mov32(reg
, src
);
1577 gen_logic_cc(s
, src
);
1587 reg
= DREG(insn
, 9);
1588 dest
= gen_new_qreg(QMODE_I32
);
1590 src
= gen_ea(s
, insn
, OS_LONG
, 0, &addr
);
1591 gen_op_or32(dest
, src
, reg
);
1592 gen_ea(s
, insn
, OS_LONG
, dest
, &addr
);
1594 src
= gen_ea(s
, insn
, OS_LONG
, 0, NULL
);
1595 gen_op_or32(dest
, src
, reg
);
1596 gen_op_mov32(reg
, dest
);
1598 gen_logic_cc(s
, dest
);
1606 src
= gen_ea(s
, insn
, OS_LONG
, 0, NULL
);
1607 reg
= AREG(insn
, 9);
1608 gen_op_sub32(reg
, reg
, src
);
1619 reg
= DREG(insn
, 9);
1620 src
= DREG(insn
, 0);
1621 dest
= gen_new_qreg(QMODE_I32
);
1622 gen_op_mov32 (dest
, reg
);
1623 gen_op_subx_cc(dest
, src
);
1625 tmp
= gen_new_qreg(QMODE_I32
);
1626 gen_op_mov32 (tmp
, QREG_CC_DEST
);
1627 gen_op_update_cc_add(dest
, src
);
1628 gen_op_mov32(reg
, dest
);
1629 s
->cc_op
= CC_OP_DYNAMIC
;
1631 gen_op_or32(tmp
, tmp
, gen_im32(~CCF_Z
));
1632 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1633 s
->cc_op
= CC_OP_FLAGS
;
1641 val
= (insn
>> 9) & 7;
1644 src
= gen_im32(val
);
1645 gen_logic_cc(s
, src
);
1646 gen_ea(s
, insn
, OS_LONG
, src
, NULL
);
1657 op
= (insn
>> 6) & 3;
1661 s
->cc_op
= CC_OP_CMPB
;
1665 s
->cc_op
= CC_OP_CMPW
;
1669 s
->cc_op
= CC_OP_SUB
;
1674 src
= gen_ea(s
, insn
, opsize
, -1, NULL
);
1675 reg
= DREG(insn
, 9);
1676 dest
= gen_new_qreg(QMODE_I32
);
1677 gen_op_sub32(dest
, reg
, src
);
1678 gen_op_update_cc_add(dest
, src
);
1693 src
= gen_ea(s
, insn
, opsize
, -1, NULL
);
1694 reg
= AREG(insn
, 9);
1695 dest
= gen_new_qreg(QMODE_I32
);
1696 gen_op_sub32(dest
, reg
, src
);
1697 gen_op_update_cc_add(dest
, src
);
1698 s
->cc_op
= CC_OP_SUB
;
1708 src
= gen_ea(s
, insn
, OS_LONG
, 0, &addr
);
1709 reg
= DREG(insn
, 9);
1710 dest
= gen_new_qreg(QMODE_I32
);
1711 gen_op_xor32(dest
, src
, reg
);
1712 gen_logic_cc(s
, dest
);
1713 gen_ea(s
, insn
, OS_LONG
, dest
, &addr
);
1723 reg
= DREG(insn
, 9);
1724 dest
= gen_new_qreg(QMODE_I32
);
1726 src
= gen_ea(s
, insn
, OS_LONG
, 0, &addr
);
1727 gen_op_and32(dest
, src
, reg
);
1728 gen_ea(s
, insn
, OS_LONG
, dest
, &addr
);
1730 src
= gen_ea(s
, insn
, OS_LONG
, 0, NULL
);
1731 gen_op_and32(dest
, src
, reg
);
1732 gen_op_mov32(reg
, dest
);
1734 gen_logic_cc(s
, dest
);
1742 src
= gen_ea(s
, insn
, OS_LONG
, 0, NULL
);
1743 reg
= AREG(insn
, 9);
1744 gen_op_add32(reg
, reg
, src
);
1755 reg
= DREG(insn
, 9);
1756 src
= DREG(insn
, 0);
1757 dest
= gen_new_qreg(QMODE_I32
);
1758 gen_op_mov32 (dest
, reg
);
1759 gen_op_addx_cc(dest
, src
);
1761 tmp
= gen_new_qreg(QMODE_I32
);
1762 gen_op_mov32 (tmp
, QREG_CC_DEST
);
1763 gen_op_update_cc_add(dest
, src
);
1764 gen_op_mov32(reg
, dest
);
1765 s
->cc_op
= CC_OP_DYNAMIC
;
1767 gen_op_or32(tmp
, tmp
, gen_im32(~CCF_Z
));
1768 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, tmp
);
1769 s
->cc_op
= CC_OP_FLAGS
;
1772 DISAS_INSN(shift_im
)
1777 reg
= DREG(insn
, 0);
1778 tmp
= (insn
>> 9) & 7;
1782 gen_op_shl_im_cc(reg
, tmp
);
1783 s
->cc_op
= CC_OP_SHL
;
1786 gen_op_shr_im_cc(reg
, tmp
);
1787 s
->cc_op
= CC_OP_SHR
;
1789 gen_op_sar_im_cc(reg
, tmp
);
1790 s
->cc_op
= CC_OP_SAR
;
1795 DISAS_INSN(shift_reg
)
1801 reg
= DREG(insn
, 0);
1802 src
= DREG(insn
, 9);
1803 tmp
= gen_new_qreg(QMODE_I32
);
1804 gen_op_and32(tmp
, src
, gen_im32(63));
1806 gen_op_shl_cc(reg
, tmp
);
1807 s
->cc_op
= CC_OP_SHL
;
1810 gen_op_shr_cc(reg
, tmp
);
1811 s
->cc_op
= CC_OP_SHR
;
1813 gen_op_sar_cc(reg
, tmp
);
1814 s
->cc_op
= CC_OP_SAR
;
1821 cpu_abort(NULL
, "Unimplemented insn: ff1");
1824 static int gen_get_sr(DisasContext
*s
)
1829 ccr
= gen_get_ccr(s
);
1830 sr
= gen_new_qreg(QMODE_I32
);
1831 gen_op_and32(sr
, QREG_SR
, gen_im32(0xffe0));
1832 gen_op_or32(sr
, sr
, ccr
);
1842 ext
= lduw_code(s
->pc
);
1844 if (ext
!= 0x46FC) {
1845 gen_exception(s
, addr
, EXCP_UNSUPPORTED
);
1848 ext
= lduw_code(s
->pc
);
1850 if (IS_USER(s
) || (ext
& SR_S
) == 0) {
1851 gen_exception(s
, addr
, EXCP_PRIVILEGE
);
1854 gen_push(s
, gen_get_sr(s
));
1855 gen_set_sr_im(s
, ext
, 0);
1858 DISAS_INSN(move_from_sr
)
1864 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1868 reg
= DREG(insn
, 0);
1869 gen_partset_reg(OS_WORD
, reg
, sr
);
1872 DISAS_INSN(move_to_sr
)
1875 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1878 gen_set_sr(s
, insn
, 0);
1882 DISAS_INSN(move_from_usp
)
1885 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1888 /* TODO: Implement USP. */
1889 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1892 DISAS_INSN(move_to_usp
)
1895 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1898 /* TODO: Implement USP. */
1899 gen_exception(s
, s
->pc
- 2, EXCP_ILLEGAL
);
1905 gen_jmp(s
, gen_im32(s
->pc
));
1914 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1918 ext
= lduw_code(s
->pc
);
1921 gen_set_sr_im(s
, ext
, 0);
1922 disas_halt(s
, insn
);
1928 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1931 gen_exception(s
, s
->pc
- 2, EXCP_RTE
);
1940 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1944 ext
= lduw_code(s
->pc
);
1948 reg
= AREG(ext
, 12);
1950 reg
= DREG(ext
, 12);
1952 gen_op_movec(gen_im32(ext
& 0xfff), reg
);
1959 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1962 /* ICache fetch. Implement as no-op. */
1968 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1971 /* Cache push/invalidate. Implement as no-op. */
1976 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1982 gen_exception(s
, s
->pc
- 2, EXCP_PRIVILEGE
);
1985 /* TODO: Implement wdebug. */
1986 qemu_assert(0, "WDEBUG not implemented");
1991 gen_exception(s
, s
->pc
- 2, EXCP_TRAP0
+ (insn
& 0xf));
1994 /* ??? FP exceptions are not implemented. Most exceptions are deferred until
1995 immediately before the next FP instruction is executed. */
2006 ext
= lduw_code(s
->pc
);
2008 opmode
= ext
& 0x7f;
2009 switch ((ext
>> 13) & 7) {
2014 case 3: /* fmove out */
2017 /* ??? TODO: Proper behavior on overflow. */
2018 switch ((ext
>> 10) & 7) {
2021 res
= gen_new_qreg(QMODE_I32
);
2022 gen_op_f64_to_i32(res
, src
);
2026 res
= gen_new_qreg(QMODE_F32
);
2027 gen_op_f64_to_f32(res
, src
);
2031 res
= gen_new_qreg(QMODE_I32
);
2032 gen_op_f64_to_i32(res
, src
);
2040 res
= gen_new_qreg(QMODE_I32
);
2041 gen_op_f64_to_i32(res
, src
);
2046 gen_ea(s
, insn
, opsize
, res
, NULL
);
2048 case 4: /* fmove to control register. */
2049 switch ((ext
>> 10) & 7) {
2051 /* Not implemented. Ignore writes. */
2056 cpu_abort(NULL
, "Unimplemented: fmove to control %d",
2060 case 5: /* fmove from control register. */
2061 switch ((ext
>> 10) & 7) {
2063 /* Not implemented. Always return zero. */
2069 cpu_abort(NULL
, "Unimplemented: fmove from control %d",
2073 gen_ea(s
, insn
, OS_LONG
, res
, NULL
);
2075 case 6: /* fmovem */
2080 if ((ext
& 0x1f00) != 0x1000 || (ext
& 0xff) == 0)
2082 src
= gen_lea(s
, insn
, OS_LONG
);
2083 addr
= gen_new_qreg(QMODE_I32
);
2084 gen_op_mov32(addr
, src
);
2089 if (ext
& (1 << 13)) {
2091 gen_st(s
, f64
, addr
, dest
);
2094 gen_ld(s
, f64
, dest
, addr
);
2096 if (ext
& (mask
- 1))
2097 gen_op_add32(addr
, addr
, gen_im32(8));
2105 if (ext
& (1 << 14)) {
2108 /* Source effective address. */
2109 switch ((ext
>> 10) & 7) {
2110 case 0: opsize
= OS_LONG
; break;
2111 case 1: opsize
= OS_SINGLE
; break;
2112 case 4: opsize
= OS_WORD
; break;
2113 case 5: opsize
= OS_DOUBLE
; break;
2114 case 6: opsize
= OS_BYTE
; break;
2118 tmp
= gen_ea(s
, insn
, opsize
, -1, NULL
);
2119 if (opsize
== OS_DOUBLE
) {
2122 src
= gen_new_qreg(QMODE_F64
);
2127 gen_op_i32_to_f64(src
, tmp
);
2130 gen_op_f32_to_f64(src
, tmp
);
2135 /* Source register. */
2136 src
= FREG(ext
, 10);
2138 dest
= FREG(ext
, 7);
2139 res
= gen_new_qreg(QMODE_F64
);
2141 gen_op_movf64(res
, dest
);
2144 case 0: case 0x40: case 0x44: /* fmove */
2145 gen_op_movf64(res
, src
);
2148 gen_op_iround_f64(res
, src
);
2151 case 3: /* fintrz */
2152 gen_op_itrunc_f64(res
, src
);
2155 case 4: case 0x41: case 0x45: /* fsqrt */
2156 gen_op_sqrtf64(res
, src
);
2158 case 0x18: case 0x58: case 0x5c: /* fabs */
2159 gen_op_absf64(res
, src
);
2161 case 0x1a: case 0x5a: case 0x5e: /* fneg */
2162 gen_op_chsf64(res
, src
);
2164 case 0x20: case 0x60: case 0x64: /* fdiv */
2165 gen_op_divf64(res
, res
, src
);
2167 case 0x22: case 0x62: case 0x66: /* fadd */
2168 gen_op_addf64(res
, res
, src
);
2170 case 0x23: case 0x63: case 0x67: /* fmul */
2171 gen_op_mulf64(res
, res
, src
);
2173 case 0x28: case 0x68: case 0x6c: /* fsub */
2174 gen_op_subf64(res
, res
, src
);
2176 case 0x38: /* fcmp */
2177 gen_op_sub_cmpf64(res
, res
, src
);
2181 case 0x3a: /* ftst */
2182 gen_op_movf64(res
, src
);
2190 if (opmode
& 0x40) {
2191 if ((opmode
& 0x4) != 0)
2193 } else if ((s
->fpcr
& M68K_FPCR_PREC
) == 0) {
2200 tmp
= gen_new_qreg(QMODE_F32
);
2201 gen_op_f64_to_f32(tmp
, res
);
2202 gen_op_f32_to_f64(res
, tmp
);
2204 gen_op_fp_result(res
);
2206 gen_op_movf64(dest
, res
);
2211 disas_undef_fpu(s
, insn
);
2223 offset
= ldsw_code(s
->pc
);
2225 if (insn
& (1 << 6)) {
2226 offset
= (offset
<< 16) | lduw_code(s
->pc
);
2230 l1
= gen_new_label();
2231 /* TODO: Raise BSUN exception. */
2232 flag
= gen_new_qreg(QMODE_I32
);
2233 zero
= gen_new_qreg(QMODE_F64
);
2234 gen_op_zerof64(zero
);
2235 gen_op_compare_quietf64(flag
, QREG_FP_RESULT
, zero
);
2236 /* Jump to l1 if condition is true. */
2237 switch (insn
& 0xf) {
2240 case 1: /* eq (=0) */
2241 gen_op_jmp_z32(flag
, l1
);
2243 case 2: /* ogt (=1) */
2244 gen_op_sub32(flag
, flag
, gen_im32(1));
2245 gen_op_jmp_z32(flag
, l1
);
2247 case 3: /* oge (=0 or =1) */
2248 gen_op_jmp_z32(flag
, l1
);
2249 gen_op_sub32(flag
, flag
, gen_im32(1));
2250 gen_op_jmp_z32(flag
, l1
);
2252 case 4: /* olt (=-1) */
2253 gen_op_jmp_s32(flag
, l1
);
2255 case 5: /* ole (=-1 or =0) */
2256 gen_op_jmp_s32(flag
, l1
);
2257 gen_op_jmp_z32(flag
, l1
);
2259 case 6: /* ogl (=-1 or =1) */
2260 gen_op_jmp_s32(flag
, l1
);
2261 gen_op_sub32(flag
, flag
, gen_im32(1));
2262 gen_op_jmp_z32(flag
, l1
);
2264 case 7: /* or (=2) */
2265 gen_op_sub32(flag
, flag
, gen_im32(2));
2266 gen_op_jmp_z32(flag
, l1
);
2268 case 8: /* un (<2) */
2269 gen_op_sub32(flag
, flag
, gen_im32(2));
2270 gen_op_jmp_s32(flag
, l1
);
2272 case 9: /* ueq (=0 or =2) */
2273 gen_op_jmp_z32(flag
, l1
);
2274 gen_op_sub32(flag
, flag
, gen_im32(2));
2275 gen_op_jmp_z32(flag
, l1
);
2277 case 10: /* ugt (>0) */
2278 /* ??? Add jmp_gtu. */
2279 gen_op_sub32(flag
, flag
, gen_im32(1));
2280 gen_op_jmp_ns32(flag
, l1
);
2282 case 11: /* uge (>=0) */
2283 gen_op_jmp_ns32(flag
, l1
);
2285 case 12: /* ult (=-1 or =2) */
2286 gen_op_jmp_s32(flag
, l1
);
2287 gen_op_sub32(flag
, flag
, gen_im32(2));
2288 gen_op_jmp_z32(flag
, l1
);
2290 case 13: /* ule (!=1) */
2291 gen_op_sub32(flag
, flag
, gen_im32(1));
2292 gen_op_jmp_nz32(flag
, l1
);
2294 case 14: /* ne (!=0) */
2295 gen_op_jmp_nz32(flag
, l1
);
2298 gen_op_mov32(flag
, gen_im32(1));
2301 gen_jmp_tb(s
, 0, s
->pc
);
2303 gen_jmp_tb(s
, 1, addr
+ offset
);
2306 DISAS_INSN(frestore
)
2308 /* TODO: Implement frestore. */
2309 qemu_assert(0, "FRESTORE not implemented");
2314 /* TODO: Implement fsave. */
2315 qemu_assert(0, "FSAVE not implemented");
/* Handler dispatch table indexed by the full 16-bit opcode word;
   populated by register_opcode(). */
2318 static disas_proc opcode_table
[65536];
2321 register_opcode (disas_proc proc
, uint16_t opcode
, uint16_t mask
)
2327 /* Sanity check. All set bits must be included in the mask. */
2330 /* This could probably be cleverer. For now just optimize the case where
2331 the top bits are known. */
2332 /* Find the first zero bit in the mask. */
2334 while ((i
& mask
) != 0)
2336 /* Iterate over all combinations of this and lower bits. */
2341 from
= opcode
& ~(i
- 1);
2343 for (i
= from
; i
< to
; i
++) {
2344 if ((i
& mask
) == opcode
)
2345 opcode_table
[i
] = proc
;
2349 /* Register m68k opcode handlers. Order is important.
2350 Later insn override earlier ones. */
2352 register_m68k_insns (m68k_def_t
*def
)
2356 iflags
= def
->insns
;
2357 #define INSN(name, opcode, mask, isa) \
2358 if (iflags & M68K_INSN_##isa) \
2359 register_opcode(disas_##name, 0x##opcode, 0x##mask)
2360 INSN(undef
, 0000, 0000, CF_A
);
2361 INSN(arith_im
, 0080, fff8
, CF_A
);
2362 INSN(bitrev
, 00c0
, fff8
, CF_C
);
2363 INSN(bitop_reg
, 0100, f1c0
, CF_A
);
2364 INSN(bitop_reg
, 0140, f1c0
, CF_A
);
2365 INSN(bitop_reg
, 0180, f1c0
, CF_A
);
2366 INSN(bitop_reg
, 01c0
, f1c0
, CF_A
);
2367 INSN(arith_im
, 0280, fff8
, CF_A
);
2368 INSN(byterev
, 02c0
, fff8
, CF_A
);
2369 INSN(arith_im
, 0480, fff8
, CF_A
);
2370 INSN(ff1
, 04c0
, fff8
, CF_C
);
2371 INSN(arith_im
, 0680, fff8
, CF_A
);
2372 INSN(bitop_im
, 0800, ffc0
, CF_A
);
2373 INSN(bitop_im
, 0840, ffc0
, CF_A
);
2374 INSN(bitop_im
, 0880, ffc0
, CF_A
);
2375 INSN(bitop_im
, 08c0
, ffc0
, CF_A
);
2376 INSN(arith_im
, 0a80
, fff8
, CF_A
);
2377 INSN(arith_im
, 0c00
, ff38
, CF_A
);
2378 INSN(move
, 1000, f000
, CF_A
);
2379 INSN(move
, 2000, f000
, CF_A
);
2380 INSN(move
, 3000, f000
, CF_A
);
2381 INSN(strldsr
, 40e7
, ffff
, CF_A
);
2382 INSN(negx
, 4080, fff8
, CF_A
);
2383 INSN(move_from_sr
, 40c0
, fff8
, CF_A
);
2384 INSN(lea
, 41c0
, f1c0
, CF_A
);
2385 INSN(clr
, 4200, ff00
, CF_A
);
2386 INSN(undef
, 42c0
, ffc0
, CF_A
);
2387 INSN(move_from_ccr
, 42c0
, fff8
, CF_A
);
2388 INSN(neg
, 4480, fff8
, CF_A
);
2389 INSN(move_to_ccr
, 44c0
, ffc0
, CF_A
);
2390 INSN(not, 4680, fff8
, CF_A
);
2391 INSN(move_to_sr
, 46c0
, ffc0
, CF_A
);
2392 INSN(pea
, 4840, ffc0
, CF_A
);
2393 INSN(swap
, 4840, fff8
, CF_A
);
2394 INSN(movem
, 48c0
, fbc0
, CF_A
);
2395 INSN(ext
, 4880, fff8
, CF_A
);
2396 INSN(ext
, 48c0
, fff8
, CF_A
);
2397 INSN(ext
, 49c0
, fff8
, CF_A
);
2398 INSN(tst
, 4a00
, ff00
, CF_A
);
2399 INSN(tas
, 4ac0
, ffc0
, CF_B
);
2400 INSN(halt
, 4ac8
, ffff
, CF_A
);
2401 INSN(pulse
, 4acc
, ffff
, CF_A
);
2402 INSN(illegal
, 4afc
, ffff
, CF_A
);
2403 INSN(mull
, 4c00
, ffc0
, CF_A
);
2404 INSN(divl
, 4c40
, ffc0
, CF_A
);
2405 INSN(sats
, 4c80
, fff8
, CF_B
);
2406 INSN(trap
, 4e40
, fff0
, CF_A
);
2407 INSN(link
, 4e50
, fff8
, CF_A
);
2408 INSN(unlk
, 4e58
, fff8
, CF_A
);
2409 INSN(move_to_usp
, 4e60
, fff8
, CF_B
);
2410 INSN(move_from_usp
, 4e68
, fff8
, CF_B
);
2411 INSN(nop
, 4e71
, ffff
, CF_A
);
2412 INSN(stop
, 4e72
, ffff
, CF_A
);
2413 INSN(rte
, 4e73
, ffff
, CF_A
);
2414 INSN(rts
, 4e75
, ffff
, CF_A
);
2415 INSN(movec
, 4e7b
, ffff
, CF_A
);
2416 INSN(jump
, 4e80
, ffc0
, CF_A
);
2417 INSN(jump
, 4ec0
, ffc0
, CF_A
);
2418 INSN(addsubq
, 5180, f1c0
, CF_A
);
2419 INSN(scc
, 50c0
, f0f8
, CF_A
);
2420 INSN(addsubq
, 5080, f1c0
, CF_A
);
2421 INSN(tpf
, 51f8
, fff8
, CF_A
);
2422 INSN(branch
, 6000, f000
, CF_A
);
2423 INSN(moveq
, 7000, f100
, CF_A
);
2424 INSN(mvzs
, 7100, f100
, CF_B
);
2425 INSN(or, 8000, f000
, CF_A
);
2426 INSN(divw
, 80c0
, f0c0
, CF_A
);
2427 INSN(addsub
, 9000, f000
, CF_A
);
2428 INSN(subx
, 9180, f1f8
, CF_A
);
2429 INSN(suba
, 91c0
, f1c0
, CF_A
);
2430 INSN(undef_mac
, a000
, f000
, CF_A
);
2431 INSN(mov3q
, a140
, f1c0
, CF_B
);
2432 INSN(cmp
, b000
, f1c0
, CF_B
); /* cmp.b */
2433 INSN(cmp
, b040
, f1c0
, CF_B
); /* cmp.w */
2434 INSN(cmpa
, b0c0
, f1c0
, CF_B
); /* cmpa.w */
2435 INSN(cmp
, b080
, f1c0
, CF_A
);
2436 INSN(cmpa
, b1c0
, f1c0
, CF_A
);
2437 INSN(eor
, b180
, f1c0
, CF_A
);
2438 INSN(and, c000
, f000
, CF_A
);
2439 INSN(mulw
, c0c0
, f0c0
, CF_A
);
2440 INSN(addsub
, d000
, f000
, CF_A
);
2441 INSN(addx
, d180
, f1f8
, CF_A
);
2442 INSN(adda
, d1c0
, f1c0
, CF_A
);
2443 INSN(shift_im
, e080
, f0f0
, CF_A
);
2444 INSN(shift_reg
, e0a0
, f0f0
, CF_A
);
2445 INSN(undef_fpu
, f000
, f000
, CF_A
);
2446 INSN(fpu
, f200
, ffc0
, CF_FPU
);
2447 INSN(fbcc
, f280
, ffc0
, CF_FPU
);
2448 INSN(frestore
, f340
, ffc0
, CF_FPU
);
2449 INSN(fsave
, f340
, ffc0
, CF_FPU
);
2450 INSN(intouch
, f340
, ffc0
, CF_A
);
2451 INSN(cpushl
, f428
, ff38
, CF_A
);
2452 INSN(wddata
, fb00
, ff00
, CF_A
);
2453 INSN(wdebug
, fbc0
, ffc0
, CF_A
);
2457 /* ??? Some of this implementation is not exception safe. We should always
2458 write back the result to memory before setting the condition codes. */
2459 static void disas_m68k_insn(CPUState
* env
, DisasContext
*s
)
2463 insn
= lduw_code(s
->pc
);
2466 opcode_table
[insn
](s
, insn
);
2470 /* Save the result of a floating point operation. */
2471 static void expand_op_fp_result(qOP
*qop
)
2473 gen_op_movf64(QREG_FP_RESULT
, qop
->args
[0]);
2476 /* Dummy op to indicate that the flags have been set. */
2477 static void expand_op_flags_set(qOP
*qop
)
2481 /* Convert the confition codes into CC_OP_FLAGS format. */
2482 static void expand_op_flush_flags(qOP
*qop
)
2486 if (qop
->args
[0] == CC_OP_DYNAMIC
)
2487 cc_opreg
= QREG_CC_OP
;
2489 cc_opreg
= gen_im32(qop
->args
[0]);
2490 gen_op_helper32(QREG_NULL
, cc_opreg
, HELPER_flush_flags
);
2493 /* Set CC_DEST after a logical or direct flag setting operation. */
2494 static void expand_op_logic_cc(qOP
*qop
)
2496 gen_op_mov32(QREG_CC_DEST
, qop
->args
[0]);
2499 /* Set CC_SRC and CC_DEST after an arithmetic operation. */
2500 static void expand_op_update_cc_add(qOP
*qop
)
2502 gen_op_mov32(QREG_CC_DEST
, qop
->args
[0]);
2503 gen_op_mov32(QREG_CC_SRC
, qop
->args
[1]);
2506 /* Update the X flag. */
2507 static void expand_op_update_xflag(qOP
*qop
)
2512 arg0
= qop
->args
[0];
2513 arg1
= qop
->args
[1];
2514 if (arg1
== QREG_NULL
) {
2516 gen_op_mov32(QREG_CC_X
, arg0
);
2518 /* CC_X = arg0 < (unsigned)arg1. */
2519 gen_op_set_ltu32(QREG_CC_X
, arg0
, arg1
);
2523 /* Set arg0 to the contents of the X flag. */
2524 static void expand_op_get_xflag(qOP
*qop
)
2526 gen_op_mov32(qop
->args
[0], QREG_CC_X
);
2529 /* Expand a shift by immediate. The ISA only allows shifts by 1-8, so we
2530 already know the shift is within range. */
/* NOTE(review): this extract is missing interior lines (the reg/val/
   tmp/im declarations, argument unpacking, and the right/arith branch
   structure).  Comments below cover only the visible statements. */
2531 static inline void expand_shift_im(qOP
*qop
, int right
, int arith
)
/* Keep a copy of the pre-shift value and record operands for lazy
   condition-code evaluation. */
2541 val
= gen_new_qreg(QMODE_I32
);
2542 gen_op_mov32(val
, reg
);
2543 gen_op_mov32(QREG_CC_DEST
, val
);
2544 gen_op_mov32(QREG_CC_SRC
, tmp
);
/* Arithmetic right shift variant. */
2547 gen_op_sar32(reg
, val
, tmp
);
/* Logical right shift variant. */
2549 gen_op_shr32(reg
, val
, tmp
);
/* For right shifts: bit (im - 1) is the last bit shifted out --
   presumably used for X below; confirm against the missing lines. */
2554 tmp
= gen_im32(im
- 1);
/* Left shift variant: bit (32 - im) of the old value becomes X. */
2556 gen_op_shl32(reg
, val
, tmp
);
2557 tmp
= gen_im32(32 - im
);
/* Extract the last bit shifted out into the X flag. */
2559 if (tmp
!= QREG_NULL
)
2560 gen_op_shr32(val
, val
, tmp
);
2561 gen_op_and32(QREG_CC_X
, val
, gen_im32(1));
2564 static void expand_op_shl_im_cc(qOP
*qop
)
2566 expand_shift_im(qop
, 0, 0);
2569 static void expand_op_shr_im_cc(qOP
*qop
)
2571 expand_shift_im(qop
, 1, 0);
2574 static void expand_op_sar_im_cc(qOP
*qop
)
2576 expand_shift_im(qop
, 1, 1);
2579 /* Expand a shift by register. */
2580 /* ??? This gives incorrect answers for shifts by 0 or >= 32 */
/* NOTE(review): this extract is missing interior lines (declarations,
   reg/args unpacking, and the right/arith branch structure).  Comments
   below cover only the visible statements. */
2581 static inline void expand_shift_reg(qOP
*qop
, int right
, int arith
)
/* Shift count comes from args[1]. */
2589 shift
= qop
->args
[1];
/* Keep a copy of the pre-shift value and record operands for lazy
   condition-code evaluation. */
2590 val
= gen_new_qreg(QMODE_I32
);
2591 gen_op_mov32(val
, reg
);
2592 gen_op_mov32(QREG_CC_DEST
, val
);
2593 gen_op_mov32(QREG_CC_SRC
, shift
);
2594 tmp
= gen_new_qreg(QMODE_I32
);
/* Arithmetic right shift variant. */
2597 gen_op_sar32(reg
, val
, shift
)
;
/* Logical right shift variant. */
2599 gen_op_shr32(reg
, val
, shift
);
/* Right shifts: the last bit shifted out is bit (shift - 1). */
2601 gen_op_sub32(tmp
, shift
, gen_im32(1));
/* Left shift: the last bit shifted out is bit (31 - (shift - 1)). */
2603 gen_op_shl32(reg
, val
, shift
);
2604 gen_op_sub32(tmp
, gen_im32(31), shift
);
/* Move the last bit shifted out into bit 0 and store it as X. */
2606 gen_op_shl32(val
, val
, tmp
);
2607 gen_op_and32(QREG_CC_X
, val
, gen_im32(1));
2610 static void expand_op_shl_cc(qOP
*qop
)
2612 expand_shift_reg(qop
, 0, 0);
2615 static void expand_op_shr_cc(qOP
*qop
)
2617 expand_shift_reg(qop
, 1, 0);
2620 static void expand_op_sar_cc(qOP
*qop
)
2622 expand_shift_reg(qop
, 1, 1);
2625 /* Set the Z flag to (arg0 & arg1) == 0. */
2626 static void expand_op_btest(qOP
*qop
)
2631 l1
= gen_new_label();
2632 tmp
= gen_new_qreg(QMODE_I32
);
2633 gen_op_and32(tmp
, qop
->args
[0], qop
->args
[1]);
2634 gen_op_and32(QREG_CC_DEST
, QREG_CC_DEST
, gen_im32(~(uint32_t)CCF_Z
));
2635 gen_op_jmp_nz32(tmp
, l1
);
2636 gen_op_or32(QREG_CC_DEST
, QREG_CC_DEST
, gen_im32(CCF_Z
));
2640 /* arg0 += arg1 + CC_X */
/* Expansion of the target-specific add-with-extend op.  The add is
   done first; then CC_X selects whether the carry-in is added, and
   CC_OP plus the new X flag are recorded for each path.
   NOTE(review): this extract is missing interior lines (the l1/l2
   declarations and the jump/set_label glue between the two paths) --
   do not assume the visible control flow is complete. */
2641 static void expand_op_addx_cc(qOP
*qop
)
2643 int arg0
= qop
->args
[0];
2644 int arg1
= qop
->args
[1];
/* arg0 += arg1, carry-in handled below. */
2647 gen_op_add32 (arg0
, arg0
, arg1
);
2648 l1
= gen_new_label();
2649 l2
= gen_new_label();
/* Skip the carry-in when X is clear. */
2650 gen_op_jmp_z32(QREG_CC_X
, l1
);
2651 gen_op_add32(arg0
, arg0
, gen_im32(1));
2652 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_ADDX
));
/* With carry-in: new X = (result <= arg1) unsigned. */
2653 gen_op_set_leu32(QREG_CC_X
, arg0
, arg1
);
/* Without carry-in: new X = (result < arg1) unsigned. */
2656 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_ADD
));
2657 gen_op_set_ltu32(QREG_CC_X
, arg0
, arg1
);
2661 /* arg0 -= arg1 + CC_X */
/* Expansion of the target-specific subtract-with-extend op.  Unlike
   addx, the new X flag is computed BEFORE the borrow-in/subtract on
   each path.
   NOTE(review): this extract is missing interior lines (the l1/l2
   declarations and the jump/set_label glue between the two paths) --
   do not assume the visible control flow is complete. */
2662 static void expand_op_subx_cc(qOP
*qop
)
2664 int arg0
= qop
->args
[0];
2665 int arg1
= qop
->args
[1];
2668 l1
= gen_new_label();
2669 l2
= gen_new_label();
/* Skip the borrow-in when X is clear. */
2670 gen_op_jmp_z32(QREG_CC_X
, l1
);
/* With borrow-in: new X = (arg0 <= arg1) unsigned, then arg0 -= 1. */
2671 gen_op_set_leu32(QREG_CC_X
, arg0
, arg1
);
2672 gen_op_sub32(arg0
, arg0
, gen_im32(1));
2673 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_SUBX
));
/* Without borrow-in: new X = (arg0 < arg1) unsigned. */
2676 gen_op_set_ltu32(QREG_CC_X
, arg0
, arg1
);
2677 gen_op_mov32(QREG_CC_OP
, gen_im32(CC_OP_SUB
));
/* Finally perform the subtraction itself. */
2679 gen_op_sub32 (arg0
, arg0
, arg1
);
2682 /* Expand target specific ops to generic qops. */
2683 static void expand_target_qops(void)
2689 /* Copy the list of qops, expanding target specific ops as we go. */
2690 qop
= gen_first_qop
;
2691 gen_first_qop
= NULL
;
2692 gen_last_qop
= NULL
;
2693 for (; qop
; qop
= next
) {
2696 if (c
< FIRST_TARGET_OP
) {
2697 qop
->prev
= gen_last_qop
;
2700 gen_last_qop
->next
= qop
;
2702 gen_first_qop
= qop
;
2707 #define DEF(name, nargs, barrier) \
2708 case INDEX_op_##name: \
2709 expand_op_##name(qop); \
2711 #include "qop-target.def"
2714 cpu_abort(NULL
, "Unexpanded target qop");
2719 /* ??? Implement this. */
2721 optimize_flags(void)
2726 /* generate intermediate code for basic block 'tb'. */
2728 gen_intermediate_code_internal(CPUState
*env
, TranslationBlock
*tb
,
2731 DisasContext dc1
, *dc
= &dc1
;
2732 uint16_t *gen_opc_end
;
2734 target_ulong pc_start
;
2738 /* generate intermediate code */
2743 gen_opc_ptr
= gen_opc_buf
;
2744 gen_opc_end
= gen_opc_buf
+ OPC_MAX_SIZE
;
2745 gen_opparam_ptr
= gen_opparam_buf
;
2747 dc
->is_jmp
= DISAS_NEXT
;
2749 dc
->cc_op
= CC_OP_DYNAMIC
;
2750 dc
->singlestep_enabled
= env
->singlestep_enabled
;
2751 dc
->fpcr
= env
->fpcr
;
2752 dc
->user
= (env
->sr
& SR_S
) == 0;
2757 pc_offset
= dc
->pc
- pc_start
;
2758 gen_throws_exception
= NULL
;
2759 if (env
->nb_breakpoints
> 0) {
2760 for(j
= 0; j
< env
->nb_breakpoints
; j
++) {
2761 if (env
->breakpoints
[j
] == dc
->pc
) {
2762 gen_exception(dc
, dc
->pc
, EXCP_DEBUG
);
2763 dc
->is_jmp
= DISAS_JUMP
;
2771 j
= gen_opc_ptr
- gen_opc_buf
;
2775 gen_opc_instr_start
[lj
++] = 0;
2777 gen_opc_pc
[lj
] = dc
->pc
;
2778 gen_opc_instr_start
[lj
] = 1;
2780 last_cc_op
= dc
->cc_op
;
2781 disas_m68k_insn(env
, dc
);
2782 } while (!dc
->is_jmp
&& gen_opc_ptr
< gen_opc_end
&&
2783 !env
->singlestep_enabled
&&
2784 (pc_offset
) < (TARGET_PAGE_SIZE
- 32));
2786 if (__builtin_expect(env
->singlestep_enabled
, 0)) {
2787 /* Make sure the pc is updated, and raise a debug exception. */
2789 gen_flush_cc_op(dc
);
2790 gen_op_mov32(QREG_PC
, gen_im32((long)dc
->pc
));
2792 gen_op_raise_exception(EXCP_DEBUG
);
2794 switch(dc
->is_jmp
) {
2796 gen_flush_cc_op(dc
);
2797 gen_jmp_tb(dc
, 0, dc
->pc
);
2802 gen_flush_cc_op(dc
);
2803 /* indicate that the hash table must be used to find the next TB */
2804 gen_op_mov32(QREG_T0
, gen_im32(0));
2808 /* nothing more to generate */
2812 *gen_opc_ptr
= INDEX_op_end
;
2815 if (loglevel
& CPU_LOG_TB_IN_ASM
) {
2816 fprintf(logfile
, "----------------\n");
2817 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc_start
));
2818 target_disas(logfile
, pc_start
, dc
->pc
- pc_start
, 0);
2819 fprintf(logfile
, "\n");
2820 if (loglevel
& (CPU_LOG_TB_OP
)) {
2821 fprintf(logfile
, "OP:\n");
2822 dump_ops(gen_opc_buf
, gen_opparam_buf
);
2823 fprintf(logfile
, "\n");
2828 j
= gen_opc_ptr
- gen_opc_buf
;
2831 gen_opc_instr_start
[lj
++] = 0;
2834 tb
->size
= dc
->pc
- pc_start
;
2838 //expand_target_qops();
2842 int gen_intermediate_code(CPUState
*env
, TranslationBlock
*tb
)
2844 return gen_intermediate_code_internal(env
, tb
, 0);
2847 int gen_intermediate_code_pc(CPUState
*env
, TranslationBlock
*tb
)
2849 return gen_intermediate_code_internal(env
, tb
, 1);
/* Reset the CPU state: zero everything up to (but not including) the
   breakpoints field, then reinitialize the few fields that need
   non-zero values.
   NOTE(review): this extract is missing the lines inside the
   !CONFIG_USER_ONLY block (and the matching #endif) -- presumably the
   system-mode SR initialization; verify against the full file. */
2852 void cpu_reset(CPUM68KState
*env
)
2854 memset(env
, 0, offsetof(CPUM68KState
, breakpoints
));
2855 #if !defined (CONFIG_USER_ONLY)
2858 /* ??? FP regs should be initialized to NaN. */
/* Flags start out in the already-evaluated CC_OP_FLAGS form. */
2859 env
->cc_op
= CC_OP_FLAGS
;
2860 /* TODO: We should set PC from the interrupt vector. */
2865 CPUM68KState
*cpu_m68k_init(void)
2869 env
= malloc(sizeof(CPUM68KState
));
2878 void cpu_m68k_close(CPUM68KState
*env
)
2883 int cpu_m68k_set_model(CPUM68KState
*env
, const char * name
)
2887 for (def
= m68k_cpu_defs
; def
->name
; def
++) {
2888 if (strcmp(def
->name
, name
) == 0)
2894 register_m68k_insns(def
);
2899 void cpu_dump_state(CPUState
*env
, FILE *f
,
2900 int (*cpu_fprintf
)(FILE *f
, const char *fmt
, ...),
2906 for (i
= 0; i
< 8; i
++)
2908 u
.d
= env
->fregs
[i
];
2909 cpu_fprintf (f
, "D%d = %08x A%d = %08x F%d = %08x%08x (%12g)\n",
2910 i
, env
->dregs
[i
], i
, env
->aregs
[i
],
2911 i
, u
.l
.upper
, u
.l
.lower
, u
.d
);
2913 cpu_fprintf (f
, "PC = %08x ", env
->pc
);
2915 cpu_fprintf (f
, "SR = %04x %c%c%c%c%c ", sr
, (sr
& 0x10) ? 'X' : '-',
2916 (sr
& CCF_N
) ? 'N' : '-', (sr
& CCF_Z
) ? 'Z' : '-',
2917 (sr
& CCF_V
) ? 'V' : '-', (sr
& CCF_C
) ? 'C' : '-');
2918 cpu_fprintf (f
, "FPRESULT = %12g\n", env
->fp_result
);