/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/* x86 EFLAGS register bit masks */
#define TF_MASK  0x00000100 /* trap flag (single step) */
#define IF_MASK  0x00000200 /* interrupt enable flag */
#define DF_MASK  0x00000400 /* direction flag */
#define IOPL_MASK 0x00003000 /* I/O privilege level (2 bits) */
#define NT_MASK  0x00004000 /* nested task flag */
#define RF_MASK  0x00010000 /* resume flag */
#define VM_MASK  0x00020000 /* virtual-8086 mode flag */
#define AC_MASK  0x00040000 /* alignment check flag */
#define VIF_MASK 0x00080000 /* virtual interrupt flag */
#define VIP_MASK 0x00100000 /* virtual interrupt pending */
#define ID_MASK  0x00200000 /* CPUID detection flag */
/* exception numbers (matching the x86 hardware exception vectors) */
#define EXCP05_BOUND	5  /* BOUND range exceeded */
#define EXCP06_ILLOP	6  /* invalid (undefined) opcode */
#define EXCP0B_NOSEG	11 /* segment not present */
#define EXCP0C_STACK	12 /* stack-segment fault */
#define EXCP0E_PAGE	14 /* page fault */
#define EXCP10_COPR	16 /* x87 FPU floating-point error */
#define EXCP11_ALGN	17 /* alignment check */
#define EXCP12_MCHK	18 /* machine check */

/* out-of-range value so it cannot collide with a real vector */
#define EXCP_INTERRUPT 256 /* async interruption */
93 CC_OP_DYNAMIC
, /* must use dynamic code to get cc_op */
94 CC_OP_EFLAGS
, /* all cc are explicitely computed, CC_SRC = flags */
95 CC_OP_MUL
, /* modify all flags, C, O = (CC_SRC != 0) */
97 CC_OP_ADDB
, /* modify all flags, CC_DST = res, CC_SRC = src1 */
101 CC_OP_ADCB
, /* modify all flags, CC_DST = res, CC_SRC = src1 */
105 CC_OP_SUBB
, /* modify all flags, CC_DST = res, CC_SRC = src1 */
109 CC_OP_SBBB
, /* modify all flags, CC_DST = res, CC_SRC = src1 */
113 CC_OP_LOGICB
, /* modify all flags, CC_DST = res */
117 CC_OP_INCB
, /* modify all flags except, CC_DST = res, CC_SRC = C */
121 CC_OP_DECB
, /* modify all flags except, CC_DST = res, CC_SRC = C */
125 CC_OP_SHLB
, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
129 CC_OP_SARB
, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
/* When USE_X86LDOUBLE is defined, the emulated FPU registers use the
   host "long double" type (the 80-bit extended format on x86 hosts);
   otherwise plain "double" is used as an approximation. */
#define USE_X86LDOUBLE

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif
146 typedef struct SegmentCache
{
152 typedef struct SegmentDescriptorTable
{
155 /* this is the returned base when reading the register, just to
156 avoid that the emulated program modifies it */
157 unsigned long emu_base
;
158 } SegmentDescriptorTable
;
160 typedef struct CPUX86State
{
161 /* standard registers */
164 uint32_t eflags
; /* eflags register. During CPU emulation, CC
165 flags and DF are set to zero because they are
168 /* emulator internal eflags handling */
172 int32_t df
; /* D flag : 1 if D = 0, -1 if D = 1 */
175 unsigned int fpstt
; /* top of stack index */
178 uint8_t fptags
[8]; /* 0 = valid, 1 = empty */
179 CPU86_LDouble fpregs
[8];
181 /* emulator internal variables */
191 uint32_t segs
[6]; /* selector values */
192 SegmentCache seg_cache
[6]; /* info taken from LDT/GDT */
193 SegmentDescriptorTable gdt
;
194 SegmentDescriptorTable ldt
;
195 SegmentDescriptorTable idt
;
197 /* exception/interrupt handling */
200 int interrupt_request
;
/* all CPU memory access use these macros */

/* load an unsigned byte from host memory */
static inline int ldub(void *ptr)
{
    uint8_t *p = ptr;
    return *p;
}
/* load a signed byte, sign-extended to int */
static inline int ldsb(void *ptr)
{
    int8_t *p = ptr;
    return *p;
}
/* store the low byte of v to host memory (body restored after the
   extraction lost it; mirrors the stw/stl store helpers below) */
static inline void stb(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
222 #ifdef WORDS_BIGENDIAN
/* conservative code for little endian unaligned accesses */
/* load an unsigned little-endian 16-bit value; the PowerPC path uses
   the byte-reversed load opcode, the generic path assembles bytes */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}
/* load a signed little-endian 16-bit value, sign-extended to int */
static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}
/* load a little-endian 32-bit value */
static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}
/* load a little-endian 64-bit value as two 32-bit halves; the
   byte-offset arithmetic is done on uint8_t* (void* arithmetic is a
   GCC extension, not standard C) */
static inline uint64_t ldq(void *ptr)
{
    uint32_t v1, v2;
    v1 = ldl(ptr);
    v2 = ldl((uint8_t *)ptr + 4);
    return v1 | ((uint64_t)v2 << 32);
}
/* store a 16-bit value in little-endian byte order */
static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}
/* store a 32-bit value in little-endian byte order */
static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}
294 static inline void stq(void *ptr
, uint64_t v
)
303 static inline float ldfl(void *ptr
)
313 static inline double ldfq(void *ptr
)
323 static inline void stfl(void *ptr
, float v
)
333 static inline void stfq(void *ptr
, double v
)
/* direct host access: load an unsigned 16-bit value */
static inline int lduw(void *ptr)
{
    uint16_t *p = ptr;
    return *p;
}
/* direct host access: load a signed 16-bit value, sign-extended */
static inline int ldsw(void *ptr)
{
    int16_t *p = ptr;
    return *p;
}
/* direct host access: load a 32-bit value */
static inline int ldl(void *ptr)
{
    uint32_t *p = ptr;
    return *p;
}
/* direct host access: load a 64-bit value */
static inline uint64_t ldq(void *ptr)
{
    uint64_t *p = ptr;
    return *p;
}
/* direct host access: store the low 16 bits of v */
static inline void stw(void *ptr, int v)
{
    uint16_t *p = ptr;
    *p = v;
}
/* direct host access: store the low 32 bits of v */
static inline void stl(void *ptr, int v)
{
    uint32_t *p = ptr;
    *p = v;
}
/* direct host access: store a 64-bit value */
static inline void stq(void *ptr, uint64_t v)
{
    uint64_t *p = ptr;
    *p = v;
}
/* direct host access: load a single-precision float */
static inline float ldfl(void *ptr)
{
    float *p = ptr;
    return *p;
}
/* direct host access: load a double-precision float */
static inline double ldfq(void *ptr)
{
    double *p = ptr;
    return *p;
}
/* store a single-precision float (body restored after the extraction
   lost it; mirrors ldfl above) */
static inline void stfl(void *ptr, float v)
{
    *(float *)ptr = v;
}
/* store a double-precision float (body restored after the extraction
   lost it; mirrors ldfq above) */
static inline void stfq(void *ptr, double v)
{
    *(double *)ptr = v;
}
/* guest I/O port access (the out* functions write val to port addr,
   the in* functions read from port addr, for byte/word/long sizes) */
void cpu_x86_outb(int addr, int val);
void cpu_x86_outw(int addr, int val);
void cpu_x86_outl(int addr, int val);
int cpu_x86_inb(int addr);
int cpu_x86_inw(int addr);
int cpu_x86_inl(int addr);
/* virtual CPU lifecycle */
CPUX86State *cpu_x86_init(void);     /* allocate a new CPU state */
int cpu_x86_exec(CPUX86State *s);    /* run guest code; NOTE(review):
                                        presumably returns an EXCP_*
                                        exit reason — confirm against
                                        the implementation */
void cpu_x86_interrupt(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);  /* free the CPU state */

/* needed to load some predefined segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
/* You can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU. */
424 int cpu_x86_signal_handler(int host_signum
, struct siginfo
*info
,
/* internal functions */

/* bit positions of the flags word passed to cpu_x86_gen_code();
   the names suggest: 32-bit code segment, add segment base to
   addresses, 32-bit stack segment, virtual-8086 mode — NOTE(review):
   confirm against the translator */
#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT 2
#define GEN_FLAG_VM_SHIFT 3
#define GEN_FLAG_ST_SHIFT 4
/* Code generator entry point. From the signature: host code is
   emitted into gen_code_buf (at most max_code_size bytes) and the
   generated size is stored in *gen_code_size_ptr; pc_start/cs_base
   locate the guest code and flags is a combination of the GEN_FLAG_*
   bits above. NOTE(review): return-value semantics not visible here
   — confirm against the implementation. */
int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
                     int *gen_code_size_ptr,
                     uint8_t *pc_start, uint8_t *cs_base, int flags);
/* one-time initialization of the translation block tables */
void cpu_x86_tblocks_init(void);
440 #endif /* CPU_I386_H */