/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
/* x86 EFLAGS register bit masks */
#define TF_MASK   0x00000100 /* trap flag (single step) */
#define IF_MASK   0x00000200 /* interrupt enable flag */
#define DF_MASK   0x00000400 /* direction flag */
#define IOPL_MASK 0x00003000 /* I/O privilege level (2 bits) */
#define NT_MASK   0x00004000 /* nested task flag */
#define RF_MASK   0x00010000 /* resume flag */
#define VM_MASK   0x00020000 /* virtual-8086 mode flag */
#define AC_MASK   0x00040000 /* alignment check flag */
#define VIF_MASK  0x00080000 /* virtual interrupt flag */
#define VIP_MASK  0x00100000 /* virtual interrupt pending */
#define ID_MASK   0x00200000 /* CPUID detection flag */
/* x86 exception vector numbers (NOTE(review): vectors 0-4, 7-10 and 13
   appear elided from this extraction -- confirm against the full file) */
#define EXCP05_BOUND  5  /* BOUND range exceeded */
#define EXCP06_ILLOP  6  /* invalid opcode (#UD) */
#define EXCP0B_NOSEG  11 /* segment not present */
#define EXCP0C_STACK  12 /* stack-segment fault */
#define EXCP0E_PAGE   14 /* page fault */
#define EXCP10_COPR   16 /* x87 FPU floating-point error */
#define EXCP11_ALGN   17 /* alignment check */
#define EXCP12_MCHK   18 /* machine check */

#define EXCP_INTERRUPT 256 /* async interruption (not a real x86 vector) */
    /* condition-code computation modes: CC_SRC/CC_DST record the operands
       needed to lazily recompute the flags.
       NOTE(review): this extraction shows only the byte ("B") variants;
       the W/L width variants and the enum's opening/closing lines appear
       elided -- confirm against the full file. */
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_MUL,     /* modify all flags, C, O = (CC_SRC != 0) */

    CC_OP_ADDB,   /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCB,   /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBB,   /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBB,   /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_INCB,   /* modify all flags except, CC_DST = res, CC_SRC = C */
    CC_OP_DECB,   /* modify all flags except, CC_DST = res, CC_SRC = C */
    CC_OP_SHLB,   /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARB,   /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
/* Use the host's 80-bit extended precision type for x87 registers when
   available; otherwise fall back to 64-bit double (lower FP fidelity).
   Fix: the #else/#endif lines were lost in this copy, leaving two
   conflicting typedefs -- the conditional structure is restored here. */
#define USE_X86LDOUBLE

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif
/* cached info for one segment register.
   NOTE(review): the struct fields and closing brace are not visible in
   this extraction -- body elided; confirm against the full file. */
typedef struct SegmentCache {
/* GDT/LDT/IDT descriptor-table register state.
   NOTE(review): base/limit fields appear elided from this extraction --
   confirm against the full file. */
typedef struct SegmentDescriptorTable {
    /* this is the returned base when reading the register, just to
       avoid that the emulated program modifies it */
    unsigned long emu_base;
} SegmentDescriptorTable;
/* complete emulated x86 CPU state.
   NOTE(review): several fields (general registers, eip, cc_* variables,
   FPU control words, exception fields) and the closing "} CPUX86State;"
   are not visible in this extraction -- confirm against the full file. */
typedef struct CPUX86State {
    /* standard registers */
    uint32_t eflags;    /* eflags register. During CPU emulation, CC
                           flags and DF are set to zero because they are
                           tracked separately (NOTE(review): original
                           comment truncated here -- confirm wording) */

    /* emulator internal eflags handling */
    int32_t df;         /* D flag : 1 if D = 0, -1 if D = 1 */

    /* FPU state */
    unsigned int fpstt;         /* top of stack index */
    uint8_t fptags[8];          /* 0 = valid, 1 = empty */
    CPU86_LDouble fpregs[8];    /* x87 register stack */

    /* emulator internal variables */
    uint32_t segs[6];               /* selector values */
    SegmentCache seg_cache[6];      /* info taken from LDT/GDT */
    SegmentDescriptorTable gdt;
    SegmentDescriptorTable ldt;
    SegmentDescriptorTable idt;

    /* exception/interrupt handling */
    int interrupt_request;
/* all CPU memory access use these macros */

/* Load an unsigned 8-bit value from guest memory at 'ptr'. */
static inline int ldub(void *ptr)
{
    return *(uint8_t *)ptr;
}
/* Load a sign-extended 8-bit value from guest memory at 'ptr'. */
static inline int ldsb(void *ptr)
{
    return *(int8_t *)ptr;
}
/* Store the low 8 bits of 'v' at 'ptr'.
   Fix: the one-line body was lost in this copy; restored as the byte
   store matching the ldub/ldsb counterparts. */
static inline void stb(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}
216 #ifdef WORDS_BIGENDIAN
/* conservative code for little endian unaligned accesses */
/* Load a little-endian 16-bit value from 'ptr' on a big-endian host.
   Fix: the per-function "#ifdef __powerpc__" split (byte-reversed load
   vs portable byte composition) was lost in this copy; restored. */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    int val;
    /* lhbrx: load halfword byte-reversed, reads LE data directly */
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}
/* Load a sign-extended little-endian 16-bit value from 'ptr' on a
   big-endian host.
   Fix: restored the "#ifdef __powerpc__" structure lost in this copy. */
static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}
/* Load a little-endian 32-bit value from 'ptr' on a big-endian host.
   Fix: restored the "#ifdef __powerpc__" structure lost in this copy. */
static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    int val;
    /* lwbrx: load word byte-reversed, reads LE data directly */
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}
/* Load a little-endian 64-bit value as two 32-bit halves via ldl().
   NOTE(review): the declarations and ldl calls were elided by this
   extraction and are reconstructed from the visible combining expression
   -- confirm against the full file. */
static inline uint64_t ldq(void *ptr)
{
    uint32_t v1, v2;
    v1 = ldl(ptr);
    v2 = ldl((uint8_t *)ptr + 4);
    return v1 | ((uint64_t)v2 << 32);
}
/* Store the low 16 bits of 'v' at 'ptr' in little-endian order on a
   big-endian host.
   NOTE(review): the non-PowerPC branch was elided by this extraction and
   is reconstructed as the byte-wise LE store -- confirm. */
static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    /* sthbrx: store halfword byte-reversed */
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}
/* Store the low 32 bits of 'v' at 'ptr' in little-endian order on a
   big-endian host.
   NOTE(review): the non-PowerPC branch was elided by this extraction and
   is reconstructed as the byte-wise LE store -- confirm. */
static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    /* stwbrx: store word byte-reversed */
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}
/* Store a 64-bit value at 'ptr' in little-endian order as two 32-bit
   halves via stl().
   NOTE(review): the body was elided by this extraction and is
   reconstructed to mirror the ldq() counterpart -- confirm. */
static inline void stq(void *ptr, uint64_t v)
{
    stl(ptr, (uint32_t)v);
    stl((uint8_t *)ptr + 4, (uint32_t)(v >> 32));
}
/* Load a little-endian 32-bit float from 'ptr' via ldl() and a type-pun
   union (avoids strict-aliasing issues of a pointer cast).
   NOTE(review): the body was elided by this extraction and is
   reconstructed -- confirm against the full file. */
static inline float ldfl(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl(ptr);
    return u.f;
}
/* Load a little-endian 64-bit double from 'ptr' via ldq() and a type-pun
   union.
   NOTE(review): the body was elided by this extraction and is
   reconstructed -- confirm against the full file. */
static inline double ldfq(void *ptr)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.i = ldq(ptr);
    return u.d;
}
/* Store a 32-bit float at 'ptr' in little-endian order via stl() and a
   type-pun union.
   NOTE(review): the body was elided by this extraction and is
   reconstructed -- confirm against the full file. */
static inline void stfl(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl(ptr, u.i);
}
/* Store a 64-bit double at 'ptr' in little-endian order via stq() and a
   type-pun union.
   NOTE(review): the body was elided by this extraction and is
   reconstructed -- confirm against the full file. */
static inline void stfq(void *ptr, double v)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.d = v;
    stq(ptr, u.i);
}
/* Little-endian host: load an unsigned 16-bit value by direct access
   (assumes the host tolerates the access alignment). */
static inline int lduw(void *ptr)
{
    return *(uint16_t *)ptr;
}
/* Little-endian host: load a sign-extended 16-bit value by direct
   access. */
static inline int ldsw(void *ptr)
{
    return *(int16_t *)ptr;
}
/* Little-endian host: load a 32-bit value by direct access. */
static inline int ldl(void *ptr)
{
    return *(uint32_t *)ptr;
}
/* Little-endian host: load a 64-bit value by direct access. */
static inline uint64_t ldq(void *ptr)
{
    return *(uint64_t *)ptr;
}
/* Little-endian host: store the low 16 bits of 'v' by direct access. */
static inline void stw(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}
/* Little-endian host: store the low 32 bits of 'v' by direct access. */
static inline void stl(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}
/* Little-endian host: store a 64-bit value by direct access. */
static inline void stq(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}
/* Little-endian host: load a 32-bit float by direct access. */
static inline float ldfl(void *ptr)
{
    return *(float *)ptr;
}
/* Little-endian host: load a 64-bit double by direct access. */
static inline double ldfq(void *ptr)
{
    return *(double *)ptr;
}
/* Little-endian host: store a 32-bit float by direct access.
   Fix: the one-line body was lost in this copy; restored to mirror the
   ldfl() counterpart. */
static inline void stfl(void *ptr, float v)
{
    *(float *)ptr = v;
}
/* Little-endian host: store a 64-bit double by direct access.
   Fix: the one-line body was lost in this copy; restored to mirror the
   ldfq() counterpart. */
static inline void stfq(void *ptr, double v)
{
    *(double *)ptr = v;
}
/* guest I/O port access callbacks -- NOTE(review): definitions are not in
   this file; presumably supplied by the embedding program -- confirm. */
void cpu_x86_outb(int addr, int val); /* write 8 bits to port 'addr' */
void cpu_x86_outw(int addr, int val); /* write 16 bits to port 'addr' */
void cpu_x86_outl(int addr, int val); /* write 32 bits to port 'addr' */
int cpu_x86_inb(int addr);  /* read 8 bits from port 'addr' */
int cpu_x86_inw(int addr);  /* read 16 bits from port 'addr' */
int cpu_x86_inl(int addr);  /* read 32 bits from port 'addr' */
/* virtual CPU lifecycle */
CPUX86State *cpu_x86_init(void);        /* allocate and reset a CPU state */
int cpu_x86_exec(CPUX86State *s);       /* run guest code; returns exit reason */
void cpu_x86_interrupt(CPUX86State *s); /* signal a pending hardware interrupt */
void cpu_x86_close(CPUX86State *s);     /* free the CPU state */

/* needed to load some predefined segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU.
   Fix: the declaration was truncated after "struct siginfo *info," in
   this copy; the trailing parameter is restored as the ucontext pointer
   conventionally passed by SA_SIGINFO handlers. */
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);
/* internal functions */

/* bit positions inside the 'flags' word passed to cpu_x86_gen_code() */
#define GEN_FLAG_CODE32_SHIFT 0 /* code segment is 32 bit */
#define GEN_FLAG_ADDSEG_SHIFT 1 /* segment base must be added to addresses */
#define GEN_FLAG_SS32_SHIFT   2 /* stack segment is 32 bit */
#define GEN_FLAG_VM_SHIFT     3 /* virtual-8086 mode */
#define GEN_FLAG_ST_SHIFT     4 /* NOTE(review): meaning not visible here -- confirm */
/* Translate guest code starting at 'pc_start' (with segment base
   'cs_base' and GEN_FLAG_* bits in 'flags') into host code written to
   'gen_code_buf' (at most 'max_code_size' bytes); the generated size is
   stored through 'gen_code_size_ptr'. */
int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
                     int *gen_code_size_ptr,
                     uint8_t *pc_start, uint8_t *cs_base, int flags);

/* one-time initialization of the translation machinery */
void cpu_x86_tblocks_init(void);
434 #endif /* CPU_I386_H */