/* NOTE: this header is included in op-i386.c where global register
   variables are used. Care must be taken when including glibc headers.
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"
#include <stdint.h> /* fixed width integer types used below */
#include <setjmp.h>

#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

#define CC_C            0x0001
#define CC_P            0x0004
#define CC_A            0x0010
#define CC_Z            0x0040
#define CC_S            0x0080
#define CC_O            0x0800

#define TRAP_FLAG       0x0100
#define INTERRUPT_FLAG  0x0200
#define DIRECTION_FLAG  0x0400
#define IOPL_FLAG_MASK  0x3000
#define NESTED_FLAG     0x4000
#define BYTE_FL         0x8000 /* Intel reserved! */
#define RF_FLAG         0x10000
#define VM_FLAG         0x20000
/* AC                   0x40000 */

#define EXCP00_DIVZ     1
#define EXCP01_SSTP     2
#define EXCP02_NMI      3
#define EXCP03_INT3     4
#define EXCP04_INTO     5
#define EXCP05_BOUND    6
#define EXCP06_ILLOP    7
#define EXCP07_PREX     8
#define EXCP08_DBLE     9
#define EXCP09_XERR     10
#define EXCP0A_TSS      11
#define EXCP0B_NOSEG    12
#define EXCP0C_STACK    13
#define EXCP0D_GPF      14
#define EXCP0E_PAGE     15
#define EXCP10_COPR     17
#define EXCP11_ALGN     18
#define EXCP12_MCHK     19

#define EXCP_INTERRUPT  256 /* async interruption */

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_MUL,     /* modify all flags, C, O = (CC_SRC != 0) */

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,

    CC_OP_NB,
};
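
/* The CC_OP values above implement lazy condition-code evaluation: the
   translated code only records the kind of operation in cc_op and its
   operands/result in cc_src/cc_dst, and the EFLAGS bits are derived on
   demand.  The sketch below is illustrative only (the helper name is
   made up and it covers just the CC_OP_SUBL case); the real dispatch on
   cc_op is done elsewhere in the emulator. */
#if 0
static int example_flags_for_subl(uint32_t cc_dst, uint32_t cc_src)
{
    uint32_t res = cc_dst;            /* result of the SUB */
    uint32_t src1 = cc_src;           /* first operand */
    uint32_t src2 = src1 - res;       /* recovered second operand */
    int eflags = 0;

    if (res == 0)
        eflags |= CC_Z;               /* zero flag */
    if (src1 < src2)
        eflags |= CC_C;               /* unsigned borrow sets carry */
    return eflags;
}
#endif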

#ifdef __i386__
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint8_t *base;
    unsigned long limit;
    uint8_t seg_32bit;
} SegmentCache;

typedef struct SegmentDescriptorTable {
    uint8_t *base;
    unsigned long limit;
    /* this is the base returned when the register is read, so that
       the emulated program cannot modify the real one */
    unsigned long emu_base;
} SegmentDescriptorTable;
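
/* Illustrative only: emu_base is what an instruction that reads the
   descriptor-table register (an SGDT-style store, for example) would
   report, while 'base' keeps the host pointer actually used by the
   emulator.  The helper name below is made up and it relies on the
   stw()/stl() store helpers declared later in this header. */
#if 0
static void example_store_gdtr(SegmentDescriptorTable *dt, uint8_t *dest)
{
    stw(dest, dt->limit);         /* 16-bit limit */
    stl(dest + 2, dt->emu_base);  /* reported base, not the host pointer */
}
#endif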

typedef struct CPUX86State {
    /* standard registers */
    uint32_t regs[8];
    uint32_t eip;
    uint32_t eflags;

    /* emulator internal eflags handling */
    uint32_t cc_src;
    uint32_t cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag: 1 if D = 0, -1 if D = 1 */

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */
    CPU86_LDouble fpregs[8];

    /* emulator internal variables */
    CPU86_LDouble ft0;

    /* segments */
    uint32_t segs[6]; /* selector values */
    SegmentCache seg_cache[6]; /* info taken from LDT/GDT */
    SegmentDescriptorTable gdt;
    SegmentDescriptorTable ldt;
    SegmentDescriptorTable idt;

    /* various CPU modes */
    int vm86;

    /* exception/interrupt handling */
    jmp_buf jmp_env;
    int exception_index;
    int interrupt_request;
} CPUX86State;
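
/* Illustrative only: the cached segment bases are host pointers, so an
   emulated data access at DS:offset can be a direct dereference, and
   df shows how string instructions step their index registers (+1 when
   the direction flag is clear, -1 when it is set).  Both helper names
   below are made up. */
#if 0
static int example_read_ds_byte(CPUX86State *env, uint32_t offset)
{
    uint8_t *haddr = env->seg_cache[R_DS].base + offset;
    return *haddr;
}

static void example_movsb_step(CPUX86State *env)
{
    /* one element of a byte string move: copy, then advance by df */
    *(env->seg_cache[R_ES].base + env->regs[R_EDI]) =
        *(env->seg_cache[R_DS].base + env->regs[R_ESI]);
    env->regs[R_ESI] += env->df;
    env->regs[R_EDI] += env->df;
}
#endif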

/* all CPU memory accesses use these inline functions */
static inline int ldub(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

#ifdef WORDS_BIGENDIAN

/* conservative code for little-endian unaligned accesses */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl(p);
    v2 = ldl(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl(p, (uint32_t)v);
    stl(p + 4, v >> 32);
}

/* float access */

static inline float ldfl(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl(ptr);
    return u.f;
}

static inline double ldfq(void *ptr)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.i = ldq(ptr);
    return u.d;
}

static inline void stfl(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl(ptr, u.i);
}

static inline void stfq(void *ptr, double v)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.d = v;
    stq(ptr, u.i);
}

#else

static inline int lduw(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float ldfl(void *ptr)
{
    return *(float *)ptr;
}

static inline double ldfq(void *ptr)
{
    return *(double *)ptr;
}

static inline void stfl(void *ptr, float v)
{
    *(float *)ptr = v;
}

static inline void stfq(void *ptr, double v)
{
    *(double *)ptr = v;
}
#endif
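
/* Illustrative only: whichever branch above was compiled, these
   load/store helpers keep x86 (little-endian) byte order in memory, so
   a store/load pair round-trips on any host.  The function name below
   is made up. */
#if 0
static int example_round_trip(void)
{
    uint8_t buf[4];

    stl(buf, 0x12345678);
    /* buf[0] is 0x78 and buf[3] is 0x12 on every host */
    return ldl(buf) == 0x12345678;   /* always true */
}
#endif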

#ifndef IN_OP_I386
void cpu_x86_outb(int addr, int val);
void cpu_x86_outw(int addr, int val);
void cpu_x86_outl(int addr, int val);
int cpu_x86_inb(int addr);
int cpu_x86_inw(int addr);
int cpu_x86_inl(int addr);
#endif

CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_interrupt(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);

/* needed to load some predefined segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
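
/* Illustrative usage only (not part of the original header): create a
   CPU, set up the entry point and segment selectors, then run it.  The
   selector values and the way the cpu_x86_exec() return value is used
   here are assumptions made for the sake of the example. */
#if 0
static int example_run(uint32_t entry_eip)
{
    CPUX86State *env = cpu_x86_init();
    int trapnr;

    env->eip = entry_eip;
    cpu_x86_load_seg(env, R_CS, 0x23);   /* example selector values */
    cpu_x86_load_seg(env, R_DS, 0x2b);
    cpu_x86_load_seg(env, R_SS, 0x2b);
    cpu_x86_load_seg(env, R_ES, 0x2b);

    trapnr = cpu_x86_exec(env);          /* returns when an exception
                                            (EXCP*) must be handled */
    cpu_x86_close(env);
    return trapnr;
}
#endif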

/* you can call this signal handler from your SIGBUS and SIGSEGV signal
   handlers to inform the virtual CPU of exceptions. Non-zero is
   returned if the signal was handled by the virtual CPU. */
struct siginfo;
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);
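
/* Illustrative only: forwarding host memory faults to the virtual CPU.
   This is not part of the header; it assumes <signal.h>, <string.h>
   and <stdlib.h> and uses sigaction() with SA_SIGINFO. */
#if 0
#include <signal.h>
#include <string.h>
#include <stdlib.h>

static void example_fault_handler(int signum, siginfo_t *info, void *puc)
{
    if (cpu_x86_signal_handler(signum, (struct siginfo *)info, puc))
        return;     /* the fault came from emulated code: handled */
    abort();        /* genuine host fault */
}

static void example_install_handlers(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));
    act.sa_sigaction = example_fault_handler;
    act.sa_flags = SA_SIGINFO;
    sigaction(SIGSEGV, &act, NULL);
    sigaction(SIGBUS, &act, NULL);
}
#endif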

/* internal functions */

#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT   2
#define GEN_FLAG_ST_SHIFT     3

int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
                     int *gen_code_size_ptr,
                     uint8_t *pc_start, uint8_t *cs_base, int flags);
void cpu_x86_tblocks_init(void);
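
/* Illustrative only: how a caller might pack the translator flags from
   the current CPU mode before calling cpu_x86_gen_code().  The
   code32/ss32/addseg parameters are assumptions standing in for state
   derived from the current code and stack segments, and the helper
   name is made up. */
#if 0
static int example_make_gen_flags(int code32, int ss32, int addseg)
{
    return (code32 << GEN_FLAG_CODE32_SHIFT) |
           (addseg << GEN_FLAG_ADDSEG_SHIFT) |
           (ss32 << GEN_FLAG_SS32_SHIFT);
}
#endif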

#endif /* CPU_I386_H */