/*
 * i386 virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#ifndef CPU_I386_H
#define CPU_I386_H

#include "config.h"
#include <setjmp.h>

/* general purpose register indices (into CPUX86State.regs[]) */
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

/* 8-bit register encodings */
#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

/* segment register indices (into CPUX86State.segs[]) */
#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* eflags masks */
#define CC_C    0x0001
#define CC_P    0x0004
#define CC_A    0x0010
#define CC_Z    0x0040
#define CC_S    0x0080
#define CC_O    0x0800

#define TF_MASK     0x00000100
#define IF_MASK     0x00000200
#define DF_MASK     0x00000400
#define IOPL_MASK   0x00003000
#define NT_MASK     0x00004000
#define RF_MASK     0x00010000
#define VM_MASK     0x00020000
#define AC_MASK     0x00040000
#define VIF_MASK    0x00080000
#define VIP_MASK    0x00100000
#define ID_MASK     0x00200000

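/* Illustrative helper (not part of the original header; the name is just an
   example): most of the masks above select a single EFLAGS bit, while IOPL
   is a 2-bit field at bits 12-13, so it is read with a mask plus a shift. */
static inline int example_eflags_iopl(uint32_t eflags)
{
    return (eflags & IOPL_MASK) >> 12;
}
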
#define EXCP00_DIVZ     0
#define EXCP01_SSTP     1
#define EXCP02_NMI      2
#define EXCP03_INT3     3
#define EXCP04_INTO     4
#define EXCP05_BOUND    5
#define EXCP06_ILLOP    6
#define EXCP07_PREX     7
#define EXCP08_DBLE     8
#define EXCP09_XERR     9
#define EXCP0A_TSS      10
#define EXCP0B_NOSEG    11
#define EXCP0C_STACK    12
#define EXCP0D_GPF      13
#define EXCP0E_PAGE     14
#define EXCP10_COPR     16
#define EXCP11_ALGN     17
#define EXCP12_MCHK     18

#define EXCP_INTERRUPT  256 /* async interruption */

enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS,  /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_MUL,     /* modify all flags, C, O = (CC_SRC != 0) */

    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,

    CC_OP_NB,
};

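/* Illustrative sketch (not part of the original header; the function name is
   just an example): with the lazy scheme above, the generated code only
   records CC_SRC, CC_DST and the CC_OP code, and each flag is derived on
   demand.  E.g. for CC_OP_ADDB, CC_DST holds the byte result and CC_SRC the
   first operand, so the carry out can be recovered as follows. */
static inline int example_addb_carry(uint32_t cc_dst, uint32_t cc_src)
{
    /* res = src1 + src2 (mod 256), so there was a carry out
       iff the result is smaller than the first operand */
    return (uint8_t)cc_dst < (uint8_t)cc_src;
}
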
#ifdef __i386__
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif

typedef struct SegmentCache {
    uint8_t *base;
    unsigned long limit;
    uint8_t seg_32bit;
} SegmentCache;

typedef struct SegmentDescriptorTable {
    uint8_t *base;
    unsigned long limit;
    /* this is the base returned when the register is read, so that the
       emulated program cannot modify the real one */
    unsigned long emu_base;
} SegmentDescriptorTable;

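/* Illustrative sketch (not part of the original header; the function name is
   just an example): the cached base/seg_32bit fields are enough to turn a
   segment-relative offset into a host pointer; 16-bit segments wrap their
   effective offset at 64K. */
static inline uint8_t *example_seg_ptr(SegmentCache *sc, uint32_t offset)
{
    if (!sc->seg_32bit)
        offset &= 0xffff;
    return sc->base + offset;
}
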
typedef struct CPUX86State {
    /* standard registers */
    uint32_t regs[8];
    uint32_t eip;
    uint32_t eflags; /* eflags register. During CPU emulation, CC
                        flags and DF are set to zero because they are
                        stored elsewhere */

    /* emulator internal eflags handling */
    uint32_t cc_src;
    uint32_t cc_dst;
    uint32_t cc_op;
    int32_t df; /* D flag: 1 if DF = 0, -1 if DF = 1 */

    /* FPU state */
    unsigned int fpstt; /* top of stack index */
    unsigned int fpus;
    unsigned int fpuc;
    uint8_t fptags[8];  /* 0 = valid, 1 = empty */
    CPU86_LDouble fpregs[8];

    /* emulator internal variables */
    CPU86_LDouble ft0;
    union {
        float f;
        double d;
        int i32;
        int64_t i64;
    } fp_convert;

    /* segments */
    uint32_t segs[6];          /* selector values */
    SegmentCache seg_cache[6]; /* info taken from LDT/GDT */
    SegmentDescriptorTable gdt;
    SegmentDescriptorTable ldt;
    SegmentDescriptorTable idt;

    /* exception/interrupt handling */
    jmp_buf jmp_env;
    int exception_index;
    int interrupt_request;

    /* user data */
    void *opaque;
} CPUX86State;

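/* Illustrative sketch (not part of the original header; the function name is
   just an example): general purpose registers are indexed with the R_xxx
   constants above, and keeping DF as +1/-1 in 'df' lets string instructions
   advance their index registers with a simple add. */
static inline void example_movsl_index_step(CPUX86State *env)
{
    /* after one 32-bit string element: forward if DF = 0, backward if DF = 1 */
    env->regs[R_ESI] += env->df * 4;
    env->regs[R_EDI] += env->df * 4;
}
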
/* all CPU memory accesses use these inline functions */
static inline int ldub(void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb(void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

#ifdef WORDS_BIGENDIAN

/* conservative code for little endian unaligned accesses */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq(void *ptr)
{
    uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl(p);
    v2 = ldl(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl(p, (uint32_t)v);
    stl(p + 4, v >> 32);
}

/* float access */

static inline float ldfl(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.i = ldl(ptr);
    return u.f;
}

static inline double ldfq(void *ptr)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.i = ldq(ptr);
    return u.d;
}

static inline void stfl(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } u;
    u.f = v;
    stl(ptr, u.i);
}

static inline void stfq(void *ptr, double v)
{
    union {
        double d;
        uint64_t i;
    } u;
    u.d = v;
    stq(ptr, u.i);
}

#else

static inline int lduw(void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw(void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl(void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq(void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float ldfl(void *ptr)
{
    return *(float *)ptr;
}

static inline double ldfq(void *ptr)
{
    return *(double *)ptr;
}

static inline void stfl(void *ptr, float v)
{
    *(float *)ptr = v;
}

static inline void stfq(void *ptr, double v)
{
    *(double *)ptr = v;
}
#endif

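/* Illustrative sketch (not part of the original header; the function name is
   just an example): whichever branch above is compiled, the helpers access
   guest memory in x86 (little endian) byte order, so a value stored with
   stl() always reads back least significant byte first. */
static inline int example_guest_byte_order(uint8_t *buf)
{
    stl(buf, 0x12345678);
    return ldub(buf) == 0x78 && ldub(buf + 3) == 0x12;
}
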
#ifndef IN_OP_I386
void cpu_x86_outb(int addr, int val);
void cpu_x86_outw(int addr, int val);
void cpu_x86_outl(int addr, int val);
int cpu_x86_inb(int addr);
int cpu_x86_inw(int addr);
int cpu_x86_inl(int addr);
#endif

CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_interrupt(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);

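/* Example (illustrative sketch, not part of the original header): minimal
   use of the API above; entry_point and stack_top are hypothetical values
   provided by the caller's loader.

       CPUX86State *env = cpu_x86_init();
       env->eip = entry_point;
       env->regs[R_ESP] = stack_top;
       for (;;) {
           int ret = cpu_x86_exec(env);
           ... dispatch on ret and env->exception_index ...
       }
       cpu_x86_close(env);
*/
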
/* needed to load some predefined segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU. */
struct siginfo;
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);

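/* Example (illustrative sketch, not part of the original header): a host
   SIGSEGV/SIGBUS handler installed with SA_SIGINFO can forward the fault to
   the virtual CPU and only fall back to its own handling when the virtual
   CPU did not consume it:

       static void host_fault_handler(int signum, siginfo_t *info, void *puc)
       {
           if (cpu_x86_signal_handler(signum, (struct siginfo *)info, puc))
               return;
           ... handle or re-raise the host fault ...
       }
*/
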
/* internal functions */

#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT   2
#define GEN_FLAG_VM_SHIFT     3
#define GEN_FLAG_ST_SHIFT     4

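/* Example (illustrative sketch, not part of the original header): the
   'flags' argument of cpu_x86_gen_code() below is built from these shifts,
   e.g. a plausible encoding for 32-bit code with a 32-bit stack and zero
   segment bases:

       flags = (1 << GEN_FLAG_CODE32_SHIFT) | (1 << GEN_FLAG_SS32_SHIFT);
*/
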
int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
                     int *gen_code_size_ptr,
                     uint8_t *pc_start, uint8_t *cs_base, int flags);
void cpu_x86_tblocks_init(void);

#endif /* CPU_I386_H */