/* qemu.git / cpu-i386.h */
1 /*
2 * i386 virtual CPU header
3 *
4 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #ifndef CPU_I386_H
21 #define CPU_I386_H
22
#include "config.h"
#include <setjmp.h>
#include <string.h>
25
/* general purpose register indices (order matches the x86 instruction
   encoding of the reg/rm fields) */
#define R_EAX 0
#define R_ECX 1
#define R_EDX 2
#define R_EBX 3
#define R_ESP 4
#define R_EBP 5
#define R_ESI 6
#define R_EDI 7

/* 8 bit register indices; 4..7 select the high byte registers AH..BH */
#define R_AL 0
#define R_CL 1
#define R_DL 2
#define R_BL 3
#define R_AH 4
#define R_CH 5
#define R_DH 6
#define R_BH 7

/* segment register indices (order matches the x86 segment encoding) */
#define R_ES 0
#define R_CS 1
#define R_SS 2
#define R_DS 3
#define R_FS 4
#define R_GS 5

/* eflags masks */
/* condition code bits; during emulation these are kept in
   cc_src/cc_dst/cc_op rather than in env->eflags (see CPUX86State) */
#define CC_C 0x0001
#define CC_P 0x0004
#define CC_A 0x0010
#define CC_Z 0x0040
#define CC_S 0x0080
#define CC_O 0x0800

/* non condition-code eflags bits */
#define TF_MASK 0x00000100
#define IF_MASK 0x00000200
#define DF_MASK 0x00000400
#define IOPL_MASK 0x00003000
#define NT_MASK 0x00004000
#define RF_MASK 0x00010000
#define VM_MASK 0x00020000
#define AC_MASK 0x00040000
#define VIF_MASK 0x00080000
#define VIP_MASK 0x00100000
#define ID_MASK 0x00200000

/* x86 exception vectors (vector 15 is reserved and has no constant) */
#define EXCP00_DIVZ 0
#define EXCP01_SSTP 1
#define EXCP02_NMI 2
#define EXCP03_INT3 3
#define EXCP04_INTO 4
#define EXCP05_BOUND 5
#define EXCP06_ILLOP 6
#define EXCP07_PREX 7
#define EXCP08_DBLE 8
#define EXCP09_XERR 9
#define EXCP0A_TSS 10
#define EXCP0B_NOSEG 11
#define EXCP0C_STACK 12
#define EXCP0D_GPF 13
#define EXCP0E_PAGE 14
#define EXCP10_COPR 16
#define EXCP11_ALGN 17
#define EXCP12_MCHK 18

/* pseudo exception used to exit the cpu loop */
#define EXCP_INTERRUPT 256 /* async interruption */
/* lazy condition code evaluation: instead of computing eflags after
   every instruction, the emulator records which operation produced the
   flags (cc_op) together with its operands (cc_src/cc_dst) and only
   materializes the flags when they are actually needed */
enum {
    CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */
    CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */
    CC_OP_MUL, /* modify all flags, C, O = (CC_SRC != 0) */

    /* each arithmetic group below comes in byte/word/long variants */
    CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADDW,
    CC_OP_ADDL,

    CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_ADCW,
    CC_OP_ADCL,

    CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SUBW,
    CC_OP_SUBL,

    CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */
    CC_OP_SBBW,
    CC_OP_SBBL,

    CC_OP_LOGICB, /* modify all flags, CC_DST = res */
    CC_OP_LOGICW,
    CC_OP_LOGICL,

    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_INCW,
    CC_OP_INCL,

    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
    CC_OP_DECW,
    CC_OP_DECL,

    CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SHLW,
    CC_OP_SHLL,

    CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */
    CC_OP_SARW,
    CC_OP_SARL,

    CC_OP_NB, /* number of CC_OP values */
};
135
/* on an x86 host the native 80 bit "long double" gives the exact
   precision of the emulated FPU; other hosts fall back to 64 bit
   doubles (with a corresponding loss of precision) */
#ifdef __i386__
#define USE_X86LDOUBLE
#endif

#ifdef USE_X86LDOUBLE
typedef long double CPU86_LDouble;
#else
typedef double CPU86_LDouble;
#endif
145
/* cached decoding of one segment register (avoids re-reading the
   descriptor tables on every access) */
typedef struct SegmentCache {
    uint8_t *base;       /* linear base address of the segment */
    unsigned long limit;
    uint8_t seg_32bit;   /* non zero for a 32 bit segment */
} SegmentCache;
151
/* GDT/LDT/IDT description: where the table really lives in host
   memory, and the base to report back to the guest */
typedef struct SegmentDescriptorTable {
    uint8_t *base;
    unsigned long limit;
    /* this is the returned base when reading the register, just to
       avoid that the emulated program modifies it */
    unsigned long emu_base;
} SegmentDescriptorTable;
159
/* complete emulated x86 CPU state */
typedef struct CPUX86State {
    /* standard registers */
    uint32_t regs[8];    /* indexed by R_EAX..R_EDI */
    uint32_t eip;
    uint32_t eflags;     /* eflags register. During CPU emulation, CC
                            flags and DF are set to zero because they are
                            stored elsewhere */

    /* emulator internal eflags handling (see the CC_OP enum) */
    uint32_t cc_src;
    uint32_t cc_dst;
    uint32_t cc_op;
    int32_t df;          /* D flag : 1 if D = 0, -1 if D = 1 */

    /* FPU state */
    unsigned int fpstt;  /* top of stack index */
    unsigned int fpus;   /* FPU status word */
    unsigned int fpuc;   /* FPU control word */
    uint8_t fptags[8];   /* 0 = valid, 1 = empty */
    CPU86_LDouble fpregs[8];

    /* emulator internal variables */
    CPU86_LDouble ft0;   /* temporary used by generated FP code */
    union {
        float f;
        double d;
        int i32;
        int64_t i64;
    } fp_convert;        /* scratch area for FP format conversions */

    /* segments */
    uint32_t segs[6];          /* selector values, indexed by R_ES..R_GS */
    SegmentCache seg_cache[6]; /* info taken from LDT/GDT */
    SegmentDescriptorTable gdt;
    SegmentDescriptorTable ldt;
    SegmentDescriptorTable idt;

    /* exception/interrupt handling */
    jmp_buf jmp_env;       /* longjmp target to exit the cpu loop */
    int exception_index;
    int error_code;
    uint32_t cr2;          /* faulting address for page faults */
    int interrupt_request;

    /* user data */
    void *opaque;          /* free for use by the embedding program */
} CPUX86State;
207
208 /* all CPU memory access use these macros */
/* all CPU memory access use these macros */

/* load an unsigned 8 bit quantity, zero extended to int */
static inline int ldub(void *ptr)
{
    uint8_t *p = ptr;
    return *p;
}
213
/* load a signed 8 bit quantity, sign extended to int */
static inline int ldsb(void *ptr)
{
    int8_t *p = ptr;
    return *p;
}
218
/* store the low 8 bits of v */
static inline void stb(void *ptr, int v)
{
    uint8_t *p = ptr;
    *p = v;
}
223
224 #ifdef WORDS_BIGENDIAN
225
/* conservative code for little endian unaligned accesses */

/* load a little endian 16 bit quantity, zero extended */
static inline int lduw(void *ptr)
{
#ifdef __powerpc__
    /* lhbrx is a byte-reversed halfword load: it does the endian swap
       and the unaligned access in a single instruction */
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    /* byte-wise assembly works on any host endianness/alignment */
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}
238
/* load a little endian 16 bit quantity, sign extended */
static inline int ldsw(void *ptr)
{
#ifdef __powerpc__
    /* byte-reversed halfword load, then sign extend via the cast */
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}
250
/* load a little endian 32 bit quantity */
static inline int ldl(void *ptr)
{
#ifdef __powerpc__
    /* lwbrx is a byte-reversed word load (endian swap in hardware) */
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}
262
/* load a little endian 64 bit quantity as two 32 bit halves */
static inline uint64_t ldq(void *ptr)
{
    uint8_t *base = ptr;
    uint32_t lo = ldl(base);
    uint32_t hi = ldl(base + 4);
    return ((uint64_t)hi << 32) | lo;
}
271
/* store a little endian 16 bit quantity */
static inline void stw(void *ptr, int v)
{
#ifdef __powerpc__
    /* sthbrx = byte-reversed halfword store */
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}
282
/* store a little endian 32 bit quantity */
static inline void stl(void *ptr, int v)
{
#ifdef __powerpc__
    /* stwbrx = byte-reversed word store */
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}
295
/* store a little endian 64 bit quantity as two 32 bit halves */
static inline void stq(void *ptr, uint64_t v)
{
    uint8_t *base = ptr;
    stl(base, (uint32_t)v);
    stl(base + 4, (uint32_t)(v >> 32));
}
302
/* float access */

/* load a little endian 32 bit float */
static inline float ldfl(void *ptr)
{
    union {
        float f;
        uint32_t i;
    } bits;

    bits.i = ldl(ptr);
    return bits.f;
}
314
/* load a little endian 64 bit double */
static inline double ldfq(void *ptr)
{
    union {
        double d;
        uint64_t i;
    } bits;

    bits.i = ldq(ptr);
    return bits.d;
}
324
/* store a little endian 32 bit float */
static inline void stfl(void *ptr, float v)
{
    union {
        float f;
        uint32_t i;
    } bits;

    bits.f = v;
    stl(ptr, bits.i);
}
334
/* store a little endian 64 bit double */
static inline void stfq(void *ptr, double v)
{
    union {
        double d;
        uint64_t i;
    } bits;

    bits.d = v;
    stq(ptr, bits.i);
}
344
345 #else
346
/* load a 16 bit quantity. memcpy instead of a direct pointer cast:
   guest addresses may be unaligned and the cast is undefined behaviour
   under strict aliasing; the compiler emits the same single load */
static inline int lduw(void *ptr)
{
    uint16_t v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
351
/* load a 16 bit quantity, sign extended. memcpy avoids the
   unaligned-access / strict-aliasing UB of a direct cast */
static inline int ldsw(void *ptr)
{
    int16_t v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
356
/* load a 32 bit quantity. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline int ldl(void *ptr)
{
    uint32_t v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
361
/* load a 64 bit quantity. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline uint64_t ldq(void *ptr)
{
    uint64_t v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
366
/* store a 16 bit quantity. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline void stw(void *ptr, int v)
{
    uint16_t tmp = v;
    memcpy(ptr, &tmp, sizeof(tmp));
}
371
/* store a 32 bit quantity. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline void stl(void *ptr, int v)
{
    uint32_t tmp = v;
    memcpy(ptr, &tmp, sizeof(tmp));
}
376
/* store a 64 bit quantity. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline void stq(void *ptr, uint64_t v)
{
    memcpy(ptr, &v, sizeof(v));
}
381
/* float access */

/* load a 32 bit float. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline float ldfl(void *ptr)
{
    float v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
388
/* load a 64 bit double. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline double ldfq(void *ptr)
{
    double v;
    memcpy(&v, ptr, sizeof(v));
    return v;
}
393
/* store a 32 bit float. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline void stfl(void *ptr, float v)
{
    memcpy(ptr, &v, sizeof(v));
}
398
/* store a 64 bit double. memcpy avoids the unaligned-access /
   strict-aliasing UB of a direct cast */
static inline void stfq(void *ptr, double v)
{
    memcpy(ptr, &v, sizeof(v));
}
403 #endif
404
/* x86 I/O port accesses, implemented by the embedding program.
   NOTE(review): hidden when compiling the micro operations
   (IN_OP_I386) -- presumably to keep them out of the generated-code
   environment; confirm before relying on these from op code */
#ifndef IN_OP_I386
void cpu_x86_outb(CPUX86State *env, int addr, int val);
void cpu_x86_outw(CPUX86State *env, int addr, int val);
void cpu_x86_outl(CPUX86State *env, int addr, int val);
int cpu_x86_inb(CPUX86State *env, int addr);
int cpu_x86_inw(CPUX86State *env, int addr);
int cpu_x86_inl(CPUX86State *env, int addr);
#endif
413
/* public emulator API: create, run, interrupt and destroy a virtual CPU */
CPUX86State *cpu_x86_init(void);
int cpu_x86_exec(CPUX86State *s);
void cpu_x86_interrupt(CPUX86State *s);
void cpu_x86_close(CPUX86State *s);

/* needed to load some predefined segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);

/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
struct siginfo;
int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
                           void *puc);

/* used to debug */
#define X86_DUMP_FPU  0x0001 /* dump FPU state too */
#define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags);
433
/* page related stuff */
#define TARGET_PAGE_BITS 12
#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

/* host page geometry, determined at runtime (it may differ from the
   4KB target page size) */
extern unsigned long real_host_page_size;
extern unsigned long host_page_bits;
extern unsigned long host_page_size;
extern unsigned long host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + host_page_size - 1) & host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ 0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC 0x0004
#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010

/* query and modify the per-page flags of the emulated address space */
void page_dump(FILE *f);
int page_get_flags(unsigned long address);
void page_set_flags(unsigned long start, unsigned long end, int flags);
void page_unprotect_range(uint8_t *data, unsigned long data_size);
461
/***************************************************/
/* internal functions */

/* bit positions of the CPU state bits encoded in the 'flags' word
   passed to the code generator (a translated block is only valid for
   one combination of these) */
#define GEN_FLAG_CODE32_SHIFT 0
#define GEN_FLAG_ADDSEG_SHIFT 1
#define GEN_FLAG_SS32_SHIFT 2
#define GEN_FLAG_VM_SHIFT 3
#define GEN_FLAG_ST_SHIFT 4
#define GEN_FLAG_CPL_SHIFT 7
#define GEN_FLAG_IOPL_SHIFT 9
#define GEN_FLAG_TF_SHIFT 11

/* translate target code starting at pc_start into host code placed in
   gen_code_buf */
int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
                     int *gen_code_size_ptr,
                     uint8_t *pc_start, uint8_t *cs_base, int flags,
                     int *code_size_ptr);
void cpu_x86_tblocks_init(void);
void page_init(void);
int page_unprotect(unsigned long address);
481
/* code generator limits */
#define CODE_GEN_MAX_SIZE 65536
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */

#define CODE_GEN_HASH_BITS 15
#define CODE_GEN_HASH_SIZE (1 << CODE_GEN_HASH_BITS)

/* maximum total translated code allocated */
#define CODE_GEN_BUFFER_SIZE (2048 * 1024)
//#define CODE_GEN_BUFFER_SIZE (128 * 1024)
491
/* one block of target code together with its translation; blocks are
   chained both in the lookup hash table and per page (for invalidation
   when a page is written) */
typedef struct TranslationBlock {
    unsigned long pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    unsigned long cs_base; /* CS base for this block */
    unsigned int flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint8_t *tc_ptr;    /* pointer to the translated code */
    struct TranslationBlock *hash_next; /* next matching block */
    struct TranslationBlock *page_next[2]; /* next blocks in even/odd page */
} TranslationBlock;
502
503 static inline unsigned int tb_hash_func(unsigned long pc)
504 {
505 return pc & (CODE_GEN_HASH_SIZE - 1);
506 }
507
/* discard all translated code / allocate a new translation block */
void tb_flush(void);
TranslationBlock *tb_alloc(unsigned long pc,
                           unsigned long size);

/* hash table of all translation blocks, chained through hash_next */
extern TranslationBlock *tb_hash[CODE_GEN_HASH_SIZE];

/* statically allocated translated code buffer and its current fill
   pointer */
extern uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE];
extern uint8_t *code_gen_ptr;
516
517 /* find a translation block in the translation cache. If not found,
518 return NULL and the pointer to the last element of the list in pptb */
519 static inline TranslationBlock *tb_find(TranslationBlock ***pptb,
520 unsigned long pc,
521 unsigned long cs_base,
522 unsigned int flags)
523 {
524 TranslationBlock **ptb, *tb;
525 unsigned int h;
526
527 h = tb_hash_func(pc);
528 ptb = &tb_hash[h];
529 for(;;) {
530 tb = *ptb;
531 if (!tb)
532 break;
533 if (tb->pc == pc && tb->cs_base == cs_base && tb->flags == flags)
534 return tb;
535 ptb = &tb->hash_next;
536 }
537 *pptb = ptb;
538 return NULL;
539 }
540
/* fallback for toolchains whose <stddef.h> does not define offsetof */
#ifndef offsetof
#define offsetof(type, field) ((size_t) &((type *)0)->field)
#endif
544
#ifdef __powerpc__
/* atomically set *p to 1 and return the previous value (lwarx/stwcx.
   load-reserve / store-conditional loop; retries if the reservation is
   lost). Returns 0 iff the lock was acquired. */
static inline int testandset (int *p)
{
    int ret;
    __asm__ __volatile__ (
        "0: lwarx %0,0,%1 ;"
        " xor. %0,%3,%0;"
        " bne 1f;"
        " stwcx. %2,0,%1;"
        " bne- 0b;"
        "1: "
        : "=&r" (ret)
        : "r" (p), "r" (1), "r" (0)
        : "cr0", "memory");
    return ret;
}
#endif
562
#ifdef __i386__
/* atomically set *p to 1 and return non zero if it was already set,
   0 if we acquired it -- same convention as the other architectures.
   BUG FIX: the previous version used "sete", which is inverted: after
   a successful cmpxchg ZF=1, so it returned 1 on acquisition and 0 on
   a contended lock, making spin_lock() exit while another CPU held
   the lock. "setne" returns the old-value convention expected by
   spin_lock/spin_trylock. */
static inline int testandset (int *p)
{
    char ret;
    long int readval;

    __asm__ __volatile__ ("lock; cmpxchgl %3, %1; setne %0"
                          : "=q" (ret), "=m" (*p), "=a" (readval)
                          : "r" (1), "m" (*p), "a" (0)
                          : "memory");
    return ret;
}
#endif
576
#ifdef __s390__
/* atomically set *p to 1 and return the previous value using
   compare-and-swap; loops until the CS succeeds. Returns 0 iff the
   lock was acquired. */
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__ ("0: cs %0,%1,0(%2)\n"
                          " jl 0b"
                          : "=&d" (ret)
                          : "r" (1), "a" (p), "0" (*p)
                          : "cc", "memory" );
    return ret;
}
#endif
590
#ifdef __alpha__
/* atomically set *p to 1 and return the previous value (ldl_l/stl_c
   load-locked / store-conditional loop; branches back on a lost
   reservation). Returns 0 iff the lock was acquired.
   FIX: declared static inline like the other architectures -- a plain
   external definition in a header would produce a duplicate symbol in
   every translation unit that includes this file. */
static inline int testandset (int *p)
{
    int ret;
    unsigned long one;

    __asm__ __volatile__ ("0: mov 1,%2\n"
                          " ldl_l %0,%1\n"
                          " stl_c %2,%1\n"
                          " beq %2,1f\n"
                          ".subsection 2\n"
                          "1: br 0b\n"
                          ".previous"
                          : "=r" (ret), "=m" (*p), "=r" (one)
                          : "m" (*p));
    return ret;
}
#endif
609
#ifdef __sparc__
/* atomically set *p to 0xff (ldstub stores all-ones) and return the
   previous value normalized to 0/1. Returns 0 iff the lock was
   acquired. */
static inline int testandset (int *p)
{
    int ret;

    __asm__ __volatile__("ldstub [%1], %0"
                         : "=r" (ret)
                         : "r" (p)
                         : "memory");

    return (ret ? 1 : 0);
}
#endif
623
/* simple spin lock built on the per-architecture testandset above */
typedef int spinlock_t;

#define SPIN_LOCK_UNLOCKED 0
627
628 static inline void spin_lock(spinlock_t *lock)
629 {
630 while (testandset(lock));
631 }
632
633 static inline void spin_unlock(spinlock_t *lock)
634 {
635 *lock = 0;
636 }
637
638 static inline int spin_trylock(spinlock_t *lock)
639 {
640 return !testandset(lock);
641 }
642
643 extern spinlock_t tb_lock;
644
645 #endif /* CPU_I386_H */