4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec-i386.h"
22 const uint8_t parity_table
[256] = {
23 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
24 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
25 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
26 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
27 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
28 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
29 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
30 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
31 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
32 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
33 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
34 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
35 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
36 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
37 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
38 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
39 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
40 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
41 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
42 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
43 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
44 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
45 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
46 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
47 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
48 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
51 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
52 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
/* RCL/RCR rotate-count table for 16-bit operands: maps a masked shift
 * count (0..31) to the effective rotation count modulo 17 (16 data
 * bits plus the carry flag participate in the rotation). */
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
/* RCL/RCR rotate-count table for 8-bit operands: maps a masked shift
 * count (0..31) to the effective rotation count modulo 9 (8 data bits
 * plus the carry flag participate in the rotation). */
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
73 const CPU86_LDouble f15rk
[7] =
75 0.00000000000000000000L,
76 1.00000000000000000000L,
77 3.14159265358979323851L, /*pi*/
78 0.30102999566398119523L, /*lg2*/
79 0.69314718055994530943L, /*ln2*/
80 1.44269504088896340739L, /*l2e*/
81 3.32192809488736234781L, /*l2t*/
86 spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
90 spin_lock(&global_cpu_lock
);
95 spin_unlock(&global_cpu_lock
);
98 void cpu_loop_exit(void)
100 /* NOTE: the register at this point must be saved by hand because
101 longjmp restore them */
103 env
->regs
[R_EAX
] = EAX
;
106 env
->regs
[R_ECX
] = ECX
;
109 env
->regs
[R_EDX
] = EDX
;
112 env
->regs
[R_EBX
] = EBX
;
115 env
->regs
[R_ESP
] = ESP
;
118 env
->regs
[R_EBP
] = EBP
;
121 env
->regs
[R_ESI
] = ESI
;
124 env
->regs
[R_EDI
] = EDI
;
126 longjmp(env
->jmp_env
, 1);
129 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
130 uint32_t *esp_ptr
, int dpl
)
132 int type
, index
, shift
;
137 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
138 for(i
=0;i
<env
->tr
.limit
;i
++) {
139 printf("%02x ", env
->tr
.base
[i
]);
140 if ((i
& 7) == 7) printf("\n");
146 if (!(env
->tr
.flags
& DESC_P_MASK
))
147 cpu_abort(env
, "invalid tss");
148 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
150 cpu_abort(env
, "invalid tss type");
152 index
= (dpl
* 4 + 2) << shift
;
153 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
154 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
156 *esp_ptr
= lduw(env
->tr
.base
+ index
);
157 *ss_ptr
= lduw(env
->tr
.base
+ index
+ 2);
159 *esp_ptr
= ldl(env
->tr
.base
+ index
);
160 *ss_ptr
= lduw(env
->tr
.base
+ index
+ 4);
164 /* return non zero if error */
165 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
176 index
= selector
& ~7;
177 if ((index
+ 7) > dt
->limit
)
179 ptr
= dt
->base
+ index
;
181 *e2_ptr
= ldl(ptr
+ 4);
186 /* protected mode interrupt */
187 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
188 unsigned int next_eip
)
192 int type
, dpl
, cpl
, selector
, ss_dpl
;
193 int has_error_code
, new_stack
, shift
;
194 uint32_t e1
, e2
, offset
, ss
, esp
, ss_e1
, ss_e2
, push_size
;
195 uint32_t old_cs
, old_ss
, old_esp
, old_eip
;
198 if (intno
* 8 + 7 > dt
->limit
)
199 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
200 ptr
= dt
->base
+ intno
* 8;
203 /* check gate type */
204 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
206 case 5: /* task gate */
207 cpu_abort(env
, "task gate not supported");
209 case 6: /* 286 interrupt gate */
210 case 7: /* 286 trap gate */
211 case 14: /* 386 interrupt gate */
212 case 15: /* 386 trap gate */
215 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
218 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
219 if (env
->eflags
& VM_MASK
)
222 cpl
= env
->segs
[R_CS
].selector
& 3;
223 /* check privilege if software int */
224 if (is_int
&& dpl
< cpl
)
225 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
226 /* check valid bit */
227 if (!(e2
& DESC_P_MASK
))
228 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
230 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
231 if ((selector
& 0xfffc) == 0)
232 raise_exception_err(EXCP0D_GPF
, 0);
234 if (load_segment(&e1
, &e2
, selector
) != 0)
235 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
236 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
237 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
238 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
240 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
241 if (!(e2
& DESC_P_MASK
))
242 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
243 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
244 /* to inner privilege */
245 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
246 if ((ss
& 0xfffc) == 0)
247 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
249 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
250 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
251 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
252 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
254 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
255 if (!(ss_e2
& DESC_S_MASK
) ||
256 (ss_e2
& DESC_CS_MASK
) ||
257 !(ss_e2
& DESC_W_MASK
))
258 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
259 if (!(ss_e2
& DESC_P_MASK
))
260 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
262 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
263 /* to same privilege */
266 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
267 new_stack
= 0; /* avoid warning */
285 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
286 if (env
->eflags
& VM_MASK
)
290 /* XXX: check that enough room is available */
292 old_esp
= env
->regs
[R_ESP
];
293 old_ss
= env
->segs
[R_SS
].selector
;
294 load_seg(R_SS
, ss
, env
->eip
);
298 esp
= env
->regs
[R_ESP
];
304 old_cs
= env
->segs
[R_CS
].selector
;
305 load_seg(R_CS
, selector
, env
->eip
);
307 env
->regs
[R_ESP
] = esp
- push_size
;
308 ssp
= env
->segs
[R_SS
].base
+ esp
;
311 if (env
->eflags
& VM_MASK
) {
313 stl(ssp
, env
->segs
[R_GS
].selector
);
315 stl(ssp
, env
->segs
[R_FS
].selector
);
317 stl(ssp
, env
->segs
[R_DS
].selector
);
319 stl(ssp
, env
->segs
[R_ES
].selector
);
328 old_eflags
= compute_eflags();
329 stl(ssp
, old_eflags
);
334 if (has_error_code
) {
336 stl(ssp
, error_code
);
346 stw(ssp
, compute_eflags());
351 if (has_error_code
) {
353 stw(ssp
, error_code
);
357 /* interrupt gate clear IF mask */
358 if ((type
& 1) == 0) {
359 env
->eflags
&= ~IF_MASK
;
361 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
364 /* real mode interrupt */
365 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
366 unsigned int next_eip
)
371 uint32_t offset
, esp
;
372 uint32_t old_cs
, old_eip
;
374 /* real mode (simpler !) */
376 if (intno
* 4 + 3 > dt
->limit
)
377 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
378 ptr
= dt
->base
+ intno
* 4;
380 selector
= lduw(ptr
+ 2);
381 esp
= env
->regs
[R_ESP
] & 0xffff;
382 ssp
= env
->segs
[R_SS
].base
+ esp
;
387 old_cs
= env
->segs
[R_CS
].selector
;
389 stw(ssp
, compute_eflags());
396 /* update processor state */
397 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & ~0xffff) | (esp
& 0xffff);
399 env
->segs
[R_CS
].selector
= selector
;
400 env
->segs
[R_CS
].base
= (uint8_t *)(selector
<< 4);
401 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
404 /* fake user mode interrupt */
405 void do_interrupt_user(int intno
, int is_int
, int error_code
,
406 unsigned int next_eip
)
414 ptr
= dt
->base
+ (intno
* 8);
417 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
419 /* check privilege if software int */
420 if (is_int
&& dpl
< cpl
)
421 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
423 /* Since we emulate only user space, we cannot do more than
424 exiting the emulation with the suitable exception and error
431 * Begin execution of an interruption. is_int is TRUE if coming from
432 * the int instruction. next_eip is the EIP value AFTER the interrupt
433 * instruction. It is only relevant if is_int is TRUE.
435 void do_interrupt(int intno
, int is_int
, int error_code
,
436 unsigned int next_eip
)
438 if (env
->cr
[0] & CR0_PE_MASK
) {
439 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
);
441 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
446 * Signal an interruption. It is executed in the main CPU loop.
447 * is_int is TRUE if coming from the int instruction. next_eip is the
448 * EIP value AFTER the interrupt instruction. It is only relevant if
451 void raise_interrupt(int intno
, int is_int
, int error_code
,
452 unsigned int next_eip
)
454 env
->exception_index
= intno
;
455 env
->error_code
= error_code
;
456 env
->exception_is_int
= is_int
;
457 env
->exception_next_eip
= next_eip
;
461 /* shortcuts to generate exceptions */
462 void raise_exception_err(int exception_index
, int error_code
)
464 raise_interrupt(exception_index
, 0, error_code
, 0);
467 void raise_exception(int exception_index
)
469 raise_interrupt(exception_index
, 0, 0, 0);
472 #ifdef BUGGY_GCC_DIV64
473 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
474 call it from another function */
475 uint32_t div64(uint32_t *q_ptr
, uint64_t num
, uint32_t den
)
481 int32_t idiv64(int32_t *q_ptr
, int64_t num
, int32_t den
)
488 void helper_divl_EAX_T0(uint32_t eip
)
490 unsigned int den
, q
, r
;
493 num
= EAX
| ((uint64_t)EDX
<< 32);
497 raise_exception(EXCP00_DIVZ
);
499 #ifdef BUGGY_GCC_DIV64
500 r
= div64(&q
, num
, den
);
509 void helper_idivl_EAX_T0(uint32_t eip
)
514 num
= EAX
| ((uint64_t)EDX
<< 32);
518 raise_exception(EXCP00_DIVZ
);
520 #ifdef BUGGY_GCC_DIV64
521 r
= idiv64(&q
, num
, den
);
530 void helper_cmpxchg8b(void)
535 eflags
= cc_table
[CC_OP
].compute_all();
536 d
= ldq((uint8_t *)A0
);
537 if (d
== (((uint64_t)EDX
<< 32) | EAX
)) {
538 stq((uint8_t *)A0
, ((uint64_t)ECX
<< 32) | EBX
);
548 /* We simulate a pre-MMX pentium as in valgrind */
549 #define CPUID_FP87 (1 << 0)
550 #define CPUID_VME (1 << 1)
551 #define CPUID_DE (1 << 2)
552 #define CPUID_PSE (1 << 3)
553 #define CPUID_TSC (1 << 4)
554 #define CPUID_MSR (1 << 5)
555 #define CPUID_PAE (1 << 6)
556 #define CPUID_MCE (1 << 7)
557 #define CPUID_CX8 (1 << 8)
558 #define CPUID_APIC (1 << 9)
559 #define CPUID_SEP (1 << 11) /* sysenter/sysexit */
560 #define CPUID_MTRR (1 << 12)
561 #define CPUID_PGE (1 << 13)
562 #define CPUID_MCA (1 << 14)
563 #define CPUID_CMOV (1 << 15)
565 #define CPUID_MMX (1 << 23)
566 #define CPUID_FXSR (1 << 24)
567 #define CPUID_SSE (1 << 25)
568 #define CPUID_SSE2 (1 << 26)
570 void helper_cpuid(void)
573 EAX
= 1; /* max EAX index supported */
577 } else if (EAX
== 1) {
578 int family
, model
, stepping
;
591 EAX
= (family
<< 8) | (model
<< 4) | stepping
;
594 EDX
= CPUID_FP87
| CPUID_DE
| CPUID_PSE
|
595 CPUID_TSC
| CPUID_MSR
| CPUID_MCE
|
596 CPUID_CX8
| CPUID_PGE
| CPUID_CMOV
;
600 static inline void load_seg_cache(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
602 sc
->base
= (void *)((e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000));
603 sc
->limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
604 if (e2
& DESC_G_MASK
)
605 sc
->limit
= (sc
->limit
<< 12) | 0xfff;
609 void helper_lldt_T0(void)
617 selector
= T0
& 0xffff;
618 if ((selector
& 0xfffc) == 0) {
619 /* XXX: NULL selector case: invalid LDT */
620 env
->ldt
.base
= NULL
;
624 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
626 index
= selector
& ~7;
627 if ((index
+ 7) > dt
->limit
)
628 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
629 ptr
= dt
->base
+ index
;
632 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
633 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
634 if (!(e2
& DESC_P_MASK
))
635 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
636 load_seg_cache(&env
->ldt
, e1
, e2
);
638 env
->ldt
.selector
= selector
;
641 void helper_ltr_T0(void)
649 selector
= T0
& 0xffff;
650 if ((selector
& 0xfffc) == 0) {
651 /* NULL selector case: invalid LDT */
657 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
659 index
= selector
& ~7;
660 if ((index
+ 7) > dt
->limit
)
661 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
662 ptr
= dt
->base
+ index
;
665 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
666 if ((e2
& DESC_S_MASK
) ||
667 (type
!= 2 && type
!= 9))
668 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
669 if (!(e2
& DESC_P_MASK
))
670 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
671 load_seg_cache(&env
->tr
, e1
, e2
);
672 e2
|= 0x00000200; /* set the busy bit */
675 env
->tr
.selector
= selector
;
678 /* only works if protected mode and not VM86 */
679 void load_seg(int seg_reg
, int selector
, unsigned int cur_eip
)
684 sc
= &env
->segs
[seg_reg
];
685 if ((selector
& 0xfffc) == 0) {
686 /* null selector case */
687 if (seg_reg
== R_SS
) {
689 raise_exception_err(EXCP0D_GPF
, 0);
691 /* XXX: each access should trigger an exception */
697 if (load_segment(&e1
, &e2
, selector
) != 0) {
699 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
701 if (!(e2
& DESC_S_MASK
) ||
702 (e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
704 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
707 if (seg_reg
== R_SS
) {
708 if ((e2
& (DESC_CS_MASK
| DESC_W_MASK
)) == 0) {
710 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
713 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
715 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
719 if (!(e2
& DESC_P_MASK
)) {
722 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
724 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
726 load_seg_cache(sc
, e1
, e2
);
728 fprintf(logfile
, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
729 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
732 sc
->selector
= selector
;
735 /* protected mode jump */
736 void jmp_seg(int selector
, unsigned int new_eip
)
739 uint32_t e1
, e2
, cpl
, dpl
, rpl
;
741 if ((selector
& 0xfffc) == 0) {
742 raise_exception_err(EXCP0D_GPF
, 0);
745 if (load_segment(&e1
, &e2
, selector
) != 0)
746 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
747 cpl
= env
->segs
[R_CS
].selector
& 3;
748 if (e2
& DESC_S_MASK
) {
749 if (!(e2
& DESC_CS_MASK
))
750 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
751 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
752 if (e2
& DESC_CS_MASK
) {
753 /* conforming code segment */
755 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
757 /* non conforming code segment */
760 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
762 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
764 if (!(e2
& DESC_P_MASK
))
765 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
766 load_seg_cache(&sc1
, e1
, e2
);
767 if (new_eip
> sc1
.limit
)
768 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
769 env
->segs
[R_CS
].base
= sc1
.base
;
770 env
->segs
[R_CS
].limit
= sc1
.limit
;
771 env
->segs
[R_CS
].flags
= sc1
.flags
;
772 env
->segs
[R_CS
].selector
= (selector
& 0xfffc) | cpl
;
775 cpu_abort(env
, "jmp to call/task gate not supported 0x%04x:0x%08x",
780 /* init the segment cache in vm86 mode */
781 static inline void load_seg_vm(int seg
, int selector
)
783 SegmentCache
*sc
= &env
->segs
[seg
];
785 sc
->base
= (uint8_t *)(selector
<< 4);
786 sc
->selector
= selector
;
791 /* protected mode iret */
792 void helper_iret_protected(int shift
)
794 uint32_t sp
, new_cs
, new_eip
, new_eflags
, new_esp
, new_ss
;
795 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
797 int cpl
, dpl
, rpl
, eflags_mask
;
800 sp
= env
->regs
[R_ESP
];
801 if (!(env
->segs
[R_SS
].flags
& DESC_B_MASK
))
803 ssp
= env
->segs
[R_SS
].base
+ sp
;
806 new_eflags
= ldl(ssp
+ 8);
807 new_cs
= ldl(ssp
+ 4) & 0xffff;
809 if (new_eflags
& VM_MASK
)
813 new_eflags
= lduw(ssp
+ 4);
814 new_cs
= lduw(ssp
+ 2);
817 if ((new_cs
& 0xfffc) == 0)
818 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
819 if (load_segment(&e1
, &e2
, new_cs
) != 0)
820 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
821 if (!(e2
& DESC_S_MASK
) ||
822 !(e2
& DESC_CS_MASK
))
823 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
824 cpl
= env
->segs
[R_CS
].selector
& 3;
827 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
828 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
829 if (e2
& DESC_CS_MASK
) {
831 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
834 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
836 if (!(e2
& DESC_P_MASK
))
837 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
840 /* return to same privilege level */
841 load_seg(R_CS
, new_cs
, env
->eip
);
842 new_esp
= sp
+ (6 << shift
);
844 /* return to different privilege level */
847 new_esp
= ldl(ssp
+ 12);
848 new_ss
= ldl(ssp
+ 16) & 0xffff;
851 new_esp
= lduw(ssp
+ 6);
852 new_ss
= lduw(ssp
+ 8);
855 if ((new_ss
& 3) != rpl
)
856 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
857 if (load_segment(&e1
, &e2
, new_ss
) != 0)
858 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
859 if (!(e2
& DESC_S_MASK
) ||
860 (e2
& DESC_CS_MASK
) ||
862 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
863 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
865 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
866 if (!(e2
& DESC_P_MASK
))
867 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
869 load_seg(R_CS
, new_cs
, env
->eip
);
870 load_seg(R_SS
, new_ss
, env
->eip
);
872 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
873 env
->regs
[R_ESP
] = new_esp
;
875 env
->regs
[R_ESP
] = (env
->regs
[R_ESP
] & 0xffff0000) |
879 eflags_mask
= FL_UPDATE_CPL0_MASK
;
881 eflags_mask
= FL_UPDATE_MASK32
;
883 eflags_mask
&= 0xffff;
884 load_eflags(new_eflags
, eflags_mask
);
888 new_esp
= ldl(ssp
+ 12);
889 new_ss
= ldl(ssp
+ 16);
890 new_es
= ldl(ssp
+ 20);
891 new_ds
= ldl(ssp
+ 24);
892 new_fs
= ldl(ssp
+ 28);
893 new_gs
= ldl(ssp
+ 32);
895 /* modify processor state */
896 load_eflags(new_eflags
, FL_UPDATE_CPL0_MASK
| VM_MASK
| VIF_MASK
| VIP_MASK
);
897 load_seg_vm(R_CS
, new_cs
);
898 load_seg_vm(R_SS
, new_ss
);
899 load_seg_vm(R_ES
, new_es
);
900 load_seg_vm(R_DS
, new_ds
);
901 load_seg_vm(R_FS
, new_fs
);
902 load_seg_vm(R_GS
, new_gs
);
905 env
->regs
[R_ESP
] = new_esp
;
908 void helper_movl_crN_T0(int reg
)
913 cpu_x86_update_cr0(env
);
916 cpu_x86_update_cr3(env
);
922 void helper_movl_drN_T0(int reg
)
927 void helper_invlpg(unsigned int addr
)
929 cpu_x86_flush_tlb(env
, addr
);
937 void helper_rdtsc(void)
941 asm("rdtsc" : "=A" (val
));
943 /* better than nothing: the time increases */
950 void helper_wrmsr(void)
953 case MSR_IA32_SYSENTER_CS
:
954 env
->sysenter_cs
= EAX
& 0xffff;
956 case MSR_IA32_SYSENTER_ESP
:
957 env
->sysenter_esp
= EAX
;
959 case MSR_IA32_SYSENTER_EIP
:
960 env
->sysenter_eip
= EAX
;
963 /* XXX: exception ? */
968 void helper_rdmsr(void)
971 case MSR_IA32_SYSENTER_CS
:
972 EAX
= env
->sysenter_cs
;
975 case MSR_IA32_SYSENTER_ESP
:
976 EAX
= env
->sysenter_esp
;
979 case MSR_IA32_SYSENTER_EIP
:
980 EAX
= env
->sysenter_eip
;
984 /* XXX: exception ? */
989 void helper_lsl(void)
991 unsigned int selector
, limit
;
994 CC_SRC
= cc_table
[CC_OP
].compute_all() & ~CC_Z
;
995 selector
= T0
& 0xffff;
996 if (load_segment(&e1
, &e2
, selector
) != 0)
998 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
1000 limit
= (limit
<< 12) | 0xfff;
1005 void helper_lar(void)
1007 unsigned int selector
;
1010 CC_SRC
= cc_table
[CC_OP
].compute_all() & ~CC_Z
;
1011 selector
= T0
& 0xffff;
1012 if (load_segment(&e1
, &e2
, selector
) != 0)
1014 T1
= e2
& 0x00f0ff00;
1020 #ifndef USE_X86LDOUBLE
1021 void helper_fldt_ST0_A0(void)
1024 new_fpstt
= (env
->fpstt
- 1) & 7;
1025 env
->fpregs
[new_fpstt
] = helper_fldt((uint8_t *)A0
);
1026 env
->fpstt
= new_fpstt
;
1027 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
1030 void helper_fstt_ST0_A0(void)
1032 helper_fstt(ST0
, (uint8_t *)A0
);
1038 #define MUL10(iv) ( iv + iv + (iv << 3) )
1040 void helper_fbld_ST0_A0(void)
1048 for(i
= 8; i
>= 0; i
--) {
1049 v
= ldub((uint8_t *)A0
+ i
);
1050 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
1053 if (ldub((uint8_t *)A0
+ 9) & 0x80)
1059 void helper_fbst_ST0_A0(void)
1063 uint8_t *mem_ref
, *mem_end
;
1068 mem_ref
= (uint8_t *)A0
;
1069 mem_end
= mem_ref
+ 9;
1076 while (mem_ref
< mem_end
) {
1081 v
= ((v
/ 10) << 4) | (v
% 10);
1084 while (mem_ref
< mem_end
) {
1089 void helper_f2xm1(void)
1091 ST0
= pow(2.0,ST0
) - 1.0;
1094 void helper_fyl2x(void)
1096 CPU86_LDouble fptemp
;
1100 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
1104 env
->fpus
&= (~0x4700);
1109 void helper_fptan(void)
1111 CPU86_LDouble fptemp
;
1114 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
1120 env
->fpus
&= (~0x400); /* C2 <-- 0 */
1121 /* the above code is for |arg| < 2**52 only */
1125 void helper_fpatan(void)
1127 CPU86_LDouble fptemp
, fpsrcop
;
1131 ST1
= atan2(fpsrcop
,fptemp
);
1135 void helper_fxtract(void)
1137 CPU86_LDoubleU temp
;
1138 unsigned int expdif
;
1141 expdif
= EXPD(temp
) - EXPBIAS
;
1142 /*DP exponent bias*/
1149 void helper_fprem1(void)
1151 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
1152 CPU86_LDoubleU fpsrcop1
, fptemp1
;
1158 fpsrcop1
.d
= fpsrcop
;
1160 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
1162 dblq
= fpsrcop
/ fptemp
;
1163 dblq
= (dblq
< 0.0)? ceil(dblq
): floor(dblq
);
1164 ST0
= fpsrcop
- fptemp
*dblq
;
1165 q
= (int)dblq
; /* cutting off top bits is assumed here */
1166 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1167 /* (C0,C1,C3) <-- (q2,q1,q0) */
1168 env
->fpus
|= (q
&0x4) << 6; /* (C0) <-- q2 */
1169 env
->fpus
|= (q
&0x2) << 8; /* (C1) <-- q1 */
1170 env
->fpus
|= (q
&0x1) << 14; /* (C3) <-- q0 */
1172 env
->fpus
|= 0x400; /* C2 <-- 1 */
1173 fptemp
= pow(2.0, expdif
-50);
1174 fpsrcop
= (ST0
/ ST1
) / fptemp
;
1175 /* fpsrcop = integer obtained by rounding to the nearest */
1176 fpsrcop
= (fpsrcop
-floor(fpsrcop
) < ceil(fpsrcop
)-fpsrcop
)?
1177 floor(fpsrcop
): ceil(fpsrcop
);
1178 ST0
-= (ST1
* fpsrcop
* fptemp
);
1182 void helper_fprem(void)
1184 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
1185 CPU86_LDoubleU fpsrcop1
, fptemp1
;
1191 fpsrcop1
.d
= fpsrcop
;
1193 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
1194 if ( expdif
< 53 ) {
1195 dblq
= fpsrcop
/ fptemp
;
1196 dblq
= (dblq
< 0.0)? ceil(dblq
): floor(dblq
);
1197 ST0
= fpsrcop
- fptemp
*dblq
;
1198 q
= (int)dblq
; /* cutting off top bits is assumed here */
1199 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1200 /* (C0,C1,C3) <-- (q2,q1,q0) */
1201 env
->fpus
|= (q
&0x4) << 6; /* (C0) <-- q2 */
1202 env
->fpus
|= (q
&0x2) << 8; /* (C1) <-- q1 */
1203 env
->fpus
|= (q
&0x1) << 14; /* (C3) <-- q0 */
1205 env
->fpus
|= 0x400; /* C2 <-- 1 */
1206 fptemp
= pow(2.0, expdif
-50);
1207 fpsrcop
= (ST0
/ ST1
) / fptemp
;
1208 /* fpsrcop = integer obtained by chopping */
1209 fpsrcop
= (fpsrcop
< 0.0)?
1210 -(floor(fabs(fpsrcop
))): floor(fpsrcop
);
1211 ST0
-= (ST1
* fpsrcop
* fptemp
);
1215 void helper_fyl2xp1(void)
1217 CPU86_LDouble fptemp
;
1220 if ((fptemp
+1.0)>0.0) {
1221 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
1225 env
->fpus
&= (~0x4700);
1230 void helper_fsqrt(void)
1232 CPU86_LDouble fptemp
;
1236 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1242 void helper_fsincos(void)
1244 CPU86_LDouble fptemp
;
1247 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
1253 env
->fpus
&= (~0x400); /* C2 <-- 0 */
1254 /* the above code is for |arg| < 2**63 only */
1258 void helper_frndint(void)
1264 switch(env
->fpuc
& RC_MASK
) {
1267 asm("rndd %0, %1" : "=f" (a
) : "f"(a
));
1270 asm("rnddm %0, %1" : "=f" (a
) : "f"(a
));
1273 asm("rnddp %0, %1" : "=f" (a
) : "f"(a
));
1276 asm("rnddz %0, %1" : "=f" (a
) : "f"(a
));
1285 void helper_fscale(void)
1287 CPU86_LDouble fpsrcop
, fptemp
;
1290 fptemp
= pow(fpsrcop
,ST1
);
1294 void helper_fsin(void)
1296 CPU86_LDouble fptemp
;
1299 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
1303 env
->fpus
&= (~0x400); /* C2 <-- 0 */
1304 /* the above code is for |arg| < 2**53 only */
1308 void helper_fcos(void)
1310 CPU86_LDouble fptemp
;
1313 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
1317 env
->fpus
&= (~0x400); /* C2 <-- 0 */
1318 /* the above code is for |arg| < 2**63 only */
1322 void helper_fxam_ST0(void)
1324 CPU86_LDoubleU temp
;
1329 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
1331 env
->fpus
|= 0x200; /* C1 <-- 1 */
1333 expdif
= EXPD(temp
);
1334 if (expdif
== MAXEXPD
) {
1335 if (MANTD(temp
) == 0)
1336 env
->fpus
|= 0x500 /*Infinity*/;
1338 env
->fpus
|= 0x100 /*NaN*/;
1339 } else if (expdif
== 0) {
1340 if (MANTD(temp
) == 0)
1341 env
->fpus
|= 0x4000 /*Zero*/;
1343 env
->fpus
|= 0x4400 /*Denormal*/;
1349 void helper_fstenv(uint8_t *ptr
, int data32
)
1351 int fpus
, fptag
, exp
, i
;
1355 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
1357 for (i
=7; i
>=0; i
--) {
1359 if (env
->fptags
[i
]) {
1362 tmp
.d
= env
->fpregs
[i
];
1365 if (exp
== 0 && mant
== 0) {
1368 } else if (exp
== 0 || exp
== MAXEXPD
1369 #ifdef USE_X86LDOUBLE
1370 || (mant
& (1LL << 63)) == 0
1373 /* NaNs, infinity, denormal */
1380 stl(ptr
, env
->fpuc
);
1382 stl(ptr
+ 8, fptag
);
1389 stw(ptr
, env
->fpuc
);
1391 stw(ptr
+ 4, fptag
);
1399 void helper_fldenv(uint8_t *ptr
, int data32
)
1404 env
->fpuc
= lduw(ptr
);
1405 fpus
= lduw(ptr
+ 4);
1406 fptag
= lduw(ptr
+ 8);
1409 env
->fpuc
= lduw(ptr
);
1410 fpus
= lduw(ptr
+ 2);
1411 fptag
= lduw(ptr
+ 4);
1413 env
->fpstt
= (fpus
>> 11) & 7;
1414 env
->fpus
= fpus
& ~0x3800;
1415 for(i
= 0;i
< 7; i
++) {
1416 env
->fptags
[i
] = ((fptag
& 3) == 3);
1421 void helper_fsave(uint8_t *ptr
, int data32
)
1426 helper_fstenv(ptr
, data32
);
1428 ptr
+= (14 << data32
);
1429 for(i
= 0;i
< 8; i
++) {
1431 #ifdef USE_X86LDOUBLE
1432 *(long double *)ptr
= tmp
;
1434 helper_fstt(tmp
, ptr
);
1453 void helper_frstor(uint8_t *ptr
, int data32
)
1458 helper_fldenv(ptr
, data32
);
1459 ptr
+= (14 << data32
);
1461 for(i
= 0;i
< 8; i
++) {
1462 #ifdef USE_X86LDOUBLE
1463 tmp
= *(long double *)ptr
;
1465 tmp
= helper_fldt(ptr
);