/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define raise_exception_err(a, b)\
do {\
    printf("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
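/*
 * Added note (illustrative, based only on the table contents above):
 * rclw_table and rclb_table map a rotate count already masked to 5 bits
 * (0..31) onto the count modulo 17 (16-bit rotate through carry) and modulo 9
 * (8-bit rotate through carry), since RCL/RCR rotate through the operand plus
 * the carry bit.  A minimal equivalent without the tables would be:
 */
#if 0
static int rcl_count(int count, int operand_bits)
{
    return count % (operand_bits + 1);   /* +1 for the carry flag position */
}
#endif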
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}

void cpu_loop_exit(void)
{
    /* NOTE: the registers must be saved by hand here because
       longjmp() restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}

/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
    if (loglevel)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector << 3);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, env->eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, new_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector << 3);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
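/*
 * Added note (illustrative sketch, not part of the original source): check_io()
 * walks the TSS I/O permission bitmap.  For example, a 1-byte access to port
 * 0x3f9 tests bitmap byte 0x3f9 >> 3 = 0x7f, bit 0x3f9 & 7 = 1; the access is
 * allowed only if that bit (and, for wider accesses, the following size-1
 * bits) is zero.  A standalone version of the same test, where io_base stands
 * for the 16-bit bitmap offset stored at TSS offset 0x66, could look like:
 */
#if 0
static int io_access_allowed(const uint8_t *tss, int io_base, int port, int size)
{
    int bit_index = port & 7;
    /* two consecutive bytes are always read, as in check_io() above */
    int val = tss[io_base + (port >> 3)] |
              (tss[io_base + (port >> 3) + 1] << 8);
    int mask = (1 << size) - 1;
    return ((val >> bit_index) & mask) == 0;
}
#endif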
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}

/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
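/*
 * Added note (illustrative, not part of the original source): the sp_mask
 * argument makes these macros honour the stack-segment size.  With a 16-bit
 * stack (sp_mask == 0xffff) only the low 16 bits of the stack pointer address
 * memory, so SP wraps at 64K, while a 32-bit stack uses the full ESP.  For
 * example, pushing a word with sp == 0 and sp_mask == 0xffff stores it at
 * ssp + 0xfffe:
 */
#if 0
static void pushw_example(uint8_t *ssp)
{
    uint32_t sp = 0;
    PUSHW(ssp, sp, 0xffff, 0x1234); /* sp becomes 0xfffffffe; store at ssp + 0xfffe */
}
#endif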
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* XXX: explain me why W2K hangs if the whole segment cache is
               reset ? */
            env->segs[R_ES].selector = 0;
            env->segs[R_ES].flags = 0;
            env->segs[R_DS].selector = 0;
            env->segs[R_DS].flags = 0;
            env->segs[R_FS].selector = 0;
            env->segs[R_FS].flags = 0;
            env->segs[R_GS].selector = 0;
            env->segs[R_GS].flags = 0;
#if 0
            cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
#endif
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#if 0
    {
        static int count;
        if (env->cr[0] & CR0_PE_MASK) {
            int i;
            uint8_t *ptr;
            fprintf(stdout, "%d: v=%02x e=%04x i=%d CPL=%d CS:EIP=%04x:%08x SS:ESP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(stdout, " CR2=%08x", env->cr[2]);
            } else {
                fprintf(stdout, " EAX=%08x", env->regs[R_EAX]);
            }
            fprintf(stdout, "\n");

            cpu_x86_dump_state(env, stdout, X86_DUMP_CCOP);

            fprintf(stdout, " code=");
            ptr = env->segs[R_CS].base + env->eip;
            for(i = 0; i < 16; i++) {
                fprintf(stdout, " %02x", ldub(ptr + i));
            }
            fprintf(stdout, "\n");
            count++;
        }
    }
#endif
#ifdef DEBUG_PCALL
    if (loglevel) {
        static int count;
        int i;
        uint8_t *ptr;
        fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
                count, intno, error_code, is_int);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);

        fprintf(logfile, " code=");
        ptr = env->segs[R_CS].base + env->eip;
        for(i = 0; i < 16; i++) {
            fprintf(logfile, " %02x", ldub(ptr + i));
        }
        fprintf(logfile, "\n");
        count++;
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}

/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)

#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        EBX = 0x756e6547;
        EDX = 0x49656e69;
        ECX = 0x6c65746e;
        break;
    case 1:
        {
            int family, model, stepping;
#if 0
            /* pentium 75-200 */
            family = 5;
            model = 2;
            stepping = 11;
#else
            /* pentium pro */
            family = 6;
            model = 1;
            stepping = 3;
#endif
            EAX = (family << 8) | (model << 4) | stepping;
            EBX = 0;
            ECX = 0;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        EAX = 0x410601;
        EBX = 0;
        ECX = 0;
        EDX = 0;
        break;
    }
}
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}

void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid LDT */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
#ifdef DEBUG_PCALL
        if (loglevel) {
            SegmentCache *sc;
            sc = &env->segs[seg_reg];
            fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                    selector, (unsigned long)sc->base, sc->limit, sc->flags);
        }
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lcall %04x:%08x\n",
                new_cs, new_eip);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel)
                fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}

static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lret new %04x:%08x addend=0x%x\n",
                new_cs, new_eip, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                       get_seg_base(ss_e1, ss_e2),
                       get_seg_limit(ss_e1, ss_e2),
                       ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        /* XXX: change sp_mask according to old segment ? */

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}

void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_movl_crN_T0(int reg)
{
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
void helper_rdtsc(void)
{
    uint64_t val;

#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    {
        /* better than nothing: the time increases */
        static uint64_t emu_time;
        val = emu_time++;
    }
#endif
    EAX = val;
    EDX = val >> 32;
}
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;

    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

/* BCD ops */

#define MUL10(iv) ( iv + iv + (iv << 3) )
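/*
 * Added note: MUL10(iv) expands to iv + iv + (iv << 3), i.e. 2*iv + 8*iv =
 * 10*iv, a multiply-by-ten written only with adds and a shift.  For example,
 * MUL10(7) = 7 + 7 + (7 << 3) = 7 + 7 + 56 = 70.
 */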
void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    CPU86_LDouble tmp;
    int v;
    uint8_t *mem_ref, *mem_end;
    int64_t val;

    tmp = rint(ST0);
    val = (int64_t)tmp;
    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
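/*
 * Added note (illustrative sketch, not part of the original source): FBLD and
 * FBST operate on an 80-bit packed-BCD value: 9 bytes of two decimal digits
 * each (least significant byte first) plus a final byte whose bit 7 holds the
 * sign.  A minimal standalone encoder for the 9 digit bytes could look like:
 */
#if 0
static void encode_packed_bcd(uint8_t *dst, uint64_t val)
{
    int i;
    for (i = 0; i < 9; i++) {                  /* up to 18 decimal digits */
        unsigned int v = val % 100;
        val /= 100;
        dst[i] = ((v / 10) << 4) | (v % 10);   /* high nibble = tens digit */
    }
}
#endif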
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp>0.0){
        fptemp = log(fptemp)/log(2.0);	 /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop,fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}

void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
                                /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp<0.0) {
        env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}

void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop,ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400);  /* C2 <-- 0 */
        /* the above code is for  |arg| < 2**63 only */
    }
}
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700);  /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}

void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    }
    else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}

void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#endif

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}