/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
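
/* modulo 17 table: a 16-bit rotate through carry cycles every 17 steps
   (16 data bits plus CF), so rotation counts are reduced modulo 17 */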
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
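
/* modulo 9 table: an 8-bit rotate through carry cycles every 9 steps
   (8 data bits plus CF) */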
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
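
/* x87 constants, as loaded by the fldz/fld1/fldpi/fldlg2/fldln2/fldl2e/
   fldl2t instructions: 0.0, 1.0, pi, log10(2), ln(2), log2(e), log2(10) */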
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /* pi */
    0.30102999566398119523L,  /* lg2 */
    0.69314718055994530943L,  /* ln2 */
    1.44269504088896340739L,  /* l2e */
    3.32192809488736234781L,  /* l2t */
};
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
#ifdef reg_EAX
    env->regs[R_EAX] = EAX;
#endif
#ifdef reg_ECX
    env->regs[R_ECX] = ECX;
#endif
#ifdef reg_EDX
    env->regs[R_EDX] = EDX;
#endif
#ifdef reg_EBX
    env->regs[R_EBX] = EBX;
#endif
#ifdef reg_ESP
    env->regs[R_ESP] = ESP;
#endif
#ifdef reg_EBP
    env->regs[R_EBP] = EBP;
#endif
#ifdef reg_ESI
    env->regs[R_ESI] = ESI;
#endif
#ifdef reg_EDI
    env->regs[R_EDI] = EDI;
#endif
    longjmp(env->jmp_env, 1);
}
/* return non-zero on error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
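
/* a segment descriptor is two 32-bit words, e1 (low) and e2 (high):
   limit[15:0] = e1[15:0], limit[19:16] = e2[19:16],
   base[15:0] = e1[31:16], base[23:16] = e2[7:0], base[31:24] = e2[31:24],
   attribute flags in e2[23:8] */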
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
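
/* TSS offsets used by switch_tss below — 32-bit TSS: 0x1c CR3, 0x20 EIP,
   0x24 EFLAGS, 0x28-0x44 EAX..EDI, 0x48-0x5c ES..GS, 0x60 LDT, 0x64 trap;
   16-bit TSS: 0x0e IP, 0x10 FLAGS, 0x12-0x20 regs, 0x22-0x28 segs, 0x2a LDT */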
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint8_t *tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    uint8_t *ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector << 3);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, env->eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, new_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        uint8_t *ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector << 3);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        env->cr[3] = new_cr3;
        cpu_x86_update_cr3(env);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    dt = &env->gdt;
    index = new_ldt & ~7;
    if ((index + 7) > dt->limit)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    ptr = dt->base + index;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
    load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in the TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32-bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
void check_iob_T0(void)
{
    check_io(T0 & 0xffff, 1);
}

void check_iow_T0(void)
{
    check_io(T0 & 0xffff, 2);
}

void check_iol_T0(void)
{
    check_io(T0 & 0xffff, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
/* XXX: add an is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
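
/* note: these macros update only the local sp copy; callers fold the new
   value back into ESP under the segment's sp_mask once all pushes/pops
   are done, so a faulting push leaves ESP unchanged */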
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
    uint32_t old_eip;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
        if (has_error_code) {
            int mask;
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;
#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr;
    int dpl, cpl;
    uint32_t e2;

    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interrupt. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
#ifdef DEBUG_PCALL
    if (loglevel) {
        static int count;
        fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
                count, intno, error_code, is_int);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
#if 0
        {
            int i;
            uint8_t *ptr;
            ptr = env->segs[R_CS].base + env->eip;
            for(i = 0; i < 16; i++) {
                printf(" %02x", ldub(ptr + i));
            }
            printf("\n");
        }
#endif
        count++;
    }
#endif
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/*
 * Signal an interrupt. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */
void raise_exception_err(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
/* We simulate a pre-MMX pentium as in valgrind */
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)

#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
void helper_cpuid(void)
{
    if (EAX == 0) {
        EAX = 1; /* max EAX index supported */
        EBX = 0x756e6547; /* "Genu" */
        ECX = 0x6c65746e; /* "ntel" */
        EDX = 0x49656e69; /* "ineI" */
    } else if (EAX == 1) {
        int family, model, stepping;
#if 0
        /* pentium 75-200 */
        family = 5;
        model = 2;
        stepping = 11;
#else
        /* pentium pro */
        family = 6;
        model = 1;
        stepping = 3;
#endif
        EAX = (family << 8) | (model << 4) | stepping;
        EBX = 0;
        ECX = 0;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
    }
}
void helper_lldt_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
void helper_ltr_T0(void)
{
    int selector;
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type;
    uint8_t *ptr;

    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= 0x00000200; /* set the busy bit */
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works in protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector, unsigned int cur_eip)
{
    int index;
    uint32_t e1, e2;
    int rpl, dpl, cpl;
    SegmentCache *dt;
    uint8_t *ptr;

    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, 0);
        } else {
            cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
        }
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            EIP = cur_eip;
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                EIP = cur_eip;
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    EIP = cur_eip;
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            EIP = cur_eip;
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(void)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lcall %04x:%08x\n",
                new_cs, new_eip);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
#endif
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non-conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
            if (loglevel)
                fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
#endif
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
#ifdef DEBUG_PCALL
    if (loglevel) {
        fprintf(logfile, "lret new %04x:%08x addend=0x%x\n",
                new_cs, new_eip, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
#endif
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                               get_seg_base(ss_e1, ss_e2),
                               get_seg_limit(ss_e1, ss_e2),
                               ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        /* XXX: change sp_mask according to old segment ? */
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip;
    ESP = new_esp;
}
void helper_iret_protected(int shift)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_movl_crN_T0(int reg)
{
    env->cr[reg] = T0;
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env);
        break;
    case 3:
        cpu_x86_update_cr3(env);
        break;
    }
}

/* XXX: do more */
void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}
#ifndef __i386__
/* fallback counter for hosts without rdtsc (declaration name assumed) */
uint64_t emu_time;
#endif

void helper_rdtsc(void)
{
    uint64_t val;
#ifdef __i386__
    asm("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
    val = emu_time++;
#endif
    EAX = val;
    EDX = val >> 32;
}
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}
void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}
void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}
void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}
/* FPU helpers */

void helper_fldt_ST0_A0(void)
{
    int new_fpstt;
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

/* BCD ops */
#define MUL10(iv) ( iv + iv + (iv << 3) )

void helper_fbld_ST0_A0(void)
{
    CPU86_LDouble tmp;
    uint64_t val;
    unsigned int v;
    int i;

    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}
1947 uint8_t *mem_ref
, *mem_end
;
1952 mem_ref
= (uint8_t *)A0
;
1953 mem_end
= mem_ref
+ 9;
1960 while (mem_ref
< mem_end
) {
1965 v
= ((v
/ 10) << 4) | (v
% 10);
1968 while (mem_ref
< mem_end
) {
void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}
void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}
void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}
void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp+1.0) > 0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}
void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
#ifdef __arm__
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
#else
    a = rint(a);
#endif
    ST0 = a;
}
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop, ST1);
    ST0 *= fptemp;
}
void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}
void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /* Infinity */;
        else
            env->fpus |= 0x100 /* NaN */;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /* Zero */;
        else
            env->fpus |= 0x4400 /* Denormal */;
    } else {
        env->fpus |= 0x400;
    }
}
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i=7; i>=0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
        stw(ptr + 6, 0);
        stw(ptr + 8, 0);
        stw(ptr + 10, 0);
        stw(ptr + 12, 0);
    }
}
void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }

    /* fninit */
    env->fpus = 0;
    env->fpstt = 0;
    env->fpuc = 0x37f;
    env->fptags[0] = 1;
    env->fptags[1] = 1;
    env->fptags[2] = 1;
    env->fptags[3] = 1;
    env->fptags[4] = 1;
    env->fptags[5] = 1;
    env->fptags[6] = 1;
    env->fptags[7] = 1;
}
void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
/* try to fill the TLB and raise an exception on error. If retaddr is
   NULL, it means the function was called from C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    int ret;
    unsigned long pc;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    if (is_write && page_unprotect(addr)) {
        /* nothing more to do: the page was write protected because
           there was code in it. page_unprotect() flushed the code. */
    } else {
        ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
        if (ret) {
            if (retaddr) {
                /* now we have a real cpu fault */
                pc = (unsigned long)retaddr;
                tb = tb_find_pc(pc);
                if (tb) {
                    /* the PC is inside the translated code. It means that we
                       have a virtual CPU fault */
                    cpu_restore_state(tb, env, pc);
                }
            }
            raise_exception_err(EXCP0E_PAGE, env->error_code);
        }
    }
    env = saved_env;
}
#endif