 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#define raise_exception_err(a, b)\
do {\
    printf("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
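/* Editor's sketch (not in the original file): parity_table is presumably
   indexed with the low 8 bits of an ALU result, yielding CC_P when that byte
   has an even number of set bits and 0 otherwise, e.g.

       eflags |= parity_table[(uint8_t)result];   -- hypothetical use

   Index 0x00 (no set bits) and 0x03 (two set bits) map to CC_P, while 0x01
   and 0x02 (one set bit) map to 0. */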
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
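/* Editor's note (assumption, not an original comment): these appear to be the
   rotate-through-carry reduction tables.  A 16-bit RCL rotates through 17 bit
   positions (16 data bits plus CF) and an 8-bit RCL through 9, so a masked
   5-bit count can be reduced with a lookup, e.g.

       count = rclw_table[count & 0x1f];   -- count mod 17
       count = rclb_table[count & 0x1f];   -- count mod 9

   Consistent with that reading, rclw_table[17] == 0 and rclb_table[9] == 0. */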
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    env->regs[R_EAX] = EAX;
    env->regs[R_ECX] = ECX;
    env->regs[R_EDX] = EDX;
    env->regs[R_EBX] = EBX;
    env->regs[R_ESP] = ESP;
    env->regs[R_EBP] = EBP;
    env->regs[R_ESI] = ESI;
    env->regs[R_EDI] = EDI;
    longjmp(env->jmp_env, 1);
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}

static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
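/* Worked example (added by the editor): for the classic flat 4 GB data
   descriptor e1 = 0x0000ffff, e2 = 0x00cf9300, get_seg_base() returns
   (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000) = 0, and
   get_seg_limit() first forms 0xfffff and then, because DESC_G_MASK is set,
   expands it to (0xfffff << 12) | 0xfff = 0xffffffff. */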
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}
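/* Example (editor's illustration): in vm86/real mode a segment register holds
   a paragraph number, so the cached base is simply selector << 4.  Loading
   selector 0xb800 this way gives base 0xb8000 (the VGA text buffer) with the
   fixed 64 KB limit 0xffff. */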
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

    printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    for(i=0;i<env->tr.limit;i++) {
        printf("%02x ", env->tr.base[i]);
        if ((i & 7) == 7) printf("\n");
    }

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */
    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0 & 0xffff, 1);
}

void check_iow_T0(void)
{
    check_io(T0 & 0xffff, 2);
}

void check_iol_T0(void)
{
    check_io(T0 & 0xffff, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
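/* Worked example (added by the editor): for a 1-byte access to port 0x3f9
   with a 32-bit TSS, io_offset = lduw(tr.base + 0x66) + (0x3f9 >> 3) selects
   the bitmap word, val >>= (0x3f9 & 7) moves the port's bit into position 0,
   and with size = 1 the mask is 0x1; the access is allowed only if that bit
   is clear. */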
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
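/* Usage sketch (editor's addition, assuming the macro bodies above): the
   macros work on a local copy of the stack pointer, so a 16-bit far-call
   frame could be built as

       sp = ESP;
       PUSHW(ssp, sp, 0xffff, env->segs[R_CS].selector);
       PUSHW(ssp, sp, 0xffff, next_eip);
       ESP = (ESP & ~0xffff) | (sp & 0xffff);

   which is the pattern helper_lcall_real_T0_T1() uses further down. */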
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;

    if (!is_int && !is_hw) {
    }

    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }
718 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
719 if (env
->eflags
& VM_MASK
)
725 if (env
->eflags
& VM_MASK
) {
726 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
727 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
728 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
729 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
731 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
732 PUSHL(ssp
, esp
, sp_mask
, ESP
);
734 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
735 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
736 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
737 if (has_error_code
) {
738 PUSHL(ssp
, esp
, sp_mask
, error_code
);
742 if (env
->eflags
& VM_MASK
) {
743 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
744 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
745 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
746 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
748 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
749 PUSHW(ssp
, esp
, sp_mask
, ESP
);
751 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
752 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
753 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
754 if (has_error_code
) {
755 PUSHW(ssp
, esp
, sp_mask
, error_code
);
760 if (env
->eflags
& VM_MASK
) {
761 cpu_x86_load_seg_cache(env
, R_ES
, 0, NULL
, 0, 0);
762 cpu_x86_load_seg_cache(env
, R_DS
, 0, NULL
, 0, 0);
763 cpu_x86_load_seg_cache(env
, R_FS
, 0, NULL
, 0, 0);
764 cpu_x86_load_seg_cache(env
, R_GS
, 0, NULL
, 0, 0);
766 ss
= (ss
& ~3) | dpl
;
767 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
768 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
770 ESP
= (ESP
& ~sp_mask
) | (esp
& sp_mask
);
772 selector
= (selector
& ~3) | dpl
;
773 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
774 get_seg_base(e1
, e2
),
775 get_seg_limit(e1
, e2
),
777 cpu_x86_set_cpl(env
, dpl
);
780 /* interrupt gate clear IF mask */
781 if ((type
& 1) == 0) {
782 env
->eflags
&= ~IF_MASK
;
784 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x SP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=%08x", env->cr[2]);
            } else {
                fprintf(logfile, " EAX=%08x", env->regs[R_EAX]);
            }
            fprintf(logfile, "\n");
            cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);

            fprintf(logfile, " code=");
            ptr = env->segs[R_CS].base + env->eip;
            for(i = 0; i < 16; i++) {
                fprintf(logfile, " %02x", ldub(ptr + i));
            }
            fprintf(logfile, "\n");
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
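/* Call sketch (editor's addition): for a software "int 0x80" whose 2-byte
   encoding starts at EIP = pc, the translator would be expected to invoke

       do_interrupt(0x80, 1, 0, pc + 2, 0);

   whereas an external hardware interrupt uses is_int = 0 and is_hw = 1, in
   which case next_eip is not used. */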
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
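/* Usage note (editor's addition): helper_divl_EAX_T0() below combines EDX:EAX
   into a 64-bit numerator and, on this fallback path, obtains quotient and
   remainder as

       r = div64(&q, num, den);   -- q = num / den, r = num % den

   before the results go back to EAX (quotient) and EDX (remainder), as the
   x86 DIV instruction requires. */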
void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;

    num = EAX | ((uint64_t)EDX << 32);
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#endif
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    num = EAX | ((uint64_t)EDX << 32);
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#endif
}
void helper_cmpxchg8b(void)
{
    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
    }
}
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)

#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)

void helper_cpuid(void)
{
    if (EAX == 0) {
        EAX = 2; /* max EAX index supported */
    } else if (EAX == 1) {
        int family, model, stepping;
        /* pentium 75-200 */
        EAX = (family << 8) | (model << 4) | stepping;
        EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
            CPUID_TSC | CPUID_MSR | CPUID_MCE |
            CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
    } else {
        /* cache info: needed for Pentium Pro compatibility */
    }
}
void helper_lldt_T0(void)
{
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
void helper_ltr_T0(void)
{
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid LDT */
        env->tr.base = NULL;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
    }
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
        } else {
            /* to same privilege */
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    }
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
static inline void validate_seg(int seg_reg, int cpl)
{
    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
        } else {
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:%08x\n",
                    new_ss, new_esp);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                               get_seg_base(ss_e1, ss_e2),
                               get_seg_limit(ss_e1, ss_e2),
                               ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
}
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}

void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_movl_crN_T0(int reg)
{
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    }
}

void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

#if !defined(__i386__) && !defined(__x86_64__)
#endif

void helper_rdtsc(void)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("rdtsc" : "=A" (val));
#else
    /* better than nothing: the time increases */
#endif
}
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}
void helper_lsl(void)
{
    unsigned int selector, limit;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}

void helper_lar(void)
{
    unsigned int selector;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}

void helper_verr(void)
{
    unsigned int selector;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}

void helper_verw(void)
{
    unsigned int selector;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}
void helper_fldt_ST0_A0(void)
{
    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}
void helper_fbld_ST0_A0(void)
{
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
}

void helper_fbst_ST0_A0(void)
{
    uint8_t *mem_ref, *mem_end;

    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    while (mem_ref < mem_end) {
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
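/* Worked example (added by the editor): the ten-byte packed-BCD operand holds
   two decimal digits per byte, least-significant byte first, with the sign in
   bit 7 of byte 9.  Memory bytes 0x34, 0x12, 0x00, ... therefore decode in the
   helper_fbld_ST0_A0() loop as (1*10 + 2) * 100 + (3*10 + 4) = 1234, and
   helper_fbst_ST0_A0() repacks each digit pair with ((v / 10) << 4) | (v % 10). */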
void helper_f2xm1(void)
{
    ST0 = pow(2.0,ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = log(fptemp)/log(2.0); /* log2(ST) */
    env->fpus &= (~0x4700);
}
void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    ST1 = atan2(fpsrcop,fptemp);
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    expdif = EXPD(temp) - EXPBIAS;
    /*DP exponent bias*/
}
void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;

    fpsrcop1.d = fpsrcop;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;

    fpsrcop1.d = fpsrcop;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if ( expdif < 53 ) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp*dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400;  /* C2 <-- 1 */
        fptemp = pow(2.0, expdif-50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    if ((fptemp+1.0)>0.0) {
        fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
    } else {
        env->fpus &= (~0x4700);
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
}
void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
}
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fptemp = pow(fpsrcop,ST1);
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}
void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */
    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    }
}
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    for (i=7; i>=0; i--) {
        if (env->fptags[i]) {
        } else {
            tmp.d = env->fpregs[i];
            if (exp == 0 && mant == 0) {
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 4, fptag);
    }
}
void helper_fldenv(uint8_t *ptr, int data32)
{
    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0;i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
    }
}
void helper_fsave(uint8_t *ptr, int data32)
{
    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0;i < 8; i++) {
        helper_fstt(tmp, ptr);
    }
}

void helper_frstor(uint8_t *ptr, int data32)
{
    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0;i < 8; i++) {
        tmp = helper_fldt(ptr);
    }
}
/* XXX: merge with helper_fstt ? */

#ifndef USE_X86LDOUBLE

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    *pmant = (MANTD(temp) << 11) | (1LL << 63);
    /* exponent + sign */
    e = EXPD(temp) - EXPBIAS + 16383;
    e |= SIGND(temp) >> 16;
    *pexp = e;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    /* XXX: handle overflow ? */
    e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
    e |= (upper >> 4) & 0x800; /* sign */
    ll = (mant >> 11) & ((1LL << 52) - 1);
    temp.l.upper = (e << 20) | (ll >> 32);
    temp.ll = ll | ((uint64_t)e << 52);
    return temp.d;
}

#else

void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
{
    CPU86_LDoubleU temp;

    *pmant = temp.l.lower;
    *pexp = temp.l.upper;
}

CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
{
    CPU86_LDoubleU temp;

    temp.l.upper = upper;
    temp.l.lower = mant;
    return temp.d;
}
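/* Worked example (editor's addition, assuming EXPBIAS is the IEEE-double bias
   1023): in the !USE_X86LDOUBLE variant, the double 1.0 (exponent field 1023,
   mantissa 0) converts to the 80-bit form as *pmant = 1ULL << 63 (explicit
   integer bit) and *pexp = 1023 - EXPBIAS + 16383 = 0x3fff, the extended
   encoding of 1.0; cpu_set_fp80() applies the inverse rebiasing. */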
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    env = cpu_single_env;
    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);