/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#define raise_exception_err(a, b)\
do {\
    fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
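/* Illustrative use (not part of the original source): the condition-code
   helpers index parity_table with the low byte of a result to obtain the PF
   bit directly; the hypothetical helper below just makes that explicit. */
static inline int example_pf_from_result(unsigned int result)
{
    /* CC_P when the low 8 bits contain an even number of set bits, else 0 */
    return parity_table[result & 0xff];
}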
const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9, 10, 11, 12, 13, 14, 15,
    16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9, 10, 11, 12, 13, 14,
};
const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
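/* rclw_table and rclb_table reduce a rotate-through-carry count: a 16 bit
   RCL/RCR rotates through 17 bit positions and an 8 bit one through 9, so
   the shift count taken modulo 17 (resp. 9) gives the effective rotation. */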
const CPU86_LDouble f15rk[7] =
{
    0.00000000000000000000L,
    1.00000000000000000000L,
    3.14159265358979323851L,  /*pi*/
    0.30102999566398119523L,  /*lg2*/
    0.69314718055994530943L,  /*ln2*/
    1.44269504088896340739L,  /*l2e*/
    3.32192809488736234781L,  /*l2t*/
};
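/* f15rk holds the values pushed by the FPU constant-load instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T); the translator
   indexes this table when it emits those operations. */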
spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void cpu_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void cpu_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
void cpu_loop_exit(void)
{
    /* NOTE: the registers at this point must be saved by hand because
       longjmp restores them */
    env->regs[R_EAX] = EAX;
    env->regs[R_ECX] = ECX;
    env->regs[R_EDX] = EDX;
    env->regs[R_EBX] = EBX;
    env->regs[R_ESP] = ESP;
    env->regs[R_EBP] = EBP;
    env->regs[R_ESI] = ESI;
    env->regs[R_EDI] = EDI;
    longjmp(env->jmp_env, 1);
}
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    index = selector & ~7;
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK)
        limit = (limit << 12) | 0xfff;
    return limit;
}
static inline uint8_t *get_seg_base(uint32_t e1, uint32_t e2)
{
    return (uint8_t *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
}
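/* In a descriptor the 32 bit base is split across the two words: bits 15..0
   live in e1[31:16], bits 23..16 in e2[7:0] and bits 31..24 in e2[31:24];
   get_seg_base() simply reassembles them. */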
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
{
    cpu_x86_load_seg_cache(env, seg, selector,
                           (uint8_t *)(selector << 4), 0xffff, 0);
}
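/* In vm86 (and real) mode a segment has no descriptor: its base is simply
   selector * 16 and its limit is fixed at 64K, which is what this cache
   entry encodes. */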
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    for(i = 0; i < env->tr.limit; i++) {
        printf("%02x ", env->tr.base[i]);
        if ((i & 7) == 7) printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
{
    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
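/* The task-switch source selects the busy-bit/NT handling in switch_tss():
   JMP and IRET clear the busy bit of the outgoing TSS, JMP and CALL set it
   on the incoming one, and CALL additionally back-links the old TSS and
   sets NT in the new EFLAGS. */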
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    if (loglevel & CPU_LOG_PCALL)
        fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
    }

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        for(i = 0; i < 8; i++)
            stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        for(i = 0; i < 8; i++)
            stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occur in the next task
       context */
    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    for(i = 0; i < 8; i++)
        env->regs[i] = new_regs[i];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], NULL, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = NULL;
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* check if Port I/O is allowed in TSS */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}

void check_iob_T0(void)
{
    check_io(T0, 1);
}

void check_iow_T0(void)
{
    check_io(T0, 2);
}

void check_iol_T0(void)
{
    check_io(T0, 4);
}

void check_iob_DX(void)
{
    check_io(EDX & 0xffff, 1);
}

void check_iow_DX(void)
{
    check_io(EDX & 0xffff, 2);
}

void check_iol_DX(void)
{
    check_io(EDX & 0xffff, 4);
}
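/* The I/O permission check follows the TSS layout: the word at offset 0x66
   gives the start of the I/O bitmap, each port owns one bit, and every bit
   covered by the access (1, 2 or 4 ports) must be clear for the I/O to be
   allowed. */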
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK)
        return 0xffffffff;
    else
        return 0xffff;
}
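/* The D/B bit of the stack segment decides whether ESP or SP is used, so the
   mask is 0xffffffff for a 32 bit stack and 0xffff for a 16 bit one. */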
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = ldl_kernel((ssp) + (sp & (sp_mask)));\
    sp += 4;\
}
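/* These macros work on a local copy of the stack pointer and only touch
   memory through ssp + (sp & sp_mask); the callers commit the new value with
   ESP = (ESP & ~sp_mask) | (sp & sp_mask) once the whole frame has been
   pushed or popped, so a fault in the middle leaves ESP unchanged. */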
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    int type, dpl, selector, ss_dpl, cpl, sp_mask;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        switch(intno) {
        case 8:
        case 10:
        case 11:
        case 12:
        case 13:
        case 14:
        case 17:
            has_error_code = 1;
            break;
        }
    }
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            shift = (env->segs[R_CS].flags >> DESC_B_SHIFT) & 1;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            env->regs[R_ESP] = (esp & mask) | (env->regs[R_ESP] & ~mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = NULL; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;

    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    ESP = (ESP & ~sp_mask) | (esp & sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    EIP = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    uint8_t *ptr, *ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (uint8_t *)(selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
/* fake user mode interrupt */
void do_interrupt_user(int intno, int is_int, int error_code,
                       unsigned int next_eip)
{
    dt = &env->idt;
    ptr = dt->base + (intno * 8);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt(int intno, int is_int, int error_code,
                  unsigned int next_eip, int is_hw)
{
    if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x pc=%08x SP=%04x:%08x",
                    count, intno, error_code, is_int,
                    env->hflags & HF_CPL_MASK,
                    env->segs[R_CS].selector, EIP,
                    (int)env->segs[R_CS].base + EIP,
                    env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                fprintf(logfile, " CR2=%08x", env->cr[2]);
            } else {
                fprintf(logfile, " EAX=%08x", env->regs[R_EAX]);
            }
            fprintf(logfile, "\n");
            cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
#if 0
            {
                int i;
                uint8_t *ptr;
                fprintf(logfile, " code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    fprintf(logfile, " %02x", ldub(ptr + i));
                }
                fprintf(logfile, "\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
        do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
    } else {
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }
}
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
void raise_interrupt(int intno, int is_int, int error_code,
                     unsigned int next_eip)
{
    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = next_eip;
    cpu_loop_exit();
}

/* shortcuts to generate exceptions */

void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}

void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
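/* Note: the parentheses around raise_exception_err in the definition above
   keep the debug logging macro of the same name from expanding here, so the
   wrapper can call the real function. */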
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */
uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif

void helper_divl_EAX_T0(uint32_t eip)
{
    unsigned int den, q, r;
    uint64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = div64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}

void helper_idivl_EAX_T0(uint32_t eip)
{
    int den, q, r;
    int64_t num;

    num = EAX | ((uint64_t)EDX << 32);
    den = T0;
    if (den == 0) {
        EIP = eip;
        raise_exception(EXCP00_DIVZ);
    }
#ifdef BUGGY_GCC_DIV64
    r = idiv64(&q, num, den);
#else
    q = (num / den);
    r = (num % den);
#endif
    EAX = q;
    EDX = r;
}
void helper_cmpxchg8b(void)
{
    uint64_t d;
    int eflags;

    eflags = cc_table[CC_OP].compute_all();
    d = ldq((uint8_t *)A0);
    if (d == (((uint64_t)EDX << 32) | EAX)) {
        stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
        eflags |= CC_Z;
    } else {
        EDX = d >> 32;
        EAX = d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
#define CPUID_FP87 (1 << 0)
#define CPUID_VME  (1 << 1)
#define CPUID_DE   (1 << 2)
#define CPUID_PSE  (1 << 3)
#define CPUID_TSC  (1 << 4)
#define CPUID_MSR  (1 << 5)
#define CPUID_PAE  (1 << 6)
#define CPUID_MCE  (1 << 7)
#define CPUID_CX8  (1 << 8)
#define CPUID_APIC (1 << 9)
#define CPUID_SEP  (1 << 11) /* sysenter/sysexit */
#define CPUID_MTRR (1 << 12)
#define CPUID_PGE  (1 << 13)
#define CPUID_MCA  (1 << 14)
#define CPUID_CMOV (1 << 15)

#define CPUID_MMX  (1 << 23)
#define CPUID_FXSR (1 << 24)
#define CPUID_SSE  (1 << 25)
#define CPUID_SSE2 (1 << 26)
void helper_cpuid(void)
{
    switch(EAX) {
    case 0:
        EAX = 2; /* max EAX index supported */
        break;
    case 1:
        {
            int family, model, stepping;
            /* pentium 75-200 */
            EAX = (family << 8) | (model << 4) | stepping;
            EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
                CPUID_TSC | CPUID_MSR | CPUID_MCE |
                CPUID_CX8 | CPUID_PGE | CPUID_CMOV;
        }
        break;
    default:
        /* cache info: needed for Pentium Pro compatibility */
        break;
    }
}
void helper_lldt_T0(void)
{
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = NULL;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    env->ldt.selector = selector;
}
void helper_ltr_T0(void)
{
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = NULL;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void load_seg(int seg_reg, int selector)
{
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS)
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, NULL, 0, 0);
    } else {
        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected_T0_T1(int next_eip)
{
    int new_cs, new_eip, gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    new_cs = T0;
    new_eip = T1;
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip;
    uint32_t esp, esp_mask;
    uint8_t *ssp;

    new_cs = T0;
    new_eip = T1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    ESP = (ESP & ~esp_mask) | (esp & esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (uint8_t *)(new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected_T0_T1(int shift, int next_eip)
{
    int new_cs, new_eip, new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    uint8_t *ssp, *old_ssp;

    new_cs = T0;
    new_eip = T1;
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lcall %04x:%08x s=%d\n",
                new_cs, new_eip, shift);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
    }
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

        sp = ESP;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        limit = get_seg_limit(e1, e2);
        if (new_eip > limit)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* from this point, not restartable */
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            if (loglevel & CPU_LOG_PCALL)
                fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
                        ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        ESP = (ESP & ~sp_mask) | (sp & sp_mask);
        EIP = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    uint8_t *ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    load_seg_vm(R_CS, new_cs);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
}
static inline void validate_seg(int seg_reg, int cpl)
{
    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, NULL, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t sp, new_cs, new_eip, new_eflags, new_esp, new_ss, sp_mask;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    uint8_t *ssp;

    sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    if (loglevel & CPU_LOG_PCALL) {
        fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
                new_cs, new_eip, shift, addend);
        cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
    }
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        if (loglevel & CPU_LOG_PCALL) {
            fprintf(logfile, "new ss:esp=%04x:%08x\n",
                    new_ss, new_esp);
        }

        if ((new_ss & 3) != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_load_seg_cache(env, R_SS, new_ss,
                               get_seg_base(ss_e1, ss_e2),
                               get_seg_limit(ss_e1, ss_e2),
                               ss_e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
        sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, cpl);
        validate_seg(R_DS, cpl);
        validate_seg(R_FS, cpl);
        validate_seg(R_GS, cpl);

        sp += addend;
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
}
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);
    cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(void)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
    cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                           NULL, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = ECX;
    EIP = EDX;
}
void helper_movl_crN_T0(int reg)
{
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, T0);
        break;
    case 3:
        cpu_x86_update_cr3(env, T0);
        break;
    case 4:
        cpu_x86_update_cr4(env, T0);
        break;
    default:
        env->cr[reg] = T0;
        break;
    }
}

void helper_movl_drN_T0(int reg)
{
    env->dr[reg] = T0;
}

void helper_invlpg(unsigned int addr)
{
    cpu_x86_flush_tlb(env, addr);
}

void helper_rdtsc(void)
{
    uint64_t val;

    val = cpu_get_tsc(env);
    EAX = val;
    EDX = val >> 32;
}
void helper_wrmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = EAX & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = EAX;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = EAX;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}

void helper_rdmsr(void)
{
    switch(ECX) {
    case MSR_IA32_SYSENTER_CS:
        EAX = env->sysenter_cs;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_ESP:
        EAX = env->sysenter_esp;
        EDX = 0;
        break;
    case MSR_IA32_SYSENTER_EIP:
        EAX = env->sysenter_eip;
        EDX = 0;
        break;
    default:
        /* XXX: exception ? */
        break;
    }
}
void helper_lsl(void)
{
    unsigned int selector, limit;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    limit = get_seg_limit(e1, e2);
    T1 = limit;
    CC_SRC |= CC_Z;
}
void helper_lar(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl, type;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            return;
        }
        if (dpl < cpl || dpl < rpl)
            return;
    }
    T1 = e2 & 0x00f0ff00;
    CC_SRC |= CC_Z;
}
void helper_verr(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK))
            return;
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl)
                return;
        }
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
    }
    CC_SRC |= CC_Z;
}
void helper_verw(void)
{
    unsigned int selector;
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
    selector = T0 & 0xffff;
    if ((selector & 0xfffc) == 0)
        return;
    if (load_segment(&e1, &e2, selector) != 0)
        return;
    if (!(e2 & DESC_S_MASK))
        return;
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        return;
    } else {
        if (dpl < cpl || dpl < rpl)
            return;
        if (!(e2 & DESC_W_MASK))
            return;
    }
    CC_SRC |= CC_Z;
}
void helper_fldt_ST0_A0(void)
{
    int new_fpstt;

    new_fpstt = (env->fpstt - 1) & 7;
    env->fpregs[new_fpstt] = helper_fldt((uint8_t *)A0);
    env->fpstt = new_fpstt;
    env->fptags[new_fpstt] = 0; /* validate stack entry */
}

void helper_fstt_ST0_A0(void)
{
    helper_fstt(ST0, (uint8_t *)A0);
}

void fpu_set_exception(int mask)
{
    env->fpus |= mask;
    if (env->fpus & (~env->fpuc & FPUC_EM))
        env->fpus |= FPUS_SE | FPUS_B;
}
CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
{
    if (b == 0.0)
        fpu_set_exception(FPUS_ZE);
    return a / b;
}

void fpu_raise_exception(void)
{
    if (env->cr[0] & CR0_NE_MASK) {
        raise_exception(EXCP10_COPR);
    }
#if !defined(CONFIG_USER_ONLY)
    else {
        cpu_set_ferr(env);
    }
#endif
}
void helper_fbld_ST0_A0(void)
{
    val = 0;
    for(i = 8; i >= 0; i--) {
        v = ldub((uint8_t *)A0 + i);
        val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
    }
    tmp = val;
    if (ldub((uint8_t *)A0 + 9) & 0x80)
        tmp = -tmp;
    fpush();
    ST0 = tmp;
}

void helper_fbst_ST0_A0(void)
{
    uint8_t *mem_ref, *mem_end;

    mem_ref = (uint8_t *)A0;
    mem_end = mem_ref + 9;
    if (val < 0) {
        stb(mem_end, 0x80);
        val = -val;
    } else {
        stb(mem_end, 0x00);
    }
    while (mem_ref < mem_end) {
        if (val == 0)
            break;
        v = val % 100;
        val = val / 100;
        v = ((v / 10) << 4) | (v % 10);
        stb(mem_ref++, v);
    }
    while (mem_ref < mem_end) {
        stb(mem_ref++, 0);
    }
}
void helper_f2xm1(void)
{
    ST0 = pow(2.0, ST0) - 1.0;
}

void helper_fyl2x(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp > 0.0) {
        fptemp = log(fptemp) / log(2.0); /* log2(ST) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fptan(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = tan(fptemp);
        fpush();
        ST0 = 1.0;
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**52 only */
    }
}

void helper_fpatan(void)
{
    CPU86_LDouble fptemp, fpsrcop;

    fpsrcop = ST1;
    fptemp = ST0;
    ST1 = atan2(fpsrcop, fptemp);
    fpop();
}

void helper_fxtract(void)
{
    CPU86_LDoubleU temp;
    unsigned int expdif;

    temp.d = ST0;
    expdif = EXPD(temp) - EXPBIAS;
    /* DP exponent bias */
    ST0 = expdif;
    fpush();
    BIASEXPONENT(temp);
    ST0 = temp.d;
}

void helper_fprem1(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by rounding to the nearest */
        fpsrcop = (fpsrcop - floor(fpsrcop) < ceil(fpsrcop) - fpsrcop)?
            floor(fpsrcop): ceil(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fprem(void)
{
    CPU86_LDouble dblq, fpsrcop, fptemp;
    CPU86_LDoubleU fpsrcop1, fptemp1;
    int expdif;
    int q;

    fpsrcop = ST0;
    fptemp = ST1;
    fpsrcop1.d = fpsrcop;
    fptemp1.d = fptemp;
    expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
    if (expdif < 53) {
        dblq = fpsrcop / fptemp;
        dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
        ST0 = fpsrcop - fptemp * dblq;
        q = (int)dblq; /* cutting off top bits is assumed here */
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        /* (C0,C1,C3) <-- (q2,q1,q0) */
        env->fpus |= (q & 0x4) << 6; /* (C0) <-- q2 */
        env->fpus |= (q & 0x2) << 8; /* (C1) <-- q1 */
        env->fpus |= (q & 0x1) << 14; /* (C3) <-- q0 */
    } else {
        env->fpus |= 0x400; /* C2 <-- 1 */
        fptemp = pow(2.0, expdif - 50);
        fpsrcop = (ST0 / ST1) / fptemp;
        /* fpsrcop = integer obtained by chopping */
        fpsrcop = (fpsrcop < 0.0)?
            -(floor(fabs(fpsrcop))): floor(fpsrcop);
        ST0 -= (ST1 * fpsrcop * fptemp);
    }
}
void helper_fyl2xp1(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp + 1.0) > 0.0) {
        fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST+1.0) */
        ST1 *= fptemp;
        fpop();
    } else {
        env->fpus &= (~0x4700);
        env->fpus |= 0x400;
    }
}

void helper_fsqrt(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if (fptemp < 0.0) {
        env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
        env->fpus |= 0x400;
    }
    ST0 = sqrt(fptemp);
}

void helper_fsincos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        fpush();
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_frndint(void)
{
    CPU86_LDouble a;

    a = ST0;
    switch(env->fpuc & RC_MASK) {
    default:
    case RC_NEAR:
        asm("rndd %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_DOWN:
        asm("rnddm %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_UP:
        asm("rnddp %0, %1" : "=f" (a) : "f"(a));
        break;
    case RC_CHOP:
        asm("rnddz %0, %1" : "=f" (a) : "f"(a));
        break;
    }
    ST0 = a;
}
void helper_fscale(void)
{
    CPU86_LDouble fpsrcop, fptemp;

    fpsrcop = 2.0;
    fptemp = pow(fpsrcop, ST1);
    ST0 *= fptemp;
}

void helper_fsin(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = sin(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**53 only */
    }
}

void helper_fcos(void)
{
    CPU86_LDouble fptemp;

    fptemp = ST0;
    if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
        env->fpus |= 0x400;
    } else {
        ST0 = cos(fptemp);
        env->fpus &= (~0x400); /* C2 <-- 0 */
        /* the above code is for |arg| < 2**63 only */
    }
}

void helper_fxam_ST0(void)
{
    CPU86_LDoubleU temp;
    int expdif;

    temp.d = ST0;

    env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
    if (SIGND(temp))
        env->fpus |= 0x200; /* C1 <-- 1 */

    expdif = EXPD(temp);
    if (expdif == MAXEXPD) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x500 /*Infinity*/;
        else
            env->fpus |= 0x100 /*NaN*/;
    } else if (expdif == 0) {
        if (MANTD(temp) == 0)
            env->fpus |= 0x4000 /*Zero*/;
        else
            env->fpus |= 0x4400 /*Denormal*/;
    } else {
        env->fpus |= 0x400;
    }
}
void helper_fstenv(uint8_t *ptr, int data32)
{
    int fpus, fptag, exp, i;
    uint64_t mant;
    CPU86_LDoubleU tmp;

    fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
    fptag = 0;
    for (i = 7; i >= 0; i--) {
        fptag <<= 2;
        if (env->fptags[i]) {
            fptag |= 3;
        } else {
            tmp.d = env->fpregs[i];
            exp = EXPD(tmp);
            mant = MANTD(tmp);
            if (exp == 0 && mant == 0) {
                /* zero */
                fptag |= 1;
            } else if (exp == 0 || exp == MAXEXPD
#ifdef USE_X86LDOUBLE
                       || (mant & (1LL << 63)) == 0
#endif
                       ) {
                /* NaNs, infinity, denormal */
                fptag |= 2;
            }
        }
    }
    if (data32) {
        /* 32 bit */
        stl(ptr, env->fpuc);
        stl(ptr + 4, fpus);
        stl(ptr + 8, fptag);
        stl(ptr + 12, 0); /* fpip */
        stl(ptr + 16, 0); /* fpcs */
        stl(ptr + 20, 0); /* fpoo */
        stl(ptr + 24, 0); /* fpos */
    } else {
        /* 16 bit */
        stw(ptr, env->fpuc);
        stw(ptr + 2, fpus);
        stw(ptr + 4, fptag);
    }
}
void helper_fldenv(uint8_t *ptr, int data32)
{
    int i, fpus, fptag;

    if (data32) {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 4);
        fptag = lduw(ptr + 8);
    } else {
        env->fpuc = lduw(ptr);
        fpus = lduw(ptr + 2);
        fptag = lduw(ptr + 4);
    }
    env->fpstt = (fpus >> 11) & 7;
    env->fpus = fpus & ~0x3800;
    for(i = 0; i < 8; i++) {
        env->fptags[i] = ((fptag & 3) == 3);
        fptag >>= 2;
    }
}
void helper_fsave(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fstenv(ptr, data32);

    ptr += (14 << data32);
    for(i = 0; i < 8; i++) {
        tmp = ST(i);
        helper_fstt(tmp, ptr);
        ptr += 10;
    }
}

void helper_frstor(uint8_t *ptr, int data32)
{
    CPU86_LDouble tmp;
    int i;

    helper_fldenv(ptr, data32);
    ptr += (14 << data32);

    for(i = 0; i < 8; i++) {
        tmp = helper_fldt(ptr);
        ST(i) = tmp;
        ptr += 10;
    }
}
2485 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
2487 CPU86_LDoubleU temp
;
2492 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
2493 /* exponent + sign */
2494 e
= EXPD(temp
) - EXPBIAS
+ 16383;
2495 e
|= SIGND(temp
) >> 16;
2499 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
2501 CPU86_LDoubleU temp
;
2505 /* XXX: handle overflow ? */
2506 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
2507 e
|= (upper
>> 4) & 0x800; /* sign */
2508 ll
= (mant
>> 11) & ((1LL << 52) - 1);
2510 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
2513 temp
.ll
= ll
| ((uint64_t)e
<< 52);
2520 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
2522 CPU86_LDoubleU temp
;
2525 *pmant
= temp
.l
.lower
;
2526 *pexp
= temp
.l
.upper
;
2529 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
2531 CPU86_LDoubleU temp
;
2533 temp
.l
.upper
= upper
;
2534 temp
.l
.lower
= mant
;
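/* Without USE_X86LDOUBLE the guest's 80 bit values are synthesized from the
   host double: the 52 bit mantissa is shifted up by 11 and the explicit
   integer bit added, and the exponent is rebiased from 1023 to 16383 (the
   reverse mapping is applied in cpu_set_fp80 above). */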
#if !defined(CONFIG_USER_ONLY)

#define MMUSUFFIX _mmu
#define GETPC() (__builtin_return_address(0))

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(unsigned long addr, int is_write, int is_user, void *retaddr)
{
    TranslationBlock *tb;
    CPUX86State *saved_env;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, is_user, 1);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        raise_exception_err(EXCP0E_PAGE, env->error_code);
    }
    env = saved_env;
}
#endif