4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "host-utils.h"
26 #define raise_exception_err(a, b)\
29 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
30 (raise_exception_err)(a, b);\
34 const uint8_t parity_table
[256] = {
35 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
36 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
37 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
38 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
39 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
40 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
41 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
42 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
43 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
44 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
45 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
46 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
47 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
48 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
49 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
50 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
51 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
52 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
53 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
54 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
55 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
56 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
57 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
58 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
59 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
60 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
61 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
62 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
63 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
64 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
65 CC_P
, 0, 0, CC_P
, 0, CC_P
, CC_P
, 0,
66 0, CC_P
, CC_P
, 0, CC_P
, 0, 0, CC_P
,
70 const uint8_t rclw_table
[32] = {
71 0, 1, 2, 3, 4, 5, 6, 7,
72 8, 9,10,11,12,13,14,15,
73 16, 0, 1, 2, 3, 4, 5, 6,
74 7, 8, 9,10,11,12,13,14,
78 const uint8_t rclb_table
[32] = {
79 0, 1, 2, 3, 4, 5, 6, 7,
80 8, 0, 1, 2, 3, 4, 5, 6,
81 7, 8, 0, 1, 2, 3, 4, 5,
82 6, 7, 8, 0, 1, 2, 3, 4,
85 const CPU86_LDouble f15rk
[7] =
87 0.00000000000000000000L,
88 1.00000000000000000000L,
89 3.14159265358979323851L, /*pi*/
90 0.30102999566398119523L, /*lg2*/
91 0.69314718055994530943L, /*ln2*/
92 1.44269504088896340739L, /*l2e*/
93 3.32192809488736234781L, /*l2t*/
98 spinlock_t global_cpu_lock
= SPIN_LOCK_UNLOCKED
;
102 spin_lock(&global_cpu_lock
);
105 void cpu_unlock(void)
107 spin_unlock(&global_cpu_lock
);
110 /* return non zero if error */
111 static inline int load_segment(uint32_t *e1_ptr
, uint32_t *e2_ptr
,
122 index
= selector
& ~7;
123 if ((index
+ 7) > dt
->limit
)
125 ptr
= dt
->base
+ index
;
126 *e1_ptr
= ldl_kernel(ptr
);
127 *e2_ptr
= ldl_kernel(ptr
+ 4);
131 static inline unsigned int get_seg_limit(uint32_t e1
, uint32_t e2
)
134 limit
= (e1
& 0xffff) | (e2
& 0x000f0000);
135 if (e2
& DESC_G_MASK
)
136 limit
= (limit
<< 12) | 0xfff;
140 static inline uint32_t get_seg_base(uint32_t e1
, uint32_t e2
)
142 return ((e1
>> 16) | ((e2
& 0xff) << 16) | (e2
& 0xff000000));
145 static inline void load_seg_cache_raw_dt(SegmentCache
*sc
, uint32_t e1
, uint32_t e2
)
147 sc
->base
= get_seg_base(e1
, e2
);
148 sc
->limit
= get_seg_limit(e1
, e2
);
152 /* init the segment cache in vm86 mode. */
153 static inline void load_seg_vm(int seg
, int selector
)
156 cpu_x86_load_seg_cache(env
, seg
, selector
,
157 (selector
<< 4), 0xffff, 0);
160 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr
,
161 uint32_t *esp_ptr
, int dpl
)
163 int type
, index
, shift
;
168 printf("TR: base=%p limit=%x\n", env
->tr
.base
, env
->tr
.limit
);
169 for(i
=0;i
<env
->tr
.limit
;i
++) {
170 printf("%02x ", env
->tr
.base
[i
]);
171 if ((i
& 7) == 7) printf("\n");
177 if (!(env
->tr
.flags
& DESC_P_MASK
))
178 cpu_abort(env
, "invalid tss");
179 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
181 cpu_abort(env
, "invalid tss type");
183 index
= (dpl
* 4 + 2) << shift
;
184 if (index
+ (4 << shift
) - 1 > env
->tr
.limit
)
185 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
187 *esp_ptr
= lduw_kernel(env
->tr
.base
+ index
);
188 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 2);
190 *esp_ptr
= ldl_kernel(env
->tr
.base
+ index
);
191 *ss_ptr
= lduw_kernel(env
->tr
.base
+ index
+ 4);
195 /* XXX: merge with load_seg() */
196 static void tss_load_seg(int seg_reg
, int selector
)
201 if ((selector
& 0xfffc) != 0) {
202 if (load_segment(&e1
, &e2
, selector
) != 0)
203 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
204 if (!(e2
& DESC_S_MASK
))
205 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
207 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
208 cpl
= env
->hflags
& HF_CPL_MASK
;
209 if (seg_reg
== R_CS
) {
210 if (!(e2
& DESC_CS_MASK
))
211 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
212 /* XXX: is it correct ? */
214 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
215 if ((e2
& DESC_C_MASK
) && dpl
> rpl
)
216 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
217 } else if (seg_reg
== R_SS
) {
218 /* SS must be writable data */
219 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
220 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
221 if (dpl
!= cpl
|| dpl
!= rpl
)
222 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
224 /* not readable code */
225 if ((e2
& DESC_CS_MASK
) && !(e2
& DESC_R_MASK
))
226 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
227 /* if data or non conforming code, checks the rights */
228 if (((e2
>> DESC_TYPE_SHIFT
) & 0xf) < 12) {
229 if (dpl
< cpl
|| dpl
< rpl
)
230 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
233 if (!(e2
& DESC_P_MASK
))
234 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
235 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
236 get_seg_base(e1
, e2
),
237 get_seg_limit(e1
, e2
),
240 if (seg_reg
== R_SS
|| seg_reg
== R_CS
)
241 raise_exception_err(EXCP0A_TSS
, selector
& 0xfffc);
245 #define SWITCH_TSS_JMP 0
246 #define SWITCH_TSS_IRET 1
247 #define SWITCH_TSS_CALL 2
249 /* XXX: restore CPU state in registers (PowerPC case) */
250 static void switch_tss(int tss_selector
,
251 uint32_t e1
, uint32_t e2
, int source
,
254 int tss_limit
, tss_limit_max
, type
, old_tss_limit_max
, old_type
, v1
, v2
, i
;
255 target_ulong tss_base
;
256 uint32_t new_regs
[8], new_segs
[6];
257 uint32_t new_eflags
, new_eip
, new_cr3
, new_ldt
, new_trap
;
258 uint32_t old_eflags
, eflags_mask
;
263 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
265 if (loglevel
& CPU_LOG_PCALL
)
266 fprintf(logfile
, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector
, type
, source
);
269 /* if task gate, we read the TSS segment and we load it */
271 if (!(e2
& DESC_P_MASK
))
272 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
273 tss_selector
= e1
>> 16;
274 if (tss_selector
& 4)
275 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
276 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
277 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
278 if (e2
& DESC_S_MASK
)
279 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
280 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
282 raise_exception_err(EXCP0D_GPF
, tss_selector
& 0xfffc);
285 if (!(e2
& DESC_P_MASK
))
286 raise_exception_err(EXCP0B_NOSEG
, tss_selector
& 0xfffc);
292 tss_limit
= get_seg_limit(e1
, e2
);
293 tss_base
= get_seg_base(e1
, e2
);
294 if ((tss_selector
& 4) != 0 ||
295 tss_limit
< tss_limit_max
)
296 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
297 old_type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
299 old_tss_limit_max
= 103;
301 old_tss_limit_max
= 43;
303 /* read all the registers from the new TSS */
306 new_cr3
= ldl_kernel(tss_base
+ 0x1c);
307 new_eip
= ldl_kernel(tss_base
+ 0x20);
308 new_eflags
= ldl_kernel(tss_base
+ 0x24);
309 for(i
= 0; i
< 8; i
++)
310 new_regs
[i
] = ldl_kernel(tss_base
+ (0x28 + i
* 4));
311 for(i
= 0; i
< 6; i
++)
312 new_segs
[i
] = lduw_kernel(tss_base
+ (0x48 + i
* 4));
313 new_ldt
= lduw_kernel(tss_base
+ 0x60);
314 new_trap
= ldl_kernel(tss_base
+ 0x64);
318 new_eip
= lduw_kernel(tss_base
+ 0x0e);
319 new_eflags
= lduw_kernel(tss_base
+ 0x10);
320 for(i
= 0; i
< 8; i
++)
321 new_regs
[i
] = lduw_kernel(tss_base
+ (0x12 + i
* 2)) | 0xffff0000;
322 for(i
= 0; i
< 4; i
++)
323 new_segs
[i
] = lduw_kernel(tss_base
+ (0x22 + i
* 4));
324 new_ldt
= lduw_kernel(tss_base
+ 0x2a);
330 /* NOTE: we must avoid memory exceptions during the task switch,
331 so we make dummy accesses before */
332 /* XXX: it can still fail in some cases, so a bigger hack is
333 necessary to valid the TLB after having done the accesses */
335 v1
= ldub_kernel(env
->tr
.base
);
336 v2
= ldub_kernel(env
->tr
.base
+ old_tss_limit_max
);
337 stb_kernel(env
->tr
.base
, v1
);
338 stb_kernel(env
->tr
.base
+ old_tss_limit_max
, v2
);
340 /* clear busy bit (it is restartable) */
341 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_IRET
) {
344 ptr
= env
->gdt
.base
+ (env
->tr
.selector
& ~7);
345 e2
= ldl_kernel(ptr
+ 4);
346 e2
&= ~DESC_TSS_BUSY_MASK
;
347 stl_kernel(ptr
+ 4, e2
);
349 old_eflags
= compute_eflags();
350 if (source
== SWITCH_TSS_IRET
)
351 old_eflags
&= ~NT_MASK
;
353 /* save the current state in the old TSS */
356 stl_kernel(env
->tr
.base
+ 0x20, next_eip
);
357 stl_kernel(env
->tr
.base
+ 0x24, old_eflags
);
358 stl_kernel(env
->tr
.base
+ (0x28 + 0 * 4), EAX
);
359 stl_kernel(env
->tr
.base
+ (0x28 + 1 * 4), ECX
);
360 stl_kernel(env
->tr
.base
+ (0x28 + 2 * 4), EDX
);
361 stl_kernel(env
->tr
.base
+ (0x28 + 3 * 4), EBX
);
362 stl_kernel(env
->tr
.base
+ (0x28 + 4 * 4), ESP
);
363 stl_kernel(env
->tr
.base
+ (0x28 + 5 * 4), EBP
);
364 stl_kernel(env
->tr
.base
+ (0x28 + 6 * 4), ESI
);
365 stl_kernel(env
->tr
.base
+ (0x28 + 7 * 4), EDI
);
366 for(i
= 0; i
< 6; i
++)
367 stw_kernel(env
->tr
.base
+ (0x48 + i
* 4), env
->segs
[i
].selector
);
370 stw_kernel(env
->tr
.base
+ 0x0e, next_eip
);
371 stw_kernel(env
->tr
.base
+ 0x10, old_eflags
);
372 stw_kernel(env
->tr
.base
+ (0x12 + 0 * 2), EAX
);
373 stw_kernel(env
->tr
.base
+ (0x12 + 1 * 2), ECX
);
374 stw_kernel(env
->tr
.base
+ (0x12 + 2 * 2), EDX
);
375 stw_kernel(env
->tr
.base
+ (0x12 + 3 * 2), EBX
);
376 stw_kernel(env
->tr
.base
+ (0x12 + 4 * 2), ESP
);
377 stw_kernel(env
->tr
.base
+ (0x12 + 5 * 2), EBP
);
378 stw_kernel(env
->tr
.base
+ (0x12 + 6 * 2), ESI
);
379 stw_kernel(env
->tr
.base
+ (0x12 + 7 * 2), EDI
);
380 for(i
= 0; i
< 4; i
++)
381 stw_kernel(env
->tr
.base
+ (0x22 + i
* 4), env
->segs
[i
].selector
);
384 /* now if an exception occurs, it will occurs in the next task
387 if (source
== SWITCH_TSS_CALL
) {
388 stw_kernel(tss_base
, env
->tr
.selector
);
389 new_eflags
|= NT_MASK
;
393 if (source
== SWITCH_TSS_JMP
|| source
== SWITCH_TSS_CALL
) {
396 ptr
= env
->gdt
.base
+ (tss_selector
& ~7);
397 e2
= ldl_kernel(ptr
+ 4);
398 e2
|= DESC_TSS_BUSY_MASK
;
399 stl_kernel(ptr
+ 4, e2
);
402 /* set the new CPU state */
403 /* from this point, any exception which occurs can give problems */
404 env
->cr
[0] |= CR0_TS_MASK
;
405 env
->hflags
|= HF_TS_MASK
;
406 env
->tr
.selector
= tss_selector
;
407 env
->tr
.base
= tss_base
;
408 env
->tr
.limit
= tss_limit
;
409 env
->tr
.flags
= e2
& ~DESC_TSS_BUSY_MASK
;
411 if ((type
& 8) && (env
->cr
[0] & CR0_PG_MASK
)) {
412 cpu_x86_update_cr3(env
, new_cr3
);
415 /* load all registers without an exception, then reload them with
416 possible exception */
418 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
|
419 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
;
421 eflags_mask
&= 0xffff;
422 load_eflags(new_eflags
, eflags_mask
);
423 /* XXX: what to do in 16 bit case ? */
432 if (new_eflags
& VM_MASK
) {
433 for(i
= 0; i
< 6; i
++)
434 load_seg_vm(i
, new_segs
[i
]);
435 /* in vm86, CPL is always 3 */
436 cpu_x86_set_cpl(env
, 3);
438 /* CPL is set the RPL of CS */
439 cpu_x86_set_cpl(env
, new_segs
[R_CS
] & 3);
440 /* first just selectors as the rest may trigger exceptions */
441 for(i
= 0; i
< 6; i
++)
442 cpu_x86_load_seg_cache(env
, i
, new_segs
[i
], 0, 0, 0);
445 env
->ldt
.selector
= new_ldt
& ~4;
452 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
454 if ((new_ldt
& 0xfffc) != 0) {
456 index
= new_ldt
& ~7;
457 if ((index
+ 7) > dt
->limit
)
458 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
459 ptr
= dt
->base
+ index
;
460 e1
= ldl_kernel(ptr
);
461 e2
= ldl_kernel(ptr
+ 4);
462 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
463 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
464 if (!(e2
& DESC_P_MASK
))
465 raise_exception_err(EXCP0A_TSS
, new_ldt
& 0xfffc);
466 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
469 /* load the segments */
470 if (!(new_eflags
& VM_MASK
)) {
471 tss_load_seg(R_CS
, new_segs
[R_CS
]);
472 tss_load_seg(R_SS
, new_segs
[R_SS
]);
473 tss_load_seg(R_ES
, new_segs
[R_ES
]);
474 tss_load_seg(R_DS
, new_segs
[R_DS
]);
475 tss_load_seg(R_FS
, new_segs
[R_FS
]);
476 tss_load_seg(R_GS
, new_segs
[R_GS
]);
479 /* check that EIP is in the CS segment limits */
480 if (new_eip
> env
->segs
[R_CS
].limit
) {
481 /* XXX: different exception if CALL ? */
482 raise_exception_err(EXCP0D_GPF
, 0);
486 /* check if Port I/O is allowed in TSS */
487 static inline void check_io(int addr
, int size
)
489 int io_offset
, val
, mask
;
491 /* TSS must be a valid 32 bit one */
492 if (!(env
->tr
.flags
& DESC_P_MASK
) ||
493 ((env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf) != 9 ||
496 io_offset
= lduw_kernel(env
->tr
.base
+ 0x66);
497 io_offset
+= (addr
>> 3);
498 /* Note: the check needs two bytes */
499 if ((io_offset
+ 1) > env
->tr
.limit
)
501 val
= lduw_kernel(env
->tr
.base
+ io_offset
);
503 mask
= (1 << size
) - 1;
504 /* all bits must be zero to allow the I/O */
505 if ((val
& mask
) != 0) {
507 raise_exception_err(EXCP0D_GPF
, 0);
511 void check_iob_T0(void)
516 void check_iow_T0(void)
521 void check_iol_T0(void)
526 void check_iob_DX(void)
528 check_io(EDX
& 0xffff, 1);
531 void check_iow_DX(void)
533 check_io(EDX
& 0xffff, 2);
536 void check_iol_DX(void)
538 check_io(EDX
& 0xffff, 4);
541 static inline unsigned int get_sp_mask(unsigned int e2
)
543 if (e2
& DESC_B_MASK
)
550 #define SET_ESP(val, sp_mask)\
552 if ((sp_mask) == 0xffff)\
553 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
554 else if ((sp_mask) == 0xffffffffLL)\
555 ESP = (uint32_t)(val);\
560 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
563 /* XXX: add a is_user flag to have proper security support */
564 #define PUSHW(ssp, sp, sp_mask, val)\
567 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
570 #define PUSHL(ssp, sp, sp_mask, val)\
573 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
576 #define POPW(ssp, sp, sp_mask, val)\
578 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
582 #define POPL(ssp, sp, sp_mask, val)\
584 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
588 /* protected mode interrupt */
589 static void do_interrupt_protected(int intno
, int is_int
, int error_code
,
590 unsigned int next_eip
, int is_hw
)
593 target_ulong ptr
, ssp
;
594 int type
, dpl
, selector
, ss_dpl
, cpl
;
595 int has_error_code
, new_stack
, shift
;
596 uint32_t e1
, e2
, offset
, ss
, esp
, ss_e1
, ss_e2
;
597 uint32_t old_eip
, sp_mask
;
598 int svm_should_check
= 1;
600 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
602 svm_should_check
= 0;
606 && (INTERCEPTEDl(_exceptions
, 1 << intno
)
608 raise_interrupt(intno
, is_int
, error_code
, 0);
611 if (!is_int
&& !is_hw
) {
630 if (intno
* 8 + 7 > dt
->limit
)
631 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
632 ptr
= dt
->base
+ intno
* 8;
633 e1
= ldl_kernel(ptr
);
634 e2
= ldl_kernel(ptr
+ 4);
635 /* check gate type */
636 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
638 case 5: /* task gate */
639 /* must do that check here to return the correct error code */
640 if (!(e2
& DESC_P_MASK
))
641 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
642 switch_tss(intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
643 if (has_error_code
) {
646 /* push the error code */
647 type
= (env
->tr
.flags
>> DESC_TYPE_SHIFT
) & 0xf;
649 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
)
653 esp
= (ESP
- (2 << shift
)) & mask
;
654 ssp
= env
->segs
[R_SS
].base
+ esp
;
656 stl_kernel(ssp
, error_code
);
658 stw_kernel(ssp
, error_code
);
662 case 6: /* 286 interrupt gate */
663 case 7: /* 286 trap gate */
664 case 14: /* 386 interrupt gate */
665 case 15: /* 386 trap gate */
668 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
671 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
672 cpl
= env
->hflags
& HF_CPL_MASK
;
673 /* check privledge if software int */
674 if (is_int
&& dpl
< cpl
)
675 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
676 /* check valid bit */
677 if (!(e2
& DESC_P_MASK
))
678 raise_exception_err(EXCP0B_NOSEG
, intno
* 8 + 2);
680 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
681 if ((selector
& 0xfffc) == 0)
682 raise_exception_err(EXCP0D_GPF
, 0);
684 if (load_segment(&e1
, &e2
, selector
) != 0)
685 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
686 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
687 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
688 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
690 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
691 if (!(e2
& DESC_P_MASK
))
692 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
693 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
694 /* to inner privilege */
695 get_ss_esp_from_tss(&ss
, &esp
, dpl
);
696 if ((ss
& 0xfffc) == 0)
697 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
699 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
700 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
701 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
702 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
704 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
705 if (!(ss_e2
& DESC_S_MASK
) ||
706 (ss_e2
& DESC_CS_MASK
) ||
707 !(ss_e2
& DESC_W_MASK
))
708 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
709 if (!(ss_e2
& DESC_P_MASK
))
710 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
712 sp_mask
= get_sp_mask(ss_e2
);
713 ssp
= get_seg_base(ss_e1
, ss_e2
);
714 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
715 /* to same privilege */
716 if (env
->eflags
& VM_MASK
)
717 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
719 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
720 ssp
= env
->segs
[R_SS
].base
;
724 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
725 new_stack
= 0; /* avoid warning */
726 sp_mask
= 0; /* avoid warning */
727 ssp
= 0; /* avoid warning */
728 esp
= 0; /* avoid warning */
734 /* XXX: check that enough room is available */
735 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
736 if (env
->eflags
& VM_MASK
)
742 if (env
->eflags
& VM_MASK
) {
743 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
744 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
745 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
746 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
748 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
749 PUSHL(ssp
, esp
, sp_mask
, ESP
);
751 PUSHL(ssp
, esp
, sp_mask
, compute_eflags());
752 PUSHL(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
753 PUSHL(ssp
, esp
, sp_mask
, old_eip
);
754 if (has_error_code
) {
755 PUSHL(ssp
, esp
, sp_mask
, error_code
);
759 if (env
->eflags
& VM_MASK
) {
760 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_GS
].selector
);
761 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_FS
].selector
);
762 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_DS
].selector
);
763 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_ES
].selector
);
765 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_SS
].selector
);
766 PUSHW(ssp
, esp
, sp_mask
, ESP
);
768 PUSHW(ssp
, esp
, sp_mask
, compute_eflags());
769 PUSHW(ssp
, esp
, sp_mask
, env
->segs
[R_CS
].selector
);
770 PUSHW(ssp
, esp
, sp_mask
, old_eip
);
771 if (has_error_code
) {
772 PUSHW(ssp
, esp
, sp_mask
, error_code
);
777 if (env
->eflags
& VM_MASK
) {
778 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
779 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
780 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
781 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
783 ss
= (ss
& ~3) | dpl
;
784 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
785 ssp
, get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
787 SET_ESP(esp
, sp_mask
);
789 selector
= (selector
& ~3) | dpl
;
790 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
791 get_seg_base(e1
, e2
),
792 get_seg_limit(e1
, e2
),
794 cpu_x86_set_cpl(env
, dpl
);
797 /* interrupt gate clear IF mask */
798 if ((type
& 1) == 0) {
799 env
->eflags
&= ~IF_MASK
;
801 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
806 #define PUSHQ(sp, val)\
809 stq_kernel(sp, (val));\
812 #define POPQ(sp, val)\
814 val = ldq_kernel(sp);\
818 static inline target_ulong
get_rsp_from_tss(int level
)
823 printf("TR: base=" TARGET_FMT_lx
" limit=%x\n",
824 env
->tr
.base
, env
->tr
.limit
);
827 if (!(env
->tr
.flags
& DESC_P_MASK
))
828 cpu_abort(env
, "invalid tss");
829 index
= 8 * level
+ 4;
830 if ((index
+ 7) > env
->tr
.limit
)
831 raise_exception_err(EXCP0A_TSS
, env
->tr
.selector
& 0xfffc);
832 return ldq_kernel(env
->tr
.base
+ index
);
835 /* 64 bit interrupt */
836 static void do_interrupt64(int intno
, int is_int
, int error_code
,
837 target_ulong next_eip
, int is_hw
)
841 int type
, dpl
, selector
, cpl
, ist
;
842 int has_error_code
, new_stack
;
843 uint32_t e1
, e2
, e3
, ss
;
844 target_ulong old_eip
, esp
, offset
;
845 int svm_should_check
= 1;
847 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
849 svm_should_check
= 0;
852 && INTERCEPTEDl(_exceptions
, 1 << intno
)
854 raise_interrupt(intno
, is_int
, error_code
, 0);
857 if (!is_int
&& !is_hw
) {
876 if (intno
* 16 + 15 > dt
->limit
)
877 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
878 ptr
= dt
->base
+ intno
* 16;
879 e1
= ldl_kernel(ptr
);
880 e2
= ldl_kernel(ptr
+ 4);
881 e3
= ldl_kernel(ptr
+ 8);
882 /* check gate type */
883 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
889 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
892 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
893 cpl
= env
->hflags
& HF_CPL_MASK
;
894 /* check privledge if software int */
895 if (is_int
&& dpl
< cpl
)
896 raise_exception_err(EXCP0D_GPF
, intno
* 16 + 2);
897 /* check valid bit */
898 if (!(e2
& DESC_P_MASK
))
899 raise_exception_err(EXCP0B_NOSEG
, intno
* 16 + 2);
901 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
903 if ((selector
& 0xfffc) == 0)
904 raise_exception_err(EXCP0D_GPF
, 0);
906 if (load_segment(&e1
, &e2
, selector
) != 0)
907 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
908 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
909 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
910 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
912 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
913 if (!(e2
& DESC_P_MASK
))
914 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
915 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
))
916 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
917 if ((!(e2
& DESC_C_MASK
) && dpl
< cpl
) || ist
!= 0) {
918 /* to inner privilege */
920 esp
= get_rsp_from_tss(ist
+ 3);
922 esp
= get_rsp_from_tss(dpl
);
923 esp
&= ~0xfLL
; /* align stack */
926 } else if ((e2
& DESC_C_MASK
) || dpl
== cpl
) {
927 /* to same privilege */
928 if (env
->eflags
& VM_MASK
)
929 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
932 esp
= get_rsp_from_tss(ist
+ 3);
935 esp
&= ~0xfLL
; /* align stack */
938 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
939 new_stack
= 0; /* avoid warning */
940 esp
= 0; /* avoid warning */
943 PUSHQ(esp
, env
->segs
[R_SS
].selector
);
945 PUSHQ(esp
, compute_eflags());
946 PUSHQ(esp
, env
->segs
[R_CS
].selector
);
948 if (has_error_code
) {
949 PUSHQ(esp
, error_code
);
954 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
958 selector
= (selector
& ~3) | dpl
;
959 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
960 get_seg_base(e1
, e2
),
961 get_seg_limit(e1
, e2
),
963 cpu_x86_set_cpl(env
, dpl
);
966 /* interrupt gate clear IF mask */
967 if ((type
& 1) == 0) {
968 env
->eflags
&= ~IF_MASK
;
970 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
974 #if defined(CONFIG_USER_ONLY)
975 void helper_syscall(int next_eip_addend
)
977 env
->exception_index
= EXCP_SYSCALL
;
978 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
982 void helper_syscall(int next_eip_addend
)
986 if (!(env
->efer
& MSR_EFER_SCE
)) {
987 raise_exception_err(EXCP06_ILLOP
, 0);
989 selector
= (env
->star
>> 32) & 0xffff;
991 if (env
->hflags
& HF_LMA_MASK
) {
994 ECX
= env
->eip
+ next_eip_addend
;
995 env
->regs
[11] = compute_eflags();
997 code64
= env
->hflags
& HF_CS64_MASK
;
999 cpu_x86_set_cpl(env
, 0);
1000 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1002 DESC_G_MASK
| DESC_P_MASK
|
1004 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
| DESC_L_MASK
);
1005 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1007 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1009 DESC_W_MASK
| DESC_A_MASK
);
1010 env
->eflags
&= ~env
->fmask
;
1012 env
->eip
= env
->lstar
;
1014 env
->eip
= env
->cstar
;
1018 ECX
= (uint32_t)(env
->eip
+ next_eip_addend
);
1020 cpu_x86_set_cpl(env
, 0);
1021 cpu_x86_load_seg_cache(env
, R_CS
, selector
& 0xfffc,
1023 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1025 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1026 cpu_x86_load_seg_cache(env
, R_SS
, (selector
+ 8) & 0xfffc,
1028 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1030 DESC_W_MASK
| DESC_A_MASK
);
1031 env
->eflags
&= ~(IF_MASK
| RF_MASK
| VM_MASK
);
1032 env
->eip
= (uint32_t)env
->star
;
1037 void helper_sysret(int dflag
)
1041 if (!(env
->efer
& MSR_EFER_SCE
)) {
1042 raise_exception_err(EXCP06_ILLOP
, 0);
1044 cpl
= env
->hflags
& HF_CPL_MASK
;
1045 if (!(env
->cr
[0] & CR0_PE_MASK
) || cpl
!= 0) {
1046 raise_exception_err(EXCP0D_GPF
, 0);
1048 selector
= (env
->star
>> 48) & 0xffff;
1049 #ifdef TARGET_X86_64
1050 if (env
->hflags
& HF_LMA_MASK
) {
1052 cpu_x86_load_seg_cache(env
, R_CS
, (selector
+ 16) | 3,
1054 DESC_G_MASK
| DESC_P_MASK
|
1055 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1056 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
1060 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1062 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1063 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1064 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1065 env
->eip
= (uint32_t)ECX
;
1067 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1069 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1070 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1071 DESC_W_MASK
| DESC_A_MASK
);
1072 load_eflags((uint32_t)(env
->regs
[11]), TF_MASK
| AC_MASK
| ID_MASK
|
1073 IF_MASK
| IOPL_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1074 cpu_x86_set_cpl(env
, 3);
1078 cpu_x86_load_seg_cache(env
, R_CS
, selector
| 3,
1080 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1081 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1082 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
1083 env
->eip
= (uint32_t)ECX
;
1084 cpu_x86_load_seg_cache(env
, R_SS
, selector
+ 8,
1086 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
1087 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
1088 DESC_W_MASK
| DESC_A_MASK
);
1089 env
->eflags
|= IF_MASK
;
1090 cpu_x86_set_cpl(env
, 3);
1093 if (kqemu_is_ok(env
)) {
1094 if (env
->hflags
& HF_LMA_MASK
)
1095 CC_OP
= CC_OP_EFLAGS
;
1096 env
->exception_index
= -1;
1102 /* real mode interrupt */
1103 static void do_interrupt_real(int intno
, int is_int
, int error_code
,
1104 unsigned int next_eip
)
1107 target_ulong ptr
, ssp
;
1109 uint32_t offset
, esp
;
1110 uint32_t old_cs
, old_eip
;
1111 int svm_should_check
= 1;
1113 if ((env
->intercept
& INTERCEPT_SVM_MASK
) && !is_int
&& next_eip
==-1) {
1115 svm_should_check
= 0;
1117 if (svm_should_check
1118 && INTERCEPTEDl(_exceptions
, 1 << intno
)
1120 raise_interrupt(intno
, is_int
, error_code
, 0);
1122 /* real mode (simpler !) */
1124 if (intno
* 4 + 3 > dt
->limit
)
1125 raise_exception_err(EXCP0D_GPF
, intno
* 8 + 2);
1126 ptr
= dt
->base
+ intno
* 4;
1127 offset
= lduw_kernel(ptr
);
1128 selector
= lduw_kernel(ptr
+ 2);
1130 ssp
= env
->segs
[R_SS
].base
;
1135 old_cs
= env
->segs
[R_CS
].selector
;
1136 /* XXX: use SS segment size ? */
1137 PUSHW(ssp
, esp
, 0xffff, compute_eflags());
1138 PUSHW(ssp
, esp
, 0xffff, old_cs
);
1139 PUSHW(ssp
, esp
, 0xffff, old_eip
);
1141 /* update processor state */
1142 ESP
= (ESP
& ~0xffff) | (esp
& 0xffff);
1144 env
->segs
[R_CS
].selector
= selector
;
1145 env
->segs
[R_CS
].base
= (selector
<< 4);
1146 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
1149 /* fake user mode interrupt */
1150 void do_interrupt_user(int intno
, int is_int
, int error_code
,
1151 target_ulong next_eip
)
1155 int dpl
, cpl
, shift
;
1159 if (env
->hflags
& HF_LMA_MASK
) {
1164 ptr
= dt
->base
+ (intno
<< shift
);
1165 e2
= ldl_kernel(ptr
+ 4);
1167 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1168 cpl
= env
->hflags
& HF_CPL_MASK
;
1169 /* check privledge if software int */
1170 if (is_int
&& dpl
< cpl
)
1171 raise_exception_err(EXCP0D_GPF
, (intno
<< shift
) + 2);
1173 /* Since we emulate only user space, we cannot do more than
1174 exiting the emulation with the suitable exception and error
1181 * Begin execution of an interruption. is_int is TRUE if coming from
1182 * the int instruction. next_eip is the EIP value AFTER the interrupt
1183 * instruction. It is only relevant if is_int is TRUE.
1185 void do_interrupt(int intno
, int is_int
, int error_code
,
1186 target_ulong next_eip
, int is_hw
)
1188 if (loglevel
& CPU_LOG_INT
) {
1189 if ((env
->cr
[0] & CR0_PE_MASK
)) {
1191 fprintf(logfile
, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
" pc=" TARGET_FMT_lx
" SP=%04x:" TARGET_FMT_lx
,
1192 count
, intno
, error_code
, is_int
,
1193 env
->hflags
& HF_CPL_MASK
,
1194 env
->segs
[R_CS
].selector
, EIP
,
1195 (int)env
->segs
[R_CS
].base
+ EIP
,
1196 env
->segs
[R_SS
].selector
, ESP
);
1197 if (intno
== 0x0e) {
1198 fprintf(logfile
, " CR2=" TARGET_FMT_lx
, env
->cr
[2]);
1200 fprintf(logfile
, " EAX=" TARGET_FMT_lx
, EAX
);
1202 fprintf(logfile
, "\n");
1203 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1208 fprintf(logfile
, " code=");
1209 ptr
= env
->segs
[R_CS
].base
+ env
->eip
;
1210 for(i
= 0; i
< 16; i
++) {
1211 fprintf(logfile
, " %02x", ldub(ptr
+ i
));
1213 fprintf(logfile
, "\n");
1219 if (env
->cr
[0] & CR0_PE_MASK
) {
1221 if (env
->hflags
& HF_LMA_MASK
) {
1222 do_interrupt64(intno
, is_int
, error_code
, next_eip
, is_hw
);
1226 do_interrupt_protected(intno
, is_int
, error_code
, next_eip
, is_hw
);
1229 do_interrupt_real(intno
, is_int
, error_code
, next_eip
);
1234 * Check nested exceptions and change to double or triple fault if
1235 * needed. It should only be called, if this is not an interrupt.
1236 * Returns the new exception number.
1238 int check_exception(int intno
, int *error_code
)
1240 char first_contributory
= env
->old_exception
== 0 ||
1241 (env
->old_exception
>= 10 &&
1242 env
->old_exception
<= 13);
1243 char second_contributory
= intno
== 0 ||
1244 (intno
>= 10 && intno
<= 13);
1246 if (loglevel
& CPU_LOG_INT
)
1247 fprintf(logfile
, "check_exception old: %x new %x\n",
1248 env
->old_exception
, intno
);
1250 if (env
->old_exception
== EXCP08_DBLE
)
1251 cpu_abort(env
, "triple fault");
1253 if ((first_contributory
&& second_contributory
)
1254 || (env
->old_exception
== EXCP0E_PAGE
&&
1255 (second_contributory
|| (intno
== EXCP0E_PAGE
)))) {
1256 intno
= EXCP08_DBLE
;
1260 if (second_contributory
|| (intno
== EXCP0E_PAGE
) ||
1261 (intno
== EXCP08_DBLE
))
1262 env
->old_exception
= intno
;
1268 * Signal an interruption. It is executed in the main CPU loop.
1269 * is_int is TRUE if coming from the int instruction. next_eip is the
1270 * EIP value AFTER the interrupt instruction. It is only relevant if
1273 void raise_interrupt(int intno
, int is_int
, int error_code
,
1274 int next_eip_addend
)
1277 svm_check_intercept_param(SVM_EXIT_EXCP_BASE
+ intno
, error_code
);
1278 intno
= check_exception(intno
, &error_code
);
1281 env
->exception_index
= intno
;
1282 env
->error_code
= error_code
;
1283 env
->exception_is_int
= is_int
;
1284 env
->exception_next_eip
= env
->eip
+ next_eip_addend
;
1288 /* same as raise_exception_err, but do not restore global registers */
1289 static void raise_exception_err_norestore(int exception_index
, int error_code
)
1291 exception_index
= check_exception(exception_index
, &error_code
);
1293 env
->exception_index
= exception_index
;
1294 env
->error_code
= error_code
;
1295 env
->exception_is_int
= 0;
1296 env
->exception_next_eip
= 0;
1297 longjmp(env
->jmp_env
, 1);
/* shortcuts to generate exceptions */

/* Raise an exception with an error code.  The parenthesized name
   suppresses the debug macro of the same name defined above. */
void (raise_exception_err)(int exception_index, int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* Raise an exception without an error code. */
void raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
1314 #if defined(CONFIG_USER_ONLY)
1316 void do_smm_enter(void)
1320 void helper_rsm(void)
1326 #ifdef TARGET_X86_64
1327 #define SMM_REVISION_ID 0x00020064
1329 #define SMM_REVISION_ID 0x00020000
1332 void do_smm_enter(void)
1334 target_ulong sm_state
;
1338 if (loglevel
& CPU_LOG_INT
) {
1339 fprintf(logfile
, "SMM: enter\n");
1340 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1343 env
->hflags
|= HF_SMM_MASK
;
1344 cpu_smm_update(env
);
1346 sm_state
= env
->smbase
+ 0x8000;
1348 #ifdef TARGET_X86_64
1349 for(i
= 0; i
< 6; i
++) {
1351 offset
= 0x7e00 + i
* 16;
1352 stw_phys(sm_state
+ offset
, dt
->selector
);
1353 stw_phys(sm_state
+ offset
+ 2, (dt
->flags
>> 8) & 0xf0ff);
1354 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1355 stq_phys(sm_state
+ offset
+ 8, dt
->base
);
1358 stq_phys(sm_state
+ 0x7e68, env
->gdt
.base
);
1359 stl_phys(sm_state
+ 0x7e64, env
->gdt
.limit
);
1361 stw_phys(sm_state
+ 0x7e70, env
->ldt
.selector
);
1362 stq_phys(sm_state
+ 0x7e78, env
->ldt
.base
);
1363 stl_phys(sm_state
+ 0x7e74, env
->ldt
.limit
);
1364 stw_phys(sm_state
+ 0x7e72, (env
->ldt
.flags
>> 8) & 0xf0ff);
1366 stq_phys(sm_state
+ 0x7e88, env
->idt
.base
);
1367 stl_phys(sm_state
+ 0x7e84, env
->idt
.limit
);
1369 stw_phys(sm_state
+ 0x7e90, env
->tr
.selector
);
1370 stq_phys(sm_state
+ 0x7e98, env
->tr
.base
);
1371 stl_phys(sm_state
+ 0x7e94, env
->tr
.limit
);
1372 stw_phys(sm_state
+ 0x7e92, (env
->tr
.flags
>> 8) & 0xf0ff);
1374 stq_phys(sm_state
+ 0x7ed0, env
->efer
);
1376 stq_phys(sm_state
+ 0x7ff8, EAX
);
1377 stq_phys(sm_state
+ 0x7ff0, ECX
);
1378 stq_phys(sm_state
+ 0x7fe8, EDX
);
1379 stq_phys(sm_state
+ 0x7fe0, EBX
);
1380 stq_phys(sm_state
+ 0x7fd8, ESP
);
1381 stq_phys(sm_state
+ 0x7fd0, EBP
);
1382 stq_phys(sm_state
+ 0x7fc8, ESI
);
1383 stq_phys(sm_state
+ 0x7fc0, EDI
);
1384 for(i
= 8; i
< 16; i
++)
1385 stq_phys(sm_state
+ 0x7ff8 - i
* 8, env
->regs
[i
]);
1386 stq_phys(sm_state
+ 0x7f78, env
->eip
);
1387 stl_phys(sm_state
+ 0x7f70, compute_eflags());
1388 stl_phys(sm_state
+ 0x7f68, env
->dr
[6]);
1389 stl_phys(sm_state
+ 0x7f60, env
->dr
[7]);
1391 stl_phys(sm_state
+ 0x7f48, env
->cr
[4]);
1392 stl_phys(sm_state
+ 0x7f50, env
->cr
[3]);
1393 stl_phys(sm_state
+ 0x7f58, env
->cr
[0]);
1395 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1396 stl_phys(sm_state
+ 0x7f00, env
->smbase
);
1398 stl_phys(sm_state
+ 0x7ffc, env
->cr
[0]);
1399 stl_phys(sm_state
+ 0x7ff8, env
->cr
[3]);
1400 stl_phys(sm_state
+ 0x7ff4, compute_eflags());
1401 stl_phys(sm_state
+ 0x7ff0, env
->eip
);
1402 stl_phys(sm_state
+ 0x7fec, EDI
);
1403 stl_phys(sm_state
+ 0x7fe8, ESI
);
1404 stl_phys(sm_state
+ 0x7fe4, EBP
);
1405 stl_phys(sm_state
+ 0x7fe0, ESP
);
1406 stl_phys(sm_state
+ 0x7fdc, EBX
);
1407 stl_phys(sm_state
+ 0x7fd8, EDX
);
1408 stl_phys(sm_state
+ 0x7fd4, ECX
);
1409 stl_phys(sm_state
+ 0x7fd0, EAX
);
1410 stl_phys(sm_state
+ 0x7fcc, env
->dr
[6]);
1411 stl_phys(sm_state
+ 0x7fc8, env
->dr
[7]);
1413 stl_phys(sm_state
+ 0x7fc4, env
->tr
.selector
);
1414 stl_phys(sm_state
+ 0x7f64, env
->tr
.base
);
1415 stl_phys(sm_state
+ 0x7f60, env
->tr
.limit
);
1416 stl_phys(sm_state
+ 0x7f5c, (env
->tr
.flags
>> 8) & 0xf0ff);
1418 stl_phys(sm_state
+ 0x7fc0, env
->ldt
.selector
);
1419 stl_phys(sm_state
+ 0x7f80, env
->ldt
.base
);
1420 stl_phys(sm_state
+ 0x7f7c, env
->ldt
.limit
);
1421 stl_phys(sm_state
+ 0x7f78, (env
->ldt
.flags
>> 8) & 0xf0ff);
1423 stl_phys(sm_state
+ 0x7f74, env
->gdt
.base
);
1424 stl_phys(sm_state
+ 0x7f70, env
->gdt
.limit
);
1426 stl_phys(sm_state
+ 0x7f58, env
->idt
.base
);
1427 stl_phys(sm_state
+ 0x7f54, env
->idt
.limit
);
1429 for(i
= 0; i
< 6; i
++) {
1432 offset
= 0x7f84 + i
* 12;
1434 offset
= 0x7f2c + (i
- 3) * 12;
1435 stl_phys(sm_state
+ 0x7fa8 + i
* 4, dt
->selector
);
1436 stl_phys(sm_state
+ offset
+ 8, dt
->base
);
1437 stl_phys(sm_state
+ offset
+ 4, dt
->limit
);
1438 stl_phys(sm_state
+ offset
, (dt
->flags
>> 8) & 0xf0ff);
1440 stl_phys(sm_state
+ 0x7f14, env
->cr
[4]);
1442 stl_phys(sm_state
+ 0x7efc, SMM_REVISION_ID
);
1443 stl_phys(sm_state
+ 0x7ef8, env
->smbase
);
1445 /* init SMM cpu state */
1447 #ifdef TARGET_X86_64
1449 env
->hflags
&= ~HF_LMA_MASK
;
1451 load_eflags(0, ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1452 env
->eip
= 0x00008000;
1453 cpu_x86_load_seg_cache(env
, R_CS
, (env
->smbase
>> 4) & 0xffff, env
->smbase
,
1455 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0xffffffff, 0);
1456 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0xffffffff, 0);
1457 cpu_x86_load_seg_cache(env
, R_SS
, 0, 0, 0xffffffff, 0);
1458 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0xffffffff, 0);
1459 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0xffffffff, 0);
1461 cpu_x86_update_cr0(env
,
1462 env
->cr
[0] & ~(CR0_PE_MASK
| CR0_EM_MASK
| CR0_TS_MASK
| CR0_PG_MASK
));
1463 cpu_x86_update_cr4(env
, 0);
1464 env
->dr
[7] = 0x00000400;
1465 CC_OP
= CC_OP_EFLAGS
;
1468 void helper_rsm(void)
1470 target_ulong sm_state
;
1474 sm_state
= env
->smbase
+ 0x8000;
1475 #ifdef TARGET_X86_64
1476 env
->efer
= ldq_phys(sm_state
+ 0x7ed0);
1477 if (env
->efer
& MSR_EFER_LMA
)
1478 env
->hflags
|= HF_LMA_MASK
;
1480 env
->hflags
&= ~HF_LMA_MASK
;
1482 for(i
= 0; i
< 6; i
++) {
1483 offset
= 0x7e00 + i
* 16;
1484 cpu_x86_load_seg_cache(env
, i
,
1485 lduw_phys(sm_state
+ offset
),
1486 ldq_phys(sm_state
+ offset
+ 8),
1487 ldl_phys(sm_state
+ offset
+ 4),
1488 (lduw_phys(sm_state
+ offset
+ 2) & 0xf0ff) << 8);
1491 env
->gdt
.base
= ldq_phys(sm_state
+ 0x7e68);
1492 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7e64);
1494 env
->ldt
.selector
= lduw_phys(sm_state
+ 0x7e70);
1495 env
->ldt
.base
= ldq_phys(sm_state
+ 0x7e78);
1496 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7e74);
1497 env
->ldt
.flags
= (lduw_phys(sm_state
+ 0x7e72) & 0xf0ff) << 8;
1499 env
->idt
.base
= ldq_phys(sm_state
+ 0x7e88);
1500 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7e84);
1502 env
->tr
.selector
= lduw_phys(sm_state
+ 0x7e90);
1503 env
->tr
.base
= ldq_phys(sm_state
+ 0x7e98);
1504 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7e94);
1505 env
->tr
.flags
= (lduw_phys(sm_state
+ 0x7e92) & 0xf0ff) << 8;
1507 EAX
= ldq_phys(sm_state
+ 0x7ff8);
1508 ECX
= ldq_phys(sm_state
+ 0x7ff0);
1509 EDX
= ldq_phys(sm_state
+ 0x7fe8);
1510 EBX
= ldq_phys(sm_state
+ 0x7fe0);
1511 ESP
= ldq_phys(sm_state
+ 0x7fd8);
1512 EBP
= ldq_phys(sm_state
+ 0x7fd0);
1513 ESI
= ldq_phys(sm_state
+ 0x7fc8);
1514 EDI
= ldq_phys(sm_state
+ 0x7fc0);
1515 for(i
= 8; i
< 16; i
++)
1516 env
->regs
[i
] = ldq_phys(sm_state
+ 0x7ff8 - i
* 8);
1517 env
->eip
= ldq_phys(sm_state
+ 0x7f78);
1518 load_eflags(ldl_phys(sm_state
+ 0x7f70),
1519 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1520 env
->dr
[6] = ldl_phys(sm_state
+ 0x7f68);
1521 env
->dr
[7] = ldl_phys(sm_state
+ 0x7f60);
1523 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f48));
1524 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7f50));
1525 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7f58));
1527 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1528 if (val
& 0x20000) {
1529 env
->smbase
= ldl_phys(sm_state
+ 0x7f00) & ~0x7fff;
1532 cpu_x86_update_cr0(env
, ldl_phys(sm_state
+ 0x7ffc));
1533 cpu_x86_update_cr3(env
, ldl_phys(sm_state
+ 0x7ff8));
1534 load_eflags(ldl_phys(sm_state
+ 0x7ff4),
1535 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
1536 env
->eip
= ldl_phys(sm_state
+ 0x7ff0);
1537 EDI
= ldl_phys(sm_state
+ 0x7fec);
1538 ESI
= ldl_phys(sm_state
+ 0x7fe8);
1539 EBP
= ldl_phys(sm_state
+ 0x7fe4);
1540 ESP
= ldl_phys(sm_state
+ 0x7fe0);
1541 EBX
= ldl_phys(sm_state
+ 0x7fdc);
1542 EDX
= ldl_phys(sm_state
+ 0x7fd8);
1543 ECX
= ldl_phys(sm_state
+ 0x7fd4);
1544 EAX
= ldl_phys(sm_state
+ 0x7fd0);
1545 env
->dr
[6] = ldl_phys(sm_state
+ 0x7fcc);
1546 env
->dr
[7] = ldl_phys(sm_state
+ 0x7fc8);
1548 env
->tr
.selector
= ldl_phys(sm_state
+ 0x7fc4) & 0xffff;
1549 env
->tr
.base
= ldl_phys(sm_state
+ 0x7f64);
1550 env
->tr
.limit
= ldl_phys(sm_state
+ 0x7f60);
1551 env
->tr
.flags
= (ldl_phys(sm_state
+ 0x7f5c) & 0xf0ff) << 8;
1553 env
->ldt
.selector
= ldl_phys(sm_state
+ 0x7fc0) & 0xffff;
1554 env
->ldt
.base
= ldl_phys(sm_state
+ 0x7f80);
1555 env
->ldt
.limit
= ldl_phys(sm_state
+ 0x7f7c);
1556 env
->ldt
.flags
= (ldl_phys(sm_state
+ 0x7f78) & 0xf0ff) << 8;
1558 env
->gdt
.base
= ldl_phys(sm_state
+ 0x7f74);
1559 env
->gdt
.limit
= ldl_phys(sm_state
+ 0x7f70);
1561 env
->idt
.base
= ldl_phys(sm_state
+ 0x7f58);
1562 env
->idt
.limit
= ldl_phys(sm_state
+ 0x7f54);
1564 for(i
= 0; i
< 6; i
++) {
1566 offset
= 0x7f84 + i
* 12;
1568 offset
= 0x7f2c + (i
- 3) * 12;
1569 cpu_x86_load_seg_cache(env
, i
,
1570 ldl_phys(sm_state
+ 0x7fa8 + i
* 4) & 0xffff,
1571 ldl_phys(sm_state
+ offset
+ 8),
1572 ldl_phys(sm_state
+ offset
+ 4),
1573 (ldl_phys(sm_state
+ offset
) & 0xf0ff) << 8);
1575 cpu_x86_update_cr4(env
, ldl_phys(sm_state
+ 0x7f14));
1577 val
= ldl_phys(sm_state
+ 0x7efc); /* revision ID */
1578 if (val
& 0x20000) {
1579 env
->smbase
= ldl_phys(sm_state
+ 0x7ef8) & ~0x7fff;
1582 CC_OP
= CC_OP_EFLAGS
;
1583 env
->hflags
&= ~HF_SMM_MASK
;
1584 cpu_smm_update(env
);
1586 if (loglevel
& CPU_LOG_INT
) {
1587 fprintf(logfile
, "SMM: after RSM\n");
1588 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
1592 #endif /* !CONFIG_USER_ONLY */
#ifdef BUGGY_GCC_DIV64
/* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
   call it from another function */

/* Unsigned 64/32 division: quotient via *q_ptr, returns remainder. */
uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
{
    *q_ptr = num / den;
    return num % den;
}

/* Signed 64/32 division: quotient via *q_ptr, returns remainder
   (C truncating semantics, remainder has the sign of num). */
int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
{
    *q_ptr = num / den;
    return num % den;
}
#endif
1611 void helper_divl_EAX_T0(void)
1613 unsigned int den
, r
;
1616 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1619 raise_exception(EXCP00_DIVZ
);
1621 #ifdef BUGGY_GCC_DIV64
1622 r
= div32(&q
, num
, den
);
1628 raise_exception(EXCP00_DIVZ
);
1633 void helper_idivl_EAX_T0(void)
1638 num
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
1641 raise_exception(EXCP00_DIVZ
);
1643 #ifdef BUGGY_GCC_DIV64
1644 r
= idiv32(&q
, num
, den
);
1649 if (q
!= (int32_t)q
)
1650 raise_exception(EXCP00_DIVZ
);
1655 void helper_cmpxchg8b(void)
1660 eflags
= cc_table
[CC_OP
].compute_all();
1662 if (d
== (((uint64_t)EDX
<< 32) | EAX
)) {
1663 stq(A0
, ((uint64_t)ECX
<< 32) | EBX
);
1673 void helper_single_step()
1675 env
->dr
[6] |= 0x4000;
1676 raise_exception(EXCP01_SSTP
);
1679 void helper_cpuid(void)
1682 index
= (uint32_t)EAX
;
1684 /* test if maximum index reached */
1685 if (index
& 0x80000000) {
1686 if (index
> env
->cpuid_xlevel
)
1687 index
= env
->cpuid_level
;
1689 if (index
> env
->cpuid_level
)
1690 index
= env
->cpuid_level
;
1695 EAX
= env
->cpuid_level
;
1696 EBX
= env
->cpuid_vendor1
;
1697 EDX
= env
->cpuid_vendor2
;
1698 ECX
= env
->cpuid_vendor3
;
1701 EAX
= env
->cpuid_version
;
1702 EBX
= (env
->cpuid_apic_id
<< 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1703 ECX
= env
->cpuid_ext_features
;
1704 EDX
= env
->cpuid_features
;
1707 /* cache info: needed for Pentium Pro compatibility */
1714 EAX
= env
->cpuid_xlevel
;
1715 EBX
= env
->cpuid_vendor1
;
1716 EDX
= env
->cpuid_vendor2
;
1717 ECX
= env
->cpuid_vendor3
;
1720 EAX
= env
->cpuid_features
;
1722 ECX
= env
->cpuid_ext3_features
;
1723 EDX
= env
->cpuid_ext2_features
;
1728 EAX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 0];
1729 EBX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 1];
1730 ECX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 2];
1731 EDX
= env
->cpuid_model
[(index
- 0x80000002) * 4 + 3];
1734 /* cache info (L1 cache) */
1741 /* cache info (L2 cache) */
1748 /* virtual & phys address size in low 2 bytes. */
1755 /* reserved values: zero */
1764 void helper_enter_level(int level
, int data32
)
1767 uint32_t esp_mask
, esp
, ebp
;
1769 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1770 ssp
= env
->segs
[R_SS
].base
;
1779 stl(ssp
+ (esp
& esp_mask
), ldl(ssp
+ (ebp
& esp_mask
)));
1782 stl(ssp
+ (esp
& esp_mask
), T1
);
1789 stw(ssp
+ (esp
& esp_mask
), lduw(ssp
+ (ebp
& esp_mask
)));
1792 stw(ssp
+ (esp
& esp_mask
), T1
);
#ifdef TARGET_X86_64
/*
 * ENTER in 64-bit mode: like helper_enter_level but with 64-bit (or
 * 16-bit) stack slots and a flat stack (no SS base/mask).
 * NOTE(review): reconstructed from a mangled extraction; only a few
 * statements of this function were visible — verify against upstream.
 */
void helper_enter64_level(int level, int data64)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, T1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, T1);
    }
}
#endif
1827 void helper_lldt_T0(void)
1832 int index
, entry_limit
;
1835 selector
= T0
& 0xffff;
1836 if ((selector
& 0xfffc) == 0) {
1837 /* XXX: NULL selector case: invalid LDT */
1842 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1844 index
= selector
& ~7;
1845 #ifdef TARGET_X86_64
1846 if (env
->hflags
& HF_LMA_MASK
)
1851 if ((index
+ entry_limit
) > dt
->limit
)
1852 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1853 ptr
= dt
->base
+ index
;
1854 e1
= ldl_kernel(ptr
);
1855 e2
= ldl_kernel(ptr
+ 4);
1856 if ((e2
& DESC_S_MASK
) || ((e2
>> DESC_TYPE_SHIFT
) & 0xf) != 2)
1857 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1858 if (!(e2
& DESC_P_MASK
))
1859 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
1860 #ifdef TARGET_X86_64
1861 if (env
->hflags
& HF_LMA_MASK
) {
1863 e3
= ldl_kernel(ptr
+ 8);
1864 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1865 env
->ldt
.base
|= (target_ulong
)e3
<< 32;
1869 load_seg_cache_raw_dt(&env
->ldt
, e1
, e2
);
1872 env
->ldt
.selector
= selector
;
1875 void helper_ltr_T0(void)
1880 int index
, type
, entry_limit
;
1883 selector
= T0
& 0xffff;
1884 if ((selector
& 0xfffc) == 0) {
1885 /* NULL selector case: invalid TR */
1891 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1893 index
= selector
& ~7;
1894 #ifdef TARGET_X86_64
1895 if (env
->hflags
& HF_LMA_MASK
)
1900 if ((index
+ entry_limit
) > dt
->limit
)
1901 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1902 ptr
= dt
->base
+ index
;
1903 e1
= ldl_kernel(ptr
);
1904 e2
= ldl_kernel(ptr
+ 4);
1905 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1906 if ((e2
& DESC_S_MASK
) ||
1907 (type
!= 1 && type
!= 9))
1908 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1909 if (!(e2
& DESC_P_MASK
))
1910 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
1911 #ifdef TARGET_X86_64
1912 if (env
->hflags
& HF_LMA_MASK
) {
1914 e3
= ldl_kernel(ptr
+ 8);
1915 e4
= ldl_kernel(ptr
+ 12);
1916 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf)
1917 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1918 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1919 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1923 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1925 e2
|= DESC_TSS_BUSY_MASK
;
1926 stl_kernel(ptr
+ 4, e2
);
1928 env
->tr
.selector
= selector
;
1931 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1932 void load_seg(int seg_reg
, int selector
)
1941 cpl
= env
->hflags
& HF_CPL_MASK
;
1942 if ((selector
& 0xfffc) == 0) {
1943 /* null selector case */
1945 #ifdef TARGET_X86_64
1946 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1949 raise_exception_err(EXCP0D_GPF
, 0);
1950 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1957 index
= selector
& ~7;
1958 if ((index
+ 7) > dt
->limit
)
1959 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1960 ptr
= dt
->base
+ index
;
1961 e1
= ldl_kernel(ptr
);
1962 e2
= ldl_kernel(ptr
+ 4);
1964 if (!(e2
& DESC_S_MASK
))
1965 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1967 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1968 if (seg_reg
== R_SS
) {
1969 /* must be writable segment */
1970 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
))
1971 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1972 if (rpl
!= cpl
|| dpl
!= cpl
)
1973 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1975 /* must be readable segment */
1976 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
)
1977 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1979 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1980 /* if not conforming code, test rights */
1981 if (dpl
< cpl
|| dpl
< rpl
)
1982 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
1986 if (!(e2
& DESC_P_MASK
)) {
1987 if (seg_reg
== R_SS
)
1988 raise_exception_err(EXCP0C_STACK
, selector
& 0xfffc);
1990 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
1993 /* set the access bit if not already set */
1994 if (!(e2
& DESC_A_MASK
)) {
1996 stl_kernel(ptr
+ 4, e2
);
1999 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
2000 get_seg_base(e1
, e2
),
2001 get_seg_limit(e1
, e2
),
2004 fprintf(logfile
, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2005 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
2010 /* protected mode jump */
2011 void helper_ljmp_protected_T0_T1(int next_eip_addend
)
2013 int new_cs
, gate_cs
, type
;
2014 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
2015 target_ulong new_eip
, next_eip
;
2019 if ((new_cs
& 0xfffc) == 0)
2020 raise_exception_err(EXCP0D_GPF
, 0);
2021 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2022 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2023 cpl
= env
->hflags
& HF_CPL_MASK
;
2024 if (e2
& DESC_S_MASK
) {
2025 if (!(e2
& DESC_CS_MASK
))
2026 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2027 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2028 if (e2
& DESC_C_MASK
) {
2029 /* conforming code segment */
2031 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2033 /* non conforming code segment */
2036 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2038 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2040 if (!(e2
& DESC_P_MASK
))
2041 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2042 limit
= get_seg_limit(e1
, e2
);
2043 if (new_eip
> limit
&&
2044 !(env
->hflags
& HF_LMA_MASK
) && !(e2
& DESC_L_MASK
))
2045 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2046 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2047 get_seg_base(e1
, e2
), limit
, e2
);
2050 /* jump to call or task gate */
2051 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2053 cpl
= env
->hflags
& HF_CPL_MASK
;
2054 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2056 case 1: /* 286 TSS */
2057 case 9: /* 386 TSS */
2058 case 5: /* task gate */
2059 if (dpl
< cpl
|| dpl
< rpl
)
2060 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2061 next_eip
= env
->eip
+ next_eip_addend
;
2062 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
);
2063 CC_OP
= CC_OP_EFLAGS
;
2065 case 4: /* 286 call gate */
2066 case 12: /* 386 call gate */
2067 if ((dpl
< cpl
) || (dpl
< rpl
))
2068 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2069 if (!(e2
& DESC_P_MASK
))
2070 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2072 new_eip
= (e1
& 0xffff);
2074 new_eip
|= (e2
& 0xffff0000);
2075 if (load_segment(&e1
, &e2
, gate_cs
) != 0)
2076 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2077 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2078 /* must be code segment */
2079 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
2080 (DESC_S_MASK
| DESC_CS_MASK
)))
2081 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2082 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
2083 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
)))
2084 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2085 if (!(e2
& DESC_P_MASK
))
2086 raise_exception_err(EXCP0D_GPF
, gate_cs
& 0xfffc);
2087 limit
= get_seg_limit(e1
, e2
);
2088 if (new_eip
> limit
)
2089 raise_exception_err(EXCP0D_GPF
, 0);
2090 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
2091 get_seg_base(e1
, e2
), limit
, e2
);
2095 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2101 /* real mode call */
2102 void helper_lcall_real_T0_T1(int shift
, int next_eip
)
2104 int new_cs
, new_eip
;
2105 uint32_t esp
, esp_mask
;
2111 esp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2112 ssp
= env
->segs
[R_SS
].base
;
2114 PUSHL(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2115 PUSHL(ssp
, esp
, esp_mask
, next_eip
);
2117 PUSHW(ssp
, esp
, esp_mask
, env
->segs
[R_CS
].selector
);
2118 PUSHW(ssp
, esp
, esp_mask
, next_eip
);
2121 SET_ESP(esp
, esp_mask
);
2123 env
->segs
[R_CS
].selector
= new_cs
;
2124 env
->segs
[R_CS
].base
= (new_cs
<< 4);
2127 /* protected mode call */
2128 void helper_lcall_protected_T0_T1(int shift
, int next_eip_addend
)
2130 int new_cs
, new_stack
, i
;
2131 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, offset
, param_count
;
2132 uint32_t ss
, ss_e1
, ss_e2
, sp
, type
, ss_dpl
, sp_mask
;
2133 uint32_t val
, limit
, old_sp_mask
;
2134 target_ulong ssp
, old_ssp
, next_eip
, new_eip
;
2138 next_eip
= env
->eip
+ next_eip_addend
;
2140 if (loglevel
& CPU_LOG_PCALL
) {
2141 fprintf(logfile
, "lcall %04x:%08x s=%d\n",
2142 new_cs
, (uint32_t)new_eip
, shift
);
2143 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2146 if ((new_cs
& 0xfffc) == 0)
2147 raise_exception_err(EXCP0D_GPF
, 0);
2148 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2149 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2150 cpl
= env
->hflags
& HF_CPL_MASK
;
2152 if (loglevel
& CPU_LOG_PCALL
) {
2153 fprintf(logfile
, "desc=%08x:%08x\n", e1
, e2
);
2156 if (e2
& DESC_S_MASK
) {
2157 if (!(e2
& DESC_CS_MASK
))
2158 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2159 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2160 if (e2
& DESC_C_MASK
) {
2161 /* conforming code segment */
2163 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2165 /* non conforming code segment */
2168 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2170 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2172 if (!(e2
& DESC_P_MASK
))
2173 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2175 #ifdef TARGET_X86_64
2176 /* XXX: check 16/32 bit cases in long mode */
2181 PUSHQ(rsp
, env
->segs
[R_CS
].selector
);
2182 PUSHQ(rsp
, next_eip
);
2183 /* from this point, not restartable */
2185 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2186 get_seg_base(e1
, e2
),
2187 get_seg_limit(e1
, e2
), e2
);
2193 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2194 ssp
= env
->segs
[R_SS
].base
;
2196 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2197 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2199 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2200 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2203 limit
= get_seg_limit(e1
, e2
);
2204 if (new_eip
> limit
)
2205 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2206 /* from this point, not restartable */
2207 SET_ESP(sp
, sp_mask
);
2208 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
2209 get_seg_base(e1
, e2
), limit
, e2
);
2213 /* check gate type */
2214 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
2215 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2218 case 1: /* available 286 TSS */
2219 case 9: /* available 386 TSS */
2220 case 5: /* task gate */
2221 if (dpl
< cpl
|| dpl
< rpl
)
2222 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2223 switch_tss(new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
);
2224 CC_OP
= CC_OP_EFLAGS
;
2226 case 4: /* 286 call gate */
2227 case 12: /* 386 call gate */
2230 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2235 if (dpl
< cpl
|| dpl
< rpl
)
2236 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2237 /* check valid bit */
2238 if (!(e2
& DESC_P_MASK
))
2239 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2240 selector
= e1
>> 16;
2241 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
2242 param_count
= e2
& 0x1f;
2243 if ((selector
& 0xfffc) == 0)
2244 raise_exception_err(EXCP0D_GPF
, 0);
2246 if (load_segment(&e1
, &e2
, selector
) != 0)
2247 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2248 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
)))
2249 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2250 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2252 raise_exception_err(EXCP0D_GPF
, selector
& 0xfffc);
2253 if (!(e2
& DESC_P_MASK
))
2254 raise_exception_err(EXCP0B_NOSEG
, selector
& 0xfffc);
2256 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
2257 /* to inner privilege */
2258 get_ss_esp_from_tss(&ss
, &sp
, dpl
);
2260 if (loglevel
& CPU_LOG_PCALL
)
2261 fprintf(logfile
, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
"\n",
2262 ss
, sp
, param_count
, ESP
);
2264 if ((ss
& 0xfffc) == 0)
2265 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2266 if ((ss
& 3) != dpl
)
2267 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2268 if (load_segment(&ss_e1
, &ss_e2
, ss
) != 0)
2269 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2270 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2272 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2273 if (!(ss_e2
& DESC_S_MASK
) ||
2274 (ss_e2
& DESC_CS_MASK
) ||
2275 !(ss_e2
& DESC_W_MASK
))
2276 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2277 if (!(ss_e2
& DESC_P_MASK
))
2278 raise_exception_err(EXCP0A_TSS
, ss
& 0xfffc);
2280 // push_size = ((param_count * 2) + 8) << shift;
2282 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2283 old_ssp
= env
->segs
[R_SS
].base
;
2285 sp_mask
= get_sp_mask(ss_e2
);
2286 ssp
= get_seg_base(ss_e1
, ss_e2
);
2288 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2289 PUSHL(ssp
, sp
, sp_mask
, ESP
);
2290 for(i
= param_count
- 1; i
>= 0; i
--) {
2291 val
= ldl_kernel(old_ssp
+ ((ESP
+ i
* 4) & old_sp_mask
));
2292 PUSHL(ssp
, sp
, sp_mask
, val
);
2295 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_SS
].selector
);
2296 PUSHW(ssp
, sp
, sp_mask
, ESP
);
2297 for(i
= param_count
- 1; i
>= 0; i
--) {
2298 val
= lduw_kernel(old_ssp
+ ((ESP
+ i
* 2) & old_sp_mask
));
2299 PUSHW(ssp
, sp
, sp_mask
, val
);
2304 /* to same privilege */
2306 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2307 ssp
= env
->segs
[R_SS
].base
;
2308 // push_size = (4 << shift);
2313 PUSHL(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2314 PUSHL(ssp
, sp
, sp_mask
, next_eip
);
2316 PUSHW(ssp
, sp
, sp_mask
, env
->segs
[R_CS
].selector
);
2317 PUSHW(ssp
, sp
, sp_mask
, next_eip
);
2320 /* from this point, not restartable */
2323 ss
= (ss
& ~3) | dpl
;
2324 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
2326 get_seg_limit(ss_e1
, ss_e2
),
2330 selector
= (selector
& ~3) | dpl
;
2331 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
2332 get_seg_base(e1
, e2
),
2333 get_seg_limit(e1
, e2
),
2335 cpu_x86_set_cpl(env
, dpl
);
2336 SET_ESP(sp
, sp_mask
);
2340 if (kqemu_is_ok(env
)) {
2341 env
->exception_index
= -1;
2347 /* real and vm86 mode iret */
2348 void helper_iret_real(int shift
)
2350 uint32_t sp
, new_cs
, new_eip
, new_eflags
, sp_mask
;
2354 sp_mask
= 0xffff; /* XXXX: use SS segment size ? */
2356 ssp
= env
->segs
[R_SS
].base
;
2359 POPL(ssp
, sp
, sp_mask
, new_eip
);
2360 POPL(ssp
, sp
, sp_mask
, new_cs
);
2362 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2365 POPW(ssp
, sp
, sp_mask
, new_eip
);
2366 POPW(ssp
, sp
, sp_mask
, new_cs
);
2367 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2369 ESP
= (ESP
& ~sp_mask
) | (sp
& sp_mask
);
2370 load_seg_vm(R_CS
, new_cs
);
2372 if (env
->eflags
& VM_MASK
)
2373 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
| NT_MASK
;
2375 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
| RF_MASK
| NT_MASK
;
2377 eflags_mask
&= 0xffff;
2378 load_eflags(new_eflags
, eflags_mask
);
2381 static inline void validate_seg(int seg_reg
, int cpl
)
2386 /* XXX: on x86_64, we do not want to nullify FS and GS because
2387 they may still contain a valid base. I would be interested to
2388 know how a real x86_64 CPU behaves */
2389 if ((seg_reg
== R_FS
|| seg_reg
== R_GS
) &&
2390 (env
->segs
[seg_reg
].selector
& 0xfffc) == 0)
2393 e2
= env
->segs
[seg_reg
].flags
;
2394 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2395 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
2396 /* data or non conforming code segment */
2398 cpu_x86_load_seg_cache(env
, seg_reg
, 0, 0, 0, 0);
2403 /* protected mode iret */
2404 static inline void helper_ret_protected(int shift
, int is_iret
, int addend
)
2406 uint32_t new_cs
, new_eflags
, new_ss
;
2407 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
2408 uint32_t e1
, e2
, ss_e1
, ss_e2
;
2409 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
2410 target_ulong ssp
, sp
, new_eip
, new_esp
, sp_mask
;
2412 #ifdef TARGET_X86_64
2417 sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2419 ssp
= env
->segs
[R_SS
].base
;
2420 new_eflags
= 0; /* avoid warning */
2421 #ifdef TARGET_X86_64
2427 POPQ(sp
, new_eflags
);
2433 POPL(ssp
, sp
, sp_mask
, new_eip
);
2434 POPL(ssp
, sp
, sp_mask
, new_cs
);
2437 POPL(ssp
, sp
, sp_mask
, new_eflags
);
2438 if (new_eflags
& VM_MASK
)
2439 goto return_to_vm86
;
2443 POPW(ssp
, sp
, sp_mask
, new_eip
);
2444 POPW(ssp
, sp
, sp_mask
, new_cs
);
2446 POPW(ssp
, sp
, sp_mask
, new_eflags
);
2449 if (loglevel
& CPU_LOG_PCALL
) {
2450 fprintf(logfile
, "lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2451 new_cs
, new_eip
, shift
, addend
);
2452 cpu_dump_state(env
, logfile
, fprintf
, X86_DUMP_CCOP
);
2455 if ((new_cs
& 0xfffc) == 0)
2456 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2457 if (load_segment(&e1
, &e2
, new_cs
) != 0)
2458 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2459 if (!(e2
& DESC_S_MASK
) ||
2460 !(e2
& DESC_CS_MASK
))
2461 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2462 cpl
= env
->hflags
& HF_CPL_MASK
;
2465 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2466 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2467 if (e2
& DESC_C_MASK
) {
2469 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2472 raise_exception_err(EXCP0D_GPF
, new_cs
& 0xfffc);
2474 if (!(e2
& DESC_P_MASK
))
2475 raise_exception_err(EXCP0B_NOSEG
, new_cs
& 0xfffc);
2478 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2479 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2480 /* return to same priledge level */
2481 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2482 get_seg_base(e1
, e2
),
2483 get_seg_limit(e1
, e2
),
2486 /* return to different privilege level */
2487 #ifdef TARGET_X86_64
2496 POPL(ssp
, sp
, sp_mask
, new_esp
);
2497 POPL(ssp
, sp
, sp_mask
, new_ss
);
2501 POPW(ssp
, sp
, sp_mask
, new_esp
);
2502 POPW(ssp
, sp
, sp_mask
, new_ss
);
2505 if (loglevel
& CPU_LOG_PCALL
) {
2506 fprintf(logfile
, "new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2510 if ((new_ss
& 0xfffc) == 0) {
2511 #ifdef TARGET_X86_64
2512 /* NULL ss is allowed in long mode if cpl != 3*/
2513 /* XXX: test CS64 ? */
2514 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2515 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2517 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2518 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2519 DESC_W_MASK
| DESC_A_MASK
);
2520 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed ? */
2524 raise_exception_err(EXCP0D_GPF
, 0);
2527 if ((new_ss
& 3) != rpl
)
2528 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2529 if (load_segment(&ss_e1
, &ss_e2
, new_ss
) != 0)
2530 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2531 if (!(ss_e2
& DESC_S_MASK
) ||
2532 (ss_e2
& DESC_CS_MASK
) ||
2533 !(ss_e2
& DESC_W_MASK
))
2534 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2535 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2537 raise_exception_err(EXCP0D_GPF
, new_ss
& 0xfffc);
2538 if (!(ss_e2
& DESC_P_MASK
))
2539 raise_exception_err(EXCP0B_NOSEG
, new_ss
& 0xfffc);
2540 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2541 get_seg_base(ss_e1
, ss_e2
),
2542 get_seg_limit(ss_e1
, ss_e2
),
2546 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2547 get_seg_base(e1
, e2
),
2548 get_seg_limit(e1
, e2
),
2550 cpu_x86_set_cpl(env
, rpl
);
2552 #ifdef TARGET_X86_64
2553 if (env
->hflags
& HF_CS64_MASK
)
2557 sp_mask
= get_sp_mask(ss_e2
);
2559 /* validate data segments */
2560 validate_seg(R_ES
, rpl
);
2561 validate_seg(R_DS
, rpl
);
2562 validate_seg(R_FS
, rpl
);
2563 validate_seg(R_GS
, rpl
);
2567 SET_ESP(sp
, sp_mask
);
2570 /* NOTE: 'cpl' is the _old_ CPL */
2571 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2573 eflags_mask
|= IOPL_MASK
;
2574 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2576 eflags_mask
|= IF_MASK
;
2578 eflags_mask
&= 0xffff;
2579 load_eflags(new_eflags
, eflags_mask
);
2584 POPL(ssp
, sp
, sp_mask
, new_esp
);
2585 POPL(ssp
, sp
, sp_mask
, new_ss
);
2586 POPL(ssp
, sp
, sp_mask
, new_es
);
2587 POPL(ssp
, sp
, sp_mask
, new_ds
);
2588 POPL(ssp
, sp
, sp_mask
, new_fs
);
2589 POPL(ssp
, sp
, sp_mask
, new_gs
);
2591 /* modify processor state */
2592 load_eflags(new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2593 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
| VIP_MASK
);
2594 load_seg_vm(R_CS
, new_cs
& 0xffff);
2595 cpu_x86_set_cpl(env
, 3);
2596 load_seg_vm(R_SS
, new_ss
& 0xffff);
2597 load_seg_vm(R_ES
, new_es
& 0xffff);
2598 load_seg_vm(R_DS
, new_ds
& 0xffff);
2599 load_seg_vm(R_FS
, new_fs
& 0xffff);
2600 load_seg_vm(R_GS
, new_gs
& 0xffff);
2602 env
->eip
= new_eip
& 0xffff;
2606 void helper_iret_protected(int shift
, int next_eip
)
2608 int tss_selector
, type
;
2611 /* specific case for TSS */
2612 if (env
->eflags
& NT_MASK
) {
2613 #ifdef TARGET_X86_64
2614 if (env
->hflags
& HF_LMA_MASK
)
2615 raise_exception_err(EXCP0D_GPF
, 0);
2617 tss_selector
= lduw_kernel(env
->tr
.base
+ 0);
2618 if (tss_selector
& 4)
2619 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2620 if (load_segment(&e1
, &e2
, tss_selector
) != 0)
2621 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2622 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2623 /* NOTE: we check both segment and busy TSS */
2625 raise_exception_err(EXCP0A_TSS
, tss_selector
& 0xfffc);
2626 switch_tss(tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
);
2628 helper_ret_protected(shift
, 1, 0);
2631 if (kqemu_is_ok(env
)) {
2632 CC_OP
= CC_OP_EFLAGS
;
2633 env
->exception_index
= -1;
2639 void helper_lret_protected(int shift
, int addend
)
2641 helper_ret_protected(shift
, 0, addend
);
2643 if (kqemu_is_ok(env
)) {
2644 env
->exception_index
= -1;
2650 void helper_sysenter(void)
2652 if (env
->sysenter_cs
== 0) {
2653 raise_exception_err(EXCP0D_GPF
, 0);
2655 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2656 cpu_x86_set_cpl(env
, 0);
2657 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2659 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2661 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2662 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2664 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2666 DESC_W_MASK
| DESC_A_MASK
);
2667 ESP
= env
->sysenter_esp
;
2668 EIP
= env
->sysenter_eip
;
2671 void helper_sysexit(void)
2675 cpl
= env
->hflags
& HF_CPL_MASK
;
2676 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2677 raise_exception_err(EXCP0D_GPF
, 0);
2679 cpu_x86_set_cpl(env
, 3);
2680 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) | 3,
2682 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2683 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2684 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2685 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) | 3,
2687 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2688 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2689 DESC_W_MASK
| DESC_A_MASK
);
2693 if (kqemu_is_ok(env
)) {
2694 env
->exception_index
= -1;
2700 void helper_movl_crN_T0(int reg
)
2702 #if !defined(CONFIG_USER_ONLY)
2705 cpu_x86_update_cr0(env
, T0
);
2708 cpu_x86_update_cr3(env
, T0
);
2711 cpu_x86_update_cr4(env
, T0
);
2714 cpu_set_apic_tpr(env
, T0
);
2724 void helper_movl_drN_T0(int reg
)
2729 void helper_invlpg(target_ulong addr
)
2731 cpu_x86_flush_tlb(env
, addr
);
2734 void helper_rdtsc(void)
2738 if ((env
->cr
[4] & CR4_TSD_MASK
) && ((env
->hflags
& HF_CPL_MASK
) != 0)) {
2739 raise_exception(EXCP0D_GPF
);
2741 val
= cpu_get_tsc(env
);
2742 EAX
= (uint32_t)(val
);
2743 EDX
= (uint32_t)(val
>> 32);
2746 #if defined(CONFIG_USER_ONLY)
2747 void helper_wrmsr(void)
2751 void helper_rdmsr(void)
2755 void helper_wrmsr(void)
2759 val
= ((uint32_t)EAX
) | ((uint64_t)((uint32_t)EDX
) << 32);
2761 switch((uint32_t)ECX
) {
2762 case MSR_IA32_SYSENTER_CS
:
2763 env
->sysenter_cs
= val
& 0xffff;
2765 case MSR_IA32_SYSENTER_ESP
:
2766 env
->sysenter_esp
= val
;
2768 case MSR_IA32_SYSENTER_EIP
:
2769 env
->sysenter_eip
= val
;
2771 case MSR_IA32_APICBASE
:
2772 cpu_set_apic_base(env
, val
);
2776 uint64_t update_mask
;
2778 if (env
->cpuid_ext2_features
& CPUID_EXT2_SYSCALL
)
2779 update_mask
|= MSR_EFER_SCE
;
2780 if (env
->cpuid_ext2_features
& CPUID_EXT2_LM
)
2781 update_mask
|= MSR_EFER_LME
;
2782 if (env
->cpuid_ext2_features
& CPUID_EXT2_FFXSR
)
2783 update_mask
|= MSR_EFER_FFXSR
;
2784 if (env
->cpuid_ext2_features
& CPUID_EXT2_NX
)
2785 update_mask
|= MSR_EFER_NXE
;
2786 env
->efer
= (env
->efer
& ~update_mask
) |
2787 (val
& update_mask
);
2796 case MSR_VM_HSAVE_PA
:
2797 env
->vm_hsave
= val
;
2799 #ifdef TARGET_X86_64
2810 env
->segs
[R_FS
].base
= val
;
2813 env
->segs
[R_GS
].base
= val
;
2815 case MSR_KERNELGSBASE
:
2816 env
->kernelgsbase
= val
;
2820 /* XXX: exception ? */
2825 void helper_rdmsr(void)
2828 switch((uint32_t)ECX
) {
2829 case MSR_IA32_SYSENTER_CS
:
2830 val
= env
->sysenter_cs
;
2832 case MSR_IA32_SYSENTER_ESP
:
2833 val
= env
->sysenter_esp
;
2835 case MSR_IA32_SYSENTER_EIP
:
2836 val
= env
->sysenter_eip
;
2838 case MSR_IA32_APICBASE
:
2839 val
= cpu_get_apic_base(env
);
2850 case MSR_VM_HSAVE_PA
:
2851 val
= env
->vm_hsave
;
2853 #ifdef TARGET_X86_64
2864 val
= env
->segs
[R_FS
].base
;
2867 val
= env
->segs
[R_GS
].base
;
2869 case MSR_KERNELGSBASE
:
2870 val
= env
->kernelgsbase
;
2874 /* XXX: exception ? */
2878 EAX
= (uint32_t)(val
);
2879 EDX
= (uint32_t)(val
>> 32);
2883 void helper_lsl(void)
2885 unsigned int selector
, limit
;
2886 uint32_t e1
, e2
, eflags
;
2887 int rpl
, dpl
, cpl
, type
;
2889 eflags
= cc_table
[CC_OP
].compute_all();
2890 selector
= T0
& 0xffff;
2891 if (load_segment(&e1
, &e2
, selector
) != 0)
2894 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2895 cpl
= env
->hflags
& HF_CPL_MASK
;
2896 if (e2
& DESC_S_MASK
) {
2897 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2900 if (dpl
< cpl
|| dpl
< rpl
)
2904 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2915 if (dpl
< cpl
|| dpl
< rpl
) {
2917 CC_SRC
= eflags
& ~CC_Z
;
2921 limit
= get_seg_limit(e1
, e2
);
2923 CC_SRC
= eflags
| CC_Z
;
2926 void helper_lar(void)
2928 unsigned int selector
;
2929 uint32_t e1
, e2
, eflags
;
2930 int rpl
, dpl
, cpl
, type
;
2932 eflags
= cc_table
[CC_OP
].compute_all();
2933 selector
= T0
& 0xffff;
2934 if ((selector
& 0xfffc) == 0)
2936 if (load_segment(&e1
, &e2
, selector
) != 0)
2939 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2940 cpl
= env
->hflags
& HF_CPL_MASK
;
2941 if (e2
& DESC_S_MASK
) {
2942 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2945 if (dpl
< cpl
|| dpl
< rpl
)
2949 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2963 if (dpl
< cpl
|| dpl
< rpl
) {
2965 CC_SRC
= eflags
& ~CC_Z
;
2969 T1
= e2
& 0x00f0ff00;
2970 CC_SRC
= eflags
| CC_Z
;
2973 void helper_verr(void)
2975 unsigned int selector
;
2976 uint32_t e1
, e2
, eflags
;
2979 eflags
= cc_table
[CC_OP
].compute_all();
2980 selector
= T0
& 0xffff;
2981 if ((selector
& 0xfffc) == 0)
2983 if (load_segment(&e1
, &e2
, selector
) != 0)
2985 if (!(e2
& DESC_S_MASK
))
2988 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2989 cpl
= env
->hflags
& HF_CPL_MASK
;
2990 if (e2
& DESC_CS_MASK
) {
2991 if (!(e2
& DESC_R_MASK
))
2993 if (!(e2
& DESC_C_MASK
)) {
2994 if (dpl
< cpl
|| dpl
< rpl
)
2998 if (dpl
< cpl
|| dpl
< rpl
) {
3000 CC_SRC
= eflags
& ~CC_Z
;
3004 CC_SRC
= eflags
| CC_Z
;
3007 void helper_verw(void)
3009 unsigned int selector
;
3010 uint32_t e1
, e2
, eflags
;
3013 eflags
= cc_table
[CC_OP
].compute_all();
3014 selector
= T0
& 0xffff;
3015 if ((selector
& 0xfffc) == 0)
3017 if (load_segment(&e1
, &e2
, selector
) != 0)
3019 if (!(e2
& DESC_S_MASK
))
3022 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
3023 cpl
= env
->hflags
& HF_CPL_MASK
;
3024 if (e2
& DESC_CS_MASK
) {
3027 if (dpl
< cpl
|| dpl
< rpl
)
3029 if (!(e2
& DESC_W_MASK
)) {
3031 CC_SRC
= eflags
& ~CC_Z
;
3035 CC_SRC
= eflags
| CC_Z
;
3040 void helper_fldt_ST0_A0(void)
3043 new_fpstt
= (env
->fpstt
- 1) & 7;
3044 env
->fpregs
[new_fpstt
].d
= helper_fldt(A0
);
3045 env
->fpstt
= new_fpstt
;
3046 env
->fptags
[new_fpstt
] = 0; /* validate stack entry */
3049 void helper_fstt_ST0_A0(void)
3051 helper_fstt(ST0
, A0
);
3054 void fpu_set_exception(int mask
)
3057 if (env
->fpus
& (~env
->fpuc
& FPUC_EM
))
3058 env
->fpus
|= FPUS_SE
| FPUS_B
;
3061 CPU86_LDouble
helper_fdiv(CPU86_LDouble a
, CPU86_LDouble b
)
3064 fpu_set_exception(FPUS_ZE
);
3068 void fpu_raise_exception(void)
3070 if (env
->cr
[0] & CR0_NE_MASK
) {
3071 raise_exception(EXCP10_COPR
);
3073 #if !defined(CONFIG_USER_ONLY)
3082 void helper_fbld_ST0_A0(void)
3090 for(i
= 8; i
>= 0; i
--) {
3092 val
= (val
* 100) + ((v
>> 4) * 10) + (v
& 0xf);
3095 if (ldub(A0
+ 9) & 0x80)
3101 void helper_fbst_ST0_A0(void)
3104 target_ulong mem_ref
, mem_end
;
3107 val
= floatx_to_int64(ST0
, &env
->fp_status
);
3109 mem_end
= mem_ref
+ 9;
3116 while (mem_ref
< mem_end
) {
3121 v
= ((v
/ 10) << 4) | (v
% 10);
3124 while (mem_ref
< mem_end
) {
3129 void helper_f2xm1(void)
3131 ST0
= pow(2.0,ST0
) - 1.0;
3134 void helper_fyl2x(void)
3136 CPU86_LDouble fptemp
;
3140 fptemp
= log(fptemp
)/log(2.0); /* log2(ST) */
3144 env
->fpus
&= (~0x4700);
3149 void helper_fptan(void)
3151 CPU86_LDouble fptemp
;
3154 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3160 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3161 /* the above code is for |arg| < 2**52 only */
3165 void helper_fpatan(void)
3167 CPU86_LDouble fptemp
, fpsrcop
;
3171 ST1
= atan2(fpsrcop
,fptemp
);
3175 void helper_fxtract(void)
3177 CPU86_LDoubleU temp
;
3178 unsigned int expdif
;
3181 expdif
= EXPD(temp
) - EXPBIAS
;
3182 /*DP exponent bias*/
3189 void helper_fprem1(void)
3191 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3192 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3194 signed long long int q
;
3196 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3197 ST0
= 0.0 / 0.0; /* NaN */
3198 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3204 fpsrcop1
.d
= fpsrcop
;
3206 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3209 /* optimisation? taken from the AMD docs */
3210 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3211 /* ST0 is unchanged */
3216 dblq
= fpsrcop
/ fptemp
;
3217 /* round dblq towards nearest integer */
3219 ST0
= fpsrcop
- fptemp
* dblq
;
3221 /* convert dblq to q by truncating towards zero */
3223 q
= (signed long long int)(-dblq
);
3225 q
= (signed long long int)dblq
;
3227 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3228 /* (C0,C3,C1) <-- (q2,q1,q0) */
3229 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3230 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3231 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3233 env
->fpus
|= 0x400; /* C2 <-- 1 */
3234 fptemp
= pow(2.0, expdif
- 50);
3235 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3236 /* fpsrcop = integer obtained by chopping */
3237 fpsrcop
= (fpsrcop
< 0.0) ?
3238 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3239 ST0
-= (ST1
* fpsrcop
* fptemp
);
3243 void helper_fprem(void)
3245 CPU86_LDouble dblq
, fpsrcop
, fptemp
;
3246 CPU86_LDoubleU fpsrcop1
, fptemp1
;
3248 signed long long int q
;
3250 if (isinf(ST0
) || isnan(ST0
) || isnan(ST1
) || (ST1
== 0.0)) {
3251 ST0
= 0.0 / 0.0; /* NaN */
3252 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3256 fpsrcop
= (CPU86_LDouble
)ST0
;
3257 fptemp
= (CPU86_LDouble
)ST1
;
3258 fpsrcop1
.d
= fpsrcop
;
3260 expdif
= EXPD(fpsrcop1
) - EXPD(fptemp1
);
3263 /* optimisation? taken from the AMD docs */
3264 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3265 /* ST0 is unchanged */
3269 if ( expdif
< 53 ) {
3270 dblq
= fpsrcop
/*ST0*/ / fptemp
/*ST1*/;
3271 /* round dblq towards zero */
3272 dblq
= (dblq
< 0.0) ? ceil(dblq
) : floor(dblq
);
3273 ST0
= fpsrcop
/*ST0*/ - fptemp
* dblq
;
3275 /* convert dblq to q by truncating towards zero */
3277 q
= (signed long long int)(-dblq
);
3279 q
= (signed long long int)dblq
;
3281 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3282 /* (C0,C3,C1) <-- (q2,q1,q0) */
3283 env
->fpus
|= (q
& 0x4) << (8 - 2); /* (C0) <-- q2 */
3284 env
->fpus
|= (q
& 0x2) << (14 - 1); /* (C3) <-- q1 */
3285 env
->fpus
|= (q
& 0x1) << (9 - 0); /* (C1) <-- q0 */
3287 int N
= 32 + (expdif
% 32); /* as per AMD docs */
3288 env
->fpus
|= 0x400; /* C2 <-- 1 */
3289 fptemp
= pow(2.0, (double)(expdif
- N
));
3290 fpsrcop
= (ST0
/ ST1
) / fptemp
;
3291 /* fpsrcop = integer obtained by chopping */
3292 fpsrcop
= (fpsrcop
< 0.0) ?
3293 -(floor(fabs(fpsrcop
))) : floor(fpsrcop
);
3294 ST0
-= (ST1
* fpsrcop
* fptemp
);
3298 void helper_fyl2xp1(void)
3300 CPU86_LDouble fptemp
;
3303 if ((fptemp
+1.0)>0.0) {
3304 fptemp
= log(fptemp
+1.0) / log(2.0); /* log2(ST+1.0) */
3308 env
->fpus
&= (~0x4700);
3313 void helper_fsqrt(void)
3315 CPU86_LDouble fptemp
;
3319 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3325 void helper_fsincos(void)
3327 CPU86_LDouble fptemp
;
3330 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3336 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3337 /* the above code is for |arg| < 2**63 only */
3341 void helper_frndint(void)
3343 ST0
= floatx_round_to_int(ST0
, &env
->fp_status
);
3346 void helper_fscale(void)
3348 ST0
= ldexp (ST0
, (int)(ST1
));
3351 void helper_fsin(void)
3353 CPU86_LDouble fptemp
;
3356 if ((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3360 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3361 /* the above code is for |arg| < 2**53 only */
3365 void helper_fcos(void)
3367 CPU86_LDouble fptemp
;
3370 if((fptemp
> MAXTAN
)||(fptemp
< -MAXTAN
)) {
3374 env
->fpus
&= (~0x400); /* C2 <-- 0 */
3375 /* the above code is for |arg5 < 2**63 only */
3379 void helper_fxam_ST0(void)
3381 CPU86_LDoubleU temp
;
3386 env
->fpus
&= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3388 env
->fpus
|= 0x200; /* C1 <-- 1 */
3390 /* XXX: test fptags too */
3391 expdif
= EXPD(temp
);
3392 if (expdif
== MAXEXPD
) {
3393 #ifdef USE_X86LDOUBLE
3394 if (MANTD(temp
) == 0x8000000000000000ULL
)
3396 if (MANTD(temp
) == 0)
3398 env
->fpus
|= 0x500 /*Infinity*/;
3400 env
->fpus
|= 0x100 /*NaN*/;
3401 } else if (expdif
== 0) {
3402 if (MANTD(temp
) == 0)
3403 env
->fpus
|= 0x4000 /*Zero*/;
3405 env
->fpus
|= 0x4400 /*Denormal*/;
3411 void helper_fstenv(target_ulong ptr
, int data32
)
3413 int fpus
, fptag
, exp
, i
;
3417 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3419 for (i
=7; i
>=0; i
--) {
3421 if (env
->fptags
[i
]) {
3424 tmp
.d
= env
->fpregs
[i
].d
;
3427 if (exp
== 0 && mant
== 0) {
3430 } else if (exp
== 0 || exp
== MAXEXPD
3431 #ifdef USE_X86LDOUBLE
3432 || (mant
& (1LL << 63)) == 0
3435 /* NaNs, infinity, denormal */
3442 stl(ptr
, env
->fpuc
);
3444 stl(ptr
+ 8, fptag
);
3445 stl(ptr
+ 12, 0); /* fpip */
3446 stl(ptr
+ 16, 0); /* fpcs */
3447 stl(ptr
+ 20, 0); /* fpoo */
3448 stl(ptr
+ 24, 0); /* fpos */
3451 stw(ptr
, env
->fpuc
);
3453 stw(ptr
+ 4, fptag
);
3461 void helper_fldenv(target_ulong ptr
, int data32
)
3466 env
->fpuc
= lduw(ptr
);
3467 fpus
= lduw(ptr
+ 4);
3468 fptag
= lduw(ptr
+ 8);
3471 env
->fpuc
= lduw(ptr
);
3472 fpus
= lduw(ptr
+ 2);
3473 fptag
= lduw(ptr
+ 4);
3475 env
->fpstt
= (fpus
>> 11) & 7;
3476 env
->fpus
= fpus
& ~0x3800;
3477 for(i
= 0;i
< 8; i
++) {
3478 env
->fptags
[i
] = ((fptag
& 3) == 3);
3483 void helper_fsave(target_ulong ptr
, int data32
)
3488 helper_fstenv(ptr
, data32
);
3490 ptr
+= (14 << data32
);
3491 for(i
= 0;i
< 8; i
++) {
3493 helper_fstt(tmp
, ptr
);
3511 void helper_frstor(target_ulong ptr
, int data32
)
3516 helper_fldenv(ptr
, data32
);
3517 ptr
+= (14 << data32
);
3519 for(i
= 0;i
< 8; i
++) {
3520 tmp
= helper_fldt(ptr
);
3526 void helper_fxsave(target_ulong ptr
, int data64
)
3528 int fpus
, fptag
, i
, nb_xmm_regs
;
3532 fpus
= (env
->fpus
& ~0x3800) | (env
->fpstt
& 0x7) << 11;
3534 for(i
= 0; i
< 8; i
++) {
3535 fptag
|= (env
->fptags
[i
] << i
);
3537 stw(ptr
, env
->fpuc
);
3539 stw(ptr
+ 4, fptag
^ 0xff);
3542 for(i
= 0;i
< 8; i
++) {
3544 helper_fstt(tmp
, addr
);
3548 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
3549 /* XXX: finish it */
3550 stl(ptr
+ 0x18, env
->mxcsr
); /* mxcsr */
3551 stl(ptr
+ 0x1c, 0x0000ffff); /* mxcsr_mask */
3552 nb_xmm_regs
= 8 << data64
;
3554 for(i
= 0; i
< nb_xmm_regs
; i
++) {
3555 stq(addr
, env
->xmm_regs
[i
].XMM_Q(0));
3556 stq(addr
+ 8, env
->xmm_regs
[i
].XMM_Q(1));
3562 void helper_fxrstor(target_ulong ptr
, int data64
)
3564 int i
, fpus
, fptag
, nb_xmm_regs
;
3568 env
->fpuc
= lduw(ptr
);
3569 fpus
= lduw(ptr
+ 2);
3570 fptag
= lduw(ptr
+ 4);
3571 env
->fpstt
= (fpus
>> 11) & 7;
3572 env
->fpus
= fpus
& ~0x3800;
3574 for(i
= 0;i
< 8; i
++) {
3575 env
->fptags
[i
] = ((fptag
>> i
) & 1);
3579 for(i
= 0;i
< 8; i
++) {
3580 tmp
= helper_fldt(addr
);
3585 if (env
->cr
[4] & CR4_OSFXSR_MASK
) {
3586 /* XXX: finish it */
3587 env
->mxcsr
= ldl(ptr
+ 0x18);
3589 nb_xmm_regs
= 8 << data64
;
3591 for(i
= 0; i
< nb_xmm_regs
; i
++) {
3592 env
->xmm_regs
[i
].XMM_Q(0) = ldq(addr
);
3593 env
->xmm_regs
[i
].XMM_Q(1) = ldq(addr
+ 8);
3599 #ifndef USE_X86LDOUBLE
3601 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
3603 CPU86_LDoubleU temp
;
3608 *pmant
= (MANTD(temp
) << 11) | (1LL << 63);
3609 /* exponent + sign */
3610 e
= EXPD(temp
) - EXPBIAS
+ 16383;
3611 e
|= SIGND(temp
) >> 16;
3615 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
3617 CPU86_LDoubleU temp
;
3621 /* XXX: handle overflow ? */
3622 e
= (upper
& 0x7fff) - 16383 + EXPBIAS
; /* exponent */
3623 e
|= (upper
>> 4) & 0x800; /* sign */
3624 ll
= (mant
>> 11) & ((1LL << 52) - 1);
3626 temp
.l
.upper
= (e
<< 20) | (ll
>> 32);
3629 temp
.ll
= ll
| ((uint64_t)e
<< 52);
3636 void cpu_get_fp80(uint64_t *pmant
, uint16_t *pexp
, CPU86_LDouble f
)
3638 CPU86_LDoubleU temp
;
3641 *pmant
= temp
.l
.lower
;
3642 *pexp
= temp
.l
.upper
;
3645 CPU86_LDouble
cpu_set_fp80(uint64_t mant
, uint16_t upper
)
3647 CPU86_LDoubleU temp
;
3649 temp
.l
.upper
= upper
;
3650 temp
.l
.lower
= mant
;
3655 #ifdef TARGET_X86_64
3657 //#define DEBUG_MULDIV
3659 static void add128(uint64_t *plow
, uint64_t *phigh
, uint64_t a
, uint64_t b
)
3668 static void neg128(uint64_t *plow
, uint64_t *phigh
)
3672 add128(plow
, phigh
, 1, 0);
3675 /* return TRUE if overflow */
3676 static int div64(uint64_t *plow
, uint64_t *phigh
, uint64_t b
)
3678 uint64_t q
, r
, a1
, a0
;
3691 /* XXX: use a better algorithm */
3692 for(i
= 0; i
< 64; i
++) {
3694 a1
= (a1
<< 1) | (a0
>> 63);
3695 if (ab
|| a1
>= b
) {
3701 a0
= (a0
<< 1) | qb
;
3703 #if defined(DEBUG_MULDIV)
3704 printf("div: 0x%016" PRIx64
"%016" PRIx64
" / 0x%016" PRIx64
": q=0x%016" PRIx64
" r=0x%016" PRIx64
"\n",
3705 *phigh
, *plow
, b
, a0
, a1
);
3713 /* return TRUE if overflow */
3714 static int idiv64(uint64_t *plow
, uint64_t *phigh
, int64_t b
)
3717 sa
= ((int64_t)*phigh
< 0);
3719 neg128(plow
, phigh
);
3723 if (div64(plow
, phigh
, b
) != 0)
3726 if (*plow
> (1ULL << 63))
3730 if (*plow
>= (1ULL << 63))
3738 void helper_mulq_EAX_T0(void)
3742 mulu64(&r0
, &r1
, EAX
, T0
);
3749 void helper_imulq_EAX_T0(void)
3753 muls64(&r0
, &r1
, EAX
, T0
);
3757 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
3760 void helper_imulq_T0_T1(void)
3764 muls64(&r0
, &r1
, T0
, T1
);
3767 CC_SRC
= ((int64_t)r1
!= ((int64_t)r0
>> 63));
3770 void helper_divq_EAX_T0(void)
3774 raise_exception(EXCP00_DIVZ
);
3778 if (div64(&r0
, &r1
, T0
))
3779 raise_exception(EXCP00_DIVZ
);
3784 void helper_idivq_EAX_T0(void)
3788 raise_exception(EXCP00_DIVZ
);
3792 if (idiv64(&r0
, &r1
, T0
))
3793 raise_exception(EXCP00_DIVZ
);
3798 void helper_bswapq_T0(void)
3804 void helper_hlt(void)
3806 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
; /* needed if sti is just before */
3807 env
->hflags
|= HF_HALTED_MASK
;
3808 env
->exception_index
= EXCP_HLT
;
3812 void helper_monitor(void)
3814 if ((uint32_t)ECX
!= 0)
3815 raise_exception(EXCP0D_GPF
);
3816 /* XXX: store address ? */
3819 void helper_mwait(void)
3821 if ((uint32_t)ECX
!= 0)
3822 raise_exception(EXCP0D_GPF
);
3823 /* XXX: not complete but not completely erroneous */
3824 if (env
->cpu_index
!= 0 || env
->next_cpu
!= NULL
) {
3825 /* more than one CPU: do not sleep because another CPU may
3832 float approx_rsqrt(float a
)
3834 return 1.0 / sqrt(a
);
3837 float approx_rcp(float a
)
3842 void update_fp_status(void)
3846 /* set rounding mode */
3847 switch(env
->fpuc
& RC_MASK
) {
3850 rnd_type
= float_round_nearest_even
;
3853 rnd_type
= float_round_down
;
3856 rnd_type
= float_round_up
;
3859 rnd_type
= float_round_to_zero
;
3862 set_float_rounding_mode(rnd_type
, &env
->fp_status
);
3864 switch((env
->fpuc
>> 8) & 3) {
3876 set_floatx80_rounding_precision(rnd_type
, &env
->fp_status
);
3880 #if !defined(CONFIG_USER_ONLY)
3882 #define MMUSUFFIX _mmu
3884 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
3886 # define GETPC() (__builtin_return_address(0))
3890 #include "softmmu_template.h"
3893 #include "softmmu_template.h"
3896 #include "softmmu_template.h"
3899 #include "softmmu_template.h"
3903 /* try to fill the TLB and return an exception if error. If retaddr is
3904 NULL, it means that the function was called in C code (i.e. not
3905 from generated code or from helper.c) */
3906 /* XXX: fix it to restore all registers */
3907 void tlb_fill(target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
3909 TranslationBlock
*tb
;
3912 CPUX86State
*saved_env
;
3914 /* XXX: hack to restore env in all cases, even if not called from
3917 env
= cpu_single_env
;
3919 ret
= cpu_x86_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
3922 /* now we have a real cpu fault */
3923 pc
= (unsigned long)retaddr
;
3924 tb
= tb_find_pc(pc
);
3926 /* the PC is inside the translated code. It means that we have
3927 a virtual CPU fault */
3928 cpu_restore_state(tb
, env
, pc
, NULL
);
3932 raise_exception_err(env
->exception_index
, env
->error_code
);
3934 raise_exception_err_norestore(env
->exception_index
, env
->error_code
);
3940 /* Secure Virtual Machine helpers */
3942 void helper_stgi(void)
3944 env
->hflags
|= HF_GIF_MASK
;
3947 void helper_clgi(void)
3949 env
->hflags
&= ~HF_GIF_MASK
;
3952 #if defined(CONFIG_USER_ONLY)
3954 void helper_vmrun(target_ulong addr
) { }
3955 void helper_vmmcall(void) { }
3956 void helper_vmload(target_ulong addr
) { }
3957 void helper_vmsave(target_ulong addr
) { }
3958 void helper_skinit(void) { }
3959 void helper_invlpga(void) { }
3960 void vmexit(uint64_t exit_code
, uint64_t exit_info_1
) { }
3961 int svm_check_intercept_param(uint32_t type
, uint64_t param
)
3968 static inline uint32_t
3969 vmcb2cpu_attrib(uint16_t vmcb_attrib
, uint32_t vmcb_base
, uint32_t vmcb_limit
)
3971 return ((vmcb_attrib
& 0x00ff) << 8) /* Type, S, DPL, P */
3972 | ((vmcb_attrib
& 0x0f00) << 12) /* AVL, L, DB, G */
3973 | ((vmcb_base
>> 16) & 0xff) /* Base 23-16 */
3974 | (vmcb_base
& 0xff000000) /* Base 31-24 */
3975 | (vmcb_limit
& 0xf0000); /* Limit 19-16 */
3978 static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib
)
3980 return ((cpu_attrib
>> 8) & 0xff) /* Type, S, DPL, P */
3981 | ((cpu_attrib
& 0xf00000) >> 12); /* AVL, L, DB, G */
3984 extern uint8_t *phys_ram_base
;
3985 void helper_vmrun(target_ulong addr
)
3990 if (loglevel
& CPU_LOG_TB_IN_ASM
)
3991 fprintf(logfile
,"vmrun! " TARGET_FMT_lx
"\n", addr
);
3993 env
->vm_vmcb
= addr
;
3996 /* save the current CPU state in the hsave page */
3997 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
3998 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4000 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4001 stl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4003 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4004 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4005 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4006 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4007 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
), env
->cr
[8]);
4008 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4009 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4011 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4012 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4014 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_ES
], es
);
4015 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_CS
], cs
);
4016 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_SS
], ss
);
4017 SVM_SAVE_SEG(env
->vm_hsave
, segs
[R_DS
], ds
);
4019 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
), EIP
);
4020 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4021 stq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4023 /* load the interception bitmaps so we do not need to access the
4025 /* We shift all the intercept bits so we can OR them with the TB
4027 env
->intercept
= (ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept
)) << INTERCEPT_INTR
) | INTERCEPT_SVM_MASK
;
4028 env
->intercept_cr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_read
));
4029 env
->intercept_cr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_cr_write
));
4030 env
->intercept_dr_read
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_read
));
4031 env
->intercept_dr_write
= lduw_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_dr_write
));
4032 env
->intercept_exceptions
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.intercept_exceptions
));
4034 env
->gdt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4035 env
->gdt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4037 env
->idt
.base
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
));
4038 env
->idt
.limit
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4040 /* clear exit_info_2 so we behave like the real hardware */
4041 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
), 0);
4043 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
)));
4044 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
)));
4045 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
)));
4046 env
->cr
[2] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
));
4047 int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
));
4048 if (int_ctl
& V_INTR_MASKING_MASK
) {
4049 env
->cr
[8] = int_ctl
& V_TPR_MASK
;
4050 if (env
->eflags
& IF_MASK
)
4051 env
->hflags
|= HF_HIF_MASK
;
4054 #ifdef TARGET_X86_64
4055 env
->efer
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
));
4056 env
->hflags
&= ~HF_LMA_MASK
;
4057 if (env
->efer
& MSR_EFER_LMA
)
4058 env
->hflags
|= HF_LMA_MASK
;
4061 load_eflags(ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
)),
4062 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4063 CC_OP
= CC_OP_EFLAGS
;
4064 CC_DST
= 0xffffffff;
4066 SVM_LOAD_SEG(env
->vm_vmcb
, ES
, es
);
4067 SVM_LOAD_SEG(env
->vm_vmcb
, CS
, cs
);
4068 SVM_LOAD_SEG(env
->vm_vmcb
, SS
, ss
);
4069 SVM_LOAD_SEG(env
->vm_vmcb
, DS
, ds
);
4071 EIP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
));
4073 ESP
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
));
4074 EAX
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
));
4075 env
->dr
[7] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
));
4076 env
->dr
[6] = ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
));
4077 cpu_x86_set_cpl(env
, ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
)));
4079 /* FIXME: guest state consistency checks */
4081 switch(ldub_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.tlb_ctl
))) {
4082 case TLB_CONTROL_DO_NOTHING
:
4084 case TLB_CONTROL_FLUSH_ALL_ASID
:
4085 /* FIXME: this is not 100% correct but should work for now */
4094 /* maybe we need to inject an event */
4095 event_inj
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
));
4096 if (event_inj
& SVM_EVTINJ_VALID
) {
4097 uint8_t vector
= event_inj
& SVM_EVTINJ_VEC_MASK
;
4098 uint16_t valid_err
= event_inj
& SVM_EVTINJ_VALID_ERR
;
4099 uint32_t event_inj_err
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj_err
));
4100 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.event_inj
), event_inj
& ~SVM_EVTINJ_VALID
);
4102 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4103 fprintf(logfile
, "Injecting(%#hx): ", valid_err
);
4104 /* FIXME: need to implement valid_err */
4105 switch (event_inj
& SVM_EVTINJ_TYPE_MASK
) {
4106 case SVM_EVTINJ_TYPE_INTR
:
4107 env
->exception_index
= vector
;
4108 env
->error_code
= event_inj_err
;
4109 env
->exception_is_int
= 1;
4110 env
->exception_next_eip
= -1;
4111 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4112 fprintf(logfile
, "INTR");
4114 case SVM_EVTINJ_TYPE_NMI
:
4115 env
->exception_index
= vector
;
4116 env
->error_code
= event_inj_err
;
4117 env
->exception_is_int
= 1;
4118 env
->exception_next_eip
= EIP
;
4119 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4120 fprintf(logfile
, "NMI");
4122 case SVM_EVTINJ_TYPE_EXEPT
:
4123 env
->exception_index
= vector
;
4124 env
->error_code
= event_inj_err
;
4125 env
->exception_is_int
= 0;
4126 env
->exception_next_eip
= -1;
4127 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4128 fprintf(logfile
, "EXEPT");
4130 case SVM_EVTINJ_TYPE_SOFT
:
4131 env
->exception_index
= vector
;
4132 env
->error_code
= event_inj_err
;
4133 env
->exception_is_int
= 1;
4134 env
->exception_next_eip
= EIP
;
4135 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4136 fprintf(logfile
, "SOFT");
4139 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4140 fprintf(logfile
, " %#x %#x\n", env
->exception_index
, env
->error_code
);
4142 if ((int_ctl
& V_IRQ_MASK
) || (env
->intercept
& INTERCEPT_VINTR
)) {
4143 env
->interrupt_request
|= CPU_INTERRUPT_VIRQ
;
4149 void helper_vmmcall(void)
4151 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4152 fprintf(logfile
,"vmmcall!\n");
4155 void helper_vmload(target_ulong addr
)
4157 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4158 fprintf(logfile
,"vmload! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4159 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4160 env
->segs
[R_FS
].base
);
4162 SVM_LOAD_SEG2(addr
, segs
[R_FS
], fs
);
4163 SVM_LOAD_SEG2(addr
, segs
[R_GS
], gs
);
4164 SVM_LOAD_SEG2(addr
, tr
, tr
);
4165 SVM_LOAD_SEG2(addr
, ldt
, ldtr
);
4167 #ifdef TARGET_X86_64
4168 env
->kernelgsbase
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
));
4169 env
->lstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
));
4170 env
->cstar
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
));
4171 env
->fmask
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
));
4173 env
->star
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.star
));
4174 env
->sysenter_cs
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
));
4175 env
->sysenter_esp
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
));
4176 env
->sysenter_eip
= ldq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
));
4179 void helper_vmsave(target_ulong addr
)
4181 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4182 fprintf(logfile
,"vmsave! " TARGET_FMT_lx
"\nFS: %016" PRIx64
" | " TARGET_FMT_lx
"\n",
4183 addr
, ldq_phys(addr
+ offsetof(struct vmcb
, save
.fs
.base
)),
4184 env
->segs
[R_FS
].base
);
4186 SVM_SAVE_SEG(addr
, segs
[R_FS
], fs
);
4187 SVM_SAVE_SEG(addr
, segs
[R_GS
], gs
);
4188 SVM_SAVE_SEG(addr
, tr
, tr
);
4189 SVM_SAVE_SEG(addr
, ldt
, ldtr
);
4191 #ifdef TARGET_X86_64
4192 stq_phys(addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
), env
->kernelgsbase
);
4193 stq_phys(addr
+ offsetof(struct vmcb
, save
.lstar
), env
->lstar
);
4194 stq_phys(addr
+ offsetof(struct vmcb
, save
.cstar
), env
->cstar
);
4195 stq_phys(addr
+ offsetof(struct vmcb
, save
.sfmask
), env
->fmask
);
4197 stq_phys(addr
+ offsetof(struct vmcb
, save
.star
), env
->star
);
4198 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_cs
), env
->sysenter_cs
);
4199 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_esp
), env
->sysenter_esp
);
4200 stq_phys(addr
+ offsetof(struct vmcb
, save
.sysenter_eip
), env
->sysenter_eip
);
4203 void helper_skinit(void)
4205 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4206 fprintf(logfile
,"skinit!\n");
4209 void helper_invlpga(void)
4214 int svm_check_intercept_param(uint32_t type
, uint64_t param
)
4217 case SVM_EXIT_READ_CR0
... SVM_EXIT_READ_CR0
+ 8:
4218 if (INTERCEPTEDw(_cr_read
, (1 << (type
- SVM_EXIT_READ_CR0
)))) {
4219 vmexit(type
, param
);
4223 case SVM_EXIT_READ_DR0
... SVM_EXIT_READ_DR0
+ 8:
4224 if (INTERCEPTEDw(_dr_read
, (1 << (type
- SVM_EXIT_READ_DR0
)))) {
4225 vmexit(type
, param
);
4229 case SVM_EXIT_WRITE_CR0
... SVM_EXIT_WRITE_CR0
+ 8:
4230 if (INTERCEPTEDw(_cr_write
, (1 << (type
- SVM_EXIT_WRITE_CR0
)))) {
4231 vmexit(type
, param
);
4235 case SVM_EXIT_WRITE_DR0
... SVM_EXIT_WRITE_DR0
+ 8:
4236 if (INTERCEPTEDw(_dr_write
, (1 << (type
- SVM_EXIT_WRITE_DR0
)))) {
4237 vmexit(type
, param
);
4241 case SVM_EXIT_EXCP_BASE
... SVM_EXIT_EXCP_BASE
+ 16:
4242 if (INTERCEPTEDl(_exceptions
, (1 << (type
- SVM_EXIT_EXCP_BASE
)))) {
4243 vmexit(type
, param
);
4248 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT
)) {
4249 /* FIXME: this should be read in at vmrun (faster this way?) */
4250 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.iopm_base_pa
));
4251 uint16_t port
= (uint16_t) (param
>> 16);
4253 if(ldub_phys(addr
+ port
/ 8) & (1 << (port
% 8)))
4254 vmexit(type
, param
);
4259 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT
)) {
4260 /* FIXME: this should be read in at vmrun (faster this way?) */
4261 uint64_t addr
= ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.msrpm_base_pa
));
4262 switch((uint32_t)ECX
) {
4267 case 0xc0000000 ... 0xc0001fff:
4268 T0
= (8192 + ECX
- 0xc0000000) * 2;
4272 case 0xc0010000 ... 0xc0011fff:
4273 T0
= (16384 + ECX
- 0xc0010000) * 2;
4278 vmexit(type
, param
);
4281 if (ldub_phys(addr
+ T1
) & ((1 << param
) << T0
))
4282 vmexit(type
, param
);
4287 if (INTERCEPTED((1ULL << ((type
- SVM_EXIT_INTR
) + INTERCEPT_INTR
)))) {
4288 vmexit(type
, param
);
4296 void vmexit(uint64_t exit_code
, uint64_t exit_info_1
)
4300 if (loglevel
& CPU_LOG_TB_IN_ASM
)
4301 fprintf(logfile
,"vmexit(%016" PRIx64
", %016" PRIx64
", %016" PRIx64
", " TARGET_FMT_lx
")!\n",
4302 exit_code
, exit_info_1
,
4303 ldq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_2
)),
4306 if(env
->hflags
& HF_INHIBIT_IRQ_MASK
) {
4307 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), SVM_INTERRUPT_SHADOW_MASK
);
4308 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
4310 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_state
), 0);
4313 /* Save the VM state in the vmcb */
4314 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_ES
], es
);
4315 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_CS
], cs
);
4316 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_SS
], ss
);
4317 SVM_SAVE_SEG(env
->vm_vmcb
, segs
[R_DS
], ds
);
4319 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.base
), env
->gdt
.base
);
4320 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.gdtr
.limit
), env
->gdt
.limit
);
4322 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.base
), env
->idt
.base
);
4323 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.idtr
.limit
), env
->idt
.limit
);
4325 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.efer
), env
->efer
);
4326 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr0
), env
->cr
[0]);
4327 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr2
), env
->cr
[2]);
4328 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr3
), env
->cr
[3]);
4329 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cr4
), env
->cr
[4]);
4331 if ((int_ctl
= ldl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
))) & V_INTR_MASKING_MASK
) {
4332 int_ctl
&= ~V_TPR_MASK
;
4333 int_ctl
|= env
->cr
[8] & V_TPR_MASK
;
4334 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.int_ctl
), int_ctl
);
4337 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rflags
), compute_eflags());
4338 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rip
), env
->eip
);
4339 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rsp
), ESP
);
4340 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.rax
), EAX
);
4341 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr7
), env
->dr
[7]);
4342 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.dr6
), env
->dr
[6]);
4343 stb_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, save
.cpl
), env
->hflags
& HF_CPL_MASK
);
4345 /* Reload the host state from vm_hsave */
4346 env
->hflags
&= ~HF_HIF_MASK
;
4348 env
->intercept_exceptions
= 0;
4349 env
->interrupt_request
&= ~CPU_INTERRUPT_VIRQ
;
4351 env
->gdt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.base
));
4352 env
->gdt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.gdtr
.limit
));
4354 env
->idt
.base
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.base
));
4355 env
->idt
.limit
= ldl_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.idtr
.limit
));
4357 cpu_x86_update_cr0(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr0
)) | CR0_PE_MASK
);
4358 cpu_x86_update_cr4(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr4
)));
4359 cpu_x86_update_cr3(env
, ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr3
)));
4360 if (int_ctl
& V_INTR_MASKING_MASK
)
4361 env
->cr
[8] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.cr8
));
4362 /* we need to set the efer after the crs so the hidden flags get set properly */
4363 #ifdef TARGET_X86_64
4364 env
->efer
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.efer
));
4365 env
->hflags
&= ~HF_LMA_MASK
;
4366 if (env
->efer
& MSR_EFER_LMA
)
4367 env
->hflags
|= HF_LMA_MASK
;
4371 load_eflags(ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rflags
)),
4372 ~(CC_O
| CC_S
| CC_Z
| CC_A
| CC_P
| CC_C
| DF_MASK
));
4373 CC_OP
= CC_OP_EFLAGS
;
4375 SVM_LOAD_SEG(env
->vm_hsave
, ES
, es
);
4376 SVM_LOAD_SEG(env
->vm_hsave
, CS
, cs
);
4377 SVM_LOAD_SEG(env
->vm_hsave
, SS
, ss
);
4378 SVM_LOAD_SEG(env
->vm_hsave
, DS
, ds
);
4380 EIP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rip
));
4381 ESP
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rsp
));
4382 EAX
= ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.rax
));
4384 env
->dr
[6] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr6
));
4385 env
->dr
[7] = ldq_phys(env
->vm_hsave
+ offsetof(struct vmcb
, save
.dr7
));
4388 cpu_x86_set_cpl(env
, 0);
4389 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code_hi
), (uint32_t)(exit_code
>> 32));
4390 stl_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_code
), exit_code
);
4391 stq_phys(env
->vm_vmcb
+ offsetof(struct vmcb
, control
.exit_info_1
), exit_info_1
);
4394 /* FIXME: Resets the current ASID register to zero (host ASID). */
4396 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4398 /* Clears the TSC_OFFSET inside the processor. */
4400 /* If the host is in PAE mode, the processor reloads the host's PDPEs
4401 from the page table indicated the host's CR3. If the PDPEs contain
4402 illegal state, the processor causes a shutdown. */
4404 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4405 env
->cr
[0] |= CR0_PE_MASK
;
4406 env
->eflags
&= ~VM_MASK
;
4408 /* Disables all breakpoints in the host DR7 register. */
4410 /* Checks the reloaded host state for consistency. */
4412 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4413 host's code segment or non-canonical (in the case of long mode), a
4414 #GP fault is delivered inside the host.) */
4416 /* remove any pending exception */
4417 env
->exception_index
= -1;
4418 env
->error_code
= 0;
4419 env
->old_exception
= -1;