/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "qemu/log.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu) \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
/* return non zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
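
/*
 * Worked example, assuming a standard flat-model code descriptor
 * (e1 = 0x0000ffff, e2 = 0x00cf9a00): get_seg_base() yields 0 and
 * get_seg_limit() yields 0xfffff, which DESC_G_MASK scales to the
 * page-granular limit 0xffffffff.
 */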
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }
    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;
    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }
326 cpu_stl_kernel(env
, env
->tr
.base
+ 0x20, next_eip
);
327 cpu_stl_kernel(env
, env
->tr
.base
+ 0x24, old_eflags
);
328 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 0 * 4), env
->regs
[R_EAX
]);
329 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 1 * 4), env
->regs
[R_ECX
]);
330 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 2 * 4), env
->regs
[R_EDX
]);
331 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 3 * 4), env
->regs
[R_EBX
]);
332 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 4 * 4), env
->regs
[R_ESP
]);
333 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 5 * 4), env
->regs
[R_EBP
]);
334 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 6 * 4), env
->regs
[R_ESI
]);
335 cpu_stl_kernel(env
, env
->tr
.base
+ (0x28 + 7 * 4), env
->regs
[R_EDI
]);
336 for (i
= 0; i
< 6; i
++) {
337 cpu_stw_kernel(env
, env
->tr
.base
+ (0x48 + i
* 4),
338 env
->segs
[i
].selector
);
342 cpu_stw_kernel(env
, env
->tr
.base
+ 0x0e, next_eip
);
343 cpu_stw_kernel(env
, env
->tr
.base
+ 0x10, old_eflags
);
344 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 0 * 2), env
->regs
[R_EAX
]);
345 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 1 * 2), env
->regs
[R_ECX
]);
346 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 2 * 2), env
->regs
[R_EDX
]);
347 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 3 * 2), env
->regs
[R_EBX
]);
348 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 4 * 2), env
->regs
[R_ESP
]);
349 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 5 * 2), env
->regs
[R_EBP
]);
350 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 6 * 2), env
->regs
[R_ESI
]);
351 cpu_stw_kernel(env
, env
->tr
.base
+ (0x12 + 7 * 2), env
->regs
[R_EDI
]);
352 for (i
= 0; i
< 4; i
++) {
353 cpu_stw_kernel(env
, env
->tr
.base
+ (0x22 + i
* 4),
354 env
->segs
[i
].selector
);
    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set the busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }
    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }
    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }
    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(env, R_CS, new_segs[R_CS]);
        tss_load_seg(env, R_SS, new_segs[R_SS]);
        tss_load_seg(env, R_ES, new_segs[R_ES]);
        tss_load_seg(env, R_DS, new_segs[R_DS]);
        tss_load_seg(env, R_FS, new_segs[R_FS]);
        tss_load_seg(env, R_GS, new_segs[R_GS]);
    }
    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        for (i = 0; i < DR7_MAX_BP; i++) {
            if (hw_local_breakpoint_enabled(env->dr[7], i) &&
                !hw_global_breakpoint_enabled(env->dr[7], i)) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~DR7_LOCAL_BP_MASK;
    }
#endif
}
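
/*
 * For reference, the 32-bit TSS offsets used by switch_tss() above:
 * 0x1c CR3, 0x20 EIP, 0x24 EFLAGS, 0x28-0x44 EAX..EDI, 0x48-0x5c the
 * six segment selectors, 0x60 the LDT selector and 0x64 the T bit /
 * I/O map base word that new_trap is read from.
 */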
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
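
/* The vectors accepted above are #DF(8), #TS(10), #NP(11), #SS(12),
   #GP(13), #PF(14) and #AC(17) -- the exceptions that push an error
   code on the stack, which the interrupt delivery paths below must
   replicate. */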
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
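
/* Example: with ssp = 0xfffff000 and sp = 0x2000 under a 32-bit stack,
   the plain sum 0x100001000 no longer fits in 32 bits when target_ulong
   is 64 bits wide; the (uint32_t) cast in SEG_ADDL wraps it back to the
   architecturally correct linear address 0x00001000. */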
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));   \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));   \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }
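
/* Note that PUSHW/PUSHL update only the caller's local 'sp' variable;
   the masked value is folded back into env->regs[R_ESP] by a later
   SET_ESP(), so a faulting push leaves the architectural ESP intact. */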
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0;   /* avoid warning */
        ssp = 0;       /* avoid warning */
        esp = 0;       /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }
    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
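
/*
 * Resulting stack frame for an inter-privilege 32-bit gate, from the
 * last value pushed upwards: [error code] EIP CS EFLAGS ESP SS, with
 * the ES/DS/FS/GS selectors above that when interrupting vm86 code.
 */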
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
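
/* In the 64-bit TSS, RSP0-RSP2 sit at offsets 4/12/20 and IST1-IST7 at
   36..84, so "8 * level + 4" covers both: levels 0-2 select an RSPn and
   level ist + 3 (as used by do_interrupt64() below) selects an ISTn. */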
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = env->regs[R_ESP];
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0;       /* avoid warning */
    }
    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
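
/* SYSCALL pulls its targets from MSRs: CS comes from STAR[47:32] with
   SS fixed at that selector + 8, the long-mode entry point from LSTAR
   (CSTAR for compatibility-mode callers) with RFLAGS masked by SFMASK
   (env->fmask), while legacy mode jumps to the EIP in STAR[31:0]. */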
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif
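
/* SYSRET mirrors SYSCALL: STAR[63:48] supplies the base user selector;
   a 64-bit return (dflag == 2) uses that value + 16 for CS, a 32-bit
   return uses it directly, and SS is loaded from the value + 8. */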
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
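
/* Each real-mode IVT entry is four bytes -- a 16-bit offset at
   vector * 4 and a 16-bit segment at vector * 4 + 2 -- which is exactly
   what the two cpu_lduw_kernel() fetches above decode. */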
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        env->eip = next_eip;
    }
}

#else
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                     error_code);
        }
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx,
                         env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = ldl_phys(cs->as, env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(cpu, cs->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = env->regs[R_EBP];
    esp = env->regs[R_ESP];

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}
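
/* Type 1 is an available 286 TSS and type 9 an available 386 TSS;
   setting DESC_TSS_BUSY_MASK turns them into the busy types 3 and 11,
   which is what the busy-TSS check in helper_iret_protected() below
   relies on. */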
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp +
                                         ((env->regs[R_ESP] + i * 4) &
                                          old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp +
                                          ((env->regs[R_ESP] + i * 2) &
                                           old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
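
/* The frames popped above mirror what the call/interrupt paths pushed:
   EIP and CS always, EFLAGS only for IRET, then ESP and SS when the
   privilege level drops, plus the four data-segment selectors when the
   popped EFLAGS has VM_MASK set (return to vm86). */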
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
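
/* helper_lar() below follows the same pattern but also accepts gate
   types (4, 5 and 12) and returns the access-rights bytes
   (e2 & 0x00f0ff00) instead of the segment limit. */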
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif