/* target-i386/seg_helper.c */
/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "qemu-log.h"
#include "helper.h"

//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
/* return non zero if error */
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel(env, ptr);
    *e2_ptr = cpu_ldl_kernel(env, ptr + 4);
    return 0;
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
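
/*
 * Worked example (illustrative, not part of the original file): the flat
 * descriptor words e2 = 0x00cf9b00, e1 = 0x0000ffff decode to base
 * 0x00000000 and raw limit 0xfffff; since DESC_G_MASK is set in e2,
 * get_seg_limit() expands this to (0xfffff << 12) | 0xfff = 0xffffffff,
 * i.e. a 4 GiB flat segment.
 */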
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;

        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 2);
    } else {
        *esp_ptr = cpu_ldl_kernel(env, env->tr.base + index);
        *ss_ptr = cpu_lduw_kernel(env, env->tr.base + index + 4);
    }
}
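
/*
 * Example (illustrative, not from the original file): for dpl = 0 in a
 * 32-bit TSS (type 9, so shift = 1), index = (0 * 4 + 2) << 1 = 4, and the
 * code above reads ESP0 from tss_base + 4 and SS0 from tss_base + 8; for a
 * 16-bit TSS (shift = 0) it reads SP0 from tss_base + 2 and SS0 from
 * tss_base + 4.
 */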
/* XXX: merge with load_seg() */
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* XXX: is it correct? */
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel(env, tss_base + 0x1c);
        new_eip = cpu_ldl_kernel(env, tss_base + 0x20);
        new_eflags = cpu_ldl_kernel(env, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel(env, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x48 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x60);
        new_trap = cpu_ldl_kernel(env, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel(env, tss_base + 0x0e);
        new_eflags = cpu_lduw_kernel(env, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel(env, tss_base + (0x12 + i * 2)) |
                0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel(env, tss_base + (0x22 + i * 4));
        }
        new_ldt = cpu_lduw_kernel(env, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = cpu_ldub_kernel(env, env->tr.base);
    v2 = cpu_ldub_kernel(env, env->tr.base + old_tss_limit_max);
    cpu_stb_kernel(env, env->tr.base, v1);
    cpu_stb_kernel(env, env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (old_type & 8) {
        /* 32 bit */
        cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
        cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), EAX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), ECX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), EDX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), EBX);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), ESP);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), EBP);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), ESI);
        cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
                           env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
        cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), EAX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), ECX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), EDX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), EBX);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), ESP);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), EBP);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), ESI);
        cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
                           env->segs[i].selector);
        }
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel(env, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(env, R_CS, new_segs[R_CS]);
        tss_load_seg(env, R_SS, new_segs[R_SS]);
        tss_load_seg(env, R_ES, new_segs[R_ES]);
        tss_load_seg(env, R_DS, new_segs[R_DS]);
        tss_load_seg(env, R_FS, new_segs[R_FS]);
        tss_load_seg(env, R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
                hw_breakpoint_remove(env, i);
            }
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
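
/*
 * Note (illustrative, not from the original file): the busy bit is part of
 * the type field of the TSS descriptor (DESC_TSS_BUSY_MASK is bit 9 of e2),
 * so an available 386 TSS (type 9) becomes busy (type 11) when the bit is
 * set above, and available again when it is cleared.
 */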
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            ESP = (ESP & ~0xffff) | ((val) & 0xffff);           \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            ESP = (uint32_t)(val);                              \
        } else {                                                \
            ESP = (val);                                        \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask));         \
    } while (0)
#endif
/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                            \
    {                                                           \
        sp -= 2;                                                \
        cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val));   \
    }

#define PUSHL(ssp, sp, sp_mask, val)                                    \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
    }

#define POPW(ssp, sp, sp_mask, val)                             \
    {                                                           \
        val = cpu_lduw_kernel(env, (ssp) + (sp & (sp_mask)));   \
        sp += 2;                                                \
    }

#define POPL(ssp, sp, sp_mask, val)                                     \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel(env, SEG_ADDL(ssp, sp, sp_mask)); \
        sp += 4;                                                        \
    }
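
/*
 * Usage sketch (hypothetical, not part of the original file): the macros
 * operate on a local stack-pointer copy so a faulting access leaves the
 * architectural ESP untouched; SET_ESP commits the result only afterwards.
 */
#if 0
static void push_two_words_example(CPUX86State *env, uint32_t a, uint32_t b)
{
    uint32_t sp = ESP;
    uint32_t sp_mask = get_sp_mask(env->segs[R_SS].flags);
    target_ulong ssp = env->segs[R_SS].base;

    PUSHL(ssp, sp, sp_mask, a); /* may fault; ESP is still unchanged */
    PUSHL(ssp, sp, sp_mask, b);
    SET_ESP(sp, sp_mask);       /* commit only once both stores succeeded */
}
#endif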
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
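
/*
 * Resulting stack frame (illustrative summary of the code above), from
 * higher to lower addresses: [GS FS DS ES] (vm86 only), [SS ESP] (stack
 * switch only), then EFLAGS, CS, EIP, and optionally the error code.
 */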
#ifdef TARGET_X86_64

#define PUSHQ(sp, val)                          \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel(env, sp, (val));         \
    }

#define POPQ(sp, val)                           \
    {                                           \
        val = cpu_ldq_kernel(env, sp);          \
        sp += 8;                                \
    }
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
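
/*
 * Example (illustrative, not from the original file): with
 * index = 8 * level + 4, level 0..2 reaches RSP0..RSP2 at 64-bit TSS
 * offsets 4, 12 and 20, while level = ist + 3 reaches IST1..IST7 at
 * offsets 0x24..0x54, which is why callers pass either dpl or ist + 3.
 */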
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = get_rsp_from_tss(env, dpl);
        }
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        if (ist != 0) {
            esp = get_rsp_from_tss(env, ist + 3);
        } else {
            esp = ESP;
        }
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        ECX = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
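
/*
 * Note (illustrative, not from the original file): the shifts above and in
 * helper_sysret() reflect the STAR MSR layout: bits 47:32 hold the SYSCALL
 * CS selector base (with SS implicitly at that selector + 8), bits 63:48
 * the SYSRET selector base, and bits 31:0 the 32-bit SYSCALL EIP used when
 * long mode is inactive.
 */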
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
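
/*
 * Example (illustrative, not from the original file): each real-mode IVT
 * entry is 4 bytes, offset word first and segment word second, so with an
 * IDT base of 0 the lduw pair above fetches the INT 0x10 vector from
 * linear addresses 0x40-0x43.
 */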
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int) {
        EIP = next_eip;
    }
}
#endif
#if !defined(CONFIG_USER_ONLY)
static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                     error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(CPUX86State *env, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void do_interrupt(CPUX86State *env)
{
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env, env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env, intno, 0, 0, 0, is_hw);
}
void helper_enter_level(CPUX86State *env, int level, int data32,
                        target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            cpu_stl_data(env, ssp + (esp & esp_mask),
                         cpu_ldl_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        cpu_stl_data(env, ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, ssp + (esp & esp_mask),
                         cpu_lduw_data(env, ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        cpu_stw_data(env, ssp + (esp & esp_mask), t1);
    }
}
#ifdef TARGET_X86_64
void helper_enter64_level(CPUX86State *env, int level, int data64,
                          target_ulong t1)
{
    target_ulong esp, ebp;

    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            cpu_stq_data(env, esp, cpu_ldq_data(env, ebp));
        }
        esp -= 8;
        cpu_stq_data(env, esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            cpu_stw_data(env, esp, cpu_lduw_data(env, ebp));
        }
        esp -= 2;
        cpu_stw_data(env, esp, t1);
    }
}
#endif
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel(env, ptr + 8);
            e4 = cpu_ldl_kernel(env, ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel(env, ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel(env, ptr);
        e2 = cpu_ldl_kernel(env, ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
            } else {
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel(env, ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            next_eip = env->eip + next_eip_addend;
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment(env, &e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            if (dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            }
            switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }

        if (load_segment(env, &e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
                      "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel(env, old_ssp + ((ESP + i * 4) &
                                                         old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel(env, old_ssp + ((ESP + i * 2) &
                                                          old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_eip);
            POPL(ssp, sp, sp_mask, new_cs);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL(ssp, sp, sp_mask, new_eflags);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_eip);
            POPW(ssp, sp, sp_mask, new_cs);
            if (is_iret) {
                POPW(ssp, sp, sp_mask, new_eflags);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (load_segment(env, &e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL(ssp, sp, sp_mask, new_esp);
                POPL(ssp, sp, sp_mask, new_ss);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW(ssp, sp, sp_mask, new_esp);
                POPW(ssp, sp, sp_mask, new_ss);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK |
                                       DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err(env, EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        }
#endif
        tss_selector = cpu_lduw_kernel(env, env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        if (load_segment(env, &e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        }
        switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(env, shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend);
}
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
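
/*
 * Note (illustrative, not from the original file): VERR/VERW report their
 * result only through ZF: CC_SRC gains CC_Z when the selector is readable
 * (resp. writable) at the current CPL/RPL, and loses it on any of the
 * "goto fail" paths above.
 */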
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif