SegmentCache tr;
SegmentCache gdt; /* only base and limit are used */
SegmentCache idt; /* only base and limit are used */
+ int cpl; /* cached current privilege level (CPL) */
/* sysenter registers */
uint32_t sysenter_cs;
uint32_t breakpoints[MAX_BREAKPOINTS];
int nb_breakpoints;
+ int singlestep_enabled;
/* user data */
void *opaque;
/* needed to load some predefinied segment registers */
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
+/* wrapper kept as a hook, in case changing the CPL must later also update memory mappings */
+static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
+{
+ s->cpl = cpl;
+}
+
/* simulate fsave/frstor */
void cpu_x86_fsave(CPUX86State *s, uint8_t *ptr, int data32);
void cpu_x86_frstor(CPUX86State *s, uint8_t *ptr, int data32);
{
SegmentCache *dt;
uint8_t *ptr, *ssp;
- int type, dpl, cpl, selector, ss_dpl;
+ int type, dpl, selector, ss_dpl;
int has_error_code, new_stack, shift;
uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
uint32_t old_cs, old_ss, old_esp, old_eip;
break;
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (env->eflags & VM_MASK)
- cpl = 3;
- else
- cpl = env->segs[R_CS].selector & 3;
/* check privledge if software int */
- if (is_int && dpl < cpl)
+ if (is_int && dpl < env->cpl)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
/* check valid bit */
if (!(e2 & DESC_P_MASK))
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (dpl > cpl)
+ if (dpl > env->cpl)
raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
- if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
/* to inner priviledge */
get_ss_esp_from_tss(&ss, &esp, dpl);
if ((ss & 0xfffc) == 0)
if (!(ss_e2 & DESC_P_MASK))
raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
new_stack = 1;
- } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+ } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
/* to same priviledge */
new_stack = 0;
} else {
{
SegmentCache *dt;
uint8_t *ptr;
- int dpl, cpl;
+ int dpl;
uint32_t e2;
dt = &env->idt;
e2 = ldl(ptr + 4);
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = 3;
/* check privledge if software int */
- if (is_int && dpl < cpl)
+ if (is_int && dpl < env->cpl)
raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
/* Since we emulate only user space, we cannot do more than
selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
}
+ if (seg_reg == R_CS) {
+ cpu_x86_set_cpl(env, selector & 3);
+ }
sc->selector = selector;
}
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->segs[R_CS].selector & 3;
+ cpl = env->cpl;
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
raise_exception_err(EXCP0D_GPF, 0);
if (load_segment(&e1, &e2, new_cs) != 0)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->segs[R_CS].selector & 3;
+ cpl = env->cpl;
if (e2 & DESC_S_MASK) {
if (!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
if (!(e2 & DESC_S_MASK) ||
!(e2 & DESC_CS_MASK))
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->segs[R_CS].selector & 3;
+ cpl = env->cpl;
rpl = new_cs & 3;
if (rpl < cpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
/* modify processor state */
load_eflags(new_eflags, FL_UPDATE_CPL0_MASK | VM_MASK | VIF_MASK | VIP_MASK);
load_seg_vm(R_CS, new_cs);
+ cpu_x86_set_cpl(env, 3);
load_seg_vm(R_SS, new_ss);
load_seg_vm(R_ES, new_es);
load_seg_vm(R_DS, new_ds);
load_seg_vm(R_FS, new_fs);
load_seg_vm(R_GS, new_gs);
-
+
env->eip = new_eip;
env->regs[R_ESP] = new_esp;
}