#if 0
#define raise_exception_err(a, b)\
do {\
- printf("raise_exception line=%d\n", __LINE__);\
+ fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
(raise_exception_err)(a, b);\
} while (0)
#endif
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
- uint32_t e1, uint32_t e2, int source)
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip)
{
int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
uint8_t *tss_base;
type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
#ifdef DEBUG_PCALL
- if (loglevel)
+ if (loglevel & CPU_LOG_PCALL)
fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
#endif
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
uint8_t *ptr;
uint32_t e2;
- ptr = env->gdt.base + (env->tr.selector << 3);
+ ptr = env->gdt.base + (env->tr.selector & ~7);
e2 = ldl_kernel(ptr + 4);
e2 &= ~DESC_TSS_BUSY_MASK;
stl_kernel(ptr + 4, e2);
/* save the current state in the old TSS */
if (type & 8) {
/* 32 bit */
- stl_kernel(env->tr.base + 0x20, env->eip);
+ stl_kernel(env->tr.base + 0x20, next_eip);
stl_kernel(env->tr.base + 0x24, old_eflags);
for(i = 0; i < 8; i++)
stl_kernel(env->tr.base + (0x28 + i * 4), env->regs[i]);
stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
} else {
/* 16 bit */
- stw_kernel(env->tr.base + 0x0e, new_eip);
+ stw_kernel(env->tr.base + 0x0e, next_eip);
stw_kernel(env->tr.base + 0x10, old_eflags);
for(i = 0; i < 8; i++)
stw_kernel(env->tr.base + (0x12 + i * 2), env->regs[i]);
if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
uint8_t *ptr;
uint32_t e2;
- ptr = env->gdt.base + (tss_selector << 3);
+ ptr = env->gdt.base + (tss_selector & ~7);
e2 = ldl_kernel(ptr + 4);
e2 |= DESC_TSS_BUSY_MASK;
stl_kernel(ptr + 4, e2);
/* set the new CPU state */
/* from this point, any exception which occurs can give problems */
env->cr[0] |= CR0_TS_MASK;
+ env->hflags |= HF_TS_MASK;
env->tr.selector = tss_selector;
env->tr.base = tss_base;
env->tr.limit = tss_limit;
/* check that EIP is in the CS segment limits */
if (new_eip > env->segs[R_CS].limit) {
+ /* XXX: different exception if CALL ? */
raise_exception_err(EXCP0D_GPF, 0);
}
}
break;
}
}
+ if (is_int)
+ old_eip = next_eip;
+ else
+ old_eip = env->eip;
dt = &env->idt;
if (intno * 8 + 7 > dt->limit)
/* must do that check here to return the correct error code */
if (!(e2 & DESC_P_MASK))
raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
- switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL);
+ switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
if (has_error_code) {
int mask;
/* push the error code */
push_size += 8;
push_size <<= shift;
#endif
- if (is_int)
- old_eip = next_eip;
- else
- old_eip = env->eip;
if (shift == 1) {
if (new_stack) {
if (env->eflags & VM_MASK) {
if (new_stack) {
if (env->eflags & VM_MASK) {
- /* XXX: explain me why W2K hangs if the whole segment cache is
- reset ? */
-#if 1
- env->segs[R_ES].selector = 0;
- env->segs[R_ES].flags = 0;
- env->segs[R_DS].selector = 0;
- env->segs[R_DS].flags = 0;
- env->segs[R_FS].selector = 0;
- env->segs[R_FS].flags = 0;
- env->segs[R_GS].selector = 0;
- env->segs[R_GS].flags = 0;
-#else
cpu_x86_load_seg_cache(env, R_ES, 0, NULL, 0, 0);
cpu_x86_load_seg_cache(env, R_DS, 0, NULL, 0, 0);
cpu_x86_load_seg_cache(env, R_FS, 0, NULL, 0, 0);
cpu_x86_load_seg_cache(env, R_GS, 0, NULL, 0, 0);
-#endif
}
ss = (ss & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss,
}
/*
- * Begin excution of an interruption. is_int is TRUE if coming from
+ * Begin execution of an interruption. is_int is TRUE if coming from
* the int instruction. next_eip is the EIP value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE.
*/
void do_interrupt(int intno, int is_int, int error_code,
unsigned int next_eip, int is_hw)
{
-#if 0
- {
- extern FILE *stdout;
- static int count;
- if (env->cr[0] & CR0_PE_MASK) {
- fprintf(stdout, "%d: v=%02x e=%04x i=%d CPL=%d CS:EIP=%04x:%08x SS:ESP=%04x:%08x",
+#ifdef DEBUG_PCALL
+ if (loglevel & (CPU_LOG_PCALL | CPU_LOG_INT)) {
+ if ((env->cr[0] & CR0_PE_MASK)) {
+ static int count;
+ fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:%08x pc=%08x SP=%04x:%08x",
count, intno, error_code, is_int,
env->hflags & HF_CPL_MASK,
env->segs[R_CS].selector, EIP,
+ (int)env->segs[R_CS].base + EIP,
env->segs[R_SS].selector, ESP);
if (intno == 0x0e) {
- fprintf(stdout, " CR2=%08x", env->cr[2]);
+ fprintf(logfile, " CR2=%08x", env->cr[2]);
} else {
- fprintf(stdout, " EAX=%08x", env->regs[R_EAX]);
+ fprintf(logfile, " EAX=%08x", env->regs[R_EAX]);
}
- fprintf(stdout, "\n");
-
- if (0) {
- cpu_x86_dump_state(env, stdout, X86_DUMP_CCOP);
+ fprintf(logfile, "\n");
#if 0
- {
- int i;
- uint8_t *ptr;
- fprintf(stdout, " code=");
- ptr = env->segs[R_CS].base + env->eip;
- for(i = 0; i < 16; i++) {
- fprintf(stdout, " %02x", ldub(ptr + i));
- }
- fprintf(stdout, "\n");
+ cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
+ {
+ int i;
+ uint8_t *ptr;
+ fprintf(logfile, " code=");
+ ptr = env->segs[R_CS].base + env->eip;
+ for(i = 0; i < 16; i++) {
+ fprintf(logfile, " %02x", ldub(ptr + i));
}
-#endif
+ fprintf(logfile, "\n");
}
- count++;
- }
- }
#endif
-#ifdef DEBUG_PCALL
- if (loglevel) {
- static int count;
- fprintf(logfile, "%d: interrupt: vector=%02x error_code=%04x int=%d\n",
- count, intno, error_code, is_int);
- cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
-#if 0
- {
- int i;
- uint8_t *ptr;
- fprintf(logfile, " code=");
- ptr = env->segs[R_CS].base + env->eip;
- for(i = 0; i < 16; i++) {
- fprintf(logfile, " %02x", ldub(ptr + i));
- }
- fprintf(logfile, "\n");
+ count++;
}
-#endif
- count++;
}
#endif
if (env->cr[0] & CR0_PE_MASK) {
}
/* protected mode jump */
-void helper_ljmp_protected_T0_T1(void)
+void helper_ljmp_protected_T0_T1(int next_eip)
{
int new_cs, new_eip, gate_cs, type;
uint32_t e1, e2, cpl, dpl, rpl, limit;
case 5: /* task gate */
if (dpl < cpl || dpl < rpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP);
+ switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
break;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
new_cs = T0;
new_eip = T1;
#ifdef DEBUG_PCALL
- if (loglevel) {
- fprintf(logfile, "lcall %04x:%08x\n",
- new_cs, new_eip);
+ if (loglevel & CPU_LOG_PCALL) {
+ fprintf(logfile, "lcall %04x:%08x s=%d\n",
+ new_cs, new_eip, shift);
cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
}
#endif
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
cpl = env->hflags & HF_CPL_MASK;
#ifdef DEBUG_PCALL
- if (loglevel) {
+ if (loglevel & CPU_LOG_PCALL) {
fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
}
#endif
case 5: /* task gate */
if (dpl < cpl || dpl < rpl)
raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL);
+ switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
return;
case 4: /* 286 call gate */
case 12: /* 386 call gate */
/* to inner priviledge */
get_ss_esp_from_tss(&ss, &sp, dpl);
#ifdef DEBUG_PCALL
- if (loglevel)
- fprintf(logfile, "ss=%04x sp=%04x param_count=%d ESP=%x\n",
+ if (loglevel & CPU_LOG_PCALL)
+ fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=%x\n",
ss, sp, param_count, ESP);
#endif
if ((ss & 0xfffc) == 0)
POPW(ssp, sp, sp_mask, new_eflags);
}
#ifdef DEBUG_PCALL
- if (loglevel) {
- fprintf(logfile, "lret new %04x:%08x addend=0x%x\n",
- new_cs, new_eip, addend);
+ if (loglevel & CPU_LOG_PCALL) {
+ fprintf(logfile, "lret new %04x:%08x s=%d addend=0x%x\n",
+ new_cs, new_eip, shift, addend);
cpu_x86_dump_state(env, logfile, X86_DUMP_CCOP);
}
#endif
POPW(ssp, sp, sp_mask, new_esp);
POPW(ssp, sp, sp_mask, new_ss);
}
+#ifdef DEBUG_PCALL
+ if (loglevel & CPU_LOG_PCALL) {
+ fprintf(logfile, "new ss:esp=%04x:%08x\n",
+ new_ss, new_esp);
+ }
+#endif
if ((new_ss & 3) != rpl)
raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
ss_e2);
cpu_x86_set_cpl(env, rpl);
sp = new_esp;
- /* XXX: change sp_mask according to old segment ? */
+ sp_mask = get_sp_mask(ss_e2);
/* validate data segments */
validate_seg(R_ES, cpl);
load_seg_vm(R_FS, new_fs & 0xffff);
load_seg_vm(R_GS, new_gs & 0xffff);
- env->eip = new_eip;
+ env->eip = new_eip & 0xffff;
ESP = new_esp;
}
-void helper_iret_protected(int shift)
+void helper_iret_protected(int shift, int next_eip)
{
int tss_selector, type;
uint32_t e1, e2;
/* NOTE: we check both segment and busy TSS */
if (type != 3)
raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
- switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET);
+ switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
} else {
helper_ret_protected(shift, 1, 0);
}
cpu_x86_flush_tlb(env, addr);
}
-/* rdtsc */
-#if !defined(__i386__) && !defined(__x86_64__)
-uint64_t emu_time;
-#endif
-
+/* RDTSC: load the guest time stamp counter into EDX:EAX. */
void helper_rdtsc(void)
{
uint64_t val;
-#if defined(__i386__) || defined(__x86_64__)
- asm("rdtsc" : "=A" (val));
-#else
- /* better than nothing: the time increases */
- val = emu_time++;
-#endif
+
+ /* host-independent TSC source; replaces the old per-host
+    rdtsc/emu_time code paths deleted above */
+ val = cpu_get_tsc(env);
EAX = val;
EDX = val >> 32;
}
helper_fstt(ST0, (uint8_t *)A0);
}
-/* BCD ops */
+/* Merge the exception flag bits in 'mask' into the FPU status word.
+   If any currently-set flag is unmasked in the control word (FPUC_EM
+   covers the mask bits), also raise the error-summary and busy bits so
+   the pending fault becomes visible. */
+void fpu_set_exception(int mask)
+{
+ env->fpus |= mask;
+ if (env->fpus & (~env->fpuc & FPUC_EM))
+ env->fpus |= FPUS_SE | FPUS_B;
+}
-#define MUL10(iv) ( iv + iv + (iv << 3) )
+/* Divide a by b, recording a divide-by-zero status flag via
+   fpu_set_exception() when the divisor is zero; the quotient itself is
+   produced by the host's floating-point division. */
+CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
+{
+ if (b == 0.0)
+ fpu_set_exception(FPUS_ZE);
+ return a / b;
+}
+
+/* Deliver a pending FPU error.  With CR0.NE set it is reported as the
+   coprocessor-error exception (EXCP10_COPR); otherwise, in system
+   emulation, it is signalled through cpu_set_ferr() -- presumably the
+   legacy external FERR/IRQ13 reporting path, confirm in cpu_set_ferr. */
+void fpu_raise_exception(void)
+{
+ if (env->cr[0] & CR0_NE_MASK) {
+ raise_exception(EXCP10_COPR);
+ }
+#if !defined(CONFIG_USER_ONLY)
+ else {
+ cpu_set_ferr(env);
+ }
+#endif
+}
+
+/* BCD ops */
void helper_fbld_ST0_A0(void)
{
}
}
+/* XXX: merge with helper_fstt ? */
+
+#ifndef USE_X86LDOUBLE
+
+/* Split an emulated long double (held as an IEEE double in this
+   !USE_X86LDOUBLE build) into x87 80-bit pieces: *pmant receives the
+   64-bit significand with the explicit integer bit forced on, *pexp the
+   combined sign + 15-bit exponent word. */
+void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
+{
+ CPU86_LDoubleU temp;
+ int e;
+
+ temp.d = f;
+ /* mantissa */
+ *pmant = (MANTD(temp) << 11) | (1LL << 63);
+ /* exponent + sign */
+ e = EXPD(temp) - EXPBIAS + 16383;
+ e |= SIGND(temp) >> 16;
+ *pexp = e;
+}
+
+/* Rebuild an emulated long double from x87 80-bit pieces: rebias the
+   exponent from 16383 to the double bias, carry the sign across, and
+   keep the top 52 fraction bits of the 64-bit significand (the explicit
+   integer bit is dropped). */
+CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+ CPU86_LDoubleU temp;
+ int e;
+ uint64_t ll;
+
+ /* XXX: handle overflow ? */
+ e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
+ e |= (upper >> 4) & 0x800; /* sign */
+ ll = (mant >> 11) & ((1LL << 52) - 1);
+#ifdef __arm__
+ /* assemble via the two 32-bit halves of the union -- presumably for
+    ARM FPA mixed-endian double word order, TODO confirm */
+ temp.l.upper = (e << 20) | (ll >> 32);
+ temp.l.lower = ll;
+#else
+ temp.ll = ll | ((uint64_t)e << 52);
+#endif
+ return temp.d;
+}
+
+#else
+
+/* USE_X86LDOUBLE build: the union already holds the native 80-bit
+   value, so simply copy out the mantissa and sign/exponent words. */
+void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
+{
+ CPU86_LDoubleU temp;
+
+ temp.d = f;
+ *pmant = temp.l.lower;
+ *pexp = temp.l.upper;
+}
+
+/* USE_X86LDOUBLE build: reassemble the native 80-bit value directly
+   from the mantissa and sign/exponent words -- no format conversion. */
+CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+ CPU86_LDoubleU temp;
+
+ temp.l.upper = upper;
+ temp.l.lower = mant;
+ return temp.d;
+}
+#endif
+
#if !defined(CONFIG_USER_ONLY)
#define MMUSUFFIX _mmu