*/
#include "cpu.h"
-#include "dyngen-exec.h"
-#include "qemu-log.h"
+#include "qemu/log.h"
#include "helper.h"
//#define DEBUG_PCALL
+#if !defined(CONFIG_USER_ONLY)
+#include "exec/softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
-# define LOG_PCALL_STATE(env) \
- log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
+# define LOG_PCALL_STATE(cpu) \
+ log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
-# define LOG_PCALL_STATE(env) do { } while (0)
+# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
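
Note that the dump macro now takes a CPUState rather than a CPUX86State, and the generic CPU_DUMP_CCOP flag replaces the x86-only X86_DUMP_CCOP; call sites therefore have to wrap env, as the later hunks do:

    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
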
/* return non zero if error */
-static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
- int selector)
+static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
+ uint32_t *e2_ptr, int selector)
{
SegmentCache *dt;
int index;
}
/* init the segment cache in vm86 mode. */
-static inline void load_seg_vm(int seg, int selector)
+static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg, selector,
(selector << 4), 0xffff, 0);
}
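
This is the recurring shape of the whole patch: helpers that used to reach CPU state through the global AREG0 env now take a CPUX86State * explicitly. The matching helper.h declarations gain an env slot; a sketch of what that side of the change looks like (illustrative lines, not part of this hunk):

    /* before: helper relies on the global env (AREG0) */
    DEF_HELPER_1(lldt, void, int)
    /* after: TCG passes cpu_env as the first argument */
    DEF_HELPER_2(lldt, void, env, int)
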
-static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
+static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
uint32_t *esp_ptr, int dpl)
{
int type, index, shift;
}
/* XXX: merge with load_seg() */
-static void tss_load_seg(int seg_reg, int selector)
+static void tss_load_seg(CPUX86State *env, int seg_reg, int selector)
{
uint32_t e1, e2;
int rpl, dpl, cpl;
if ((selector & 0xfffc) != 0) {
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK)) {
#define SWITCH_TSS_CALL 2
/* XXX: restore CPU state in registers (PowerPC case) */
-static void switch_tss(int tss_selector,
+static void switch_tss(CPUX86State *env, int tss_selector,
uint32_t e1, uint32_t e2, int source,
uint32_t next_eip)
{
if (tss_selector & 4) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
- if (load_segment(&e1, &e2, tss_selector) != 0) {
+ if (load_segment(env, &e1, &e2, tss_selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
}
if (e2 & DESC_S_MASK) {
/* 32 bit */
cpu_stl_kernel(env, env->tr.base + 0x20, next_eip);
cpu_stl_kernel(env, env->tr.base + 0x24, old_eflags);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), EAX);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), ECX);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), EDX);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), EBX);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), ESP);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), EBP);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), ESI);
- cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), EDI);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
+ cpu_stl_kernel(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
for (i = 0; i < 6; i++) {
cpu_stw_kernel(env, env->tr.base + (0x48 + i * 4),
env->segs[i].selector);
/* 16 bit */
cpu_stw_kernel(env, env->tr.base + 0x0e, next_eip);
cpu_stw_kernel(env, env->tr.base + 0x10, old_eflags);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), EAX);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), ECX);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), EDX);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), EBX);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), ESP);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), EBP);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), ESI);
- cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), EDI);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
+ cpu_stw_kernel(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
for (i = 0; i < 4; i++) {
cpu_stw_kernel(env, env->tr.base + (0x22 + i * 4),
env->segs[i].selector);
}
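
The magic offsets in the two store sequences come from the architectural TSS layouts: in the 32-bit TSS, EIP sits at 0x20, EFLAGS at 0x24, the eight GPRs at 0x28, and the six segment selectors at 0x48, in the same ES/CS/SS/DS/FS/GS order as env->segs[]. A standalone cross-check of the 32-bit offsets (descriptive field names, not QEMU's):

    #include <stdint.h>
    #include <stddef.h>

    struct tss32_save_area {       /* starts at offset 0x20 of the TSS */
        uint32_t eip;              /* 0x20 */
        uint32_t eflags;           /* 0x24 */
        uint32_t regs[8];          /* 0x28: EAX,ECX,EDX,EBX,ESP,EBP,ESI,EDI */
        uint32_t segs[6];          /* 0x48: selector in the low 16 bits */
    };

    _Static_assert(offsetof(struct tss32_save_area, regs) == 0x28 - 0x20,
                   "GPRs at 0x28");
    _Static_assert(offsetof(struct tss32_save_area, segs) == 0x48 - 0x20,
                   "selectors at 0x48");
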
cpu_load_eflags(env, new_eflags, eflags_mask);
/* XXX: what to do in 16 bit case? */
- EAX = new_regs[0];
- ECX = new_regs[1];
- EDX = new_regs[2];
- EBX = new_regs[3];
- ESP = new_regs[4];
- EBP = new_regs[5];
- ESI = new_regs[6];
- EDI = new_regs[7];
+ env->regs[R_EAX] = new_regs[0];
+ env->regs[R_ECX] = new_regs[1];
+ env->regs[R_EDX] = new_regs[2];
+ env->regs[R_EBX] = new_regs[3];
+ env->regs[R_ESP] = new_regs[4];
+ env->regs[R_EBP] = new_regs[5];
+ env->regs[R_ESI] = new_regs[6];
+ env->regs[R_EDI] = new_regs[7];
if (new_eflags & VM_MASK) {
for (i = 0; i < 6; i++) {
- load_seg_vm(i, new_segs[i]);
+ load_seg_vm(env, i, new_segs[i]);
}
/* in vm86, CPL is always 3 */
cpu_x86_set_cpl(env, 3);
/* load the segments */
if (!(new_eflags & VM_MASK)) {
- tss_load_seg(R_CS, new_segs[R_CS]);
- tss_load_seg(R_SS, new_segs[R_SS]);
- tss_load_seg(R_ES, new_segs[R_ES]);
- tss_load_seg(R_DS, new_segs[R_DS]);
- tss_load_seg(R_FS, new_segs[R_FS]);
- tss_load_seg(R_GS, new_segs[R_GS]);
+ tss_load_seg(env, R_CS, new_segs[R_CS]);
+ tss_load_seg(env, R_SS, new_segs[R_SS]);
+ tss_load_seg(env, R_ES, new_segs[R_ES]);
+ tss_load_seg(env, R_DS, new_segs[R_DS]);
+ tss_load_seg(env, R_FS, new_segs[R_FS]);
+ tss_load_seg(env, R_GS, new_segs[R_GS]);
}
    /* check that EIP is in the CS segment limits */
if (new_eip > env->segs[R_CS].limit) {
/* XXX: different exception if CALL? */
raise_exception_err(env, EXCP0D_GPF, 0);
#ifndef CONFIG_USER_ONLY
/* reset local breakpoints */
- if (env->dr[7] & 0x55) {
- for (i = 0; i < 4; i++) {
- if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
+ if (env->dr[7] & DR7_LOCAL_BP_MASK) {
+ for (i = 0; i < DR7_MAX_BP; i++) {
+ if (hw_local_breakpoint_enabled(env->dr[7], i) &&
+ !hw_global_breakpoint_enabled(env->dr[7], i)) {
hw_breakpoint_remove(env, i);
}
}
- env->dr[7] &= ~0x55;
+ env->dr[7] &= ~DR7_LOCAL_BP_MASK;
}
#endif
}
}
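
The 0x55 literal being replaced is the mask of DR7's four local-enable bits (L0..L3 at bits 0, 2, 4, 6), and the rewritten condition keeps the old `== 0x1` semantics: a task switch clears a breakpoint only if it is local-enabled and not also global-enabled. A sketch of what the named helpers are assumed to test (QEMU defines them in cpu.h; the bodies here are illustrative):

    #define DR7_LOCAL_BP_MASK 0x55   /* L0|L1|L2|L3 */
    #define DR7_MAX_BP        4

    static inline int hw_local_breakpoint_enabled(unsigned long dr7, int index)
    {
        return (dr7 >> (index * 2)) & 1;       /* Ln bit */
    }

    static inline int hw_global_breakpoint_enabled(unsigned long dr7, int index)
    {
        return (dr7 >> (index * 2 + 1)) & 1;   /* Gn bit */
    }
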
#ifdef TARGET_X86_64
-#define SET_ESP(val, sp_mask) \
- do { \
- if ((sp_mask) == 0xffff) { \
- ESP = (ESP & ~0xffff) | ((val) & 0xffff); \
- } else if ((sp_mask) == 0xffffffffLL) { \
- ESP = (uint32_t)(val); \
- } else { \
- ESP = (val); \
- } \
+#define SET_ESP(val, sp_mask) \
+ do { \
+ if ((sp_mask) == 0xffff) { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | \
+ ((val) & 0xffff); \
+ } else if ((sp_mask) == 0xffffffffLL) { \
+ env->regs[R_ESP] = (uint32_t)(val); \
+ } else { \
+ env->regs[R_ESP] = (val); \
+ } \
} while (0)
#else
-#define SET_ESP(val, sp_mask) \
- do { \
- ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
+#define SET_ESP(val, sp_mask) \
+ do { \
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) | \
+ ((val) & (sp_mask)); \
} while (0)
#endif
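
In all three branches SET_ESP only ever touches the bits selected by sp_mask, so a 16-bit stack update leaves the upper half of ESP alone. A minimal standalone illustration with made-up values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t esp = 0x12345678;
        uint32_t sp_mask = 0xffff;     /* 16-bit SS */
        uint32_t val = 0xdeadbeef;

        esp = (esp & ~sp_mask) | (val & sp_mask);
        printf("%08x\n", esp);         /* prints 1234beef */
        return 0;
    }
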
}
/* protected mode interrupt */
-static void do_interrupt_protected(int intno, int is_int, int error_code,
- unsigned int next_eip, int is_hw)
+static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip,
+ int is_hw)
{
SegmentCache *dt;
target_ulong ptr, ssp;
if (!(e2 & DESC_P_MASK)) {
raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
}
- switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
+ switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
if (has_error_code) {
int type;
uint32_t mask;
} else {
mask = 0xffff;
}
- esp = (ESP - (2 << shift)) & mask;
+ esp = (env->regs[R_ESP] - (2 << shift)) & mask;
ssp = env->segs[R_SS].base + esp;
if (shift) {
cpu_stl_kernel(env, ssp, error_code);
if ((selector & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
}
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
- get_ss_esp_from_tss(&ss, &esp, dpl);
+ get_ss_esp_from_tss(env, &ss, &esp, dpl);
if ((ss & 0xfffc) == 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
if ((ss & 3) != dpl) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
- if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
+ if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
new_stack = 0;
sp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
- esp = ESP;
+ esp = env->regs[R_ESP];
dpl = cpl;
} else {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
}
PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
- PUSHL(ssp, esp, sp_mask, ESP);
+ PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
}
PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
}
PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
- PUSHW(ssp, esp, sp_mask, ESP);
+ PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
}
PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
sp += 8; \
}
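
The PUSHW/PUSHL/PUSHQ macros used throughout these hunks decrement a local sp copy and store through the kernel-MMU accessors, which is why every call site above now needs env in scope. The 16- and 32-bit variants are assumed to look roughly like this:

    #define PUSHW(ssp, sp, sp_mask, val)                          \
        {                                                         \
            sp -= 2;                                              \
            cpu_stw_kernel(env, (ssp) + (sp & (sp_mask)), (val)); \
        }

    #define PUSHL(ssp, sp, sp_mask, val)                          \
        {                                                         \
            sp -= 4;                                              \
            cpu_stl_kernel(env, (ssp) + (sp & (sp_mask)),         \
                           (uint32_t)(val));                      \
        }
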
-static inline target_ulong get_rsp_from_tss(int level)
+static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
int index;
}
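
The elided body reads the requested stack pointer out of the 64-bit TSS, where RSP0 lives at offset 4 and the IST slots follow (callers pass ist + 3, putting IST1 at 0x24). A sketch of the logic under the same accessor API:

    static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
    {
        int index = 8 * level + 4;   /* RSP0 at +4; ISTn arrives as n + 3 */

        if ((index + 7) > env->tr.limit) {
            raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
        }
        return cpu_ldq_kernel(env, env->tr.base + index);
    }
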
/* 64 bit interrupt */
-static void do_interrupt64(int intno, int is_int, int error_code,
- target_ulong next_eip, int is_hw)
+static void do_interrupt64(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
{
SegmentCache *dt;
target_ulong ptr;
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
/* to inner privilege */
if (ist != 0) {
- esp = get_rsp_from_tss(ist + 3);
+ esp = get_rsp_from_tss(env, ist + 3);
} else {
- esp = get_rsp_from_tss(dpl);
+ esp = get_rsp_from_tss(env, dpl);
}
esp &= ~0xfLL; /* align stack */
ss = 0;
}
new_stack = 0;
if (ist != 0) {
- esp = get_rsp_from_tss(ist + 3);
+ esp = get_rsp_from_tss(env, ist + 3);
} else {
- esp = ESP;
+ esp = env->regs[R_ESP];
}
esp &= ~0xfLL; /* align stack */
dpl = cpl;
}
PUSHQ(esp, env->segs[R_SS].selector);
- PUSHQ(esp, ESP);
+ PUSHQ(esp, env->regs[R_ESP]);
PUSHQ(esp, cpu_compute_eflags(env));
PUSHQ(esp, env->segs[R_CS].selector);
PUSHQ(esp, old_eip);
ss = 0 | dpl;
cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
}
- ESP = esp;
+ env->regs[R_ESP] = esp;
selector = (selector & ~3) | dpl;
cpu_x86_load_seg_cache(env, R_CS, selector,
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
-void helper_syscall(int next_eip_addend)
+void helper_syscall(CPUX86State *env, int next_eip_addend)
{
env->exception_index = EXCP_SYSCALL;
env->exception_next_eip = env->eip + next_eip_addend;
cpu_loop_exit(env);
}
#else
-void helper_syscall(int next_eip_addend)
+void helper_syscall(CPUX86State *env, int next_eip_addend)
{
int selector;
if (env->hflags & HF_LMA_MASK) {
int code64;
- ECX = env->eip + next_eip_addend;
+ env->regs[R_ECX] = env->eip + next_eip_addend;
env->regs[11] = cpu_compute_eflags(env);
code64 = env->hflags & HF_CS64_MASK;
env->eip = env->cstar;
}
} else {
- ECX = (uint32_t)(env->eip + next_eip_addend);
+ env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);
cpu_x86_set_cpl(env, 0);
cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
#endif
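
The long-mode path above follows the architectural SYSCALL contract: RCX takes the return RIP, R11 takes RFLAGS, the new RIP comes from the LSTAR MSR (CSTAR for a 32-bit caller), and SFMASK clears flags. Condensed, with env fields as this file uses them (sketch only):

    env->regs[R_ECX] = env->eip + next_eip_addend;  /* return RIP -> RCX */
    env->regs[11] = cpu_compute_eflags(env);        /* RFLAGS -> R11 */
    env->eflags &= ~env->fmask;                     /* apply SFMASK */
    env->eip = code64 ? env->lstar : env->cstar;    /* kernel entry point */
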
#ifdef TARGET_X86_64
-void helper_sysret(int dflag)
+void helper_sysret(CPUX86State *env, int dflag)
{
int cpl, selector;
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
DESC_L_MASK);
- env->eip = ECX;
+ env->eip = env->regs[R_ECX];
} else {
cpu_x86_load_seg_cache(env, R_CS, selector | 3,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- env->eip = (uint32_t)ECX;
+ env->eip = (uint32_t)env->regs[R_ECX];
}
cpu_x86_load_seg_cache(env, R_SS, selector + 8,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- env->eip = (uint32_t)ECX;
+ env->eip = (uint32_t)env->regs[R_ECX];
cpu_x86_load_seg_cache(env, R_SS, selector + 8,
0, 0xffffffff,
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
#endif
/* real mode interrupt */
-static void do_interrupt_real(int intno, int is_int, int error_code,
- unsigned int next_eip)
+static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
+ int error_code, unsigned int next_eip)
{
SegmentCache *dt;
target_ulong ptr, ssp;
ptr = dt->base + intno * 4;
offset = cpu_lduw_kernel(env, ptr);
selector = cpu_lduw_kernel(env, ptr + 2);
- esp = ESP;
+ esp = env->regs[R_ESP];
ssp = env->segs[R_SS].base;
if (is_int) {
old_eip = next_eip;
PUSHW(ssp, esp, 0xffff, old_eip);
/* update processor state */
- ESP = (ESP & ~0xffff) | (esp & 0xffff);
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
env->eip = offset;
env->segs[R_CS].selector = selector;
env->segs[R_CS].base = (selector << 4);
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
-static void do_interrupt_user(int intno, int is_int, int error_code,
- target_ulong next_eip)
+static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
+ int error_code, target_ulong next_eip)
{
SegmentCache *dt;
target_ulong ptr;
exiting the emulation with the suitable exception and error
code */
if (is_int) {
- EIP = next_eip;
+ env->eip = next_eip;
}
}
#else
-static void handle_even_inj(int intno, int is_int, int error_code,
- int is_hw, int rm)
+static void handle_even_inj(CPUX86State *env, int intno, int is_int,
+ int error_code, int is_hw, int rm)
{
uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
control.event_inj));
/*
* Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
* instruction. It is only relevant if is_int is TRUE.
*/
-static void do_interrupt_all(int intno, int is_int, int error_code,
- target_ulong next_eip, int is_hw)
+static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
+ int error_code, target_ulong next_eip, int is_hw)
{
+ CPUX86State *env = &cpu->env;
+
if (qemu_loglevel_mask(CPU_LOG_INT)) {
if ((env->cr[0] & CR0_PE_MASK)) {
static int count;
" pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
count, intno, error_code, is_int,
env->hflags & HF_CPL_MASK,
- env->segs[R_CS].selector, EIP,
- (int)env->segs[R_CS].base + EIP,
- env->segs[R_SS].selector, ESP);
+ env->segs[R_CS].selector, env->eip,
+ (int)env->segs[R_CS].base + env->eip,
+ env->segs[R_SS].selector, env->regs[R_ESP]);
if (intno == 0x0e) {
qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
} else {
- qemu_log(" EAX=" TARGET_FMT_lx, EAX);
+ qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
}
qemu_log("\n");
- log_cpu_state(env, X86_DUMP_CCOP);
+ log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
{
int i;
if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
if (env->hflags & HF_SVMI_MASK) {
- handle_even_inj(intno, is_int, error_code, is_hw, 0);
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
}
#endif
#ifdef TARGET_X86_64
if (env->hflags & HF_LMA_MASK) {
- do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
+ do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
} else
#endif
{
- do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+ do_interrupt_protected(env, intno, is_int, error_code, next_eip,
+ is_hw);
}
} else {
#if !defined(CONFIG_USER_ONLY)
if (env->hflags & HF_SVMI_MASK) {
- handle_even_inj(intno, is_int, error_code, is_hw, 1);
+ handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
}
#endif
- do_interrupt_real(intno, is_int, error_code, next_eip);
+ do_interrupt_real(env, intno, is_int, error_code, next_eip);
}
#if !defined(CONFIG_USER_ONLY)
#endif
}
-void do_interrupt(CPUX86State *env1)
+void x86_cpu_do_interrupt(CPUState *cs)
{
- CPUX86State *saved_env;
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
- saved_env = env;
- env = env1;
#if defined(CONFIG_USER_ONLY)
/* if user mode only, we simulate a fake exception
which will be handled outside the cpu execution
loop */
- do_interrupt_user(env->exception_index,
+ do_interrupt_user(env, env->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip);
/* simulate a real cpu exception. On i386, it can
trigger new exceptions, but we do not handle
double or triple faults yet. */
- do_interrupt_all(env->exception_index,
+ do_interrupt_all(cpu, env->exception_index,
env->exception_is_int,
env->error_code,
env->exception_next_eip, 0);
/* successfully delivered */
env->old_exception = -1;
#endif
- env = saved_env;
}
-void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
+void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
- do_interrupt_all(intno, 0, 0, 0, is_hw);
- env = saved_env;
+ do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
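
x86_env_get_cpu() is the usual QOM container_of step: CPUX86State is embedded in X86CPU, which in turn wraps CPUState, so env can be turned back into its owning CPU without any global. The relationships this hunk relies on, sketched (the inline is assumed to live in cpu-qom.h):

    struct X86CPU {
        CPUState parent_obj;    /* CPU(cpu) upcasts to this */
        CPUX86State env;        /* the state threaded through the helpers */
    };

    static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
    {
        return container_of(env, X86CPU, env);
    }
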
-void helper_enter_level(int level, int data32, target_ulong t1)
+void helper_enter_level(CPUX86State *env, int level, int data32,
+ target_ulong t1)
{
target_ulong ssp;
uint32_t esp_mask, esp, ebp;
esp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
- ebp = EBP;
- esp = ESP;
+ ebp = env->regs[R_EBP];
+ esp = env->regs[R_ESP];
if (data32) {
/* 32 bit */
esp -= 4;
}
#ifdef TARGET_X86_64
-void helper_enter64_level(int level, int data64, target_ulong t1)
+void helper_enter64_level(CPUX86State *env, int level, int data64,
+ target_ulong t1)
{
target_ulong esp, ebp;
- ebp = EBP;
- esp = ESP;
+ ebp = env->regs[R_EBP];
+ esp = env->regs[R_ESP];
if (data64) {
/* 64 bit */
}
#endif
-void helper_lldt(int selector)
+void helper_lldt(CPUX86State *env, int selector)
{
SegmentCache *dt;
uint32_t e1, e2;
env->ldt.selector = selector;
}
-void helper_ltr(int selector)
+void helper_ltr(CPUX86State *env, int selector)
{
SegmentCache *dt;
uint32_t e1, e2;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
-void helper_load_seg(int seg_reg, int selector)
+void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
uint32_t e1, e2;
int cpl, dpl, rpl;
}
/* protected mode jump */
-void helper_ljmp_protected(int new_cs, target_ulong new_eip,
+void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
int next_eip_addend)
{
int gate_cs, type;
if ((new_cs & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, new_cs) != 0) {
+ if (load_segment(env, &e1, &e2, new_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
cpl = env->hflags & HF_CPL_MASK;
}
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
- EIP = new_eip;
+ env->eip = new_eip;
} else {
/* jump to call or task gate */
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
next_eip = env->eip + next_eip_addend;
- switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
+ switch_tss(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
CC_OP = CC_OP_EFLAGS;
break;
case 4: /* 286 call gate */
if (type == 12) {
new_eip |= (e2 & 0xffff0000);
}
- if (load_segment(&e1, &e2, gate_cs) != 0) {
+ if (load_segment(env, &e1, &e2, gate_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
}
dpl = (e2 >> DESC_DPL_SHIFT) & 3;
}
cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
- EIP = new_eip;
+ env->eip = new_eip;
break;
default:
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
/* real mode call */
-void helper_lcall_real(int new_cs, target_ulong new_eip1,
+void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
int shift, int next_eip)
{
int new_eip;
target_ulong ssp;
new_eip = new_eip1;
- esp = ESP;
+ esp = env->regs[R_ESP];
esp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
if (shift) {
}
/* protected mode call */
-void helper_lcall_protected(int new_cs, target_ulong new_eip,
+void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
int shift, int next_eip_addend)
{
int new_stack, i;
next_eip = env->eip + next_eip_addend;
LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
- LOG_PCALL_STATE(env);
+ LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
if ((new_cs & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, new_cs) != 0) {
+ if (load_segment(env, &e1, &e2, new_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
cpl = env->hflags & HF_CPL_MASK;
target_ulong rsp;
/* 64 bit case */
- rsp = ESP;
+ rsp = env->regs[R_ESP];
PUSHQ(rsp, env->segs[R_CS].selector);
PUSHQ(rsp, next_eip);
/* from this point, not restartable */
- ESP = rsp;
+ env->regs[R_ESP] = rsp;
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2),
get_seg_limit(e1, e2), e2);
- EIP = new_eip;
+ env->eip = new_eip;
} else
#endif
{
- sp = ESP;
+ sp = env->regs[R_ESP];
sp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
if (shift) {
SET_ESP(sp, sp_mask);
cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
get_seg_base(e1, e2), limit, e2);
- EIP = new_eip;
+ env->eip = new_eip;
}
} else {
/* check gate type */
if (dpl < cpl || dpl < rpl) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
- switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
+ switch_tss(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
CC_OP = CC_OP_EFLAGS;
return;
case 4: /* 286 call gate */
raise_exception_err(env, EXCP0D_GPF, 0);
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
}
if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
if (!(e2 & DESC_C_MASK) && dpl < cpl) {
/* to inner privilege */
- get_ss_esp_from_tss(&ss, &sp, dpl);
- LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
- "\n",
- ss, sp, param_count, ESP);
+ get_ss_esp_from_tss(env, &ss, &sp, dpl);
+ LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
+ TARGET_FMT_lx "\n", ss, sp, param_count,
+ env->regs[R_ESP]);
if ((ss & 0xfffc) == 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
if ((ss & 3) != dpl) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
- if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
+ if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
}
ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
ssp = get_seg_base(ss_e1, ss_e2);
if (shift) {
PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
- PUSHL(ssp, sp, sp_mask, ESP);
+ PUSHL(ssp, sp, sp_mask, env->regs[R_ESP]);
for (i = param_count - 1; i >= 0; i--) {
- val = cpu_ldl_kernel(env, old_ssp + ((ESP + i * 4) &
- old_sp_mask));
+ val = cpu_ldl_kernel(env, old_ssp +
+ ((env->regs[R_ESP] + i * 4) &
+ old_sp_mask));
PUSHL(ssp, sp, sp_mask, val);
}
} else {
PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
- PUSHW(ssp, sp, sp_mask, ESP);
+ PUSHW(ssp, sp, sp_mask, env->regs[R_ESP]);
for (i = param_count - 1; i >= 0; i--) {
- val = cpu_lduw_kernel(env, old_ssp + ((ESP + i * 2) &
- old_sp_mask));
+ val = cpu_lduw_kernel(env, old_ssp +
+ ((env->regs[R_ESP] + i * 2) &
+ old_sp_mask));
PUSHW(ssp, sp, sp_mask, val);
}
}
new_stack = 1;
} else {
/* to same privilege */
- sp = ESP;
+ sp = env->regs[R_ESP];
sp_mask = get_sp_mask(env->segs[R_SS].flags);
ssp = env->segs[R_SS].base;
/* push_size = (4 << shift); */
e2);
cpu_x86_set_cpl(env, dpl);
SET_ESP(sp, sp_mask);
- EIP = offset;
+ env->eip = offset;
}
}
/* real and vm86 mode iret */
-void helper_iret_real(int shift)
+void helper_iret_real(CPUX86State *env, int shift)
{
uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
target_ulong ssp;
int eflags_mask;
sp_mask = 0xffff; /* XXXX: use SS segment size? */
- sp = ESP;
+ sp = env->regs[R_ESP];
ssp = env->segs[R_SS].base;
if (shift == 1) {
/* 32 bits */
POPW(ssp, sp, sp_mask, new_cs);
POPW(ssp, sp, sp_mask, new_eflags);
}
- ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+ env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
env->segs[R_CS].selector = new_cs;
env->segs[R_CS].base = (new_cs << 4);
env->eip = new_eip;
env->hflags2 &= ~HF2_NMI_MASK;
}
-static inline void validate_seg(int seg_reg, int cpl)
+static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
int dpl;
uint32_t e2;
}
/* protected mode iret */
-static inline void helper_ret_protected(int shift, int is_iret, int addend)
+static inline void helper_ret_protected(CPUX86State *env, int shift,
+ int is_iret, int addend)
{
uint32_t new_cs, new_eflags, new_ss;
uint32_t new_es, new_ds, new_fs, new_gs;
{
sp_mask = get_sp_mask(env->segs[R_SS].flags);
}
- sp = ESP;
+ sp = env->regs[R_ESP];
ssp = env->segs[R_SS].base;
new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
}
LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
new_cs, new_eip, shift, addend);
- LOG_PCALL_STATE(env);
+ LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
if ((new_cs & 0xfffc) == 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
- if (load_segment(&e1, &e2, new_cs) != 0) {
+ if (load_segment(env, &e1, &e2, new_cs) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
}
if (!(e2 & DESC_S_MASK) ||
if ((new_ss & 3) != rpl) {
raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
}
- if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
+ if (load_segment(env, &ss_e1, &ss_e2, new_ss) != 0) {
raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
}
if (!(ss_e2 & DESC_S_MASK) ||
}
/* validate data segments */
- validate_seg(R_ES, rpl);
- validate_seg(R_DS, rpl);
- validate_seg(R_FS, rpl);
- validate_seg(R_GS, rpl);
+ validate_seg(env, R_ES, rpl);
+ validate_seg(env, R_DS, rpl);
+ validate_seg(env, R_FS, rpl);
+ validate_seg(env, R_GS, rpl);
sp += addend;
}
cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
VIP_MASK);
- load_seg_vm(R_CS, new_cs & 0xffff);
+ load_seg_vm(env, R_CS, new_cs & 0xffff);
cpu_x86_set_cpl(env, 3);
- load_seg_vm(R_SS, new_ss & 0xffff);
- load_seg_vm(R_ES, new_es & 0xffff);
- load_seg_vm(R_DS, new_ds & 0xffff);
- load_seg_vm(R_FS, new_fs & 0xffff);
- load_seg_vm(R_GS, new_gs & 0xffff);
+ load_seg_vm(env, R_SS, new_ss & 0xffff);
+ load_seg_vm(env, R_ES, new_es & 0xffff);
+ load_seg_vm(env, R_DS, new_ds & 0xffff);
+ load_seg_vm(env, R_FS, new_fs & 0xffff);
+ load_seg_vm(env, R_GS, new_gs & 0xffff);
env->eip = new_eip & 0xffff;
- ESP = new_esp;
+ env->regs[R_ESP] = new_esp;
}
-void helper_iret_protected(int shift, int next_eip)
+void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
int tss_selector, type;
uint32_t e1, e2;
if (tss_selector & 4) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
- if (load_segment(&e1, &e2, tss_selector) != 0) {
+ if (load_segment(env, &e1, &e2, tss_selector) != 0) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
if (type != 3) {
raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
}
- switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
+ switch_tss(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
} else {
- helper_ret_protected(shift, 1, 0);
+ helper_ret_protected(env, shift, 1, 0);
}
env->hflags2 &= ~HF2_NMI_MASK;
}
-void helper_lret_protected(int shift, int addend)
+void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
- helper_ret_protected(shift, 0, addend);
+ helper_ret_protected(env, shift, 0, addend);
}
-void helper_sysenter(void)
+void helper_sysenter(CPUX86State *env)
{
if (env->sysenter_cs == 0) {
raise_exception_err(env, EXCP0D_GPF, 0);
DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
DESC_S_MASK |
DESC_W_MASK | DESC_A_MASK);
- ESP = env->sysenter_esp;
- EIP = env->sysenter_eip;
+ env->regs[R_ESP] = env->sysenter_esp;
+ env->eip = env->sysenter_eip;
}
-void helper_sysexit(int dflag)
+void helper_sysexit(CPUX86State *env, int dflag)
{
int cpl;
DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
DESC_W_MASK | DESC_A_MASK);
}
- ESP = ECX;
- EIP = EDX;
+ env->regs[R_ESP] = env->regs[R_ECX];
+ env->eip = env->regs[R_EDX];
}
-target_ulong helper_lsl(target_ulong selector1)
+target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
unsigned int limit;
uint32_t e1, e2, eflags, selector;
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
rpl = selector & 3;
return limit;
}
-target_ulong helper_lar(target_ulong selector1)
+target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl, type;
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
rpl = selector & 3;
return e2 & 0x00f0ff00;
}
-void helper_verr(target_ulong selector1)
+void helper_verr(CPUX86State *env, target_ulong selector1)
{
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl;
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
if (!(e2 & DESC_S_MASK)) {
CC_SRC = eflags | CC_Z;
}
-void helper_verw(target_ulong selector1)
+void helper_verw(CPUX86State *env, target_ulong selector1)
{
uint32_t e1, e2, eflags, selector;
int rpl, dpl, cpl;
if ((selector & 0xfffc) == 0) {
goto fail;
}
- if (load_segment(&e1, &e2, selector) != 0) {
+ if (load_segment(env, &e1, &e2, selector) != 0) {
goto fail;
}
if (!(e2 & DESC_S_MASK)) {
}
#if defined(CONFIG_USER_ONLY)
-void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
+void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = s;
if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
selector &= 0xffff;
cpu_x86_load_seg_cache(env, seg_reg, selector,
(selector << 4), 0xffff, 0);
} else {
- helper_load_seg(seg_reg, selector);
+ helper_load_seg(env, seg_reg, selector);
}
- env = saved_env;
}
#endif