X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=cpu-exec.c;h=252da8688270b21db03c13a3ff1deee764014e8d;hb=a22260ae380fa6abb546479cfc2962ba4c40382d;hp=de0d716da0615a7293b69a949133255848c78414;hpb=a3ce3668ccff7d350a4f795ad99a012a6d41caef;p=qemu.git

diff --git a/cpu-exec.c b/cpu-exec.c
index de0d716da..252da8688 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -1,5 +1,5 @@
 /*
- *  i386 emulator main execution loop
+ *  emulator main execution loop
  *
  *  Copyright (c) 2003-2005 Fabrice Bellard
  *
@@ -21,17 +21,18 @@
 #include "disas.h"
 #include "tcg.h"
 #include "qemu-barrier.h"
+#include "qtest.h"
 
 int tb_invalidated_flag;
 
 //#define CONFIG_DEBUG_EXEC
 
-bool qemu_cpu_has_work(CPUState *env)
+bool qemu_cpu_has_work(CPUArchState *env)
 {
     return cpu_has_work(env);
 }
 
-void cpu_loop_exit(CPUState *env)
+void cpu_loop_exit(CPUArchState *env)
 {
     env->current_tb = NULL;
     longjmp(env->jmp_env, 1);
@@ -41,7 +42,7 @@ void cpu_loop_exit(CPUState *env)
    restored in a state compatible with the CPU emulator
  */
 #if defined(CONFIG_SOFTMMU)
-void cpu_resume_from_signal(CPUState *env, void *puc)
+void cpu_resume_from_signal(CPUArchState *env, void *puc)
 {
     /* XXX: restore cpu registers saved in host registers */
 
@@ -52,10 +53,10 @@ void cpu_resume_from_signal(CPUState *env, void *puc)
 
 /* Execute the code without caching the generated code. An interpreter
    could be used if available. */
-static void cpu_exec_nocache(CPUState *env, int max_cycles,
+static void cpu_exec_nocache(CPUArchState *env, int max_cycles,
                              TranslationBlock *orig_tb)
 {
-    unsigned long next_tb;
+    tcg_target_ulong next_tb;
     TranslationBlock *tb;
 
     /* Should never happen.
@@ -79,14 +80,14 @@ static void cpu_exec_nocache(CPUState *env, int max_cycles,
     tb_free(tb);
 }
 
-static TranslationBlock *tb_find_slow(CPUState *env,
+static TranslationBlock *tb_find_slow(CPUArchState *env,
                                       target_ulong pc,
                                       target_ulong cs_base,
                                       uint64_t flags)
 {
     TranslationBlock *tb, **ptb1;
     unsigned int h;
-    tb_page_addr_t phys_pc, phys_page1, phys_page2;
+    tb_page_addr_t phys_pc, phys_page1;
     target_ulong virt_page2;
 
     tb_invalidated_flag = 0;
@@ -94,7 +95,6 @@ static TranslationBlock *tb_find_slow(CPUState *env,
     /* find translated block using physical mappings */
     phys_pc = get_page_addr_code(env, pc);
     phys_page1 = phys_pc & TARGET_PAGE_MASK;
-    phys_page2 = -1;
     h = tb_phys_hash_func(phys_pc);
     ptb1 = &tb_phys_hash[h];
     for(;;) {
@@ -107,6 +107,8 @@ static TranslationBlock *tb_find_slow(CPUState *env,
             tb->flags == flags) {
             /* check next page if needed */
             if (tb->page_addr[1] != -1) {
+                tb_page_addr_t phys_page2;
+
                 virt_page2 = (pc & TARGET_PAGE_MASK) +
                     TARGET_PAGE_SIZE;
                 phys_page2 = get_page_addr_code(env, virt_page2);
@@ -134,7 +136,7 @@ static TranslationBlock *tb_find_slow(CPUState *env,
     return tb;
 }
 
-static inline TranslationBlock *tb_find_fast(CPUState *env)
+static inline TranslationBlock *tb_find_fast(CPUArchState *env)
 {
     TranslationBlock *tb;
     target_ulong cs_base, pc;
@@ -154,15 +156,12 @@ static inline TranslationBlock *tb_find_fast(CPUState *env)
 
 static CPUDebugExcpHandler *debug_excp_handler;
 
-CPUDebugExcpHandler *cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
+void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler)
 {
-    CPUDebugExcpHandler *old_handler = debug_excp_handler;
-
     debug_excp_handler = handler;
-    return old_handler;
 }
 
-static void cpu_handle_debug_exception(CPUState *env)
+static void cpu_handle_debug_exception(CPUArchState *env)
 {
     CPUWatchpoint *wp;
 
@@ -180,12 +179,15 @@ static void cpu_handle_debug_exception(CPUState *env)
 
 volatile sig_atomic_t exit_request;
 
-int cpu_exec(CPUState *env)
+int cpu_exec(CPUArchState *env)
 {
+#ifdef TARGET_PPC
+    CPUState *cpu = ENV_GET_CPU(env);
+#endif
     int ret, interrupt_request;
     TranslationBlock *tb;
     uint8_t *tc_ptr;
-    unsigned long next_tb;
+    tcg_target_ulong next_tb;
 
     if (env->halted) {
         if (!cpu_has_work(env)) {
@@ -216,12 +218,15 @@ int cpu_exec(CPUState *env)
 #elif defined(TARGET_ARM)
 #elif defined(TARGET_UNICORE32)
 #elif defined(TARGET_PPC)
+    env->reserve_addr = -1;
 #elif defined(TARGET_LM32)
 #elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
+#elif defined(TARGET_OPENRISC)
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_CRIS)
 #elif defined(TARGET_S390X)
+#elif defined(TARGET_XTENSA)
     /* XXXXX */
 #else
 #error unsupported target CPU
@@ -281,17 +286,25 @@ int cpu_exec(CPUState *env)
                     }
 #endif
 #if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
+                    if (interrupt_request & CPU_INTERRUPT_POLL) {
+                        env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+                        apic_poll_irq(env->apic_state);
+                    }
+#endif
                     if (interrupt_request & CPU_INTERRUPT_INIT) {
-                            svm_check_intercept(env, SVM_EXIT_INIT);
-                            do_cpu_init(env);
+                            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
+                                                          0);
+                            do_cpu_init(x86_env_get_cpu(env));
                             env->exception_index = EXCP_HALTED;
                             cpu_loop_exit(env);
                     } else if (interrupt_request & CPU_INTERRUPT_SIPI) {
-                            do_cpu_sipi(env);
+                            do_cpu_sipi(x86_env_get_cpu(env));
                     } else if (env->hflags2 & HF2_GIF_MASK) {
                         if ((interrupt_request & CPU_INTERRUPT_SMI) &&
                             !(env->hflags & HF_SMM_MASK)) {
-                            svm_check_intercept(env, SVM_EXIT_SMI);
+                            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
+                                                          0);
                             env->interrupt_request &= ~CPU_INTERRUPT_SMI;
                             do_smm_enter(env);
                             next_tb = 0;
@@ -301,7 +314,7 @@ int cpu_exec(CPUState *env)
                             env->hflags2 |= HF2_NMI_MASK;
                             do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
                             next_tb = 0;
-			} else if (interrupt_request & CPU_INTERRUPT_MCE) {
+                        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
                             env->interrupt_request &= ~CPU_INTERRUPT_MCE;
                             do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
                             next_tb = 0;
@@ -312,7 +325,8 @@ int cpu_exec(CPUState *env)
                                 (env->eflags & IF_MASK &&
                                  !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
                             int intno;
-                            svm_check_intercept(env, SVM_EXIT_INTR);
+                            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
+                                                          0);
                             env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
                             intno = cpu_get_pic_interrupt(env);
                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
@@ -326,7 +340,8 @@ int cpu_exec(CPUState *env)
                                !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                             int intno;
                             /* FIXME: this should respect TPR */
-                            svm_check_intercept(env, SVM_EXIT_VINTR);
+                            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
+                                                          0);
                             intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
                             qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
                             do_interrupt_x86_hardirq(env, intno, 1);
@@ -336,11 +351,9 @@ int cpu_exec(CPUState *env)
                         }
                     }
 #elif defined(TARGET_PPC)
-#if 0
                     if ((interrupt_request & CPU_INTERRUPT_RESET)) {
-                        cpu_reset(env);
+                        cpu_reset(cpu);
                     }
-#endif
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         ppc_hw_interrupt(env);
                         if (env->pending_interrupts == 0)
@@ -372,6 +385,23 @@ int cpu_exec(CPUState *env)
                         do_interrupt(env);
                         next_tb = 0;
                     }
+#elif defined(TARGET_OPENRISC)
+                    {
+                        int idx = -1;
+                        if ((interrupt_request & CPU_INTERRUPT_HARD)
+                            && (env->sr & SR_IEE)) {
+                            idx = EXCP_INT;
+                        }
+                        if ((interrupt_request & CPU_INTERRUPT_TIMER)
+                            && (env->sr & SR_TEE)) {
+                            idx = EXCP_TICK;
+                        }
+                        if (idx >= 0) {
+                            env->exception_index = idx;
+                            do_interrupt(env);
+                            next_tb = 0;
+                        }
+                    }
 #elif defined(TARGET_SPARC)
                     if (interrupt_request & CPU_INTERRUPT_HARD) {
                         if (cpu_interrupts_enabled(env) &&
@@ -387,7 +417,7 @@ int cpu_exec(CPUState *env)
                                 next_tb = 0;
                             }
                         }
-		    }
+                    }
 #elif defined(TARGET_ARM)
                     if (interrupt_request & CPU_INTERRUPT_FIQ
                         && !(env->uncached_cpsr & CPSR_F)) {
@@ -414,6 +444,7 @@ int cpu_exec(CPUState *env)
 #elif defined(TARGET_UNICORE32)
                     if (interrupt_request & CPU_INTERRUPT_HARD
                         && !(env->uncached_asr & ASR_I)) {
+                        env->exception_index = UC32_EXCP_INTR;
                         do_interrupt(env);
                         next_tb = 0;
                     }
@@ -426,7 +457,7 @@ int cpu_exec(CPUState *env)
                     {
                         int idx = -1;
                         /* ??? This hard-codes the OSF/1 interrupt levels. */
-                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) { 
+                        switch (env->pal_mode ? 7 : env->ps & PS_INT_MASK) {
                         case 0 ... 3:
                             if (interrupt_request & CPU_INTERRUPT_HARD) {
                                 idx = EXCP_DEV_INTERRUPT;
@@ -462,11 +493,18 @@ int cpu_exec(CPUState *env)
                         do_interrupt(env);
                         next_tb = 0;
                     }
-                    if (interrupt_request & CPU_INTERRUPT_NMI
-                        && (env->pregs[PR_CCS] & M_FLAG)) {
-                        env->exception_index = EXCP_NMI;
-                        do_interrupt(env);
-                        next_tb = 0;
+                    if (interrupt_request & CPU_INTERRUPT_NMI) {
+                        unsigned int m_flag_archval;
+                        if (env->pregs[PR_VR] < 32) {
+                            m_flag_archval = M_FLAG_V10;
+                        } else {
+                            m_flag_archval = M_FLAG_V32;
+                        }
+                        if ((env->pregs[PR_CCS] & m_flag_archval)) {
+                            env->exception_index = EXCP_NMI;
+                            do_interrupt(env);
+                            next_tb = 0;
+                        }
                     }
 #elif defined(TARGET_M68K)
                     if (interrupt_request & CPU_INTERRUPT_HARD
@@ -487,6 +525,12 @@ int cpu_exec(CPUState *env)
                         do_interrupt(env);
                         next_tb = 0;
                     }
+#elif defined(TARGET_XTENSA)
+                    if (interrupt_request & CPU_INTERRUPT_HARD) {
+                        env->exception_index = EXC_IRQ;
+                        do_interrupt(env);
+                        next_tb = 0;
+                    }
 #endif
                    /* Don't use the cached interrupt_request value,
                       do_interrupt may have updated the EXITTB flag. */
@@ -508,7 +552,7 @@ int cpu_exec(CPUState *env)
 #if defined(TARGET_I386)
                 env->eflags = env->eflags | cpu_cc_compute_all(env, CC_OP)
                     | (DF & DF_MASK);
-                log_cpu_state(env, X86_DUMP_CCOP);
+                log_cpu_state(env, CPU_DUMP_CCOP);
                 env->eflags &= ~(DF_MASK | CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
 #elif defined(TARGET_M68K)
                 cpu_m68k_flush_flags(env, env->cc_op);
@@ -533,8 +577,8 @@ int cpu_exec(CPUState *env)
                 tb_invalidated_flag = 0;
             }
 #ifdef CONFIG_DEBUG_EXEC
-            qemu_log_mask(CPU_LOG_EXEC, "Trace 0x%08lx [" TARGET_FMT_lx "] %s\n",
-                         (long)tb->tc_ptr, tb->pc,
+            qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
+                          tb->tc_ptr, tb->pc,
                          lookup_symbol(tb->pc));
 #endif
             /* see if we can patch the calling TB. When the TB
@@ -553,12 +597,12 @@ int cpu_exec(CPUState *env)
             barrier();
             if (likely(!env->exit_request)) {
                 tc_ptr = tb->tc_ptr;
-            /* execute the generated code */
+                /* execute the generated code */
                 next_tb = tcg_qemu_tb_exec(env, tc_ptr);
                 if ((next_tb & 3) == 2) {
                     /* Instruction counter expired.  */
                     int insns_left;
-                    tb = (TranslationBlock *)(long)(next_tb & ~3);
+                    tb = (TranslationBlock *)(next_tb & ~3);
                     /* Restore PC.  */
                     cpu_pc_from_tb(env, tb);
                     insns_left = env->icount_decr.u32;
@@ -612,10 +656,12 @@ int cpu_exec(CPUState *env)
               | env->cc_dest | (env->cc_x << 4);
 #elif defined(TARGET_MICROBLAZE)
 #elif defined(TARGET_MIPS)
+#elif defined(TARGET_OPENRISC)
 #elif defined(TARGET_SH4)
 #elif defined(TARGET_ALPHA)
 #elif defined(TARGET_CRIS)
 #elif defined(TARGET_S390X)
+#elif defined(TARGET_XTENSA)
     /* XXXXX */
 #else
 #error unsupported target CPU