diff --git a/translate-all.c b/translate-all.c
index 105c25aff38ac69699e6c246550984f02811e91d..5759974d4224c3b0c7990ec0418d67fee1458b5d 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -197,9 +197,10 @@ int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr
 
 /* The cpu state corresponding to 'searched_pc' is restored.
  */
-static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
+static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                      uintptr_t searched_pc)
 {
+    CPUArchState *env = cpu->env_ptr;
     TCGContext *s = &tcg_ctx;
     int j;
     uintptr_t tc_ptr;
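[Annotation] The pattern introduced here — recovering the per-target register file from the common CPUState via env_ptr — recurs throughout the rest of the patch. A minimal sketch of the two-way conversion, using only names that appear elsewhere in this diff (ENV_GET_CPU, env_ptr); illustrative, not part of the change itself:

    CPUState *cs = ENV_GET_CPU(env);    /* arch-specific env -> common CPU state */
    CPUArchState *arch = cs->env_ptr;   /* common CPU state  -> arch-specific env */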
@@ -216,9 +217,9 @@ static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
 
     if (use_icount) {
         /* Reset the cycle counter to the start of the block.  */
-        env->icount_decr.u16.low += tb->icount;
+        cpu->icount_decr.u16.low += tb->icount;
         /* Clear the IO flag.  */
-        env->can_do_io = 0;
+        cpu->can_do_io = 0;
     }
 
     /* find opc index corresponding to search_pc */
@@ -241,7 +242,7 @@ static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
     while (s->gen_opc_instr_start[j] == 0) {
         j--;
     }
-    env->icount_decr.u16.low -= s->gen_opc_icount[j];
+    cpu->icount_decr.u16.low -= s->gen_opc_icount[j];
 
     restore_state_to_opc(env, tb, j);
 
@@ -252,13 +253,13 @@ static int cpu_restore_state_from_tb(TranslationBlock *tb, CPUArchState *env,
     return 0;
 }
 
-bool cpu_restore_state(CPUArchState *env, uintptr_t retaddr)
+bool cpu_restore_state(CPUState *cpu, uintptr_t retaddr)
 {
     TranslationBlock *tb;
 
     tb = tb_find_pc(retaddr);
     if (tb) {
-        cpu_restore_state_from_tb(tb, env, retaddr);
+        cpu_restore_state_from_tb(cpu, tb, retaddr);
         return true;
     }
     return false;
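[Annotation] With the new signature, a caller that still holds only a CPUArchState needs a conversion before calling in. A hedged sketch of what an adapted call site might look like (the real call sites live in per-target code, outside this file):

    /* sketch only: convert the arch env to the common CPUState first */
    if (!cpu_restore_state(ENV_GET_CPU(env), retaddr)) {
        /* retaddr was not inside translated code; nothing was restored */
    }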
@@ -289,17 +290,15 @@ static inline void map_exec(void *addr, long size)
 }
 #endif
 
-static void page_init(void)
+void page_size_init(void)
 {
     /* NOTE: we can always suppose that qemu_host_page_size >=
        TARGET_PAGE_SIZE */
 #ifdef _WIN32
-    {
-        SYSTEM_INFO system_info;
+    SYSTEM_INFO system_info;
 
-        GetSystemInfo(&system_info);
-        qemu_real_host_page_size = system_info.dwPageSize;
-    }
+    GetSystemInfo(&system_info);
+    qemu_real_host_page_size = system_info.dwPageSize;
 #else
     qemu_real_host_page_size = getpagesize();
 #endif
@@ -310,7 +309,11 @@ static void page_init(void)
         qemu_host_page_size = TARGET_PAGE_SIZE;
     }
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
+}
 
+static void page_init(void)
+{
+    page_size_init();
 #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
     {
 #ifdef HAVE_KINFO_GETVMMAP
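[Annotation] Splitting page_size_init() out of page_init() also explains the unwrapped Win32 block: the extra braces existed only to scope the SYSTEM_INFO declaration inside a larger function and are unnecessary once the code is a function of its own. The split lets other initialization paths establish the host/target page sizes without pulling in the TCG page-tracking setup that page_init() continues to do. A hedged sketch of such a caller (the actual callers are not part of this diff):

    /* sketch only: early-init code can now establish page sizes on its own */
    page_size_init();
    assert(qemu_host_page_size >= TARGET_PAGE_SIZE);  /* invariant noted in the comment above */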
@@ -685,7 +688,7 @@ static void page_flush_tb(void)
 /* XXX: tb_flush is currently not thread safe */
 void tb_flush(CPUArchState *env1)
 {
-    CPUState *cpu;
+    CPUState *cpu = ENV_GET_CPU(env1);
 
 #if defined(DEBUG_FLUSH)
     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
@@ -696,14 +699,12 @@ void tb_flush(CPUArchState *env1)
 #endif
     if ((unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer)
         > tcg_ctx.code_gen_buffer_size) {
-        cpu_abort(env1, "Internal error: code buffer overflow\n");
+        cpu_abort(cpu, "Internal error: code buffer overflow\n");
     }
     tcg_ctx.tb_ctx.nb_tbs = 0;
 
     CPU_FOREACH(cpu) {
-        CPUArchState *env = cpu->env_ptr;
-
-        memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));
+        memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));
     }
 
     memset(tcg_ctx.tb_ctx.tb_phys_hash, 0, sizeof(tcg_ctx.tb_ctx.tb_phys_hash));
@@ -854,10 +855,8 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
     /* remove the TB from the hash list */
     h = tb_jmp_cache_hash_func(tb->pc);
     CPU_FOREACH(cpu) {
-        CPUArchState *env = cpu->env_ptr;
-
-        if (env->tb_jmp_cache[h] == tb) {
-            env->tb_jmp_cache[h] = NULL;
+        if (cpu->tb_jmp_cache[h] == tb) {
+            cpu->tb_jmp_cache[h] = NULL;
         }
     }
 
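[Annotation] tb_jmp_cache has moved from CPUArchState to CPUState, so both tb_flush() above and this invalidation loop can walk CPUs directly instead of dereferencing env_ptr first. The lookup side follows the same pattern; a hedged sketch of a reader of this cache after the move (the real lookup lives in cpu-exec.c, not in this file):

    /* sketch only: per-CPU translation-block cache, indexed by hashed pc */
    TranslationBlock *tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
    if (!tb || tb->pc != pc) {
        /* miss: fall back to the physical hash table or retranslation */
    }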
@@ -939,10 +938,11 @@ static void build_page_bitmap(PageDesc *p)
     }
 }
 
-TranslationBlock *tb_gen_code(CPUArchState *env,
+TranslationBlock *tb_gen_code(CPUState *cpu,
                               target_ulong pc, target_ulong cs_base,
                               int flags, int cflags)
 {
+    CPUArchState *env = cpu->env_ptr;
     TranslationBlock *tb;
     uint8_t *tc_ptr;
     tb_page_addr_t phys_pc, phys_page2;
@@ -1007,7 +1007,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
 {
     TranslationBlock *tb, *tb_next, *saved_tb;
     CPUState *cpu = current_cpu;
-#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_HAS_PRECISE_SMC)
     CPUArchState *env = NULL;
 #endif
     tb_page_addr_t tb_start, tb_end;
@@ -1032,7 +1032,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
         /* build code bitmap */
         build_page_bitmap(p);
     }
-#if defined(TARGET_HAS_PRECISE_SMC) || !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_HAS_PRECISE_SMC)
     if (cpu != NULL) {
         env = cpu->env_ptr;
     }
@@ -1061,9 +1061,9 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
             if (current_tb_not_found) {
                 current_tb_not_found = 0;
                 current_tb = NULL;
-                if (env->mem_io_pc) {
+                if (cpu->mem_io_pc) {
                     /* now we have a real cpu fault */
-                    current_tb = tb_find_pc(env->mem_io_pc);
+                    current_tb = tb_find_pc(cpu->mem_io_pc);
                 }
             }
             if (current_tb == tb &&
@@ -1075,7 +1075,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                 restore the CPU state */
 
                 current_tb_modified = 1;
-                cpu_restore_state_from_tb(current_tb, env, env->mem_io_pc);
+                cpu_restore_state_from_tb(cpu, current_tb, cpu->mem_io_pc);
                 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                      &current_flags);
             }
@@ -1102,7 +1102,7 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
     if (!p->first_tb) {
         invalidate_page_bitmap(p);
         if (is_cpu_write_access) {
-            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
+            tlb_unprotect_code_phys(cpu, start, cpu->mem_io_vaddr);
         }
     }
 #endif
@@ -1112,8 +1112,8 @@ void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
            modifying the memory. It will ensure that it cannot modify
            itself */
         cpu->current_tb = NULL;
-        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
-        cpu_resume_from_signal(env, NULL);
+        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
+        cpu_resume_from_signal(cpu, NULL);
     }
 #endif
 }
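[Annotation] Background on the precise-SMC path (unchanged by this patch): the cflags value of 1 passed to tb_gen_code() is an instruction count in the CF_COUNT_MASK field, so the regenerated block is limited to a single guest instruction; the write that invalidated the page can then complete without the new TB being invalidated underneath itself, and cpu_resume_from_signal() re-enters the main loop.

    /* sketch only: cflags == 1 means "at most one guest insn in this TB" */
    tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);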
@@ -1194,7 +1194,7 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
                    restore the CPU state */
 
             current_tb_modified = 1;
-            cpu_restore_state_from_tb(current_tb, env, pc);
+            cpu_restore_state_from_tb(cpu, current_tb, pc);
             cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                  &current_flags);
         }
@@ -1209,11 +1209,11 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
            modifying the memory. It will ensure that it cannot modify
            itself */
         cpu->current_tb = NULL;
-        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
+        tb_gen_code(cpu, current_pc, current_cs_base, current_flags, 1);
         if (locked) {
             mmap_unlock();
         }
-        cpu_resume_from_signal(env, puc);
+        cpu_resume_from_signal(cpu, puc);
     }
 #endif
 }
@@ -1355,13 +1355,13 @@ static TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
 }
 
 #if defined(TARGET_HAS_ICE) && !defined(CONFIG_USER_ONLY)
-void tb_invalidate_phys_addr(hwaddr addr)
+void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
 {
     ram_addr_t ram_addr;
     MemoryRegion *mr;
     hwaddr l = 1;
 
-    mr = address_space_translate(&address_space_memory, addr, &addr, &l, false);
+    mr = address_space_translate(as, addr, &addr, &l, false);
     if (!(memory_region_is_ram(mr)
           || memory_region_is_romd(mr))) {
         return;
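[Annotation] Making the AddressSpace an explicit parameter removes the hard-coded dependency on the global address_space_memory; callers that want the old behaviour simply pass it in. A hedged sketch of an unchanged-behaviour call site (the real callers are in per-target breakpoint code, outside this file):

    /* sketch only: preserve the previous behaviour explicitly */
    tb_invalidate_phys_addr(&address_space_memory, addr);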
@@ -1372,16 +1372,16 @@ void tb_invalidate_phys_addr(hwaddr addr)
 }
 #endif /* TARGET_HAS_ICE && !defined(CONFIG_USER_ONLY) */
 
-void tb_check_watchpoint(CPUArchState *env)
+void tb_check_watchpoint(CPUState *cpu)
 {
     TranslationBlock *tb;
 
-    tb = tb_find_pc(env->mem_io_pc);
+    tb = tb_find_pc(cpu->mem_io_pc);
     if (!tb) {
-        cpu_abort(env, "check_watchpoint: could not find TB for pc=%p",
-                  (void *)env->mem_io_pc);
+        cpu_abort(cpu, "check_watchpoint: could not find TB for pc=%p",
+                  (void *)cpu->mem_io_pc);
     }
-    cpu_restore_state_from_tb(tb, env, env->mem_io_pc);
+    cpu_restore_state_from_tb(cpu, tb, cpu->mem_io_pc);
     tb_phys_invalidate(tb, -1);
 }
 
@@ -1389,7 +1389,6 @@ void tb_check_watchpoint(CPUArchState *env)
 /* mask must never be zero, except for A20 change call */
 static void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    CPUArchState *env = cpu->env_ptr;
     int old_mask;
 
     old_mask = cpu->interrupt_request;
@@ -1405,10 +1404,10 @@ static void tcg_handle_interrupt(CPUState *cpu, int mask)
     }
 
     if (use_icount) {
-        env->icount_decr.u16.high = 0xffff;
-        if (!can_do_io(env)
+        cpu->icount_decr.u16.high = 0xffff;
+        if (!cpu_can_do_io(cpu)
             && (mask & ~old_mask) != 0) {
-            cpu_abort(env, "Raised interrupt while not in I/O function");
+            cpu_abort(cpu, "Raised interrupt while not in I/O function");
         }
     } else {
         cpu->tcg_exit_req = 1;
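[Annotation] Moving icount_decr to CPUState keeps the trick this code relies on: the 16-bit high half shares a 32-bit counter with the low half, so writing 0xffff to u16.high drives the combined value negative and the currently executing TB stops at its next icount check. A hedged sketch of the layout (the real definition lives in the CPU headers and also handles big-endian hosts):

    /* sketch only: layout of the shared counter, little-endian ordering */
    union {
        uint32_t u32;                        /* decremented by generated code   */
        struct { uint16_t low, high; } u16;  /* high doubles as an exit request */
    } icount_decr;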
@@ -1419,8 +1418,11 @@ CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
 
 /* in deterministic execution mode, instructions doing device I/Os
    must be at the end of the TB */
-void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
+void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
 {
+#if defined(TARGET_MIPS) || defined(TARGET_SH4)
+    CPUArchState *env = cpu->env_ptr;
+#endif
     TranslationBlock *tb;
     uint32_t n, cflags;
     target_ulong pc, cs_base;
@@ -1428,14 +1430,14 @@ void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
 
     tb = tb_find_pc(retaddr);
     if (!tb) {
-        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
+        cpu_abort(cpu, "cpu_io_recompile: could not find TB for pc=%p",
                   (void *)retaddr);
     }
-    n = env->icount_decr.u16.low + tb->icount;
-    cpu_restore_state_from_tb(tb, env, retaddr);
+    n = cpu->icount_decr.u16.low + tb->icount;
+    cpu_restore_state_from_tb(cpu, tb, retaddr);
     /* Calculate how many instructions had been executed before the fault
        occurred.  */
-    n = n - env->icount_decr.u16.low;
+    n = n - cpu->icount_decr.u16.low;
     /* Generate a new TB ending on the I/O insn.  */
     n++;
     /* On MIPS and SH, delay slot instructions can only be restarted if
@@ -1445,20 +1447,20 @@ void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
 #if defined(TARGET_MIPS)
     if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
         env->active_tc.PC -= 4;
-        env->icount_decr.u16.low++;
+        cpu->icount_decr.u16.low++;
         env->hflags &= ~MIPS_HFLAG_BMASK;
     }
 #elif defined(TARGET_SH4)
     if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
             && n > 1) {
         env->pc -= 2;
-        env->icount_decr.u16.low++;
+        cpu->icount_decr.u16.low++;
         env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
     }
 #endif
     /* This should never happen.  */
     if (n > CF_COUNT_MASK) {
-        cpu_abort(env, "TB too big during recompile");
+        cpu_abort(cpu, "TB too big during recompile");
     }
 
     cflags = n | CF_LAST_IO;
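[Annotation] The icount arithmetic above can be followed with a small worked example (numbers are illustrative): suppose the faulting TB covers 5 guest instructions (tb->icount == 5), the I/O instruction is the third one, and at the moment of the fault cpu->icount_decr.u16.low is 10. Then n starts as 10 + 5 = 15; cpu_restore_state_from_tb() rewinds the counter to the start of the block and re-charges only the 2 completed instructions, leaving u16.low at 13; n - u16.low gives 2 completed instructions, and n++ makes it 3, so the regenerated block ends exactly on the I/O instruction. With CF_LAST_IO set in cflags, that instruction is translated as the final one of its TB, which is what deterministic (icount) execution requires.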
@@ -1468,27 +1470,27 @@ void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
     tb_phys_invalidate(tb, -1);
     /* FIXME: In theory this could raise an exception.  In practice
        we have already translated the block once so it's probably ok.  */
-    tb_gen_code(env, pc, cs_base, flags, cflags);
+    tb_gen_code(cpu, pc, cs_base, flags, cflags);
     /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
        the first in the TB) then we end up generating a whole new TB and
        repeating the fault, which is horribly inefficient.
        Better would be to execute just this insn uncached, or generate a
        second new TB.  */
-    cpu_resume_from_signal(env, NULL);
+    cpu_resume_from_signal(cpu, NULL);
 }
 
-void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
+void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
 {
     unsigned int i;
 
     /* Discard jump cache entries for any tb which might potentially
        overlap the flushed page.  */
     i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
-    memset(&env->tb_jmp_cache[i], 0,
+    memset(&cpu->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 
     i = tb_jmp_cache_hash_page(addr);
-    memset(&env->tb_jmp_cache[i], 0,
+    memset(&cpu->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
 }
 
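[Annotation] tb_flush_jmp_cache() clears two runs of TB_JMP_PAGE_SIZE entries rather than one because a translation block may straddle a page boundary: a TB whose pc lies on the preceding page can still contain code from the flushed page, so the hash entries for both pages are dropped. A hedged usage sketch (the real caller is the TLB page-flush path, outside this file):

    /* sketch only: drop cached TB pointers when a guest page is flushed */
    tb_flush_jmp_cache(cpu, addr);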
@@ -1775,7 +1777,6 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
                     return -1;
                 }
             }
-            return 0;
         }
     }
     return 0;