X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=translate-all.c;h=4a9ee33dac60481a5729d696aac3d37da887ca43;hb=28d54e58fd21e3176a2825c824a5921215835a3c;hp=b6b0e1c098a96ff160f4917f2447576f7d754f5f;hpb=cb4e0f9ddf7d45de7e4716cbab661ea568bd0b6c;p=mirror_qemu.git

diff --git a/translate-all.c b/translate-all.c
index b6b0e1c098..4a9ee33dac 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -58,6 +58,7 @@
 #endif
 
 #include "exec/cputlb.h"
+#include "exec/tb-hash.h"
 #include "translate-all.h"
 #include "qemu/bitmap.h"
 #include "qemu/timer.h"
@@ -116,17 +117,48 @@ typedef struct PageDesc {
 #define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
 
-uintptr_t qemu_real_host_page_size;
 uintptr_t qemu_host_page_size;
 uintptr_t qemu_host_page_mask;
 
-/* This is a multi-level map on the virtual address space.
-   The bottom level has pointers to PageDesc. */
+/* The bottom level has pointers to PageDesc */
 static void *l1_map[V_L1_SIZE];
 
 /* code generation context */
 TCGContext tcg_ctx;
 
+/* translation block context */
+#ifdef CONFIG_USER_ONLY
+__thread int have_tb_lock;
+#endif
+
+void tb_lock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(!have_tb_lock);
+    qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
+    have_tb_lock++;
+#endif
+}
+
+void tb_unlock(void)
+{
+#ifdef CONFIG_USER_ONLY
+    assert(have_tb_lock);
+    have_tb_lock--;
+    qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+#endif
+}
+
+void tb_lock_reset(void)
+{
+#ifdef CONFIG_USER_ONLY
+    if (have_tb_lock) {
+        qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
+        have_tb_lock = 0;
+    }
+#endif
+}
+
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2);
 static TranslationBlock *tb_find_pc(uintptr_t tc_ptr);
 
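Note on the tb_lock()/tb_unlock()/tb_lock_reset() trio added above: the __thread have_tb_lock counter gives each thread a private record of whether it owns tb_lock, so recursive locking trips an assertion immediately, and an exception path (e.g. after a longjmp out of the translator) can release the lock only if it is actually held. In this patch the lock is only taken for user-mode emulation; the softmmu build compiles the bodies away. Below is a minimal standalone sketch of the same ownership-flag pattern, using plain pthreads instead of QEMU's qemu_mutex wrappers; all names are illustrative, not QEMU API.

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static __thread int have_lock;   /* per-thread: do I hold the mutex? */

    static void lock_acquire(void)
    {
        assert(!have_lock);          /* catches recursive locking at once */
        pthread_mutex_lock(&lock);
        have_lock++;
    }

    static void lock_release(void)
    {
        assert(have_lock);           /* catches unlocking when not held */
        have_lock--;
        pthread_mutex_unlock(&lock);
    }

    /* Error-path cleanup: safe to call whether or not the lock is held,
     * which is exactly what a longjmp-style unwind needs. */
    static void lock_reset(void)
    {
        if (have_lock) {
            have_lock = 0;
            pthread_mutex_unlock(&lock);
        }
    }

    int main(void)
    {
        lock_acquire();
        lock_release();              /* normal paired release */
        lock_acquire();
        lock_reset();                /* releases: this thread held the lock */
        lock_reset();                /* no-op: the lock is not held */
        printf("ok\n");
        return 0;
    }

The counter doubles as documentation: a plain pthread_mutex cannot tell you whether the *current* thread holds it, so the flag is the cheapest way to make both the assertion and the conditional cleanup possible.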
@@ -137,11 +169,13 @@ void cpu_gen_init(void)
 }
 
 /* return non zero if the very first instruction is invalid so that
-   the virtual CPU can trigger an exception.
-
-   '*gen_code_size_ptr' contains the size of the generated code (host
-   code).
-*/
+ * the virtual CPU can trigger an exception.
+ *
+ * '*gen_code_size_ptr' contains the size of the generated code (host
+ * code).
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 int cpu_gen_code(CPUArchState *env, TranslationBlock *tb, int *gen_code_size_ptr)
 {
     TCGContext *s = &tcg_ctx;
@@ -220,6 +254,7 @@ static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
     gen_intermediate_code_pc(env, tb);
 
     if (tb->cflags & CF_USE_ICOUNT) {
+        assert(use_icount);
         /* Reset the cycle counter to the start of the block.  */
         cpu->icount_decr.u16.low += tb->icount;
         /* Clear the IO flag.  */
@@ -306,6 +341,7 @@ void page_size_init(void)
     /* NOTE: we can always suppose that qemu_host_page_size >=
        TARGET_PAGE_SIZE */
     qemu_real_host_page_size = getpagesize();
+    qemu_real_host_page_mask = ~(qemu_real_host_page_size - 1);
     if (qemu_host_page_size == 0) {
         qemu_host_page_size = qemu_real_host_page_size;
     }
@@ -384,6 +420,9 @@ static void page_init(void)
 #endif
 }
 
+/* If alloc=1:
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 {
     PageDesc *pd;
@@ -395,26 +434,26 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
 
     /* Level 2..N-1.  */
     for (i = V_L1_SHIFT / V_L2_BITS - 1; i > 0; i--) {
-        void **p = *lp;
+        void **p = atomic_rcu_read(lp);
 
         if (p == NULL) {
             if (!alloc) {
                 return NULL;
             }
             p = g_new0(void *, V_L2_SIZE);
-            *lp = p;
+            atomic_rcu_set(lp, p);
         }
 
         lp = p + ((index >> (i * V_L2_BITS)) & (V_L2_SIZE - 1));
     }
 
-    pd = *lp;
+    pd = atomic_rcu_read(lp);
     if (pd == NULL) {
         if (!alloc) {
             return NULL;
         }
         pd = g_new0(PageDesc, V_L2_SIZE);
-        *lp = pd;
+        atomic_rcu_set(lp, pd);
     }
 
     return pd + (index & (V_L2_SIZE - 1));
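The atomic_rcu_read()/atomic_rcu_set() conversion in page_find_alloc() above makes the radix-tree walk safe for lock-free readers: a new level is fully zero-initialized before its pointer is published, so any reader that observes the pointer also observes a consistent array behind it, while writers stay serialized by mmap_lock per the comment the patch adds. A rough standalone equivalent for a single level, written with C11 atomics; acquire/release is used here as a conservative stand-in for QEMU's lighter-weight publish/consume barriers, and all names are illustrative.

    #include <stdatomic.h>
    #include <stdlib.h>

    #define V_L2_SIZE 512            /* size chosen for illustration */

    /* One level of the map: an array of atomically published pointers. */
    static _Atomic(void *) level[V_L2_SIZE];

    /* Reader side: may run with no lock at all.  The acquire load pairs
     * with the release store below, so a reader that sees the pointer
     * also sees the zeroed array it points to. */
    static void *slot_lookup(size_t idx)
    {
        return atomic_load_explicit(&level[idx], memory_order_acquire);
    }

    /* Writer side: called with the allocation lock held (mmap_lock in
     * the patch), so at most one thread publishes a given slot. */
    static void *slot_lookup_alloc(size_t idx)
    {
        void *p = atomic_load_explicit(&level[idx], memory_order_acquire);

        if (p == NULL) {
            p = calloc(V_L2_SIZE, sizeof(void *)); /* initialize first... */
            atomic_store_explicit(&level[idx], p,
                                  memory_order_release); /* ...then publish */
        }
        return p;
    }

    int main(void)
    {
        void *p = slot_lookup_alloc(3);
        return slot_lookup(3) == p ? 0 : 1;
    }

The ordering is the whole point: swapping the calloc() and the store would let a concurrent reader walk into an uninitialized level.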
@@ -425,11 +464,6 @@ static inline PageDesc *page_find(tb_page_addr_t index)
     return page_find_alloc(index, 0);
 }
 
-#if !defined(CONFIG_USER_ONLY)
-#define mmap_lock() do { } while (0)
-#define mmap_unlock() do { } while (0)
-#endif
-
 #if defined(CONFIG_USER_ONLY)
 /* Currently it is not recommended to allocate big chunks of data in
    user mode. It will change when a dedicated libc will be used. */
@@ -672,6 +706,7 @@ static inline void code_gen_alloc(size_t tb_size)
             CODE_GEN_AVG_BLOCK_SIZE;
     tcg_ctx.tb_ctx.tbs =
             g_malloc(tcg_ctx.code_gen_max_blocks * sizeof(TranslationBlock));
+    qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
 }
 
 /* Must be called before using the QEMU cpus. 'tb_size' is the size
@@ -684,7 +719,7 @@ void tcg_exec_init(unsigned long tb_size)
     tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
     tcg_register_jit(tcg_ctx.code_gen_buffer, tcg_ctx.code_gen_buffer_size);
     page_init();
-#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
+#if defined(CONFIG_SOFTMMU)
     /* There's no guest base to take into account, so go ahead and
        initialize the prologue now. */
     tcg_prologue_init(&tcg_ctx);
@@ -727,10 +762,8 @@ void tb_free(TranslationBlock *tb)
 
 static inline void invalidate_page_bitmap(PageDesc *p)
 {
-    if (p->code_bitmap) {
-        g_free(p->code_bitmap);
-        p->code_bitmap = NULL;
-    }
+    g_free(p->code_bitmap);
+    p->code_bitmap = NULL;
     p->code_write_count = 0;
 }
 
@@ -769,10 +802,8 @@ static void page_flush_tb(void)
 
 /* flush all the translation blocks */
 /* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUArchState *env1)
+void tb_flush(CPUState *cpu)
 {
-    CPUState *cpu = ENV_GET_CPU(env1);
-
 #if defined(DEBUG_FLUSH)
     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
            (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
@@ -994,6 +1025,7 @@ static void build_page_bitmap(PageDesc *p)
     }
 }
 
+/* Called with mmap_lock held for user mode emulation. */
 TranslationBlock *tb_gen_code(CPUState *cpu,
                               target_ulong pc, target_ulong cs_base,
                               int flags, int cflags)
@@ -1011,7 +1043,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb = tb_alloc(pc);
     if (!tb) {
         /* flush must be done */
-        tb_flush(env);
+        tb_flush(cpu);
         /* cannot fail at this point */
         tb = tb_alloc(pc);
         /* Don't forget to invalidate previous TB info.  */
@@ -1041,6 +1073,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation
  */
 void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
 {
@@ -1057,6 +1091,8 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end)
  * 'is_cpu_write_access' should be true if called from a real cpu write
  * access: the virtual CPU will exit the current TB if code is modified inside
  * this TB.
+ *
+ * Called with mmap_lock held for user-mode emulation
  */
 void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                    int is_cpu_write_access)
@@ -1205,6 +1241,7 @@ void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
 }
 
 #if !defined(CONFIG_SOFTMMU)
+/* Called with mmap_lock held. */
 static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                     uintptr_t pc, void *puc,
                                     bool locked)
@@ -1274,7 +1311,10 @@ static void tb_invalidate_phys_page(tb_page_addr_t addr,
 }
 #endif
 
-/* add the tb in the target page and protect it if necessary */
+/* add the tb in the target page and protect it if necessary
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static inline void tb_alloc_page(TranslationBlock *tb,
                                  unsigned int n, tb_page_addr_t page_addr)
 {
@@ -1330,16 +1370,16 @@ static inline void tb_alloc_page(TranslationBlock *tb,
 }
 
 /* add a new TB and link it to the physical page tables. phys_page2 is
-   (-1) to indicate that only one page contains the TB. */
+ * (-1) to indicate that only one page contains the TB.
+ *
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                          tb_page_addr_t phys_page2)
 {
     unsigned int h;
     TranslationBlock **ptb;
 
-    /* Grab the mmap lock to stop another thread invalidating this TB
-       before we are done.  */
-    mmap_lock();
     /* add in the physical hash table */
     h = tb_phys_hash_func(phys_pc);
     ptb = &tcg_ctx.tb_ctx.tb_phys_hash[h];
@@ -1369,7 +1409,6 @@ static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
 #ifdef DEBUG_TB_CHECK
     tb_page_check();
 #endif
-    mmap_unlock();
 }
 
 /* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
@@ -1450,36 +1489,6 @@ void tb_check_watchpoint(CPUState *cpu)
 }
 
 #ifndef CONFIG_USER_ONLY
-/* mask must never be zero, except for A20 change call */
-static void tcg_handle_interrupt(CPUState *cpu, int mask)
-{
-    int old_mask;
-
-    old_mask = cpu->interrupt_request;
-    cpu->interrupt_request |= mask;
-
-    /*
-     * If called from iothread context, wake the target cpu in
-     * case its halted.
-     */
-    if (!qemu_cpu_is_self(cpu)) {
-        qemu_cpu_kick(cpu);
-        return;
-    }
-
-    if (use_icount) {
-        cpu->icount_decr.u16.high = 0xffff;
-        if (!cpu_can_do_io(cpu)
-            && (mask & ~old_mask) != 0) {
-            cpu_abort(cpu, "Raised interrupt while not in I/O function");
-        }
-    } else {
-        cpu->tcg_exit_req = 1;
-    }
-}
-
-CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
-
 /* in deterministic execution mode, instructions doing device I/Os
    must be at the end of the TB */
 void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
@@ -1532,6 +1541,14 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cs_base = tb->cs_base;
     flags = tb->flags;
     tb_phys_invalidate(tb, -1);
+    if (tb->cflags & CF_NOCACHE) {
+        if (tb->orig_tb) {
+            /* Invalidate original TB if this TB was generated in
+             * cpu_exec_nocache() */
+            tb_phys_invalidate(tb->orig_tb, -1);
+        }
+        tb_free(tb);
+    }
     /* FIXME: In theory this could raise an exception.  In practice
        we have already translated the block once so it's probably ok.  */
     tb_gen_code(cpu, pc, cs_base, flags, cflags);
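The final hunk teaches cpu_io_recompile() about one-shot blocks: per its own comment, a TB generated by cpu_exec_nocache() carries CF_NOCACHE plus an orig_tb back-pointer to the cached block it shadows, and both must be retired together so the stale cached translation cannot be reused. The control flow, reduced to a standalone sketch: the struct keeps only the two fields the hunk touches, the CF_NOCACHE value is assumed for illustration, and the helpers are printing stubs rather than the real QEMU functions.

    #include <stdio.h>

    #define CF_NOCACHE 0x10000   /* flag value assumed for illustration */

    typedef struct TranslationBlock {
        unsigned cflags;                    /* compile flags, CF_* bits */
        struct TranslationBlock *orig_tb;   /* set only for nocache TBs */
    } TranslationBlock;

    /* Stand-ins for the real helpers touched by the hunk. */
    static void tb_phys_invalidate(TranslationBlock *tb)
    {
        printf("invalidate %p\n", (void *)tb);
    }

    static void tb_free(TranslationBlock *tb)
    {
        printf("free %p\n", (void *)tb);
    }

    /* Mirrors the added logic: retire the faulting TB and, if it was a
     * one-shot clone, also retire the cached original and release the
     * clone itself, since it will never be looked up again. */
    static void retire_tb(TranslationBlock *tb)
    {
        tb_phys_invalidate(tb);
        if (tb->cflags & CF_NOCACHE) {
            if (tb->orig_tb) {
                tb_phys_invalidate(tb->orig_tb);
            }
            tb_free(tb);
        }
    }

    int main(void)
    {
        TranslationBlock cached = { 0, NULL };
        TranslationBlock oneshot = { CF_NOCACHE, &cached };

        retire_tb(&oneshot);   /* drops both the clone and the original */
        return 0;
    }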