X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=exec.c;h=aec65c506372a70f6ec3e3c18e147ab8e9b73d52;hb=02d26729ea39fdca8385fbaf96cef904b2935891;hp=df6793812fe9af3668b2ba4628e4521e9bf692b7;hpb=98c8a73b2e82eecac359b0b55a2d9d69f0a916ff;p=qemu.git diff --git a/exec.c b/exec.c index df6793812..aec65c506 100644 --- a/exec.c +++ b/exec.c @@ -1,5 +1,5 @@ /* - * virtual page mapping and translated block handling + * Virtual page mapping * * Copyright (c) 2003 Fabrice Bellard * @@ -29,80 +29,42 @@ #include "tcg.h" #include "hw/hw.h" #include "hw/qdev.h" -#include "osdep.h" -#include "kvm.h" -#include "hw/xen.h" -#include "qemu-timer.h" -#include "memory.h" -#include "exec-memory.h" +#include "qemu/osdep.h" +#include "sysemu/kvm.h" +#include "hw/xen/xen.h" +#include "qemu/timer.h" +#include "qemu/config-file.h" +#include "exec/memory.h" +#include "sysemu/dma.h" +#include "exec/address-spaces.h" #if defined(CONFIG_USER_ONLY) #include -#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) -#include -#if __FreeBSD_version >= 700104 -#define HAVE_KINFO_GETVMMAP -#define sigqueue sigqueue_freebsd /* avoid redefinition */ -#include -#include -#include -#define _KERNEL -#include -#undef _KERNEL -#undef sigqueue -#include -#endif -#endif #else /* !CONFIG_USER_ONLY */ -#include "xen-mapcache.h" +#include "sysemu/xen-mapcache.h" #include "trace.h" #endif +#include "exec/cpu-all.h" -#include "cputlb.h" +#include "exec/cputlb.h" +#include "translate-all.h" -#include "memory-internal.h" +#include "exec/memory-internal.h" -//#define DEBUG_TB_INVALIDATE -//#define DEBUG_FLUSH //#define DEBUG_UNASSIGNED - -/* make various TB consistency checks */ -//#define DEBUG_TB_CHECK - -//#define DEBUG_IOPORT //#define DEBUG_SUBPAGE -#if !defined(CONFIG_USER_ONLY) -/* TB consistency checks only implemented for usermode emulation. */ -#undef DEBUG_TB_CHECK -#endif - -#define SMC_BITMAP_USE_THRESHOLD 10 - -static TranslationBlock *tbs; -static int code_gen_max_blocks; -TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; -static int nb_tbs; -/* any access to the tbs or the page table must use this lock */ -spinlock_t tb_lock = SPIN_LOCK_UNLOCKED; - -uint8_t *code_gen_prologue; -static uint8_t *code_gen_buffer; -static size_t code_gen_buffer_size; -/* threshold to flush the translated code buffer */ -static size_t code_gen_buffer_max_size; -static uint8_t *code_gen_ptr; - #if !defined(CONFIG_USER_ONLY) int phys_ram_fd; static int in_migration; -RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) }; +RAMList ram_list = { .blocks = QTAILQ_HEAD_INITIALIZER(ram_list.blocks) }; static MemoryRegion *system_memory; static MemoryRegion *system_io; AddressSpace address_space_io; AddressSpace address_space_memory; +DMAContext dma_context_memory; MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty; static MemoryRegion io_mem_subpage_ram; @@ -116,60 +78,7 @@ DEFINE_TLS(CPUArchState *,cpu_single_env); /* 0 = Do not count executed instructions. 1 = Precise instruction counting. 2 = Adaptive rate instruction counting. 
*/ -int use_icount = 0; - -typedef struct PageDesc { - /* list of TBs intersecting this ram page */ - TranslationBlock *first_tb; - /* in order to optimize self modifying code, we count the number - of lookups we do to a given page to use a bitmap */ - unsigned int code_write_count; - uint8_t *code_bitmap; -#if defined(CONFIG_USER_ONLY) - unsigned long flags; -#endif -} PageDesc; - -/* In system mode we want L1_MAP to be based on ram offsets, - while in user mode we want it to be based on virtual addresses. */ -#if !defined(CONFIG_USER_ONLY) -#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS -# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS -#else -# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS -#endif -#else -# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS -#endif - -/* Size of the L2 (and L3, etc) page tables. */ -#define L2_BITS 10 -#define L2_SIZE (1 << L2_BITS) - -#define P_L2_LEVELS \ - (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1) - -/* The bits remaining after N lower levels of page tables. */ -#define V_L1_BITS_REM \ - ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS) - -#if V_L1_BITS_REM < 4 -#define V_L1_BITS (V_L1_BITS_REM + L2_BITS) -#else -#define V_L1_BITS V_L1_BITS_REM -#endif - -#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS) - -#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS) - -uintptr_t qemu_real_host_page_size; -uintptr_t qemu_host_page_size; -uintptr_t qemu_host_page_mask; - -/* This is a multi-level map on the virtual address space. - The bottom level has pointers to PageDesc. */ -static void *l1_map[V_L1_SIZE]; +int use_icount; #if !defined(CONFIG_USER_ONLY) @@ -188,181 +97,11 @@ static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc; static void io_mem_init(void); static void memory_map_init(void); +static void *qemu_safe_ram_ptr(ram_addr_t addr); static MemoryRegion io_mem_watch; #endif -/* statistics */ -static int tb_flush_count; -static int tb_phys_invalidate_count; - -#ifdef _WIN32 -static inline void map_exec(void *addr, long size) -{ - DWORD old_protect; - VirtualProtect(addr, size, - PAGE_EXECUTE_READWRITE, &old_protect); - -} -#else -static inline void map_exec(void *addr, long size) -{ - unsigned long start, end, page_size; - - page_size = getpagesize(); - start = (unsigned long)addr; - start &= ~(page_size - 1); - - end = (unsigned long)addr + size; - end += page_size - 1; - end &= ~(page_size - 1); - - mprotect((void *)start, end - start, - PROT_READ | PROT_WRITE | PROT_EXEC); -} -#endif - -static void page_init(void) -{ - /* NOTE: we can always suppose that qemu_host_page_size >= - TARGET_PAGE_SIZE */ -#ifdef _WIN32 - { - SYSTEM_INFO system_info; - - GetSystemInfo(&system_info); - qemu_real_host_page_size = system_info.dwPageSize; - } -#else - qemu_real_host_page_size = getpagesize(); -#endif - if (qemu_host_page_size == 0) - qemu_host_page_size = qemu_real_host_page_size; - if (qemu_host_page_size < TARGET_PAGE_SIZE) - qemu_host_page_size = TARGET_PAGE_SIZE; - qemu_host_page_mask = ~(qemu_host_page_size - 1); - -#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) - { -#ifdef HAVE_KINFO_GETVMMAP - struct kinfo_vmentry *freep; - int i, cnt; - - freep = kinfo_getvmmap(getpid(), &cnt); - if (freep) { - mmap_lock(); - for (i = 0; i < cnt; i++) { - unsigned long startaddr, endaddr; - - startaddr = freep[i].kve_start; - endaddr = freep[i].kve_end; - if (h2g_valid(startaddr)) { - startaddr = h2g(startaddr) & TARGET_PAGE_MASK; - - if (h2g_valid(endaddr)) { - endaddr = 
h2g(endaddr); - page_set_flags(startaddr, endaddr, PAGE_RESERVED); - } else { -#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS - endaddr = ~0ul; - page_set_flags(startaddr, endaddr, PAGE_RESERVED); -#endif - } - } - } - free(freep); - mmap_unlock(); - } -#else - FILE *f; - - last_brk = (unsigned long)sbrk(0); - - f = fopen("/compat/linux/proc/self/maps", "r"); - if (f) { - mmap_lock(); - - do { - unsigned long startaddr, endaddr; - int n; - - n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr); - - if (n == 2 && h2g_valid(startaddr)) { - startaddr = h2g(startaddr) & TARGET_PAGE_MASK; - - if (h2g_valid(endaddr)) { - endaddr = h2g(endaddr); - } else { - endaddr = ~0ul; - } - page_set_flags(startaddr, endaddr, PAGE_RESERVED); - } - } while (!feof(f)); - - fclose(f); - mmap_unlock(); - } -#endif - } -#endif -} - -static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc) -{ - PageDesc *pd; - void **lp; - int i; - -#if defined(CONFIG_USER_ONLY) - /* We can't use g_malloc because it may recurse into a locked mutex. */ -# define ALLOC(P, SIZE) \ - do { \ - P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE, \ - MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); \ - } while (0) -#else -# define ALLOC(P, SIZE) \ - do { P = g_malloc0(SIZE); } while (0) -#endif - - /* Level 1. Always allocated. */ - lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1)); - - /* Level 2..N-1. */ - for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) { - void **p = *lp; - - if (p == NULL) { - if (!alloc) { - return NULL; - } - ALLOC(p, sizeof(void *) * L2_SIZE); - *lp = p; - } - - lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1)); - } - - pd = *lp; - if (pd == NULL) { - if (!alloc) { - return NULL; - } - ALLOC(pd, sizeof(PageDesc) * L2_SIZE); - *lp = pd; - } - -#undef ALLOC - - return pd + (index & (L2_SIZE - 1)); -} - -static inline PageDesc *page_find(tb_page_addr_t index) -{ - return page_find_alloc(index, 0); -} - #if !defined(CONFIG_USER_ONLY) static void phys_map_node_reserve(unsigned nodes) @@ -469,193 +208,27 @@ bool memory_region_is_unassigned(MemoryRegion *mr) && mr != &io_mem_notdirty && !mr->rom_device && mr != &io_mem_watch; } - -#define mmap_lock() do { } while(0) -#define mmap_unlock() do { } while(0) #endif -#if defined(CONFIG_USER_ONLY) -/* Currently it is not recommended to allocate big chunks of data in - user mode. It will change when a dedicated libc will be used. */ -/* ??? 64-bit hosts ought to have no problem mmaping data outside the - region in which the guest needs to run. Revisit this. */ -#define USE_STATIC_CODE_GEN_BUFFER -#endif - -/* ??? Should configure for this, not list operating systems here. */ -#if (defined(__linux__) \ - || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \ - || defined(__DragonFly__) || defined(__OpenBSD__) \ - || defined(__NetBSD__)) -# define USE_MMAP -#endif - -/* Minimum size of the code gen buffer. This number is randomly chosen, - but not so small that we can't have a fair number of TB's live. */ -#define MIN_CODE_GEN_BUFFER_SIZE (1024u * 1024) - -/* Maximum size of the code gen buffer we'd like to use. Unless otherwise - indicated, this is constrained by the range of direct branches on the - host cpu, as used by the TCG implementation of goto_tb. 
*/ -#if defined(__x86_64__) -# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) -#elif defined(__sparc__) -# define MAX_CODE_GEN_BUFFER_SIZE (2ul * 1024 * 1024 * 1024) -#elif defined(__arm__) -# define MAX_CODE_GEN_BUFFER_SIZE (16u * 1024 * 1024) -#elif defined(__s390x__) - /* We have a +- 4GB range on the branches; leave some slop. */ -# define MAX_CODE_GEN_BUFFER_SIZE (3ul * 1024 * 1024 * 1024) -#else -# define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1) -#endif - -#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024) - -#define DEFAULT_CODE_GEN_BUFFER_SIZE \ - (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \ - ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE) - -static inline size_t size_code_gen_buffer(size_t tb_size) -{ - /* Size the buffer. */ - if (tb_size == 0) { -#ifdef USE_STATIC_CODE_GEN_BUFFER - tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE; -#else - /* ??? Needs adjustments. */ - /* ??? If we relax the requirement that CONFIG_USER_ONLY use the - static buffer, we could size this on RESERVED_VA, on the text - segment size of the executable, or continue to use the default. */ - tb_size = (unsigned long)(ram_size / 4); -#endif - } - if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) { - tb_size = MIN_CODE_GEN_BUFFER_SIZE; - } - if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) { - tb_size = MAX_CODE_GEN_BUFFER_SIZE; - } - code_gen_buffer_size = tb_size; - return tb_size; -} - -#ifdef USE_STATIC_CODE_GEN_BUFFER -static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE] - __attribute__((aligned(CODE_GEN_ALIGN))); - -static inline void *alloc_code_gen_buffer(void) -{ - map_exec(static_code_gen_buffer, code_gen_buffer_size); - return static_code_gen_buffer; -} -#elif defined(USE_MMAP) -static inline void *alloc_code_gen_buffer(void) -{ - int flags = MAP_PRIVATE | MAP_ANONYMOUS; - uintptr_t start = 0; - void *buf; - - /* Constrain the position of the buffer based on the host cpu. - Note that these addresses are chosen in concert with the - addresses assigned in the relevant linker script file. */ -# if defined(__PIE__) || defined(__PIC__) - /* Don't bother setting a preferred location if we're building - a position-independent executable. We're more likely to get - an address near the main executable if we let the kernel - choose the address. */ -# elif defined(__x86_64__) && defined(MAP_32BIT) - /* Force the memory down into low memory with the executable. - Leave the choice of exact location with the kernel. */ - flags |= MAP_32BIT; - /* Cannot expect to map more than 800MB in low memory. */ - if (code_gen_buffer_size > 800u * 1024 * 1024) { - code_gen_buffer_size = 800u * 1024 * 1024; - } -# elif defined(__sparc__) - start = 0x40000000ul; -# elif defined(__s390x__) - start = 0x90000000ul; -# endif - - buf = mmap((void *)start, code_gen_buffer_size, - PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0); - return buf == MAP_FAILED ? NULL : buf; -} -#else -static inline void *alloc_code_gen_buffer(void) -{ - void *buf = g_malloc(code_gen_buffer_size); - if (buf) { - map_exec(buf, code_gen_buffer_size); - } - return buf; -} -#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */ - -static inline void code_gen_alloc(size_t tb_size) -{ - code_gen_buffer_size = size_code_gen_buffer(tb_size); - code_gen_buffer = alloc_code_gen_buffer(); - if (code_gen_buffer == NULL) { - fprintf(stderr, "Could not allocate dynamic translator buffer\n"); - exit(1); - } - - /* Steal room for the prologue at the end of the buffer. 
This ensures - (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches - from TB's to the prologue are going to be in range. It also means - that we don't need to mark (additional) portions of the data segment - as executable. */ - code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024; - code_gen_buffer_size -= 1024; - - code_gen_buffer_max_size = code_gen_buffer_size - - (TCG_MAX_OP_SIZE * OPC_BUF_SIZE); - code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE; - tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock)); -} - -/* Must be called before using the QEMU cpus. 'tb_size' is the size - (in bytes) allocated to the translation buffer. Zero means default - size. */ -void tcg_exec_init(unsigned long tb_size) -{ - cpu_gen_init(); - code_gen_alloc(tb_size); - code_gen_ptr = code_gen_buffer; - tcg_register_jit(code_gen_buffer, code_gen_buffer_size); - page_init(); -#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE) - /* There's no guest base to take into account, so go ahead and - initialize the prologue now. */ - tcg_prologue_init(&tcg_ctx); -#endif -} - -bool tcg_enabled(void) -{ - return code_gen_buffer != NULL; -} - void cpu_exec_init_all(void) { #if !defined(CONFIG_USER_ONLY) + qemu_mutex_init(&ram_list.mutex); memory_map_init(); io_mem_init(); #endif } -#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) +#if !defined(CONFIG_USER_ONLY) static int cpu_common_post_load(void *opaque, int version_id) { - CPUArchState *env = opaque; + CPUState *cpu = opaque; /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the version_id is increased. */ - env->interrupt_request &= ~0x01; - tlb_flush(env, 1); + cpu->interrupt_request &= ~0x01; + tlb_flush(cpu->env_ptr, 1); return 0; } @@ -664,809 +237,81 @@ static const VMStateDescription vmstate_cpu_common = { .name = "cpu_common", .version_id = 1, .minimum_version_id = 1, - .minimum_version_id_old = 1, - .post_load = cpu_common_post_load, - .fields = (VMStateField []) { - VMSTATE_UINT32(halted, CPUArchState), - VMSTATE_UINT32(interrupt_request, CPUArchState), - VMSTATE_END_OF_LIST() - } -}; -#endif - -CPUArchState *qemu_get_cpu(int cpu) -{ - CPUArchState *env = first_cpu; - - while (env) { - if (env->cpu_index == cpu) - break; - env = env->next_cpu; - } - - return env; -} - -void cpu_exec_init(CPUArchState *env) -{ -#ifndef CONFIG_USER_ONLY - CPUState *cpu = ENV_GET_CPU(env); -#endif - CPUArchState **penv; - int cpu_index; - -#if defined(CONFIG_USER_ONLY) - cpu_list_lock(); -#endif - env->next_cpu = NULL; - penv = &first_cpu; - cpu_index = 0; - while (*penv != NULL) { - penv = &(*penv)->next_cpu; - cpu_index++; - } - env->cpu_index = cpu_index; - env->numa_node = 0; - QTAILQ_INIT(&env->breakpoints); - QTAILQ_INIT(&env->watchpoints); -#ifndef CONFIG_USER_ONLY - cpu->thread_id = qemu_get_thread_id(); -#endif - *penv = env; -#if defined(CONFIG_USER_ONLY) - cpu_list_unlock(); -#endif -#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) - vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env); - register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION, - cpu_save, cpu_load, env); -#endif -} - -/* Allocate a new translation block. Flush the translation buffer if - too many translation blocks or too much generated code. 
*/ -static TranslationBlock *tb_alloc(target_ulong pc) -{ - TranslationBlock *tb; - - if (nb_tbs >= code_gen_max_blocks || - (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size) - return NULL; - tb = &tbs[nb_tbs++]; - tb->pc = pc; - tb->cflags = 0; - return tb; -} - -void tb_free(TranslationBlock *tb) -{ - /* In practice this is mostly used for single use temporary TB - Ignore the hard cases and just back up if this TB happens to - be the last one generated. */ - if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) { - code_gen_ptr = tb->tc_ptr; - nb_tbs--; - } -} - -static inline void invalidate_page_bitmap(PageDesc *p) -{ - if (p->code_bitmap) { - g_free(p->code_bitmap); - p->code_bitmap = NULL; - } - p->code_write_count = 0; -} - -/* Set to NULL all the 'first_tb' fields in all PageDescs. */ - -static void page_flush_tb_1 (int level, void **lp) -{ - int i; - - if (*lp == NULL) { - return; - } - if (level == 0) { - PageDesc *pd = *lp; - for (i = 0; i < L2_SIZE; ++i) { - pd[i].first_tb = NULL; - invalidate_page_bitmap(pd + i); - } - } else { - void **pp = *lp; - for (i = 0; i < L2_SIZE; ++i) { - page_flush_tb_1 (level - 1, pp + i); - } - } -} - -static void page_flush_tb(void) -{ - int i; - for (i = 0; i < V_L1_SIZE; i++) { - page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i); - } -} - -/* flush all the translation blocks */ -/* XXX: tb_flush is currently not thread safe */ -void tb_flush(CPUArchState *env1) -{ - CPUArchState *env; -#if defined(DEBUG_FLUSH) - printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n", - (unsigned long)(code_gen_ptr - code_gen_buffer), - nb_tbs, nb_tbs > 0 ? - ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0); -#endif - if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size) - cpu_abort(env1, "Internal error: code buffer overflow\n"); - - nb_tbs = 0; - - for(env = first_cpu; env != NULL; env = env->next_cpu) { - memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *)); - } - - memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *)); - page_flush_tb(); - - code_gen_ptr = code_gen_buffer; - /* XXX: flush processor icache at this point if cache flush is - expensive */ - tb_flush_count++; -} - -#ifdef DEBUG_TB_CHECK - -static void tb_invalidate_check(target_ulong address) -{ - TranslationBlock *tb; - int i; - address &= TARGET_PAGE_MASK; - for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) { - for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { - if (!(address + TARGET_PAGE_SIZE <= tb->pc || - address >= tb->pc + tb->size)) { - printf("ERROR invalidate: address=" TARGET_FMT_lx - " PC=%08lx size=%04x\n", - address, (long)tb->pc, tb->size); - } - } - } -} - -/* verify that all the pages have correct rights for code */ -static void tb_page_check(void) -{ - TranslationBlock *tb; - int i, flags1, flags2; - - for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) { - for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) { - flags1 = page_get_flags(tb->pc); - flags2 = page_get_flags(tb->pc + tb->size - 1); - if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) { - printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n", - (long)tb->pc, tb->size, flags1, flags2); - } - } - } -} - -#endif - -/* invalidate one TB */ -static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb, - int next_offset) -{ - TranslationBlock *tb1; - for(;;) { - tb1 = *ptb; - if (tb1 == tb) { - *ptb = *(TranslationBlock **)((char *)tb1 + next_offset); - break; - } - ptb = (TranslationBlock **)((char *)tb1 + 
next_offset); - } -} - -static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb) -{ - TranslationBlock *tb1; - unsigned int n1; - - for(;;) { - tb1 = *ptb; - n1 = (uintptr_t)tb1 & 3; - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - if (tb1 == tb) { - *ptb = tb1->page_next[n1]; - break; - } - ptb = &tb1->page_next[n1]; - } -} - -static inline void tb_jmp_remove(TranslationBlock *tb, int n) -{ - TranslationBlock *tb1, **ptb; - unsigned int n1; - - ptb = &tb->jmp_next[n]; - tb1 = *ptb; - if (tb1) { - /* find tb(n) in circular list */ - for(;;) { - tb1 = *ptb; - n1 = (uintptr_t)tb1 & 3; - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - if (n1 == n && tb1 == tb) - break; - if (n1 == 2) { - ptb = &tb1->jmp_first; - } else { - ptb = &tb1->jmp_next[n1]; - } - } - /* now we can suppress tb(n) from the list */ - *ptb = tb->jmp_next[n]; - - tb->jmp_next[n] = NULL; - } -} - -/* reset the jump entry 'n' of a TB so that it is not chained to - another TB */ -static inline void tb_reset_jump(TranslationBlock *tb, int n) -{ - tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n])); -} - -void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr) -{ - CPUArchState *env; - PageDesc *p; - unsigned int h, n1; - tb_page_addr_t phys_pc; - TranslationBlock *tb1, *tb2; - - /* remove the TB from the hash list */ - phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); - h = tb_phys_hash_func(phys_pc); - tb_remove(&tb_phys_hash[h], tb, - offsetof(TranslationBlock, phys_hash_next)); - - /* remove the TB from the page list */ - if (tb->page_addr[0] != page_addr) { - p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS); - tb_page_remove(&p->first_tb, tb); - invalidate_page_bitmap(p); - } - if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) { - p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS); - tb_page_remove(&p->first_tb, tb); - invalidate_page_bitmap(p); - } - - tb_invalidated_flag = 1; - - /* remove the TB from the hash list */ - h = tb_jmp_cache_hash_func(tb->pc); - for(env = first_cpu; env != NULL; env = env->next_cpu) { - if (env->tb_jmp_cache[h] == tb) - env->tb_jmp_cache[h] = NULL; - } - - /* suppress this TB from the two jump lists */ - tb_jmp_remove(tb, 0); - tb_jmp_remove(tb, 1); - - /* suppress any remaining jumps to this TB */ - tb1 = tb->jmp_first; - for(;;) { - n1 = (uintptr_t)tb1 & 3; - if (n1 == 2) - break; - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - tb2 = tb1->jmp_next[n1]; - tb_reset_jump(tb1, n1); - tb1->jmp_next[n1] = NULL; - tb1 = tb2; - } - tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */ - - tb_phys_invalidate_count++; -} - -static inline void set_bits(uint8_t *tab, int start, int len) -{ - int end, mask, end1; - - end = start + len; - tab += start >> 3; - mask = 0xff << (start & 7); - if ((start & ~7) == (end & ~7)) { - if (start < end) { - mask &= ~(0xff << (end & 7)); - *tab |= mask; - } - } else { - *tab++ |= mask; - start = (start + 8) & ~7; - end1 = end & ~7; - while (start < end1) { - *tab++ = 0xff; - start += 8; - } - if (start < end) { - mask = ~(0xff << (end & 7)); - *tab |= mask; - } - } -} - -static void build_page_bitmap(PageDesc *p) -{ - int n, tb_start, tb_end; - TranslationBlock *tb; - - p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8); - - tb = p->first_tb; - while (tb != NULL) { - n = (uintptr_t)tb & 3; - tb = (TranslationBlock *)((uintptr_t)tb & ~3); - /* NOTE: this is subtle as a TB may span two physical pages */ - if (n == 0) { - /* NOTE: tb_end may be after the end 
of the page, but - it is not a problem */ - tb_start = tb->pc & ~TARGET_PAGE_MASK; - tb_end = tb_start + tb->size; - if (tb_end > TARGET_PAGE_SIZE) - tb_end = TARGET_PAGE_SIZE; - } else { - tb_start = 0; - tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); - } - set_bits(p->code_bitmap, tb_start, tb_end - tb_start); - tb = tb->page_next[n]; - } -} - -TranslationBlock *tb_gen_code(CPUArchState *env, - target_ulong pc, target_ulong cs_base, - int flags, int cflags) -{ - TranslationBlock *tb; - uint8_t *tc_ptr; - tb_page_addr_t phys_pc, phys_page2; - target_ulong virt_page2; - int code_gen_size; - - phys_pc = get_page_addr_code(env, pc); - tb = tb_alloc(pc); - if (!tb) { - /* flush must be done */ - tb_flush(env); - /* cannot fail at this point */ - tb = tb_alloc(pc); - /* Don't forget to invalidate previous TB info. */ - tb_invalidated_flag = 1; - } - tc_ptr = code_gen_ptr; - tb->tc_ptr = tc_ptr; - tb->cs_base = cs_base; - tb->flags = flags; - tb->cflags = cflags; - cpu_gen_code(env, tb, &code_gen_size); - code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size + - CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1)); - - /* check next page if needed */ - virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK; - phys_page2 = -1; - if ((pc & TARGET_PAGE_MASK) != virt_page2) { - phys_page2 = get_page_addr_code(env, virt_page2); - } - tb_link_page(tb, phys_pc, phys_page2); - return tb; -} - -/* - * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end may refer to *different* physical pages. - * 'is_cpu_write_access' should be true if called from a real cpu write - * access: the virtual CPU will exit the current TB if code is modified inside - * this TB. - */ -void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end, - int is_cpu_write_access) -{ - while (start < end) { - tb_invalidate_phys_page_range(start, end, is_cpu_write_access); - start &= TARGET_PAGE_MASK; - start += TARGET_PAGE_SIZE; - } -} - -/* - * Invalidate all TBs which intersect with the target physical address range - * [start;end[. NOTE: start and end must refer to the *same* physical page. - * 'is_cpu_write_access' should be true if called from a real cpu write - * access: the virtual CPU will exit the current TB if code is modified inside - * this TB. 
- */ -void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end, - int is_cpu_write_access) -{ - TranslationBlock *tb, *tb_next, *saved_tb; - CPUArchState *env = cpu_single_env; - tb_page_addr_t tb_start, tb_end; - PageDesc *p; - int n; -#ifdef TARGET_HAS_PRECISE_SMC - int current_tb_not_found = is_cpu_write_access; - TranslationBlock *current_tb = NULL; - int current_tb_modified = 0; - target_ulong current_pc = 0; - target_ulong current_cs_base = 0; - int current_flags = 0; -#endif /* TARGET_HAS_PRECISE_SMC */ - - p = page_find(start >> TARGET_PAGE_BITS); - if (!p) - return; - if (!p->code_bitmap && - ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD && - is_cpu_write_access) { - /* build code bitmap */ - build_page_bitmap(p); - } - - /* we remove all the TBs in the range [start, end[ */ - /* XXX: see if in some cases it could be faster to invalidate all the code */ - tb = p->first_tb; - while (tb != NULL) { - n = (uintptr_t)tb & 3; - tb = (TranslationBlock *)((uintptr_t)tb & ~3); - tb_next = tb->page_next[n]; - /* NOTE: this is subtle as a TB may span two physical pages */ - if (n == 0) { - /* NOTE: tb_end may be after the end of the page, but - it is not a problem */ - tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK); - tb_end = tb_start + tb->size; - } else { - tb_start = tb->page_addr[1]; - tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK); - } - if (!(tb_end <= start || tb_start >= end)) { -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_not_found) { - current_tb_not_found = 0; - current_tb = NULL; - if (env->mem_io_pc) { - /* now we have a real cpu fault */ - current_tb = tb_find_pc(env->mem_io_pc); - } - } - if (current_tb == tb && - (current_tb->cflags & CF_COUNT_MASK) != 1) { - /* If we are modifying the current TB, we must stop - its execution. We could be more precise by checking - that the modification is after the current PC, but it - would require a specialized function to partially - restore the CPU state */ - - current_tb_modified = 1; - cpu_restore_state(current_tb, env, env->mem_io_pc); - cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, - ¤t_flags); - } -#endif /* TARGET_HAS_PRECISE_SMC */ - /* we need to do that to handle the case where a signal - occurs while doing tb_phys_invalidate() */ - saved_tb = NULL; - if (env) { - saved_tb = env->current_tb; - env->current_tb = NULL; - } - tb_phys_invalidate(tb, -1); - if (env) { - env->current_tb = saved_tb; - if (env->interrupt_request && env->current_tb) - cpu_interrupt(env, env->interrupt_request); - } - } - tb = tb_next; - } -#if !defined(CONFIG_USER_ONLY) - /* if no code remaining, no need to continue to use slow writes */ - if (!p->first_tb) { - invalidate_page_bitmap(p); - if (is_cpu_write_access) { - tlb_unprotect_code_phys(env, start, env->mem_io_vaddr); - } - } -#endif -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_modified) { - /* we generate a block containing just the instruction - modifying the memory. 
It will ensure that it cannot modify - itself */ - env->current_tb = NULL; - tb_gen_code(env, current_pc, current_cs_base, current_flags, 1); - cpu_resume_from_signal(env, NULL); - } -#endif -} - -/* len must be <= 8 and start must be a multiple of len */ -static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len) -{ - PageDesc *p; - int offset, b; -#if 0 - if (1) { - qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n", - cpu_single_env->mem_io_vaddr, len, - cpu_single_env->eip, - cpu_single_env->eip + - (intptr_t)cpu_single_env->segs[R_CS].base); - } -#endif - p = page_find(start >> TARGET_PAGE_BITS); - if (!p) - return; - if (p->code_bitmap) { - offset = start & ~TARGET_PAGE_MASK; - b = p->code_bitmap[offset >> 3] >> (offset & 7); - if (b & ((1 << len) - 1)) - goto do_invalidate; - } else { - do_invalidate: - tb_invalidate_phys_page_range(start, start + len, 1); - } -} - -#if !defined(CONFIG_SOFTMMU) -static void tb_invalidate_phys_page(tb_page_addr_t addr, - uintptr_t pc, void *puc) -{ - TranslationBlock *tb; - PageDesc *p; - int n; -#ifdef TARGET_HAS_PRECISE_SMC - TranslationBlock *current_tb = NULL; - CPUArchState *env = cpu_single_env; - int current_tb_modified = 0; - target_ulong current_pc = 0; - target_ulong current_cs_base = 0; - int current_flags = 0; -#endif - - addr &= TARGET_PAGE_MASK; - p = page_find(addr >> TARGET_PAGE_BITS); - if (!p) - return; - tb = p->first_tb; -#ifdef TARGET_HAS_PRECISE_SMC - if (tb && pc != 0) { - current_tb = tb_find_pc(pc); - } -#endif - while (tb != NULL) { - n = (uintptr_t)tb & 3; - tb = (TranslationBlock *)((uintptr_t)tb & ~3); -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb == tb && - (current_tb->cflags & CF_COUNT_MASK) != 1) { - /* If we are modifying the current TB, we must stop - its execution. We could be more precise by checking - that the modification is after the current PC, but it - would require a specialized function to partially - restore the CPU state */ - - current_tb_modified = 1; - cpu_restore_state(current_tb, env, pc); - cpu_get_tb_cpu_state(env, ¤t_pc, ¤t_cs_base, - ¤t_flags); - } -#endif /* TARGET_HAS_PRECISE_SMC */ - tb_phys_invalidate(tb, addr); - tb = tb->page_next[n]; - } - p->first_tb = NULL; -#ifdef TARGET_HAS_PRECISE_SMC - if (current_tb_modified) { - /* we generate a block containing just the instruction - modifying the memory. 
It will ensure that it cannot modify - itself */ - env->current_tb = NULL; - tb_gen_code(env, current_pc, current_cs_base, current_flags, 1); - cpu_resume_from_signal(env, puc); - } -#endif -} -#endif - -/* add the tb in the target page and protect it if necessary */ -static inline void tb_alloc_page(TranslationBlock *tb, - unsigned int n, tb_page_addr_t page_addr) -{ - PageDesc *p; -#ifndef CONFIG_USER_ONLY - bool page_already_protected; -#endif - - tb->page_addr[n] = page_addr; - p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1); - tb->page_next[n] = p->first_tb; -#ifndef CONFIG_USER_ONLY - page_already_protected = p->first_tb != NULL; -#endif - p->first_tb = (TranslationBlock *)((uintptr_t)tb | n); - invalidate_page_bitmap(p); - -#if defined(TARGET_HAS_SMC) || 1 - -#if defined(CONFIG_USER_ONLY) - if (p->flags & PAGE_WRITE) { - target_ulong addr; - PageDesc *p2; - int prot; - - /* force the host page as non writable (writes will have a - page fault + mprotect overhead) */ - page_addr &= qemu_host_page_mask; - prot = 0; - for(addr = page_addr; addr < page_addr + qemu_host_page_size; - addr += TARGET_PAGE_SIZE) { - - p2 = page_find (addr >> TARGET_PAGE_BITS); - if (!p2) - continue; - prot |= p2->flags; - p2->flags &= ~PAGE_WRITE; - } - mprotect(g2h(page_addr), qemu_host_page_size, - (prot & PAGE_BITS) & ~PAGE_WRITE); -#ifdef DEBUG_TB_INVALIDATE - printf("protecting code page: 0x" TARGET_FMT_lx "\n", - page_addr); -#endif - } -#else - /* if some code is already present, then the pages are already - protected. So we handle the case where only the first TB is - allocated in a physical page */ - if (!page_already_protected) { - tlb_protect_code(page_addr); - } -#endif - -#endif /* TARGET_HAS_SMC */ -} - -/* add a new TB and link it to the physical page tables. phys_page2 is - (-1) to indicate that only one page contains the TB. */ -void tb_link_page(TranslationBlock *tb, - tb_page_addr_t phys_pc, tb_page_addr_t phys_page2) -{ - unsigned int h; - TranslationBlock **ptb; - - /* Grab the mmap lock to stop another thread invalidating this TB - before we are done. */ - mmap_lock(); - /* add in the physical hash table */ - h = tb_phys_hash_func(phys_pc); - ptb = &tb_phys_hash[h]; - tb->phys_hash_next = *ptb; - *ptb = tb; - - /* add in the page list */ - tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK); - if (phys_page2 != -1) - tb_alloc_page(tb, 1, phys_page2); - else - tb->page_addr[1] = -1; - - tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); - tb->jmp_next[0] = NULL; - tb->jmp_next[1] = NULL; - - /* init original jump addresses */ - if (tb->tb_next_offset[0] != 0xffff) - tb_reset_jump(tb, 0); - if (tb->tb_next_offset[1] != 0xffff) - tb_reset_jump(tb, 1); - -#ifdef DEBUG_TB_CHECK - tb_page_check(); + .minimum_version_id_old = 1, + .post_load = cpu_common_post_load, + .fields = (VMStateField []) { + VMSTATE_UINT32(halted, CPUState), + VMSTATE_UINT32(interrupt_request, CPUState), + VMSTATE_END_OF_LIST() + } +}; +#else +#define vmstate_cpu_common vmstate_dummy #endif - mmap_unlock(); -} -/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr < - tb[1].tc_ptr. 
Return NULL if not found */ -TranslationBlock *tb_find_pc(uintptr_t tc_ptr) +CPUState *qemu_get_cpu(int index) { - int m_min, m_max, m; - uintptr_t v; - TranslationBlock *tb; + CPUArchState *env = first_cpu; + CPUState *cpu = NULL; - if (nb_tbs <= 0) - return NULL; - if (tc_ptr < (uintptr_t)code_gen_buffer || - tc_ptr >= (uintptr_t)code_gen_ptr) { - return NULL; - } - /* binary search (cf Knuth) */ - m_min = 0; - m_max = nb_tbs - 1; - while (m_min <= m_max) { - m = (m_min + m_max) >> 1; - tb = &tbs[m]; - v = (uintptr_t)tb->tc_ptr; - if (v == tc_ptr) - return tb; - else if (tc_ptr < v) { - m_max = m - 1; - } else { - m_min = m + 1; + while (env) { + cpu = ENV_GET_CPU(env); + if (cpu->cpu_index == index) { + break; } + env = env->next_cpu; } - return &tbs[m_max]; -} -static void tb_reset_jump_recursive(TranslationBlock *tb); + return env ? cpu : NULL; +} -static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n) +void qemu_for_each_cpu(void (*func)(CPUState *cpu, void *data), void *data) { - TranslationBlock *tb1, *tb_next, **ptb; - unsigned int n1; - - tb1 = tb->jmp_next[n]; - if (tb1 != NULL) { - /* find head of list */ - for(;;) { - n1 = (uintptr_t)tb1 & 3; - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - if (n1 == 2) - break; - tb1 = tb1->jmp_next[n1]; - } - /* we are now sure now that tb jumps to tb1 */ - tb_next = tb1; - - /* remove tb from the jmp_first list */ - ptb = &tb_next->jmp_first; - for(;;) { - tb1 = *ptb; - n1 = (uintptr_t)tb1 & 3; - tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3); - if (n1 == n && tb1 == tb) - break; - ptb = &tb1->jmp_next[n1]; - } - *ptb = tb->jmp_next[n]; - tb->jmp_next[n] = NULL; - - /* suppress the jump to next tb in generated code */ - tb_reset_jump(tb, n); + CPUArchState *env = first_cpu; - /* suppress jumps in the tb on which we could have jumped */ - tb_reset_jump_recursive(tb_next); + while (env) { + func(ENV_GET_CPU(env), data); + env = env->next_cpu; } } -static void tb_reset_jump_recursive(TranslationBlock *tb) +void cpu_exec_init(CPUArchState *env) { - tb_reset_jump_recursive2(tb, 0); - tb_reset_jump_recursive2(tb, 1); + CPUState *cpu = ENV_GET_CPU(env); + CPUClass *cc = CPU_GET_CLASS(cpu); + CPUArchState **penv; + int cpu_index; + +#if defined(CONFIG_USER_ONLY) + cpu_list_lock(); +#endif + env->next_cpu = NULL; + penv = &first_cpu; + cpu_index = 0; + while (*penv != NULL) { + penv = &(*penv)->next_cpu; + cpu_index++; + } + cpu->cpu_index = cpu_index; + cpu->numa_node = 0; + QTAILQ_INIT(&env->breakpoints); + QTAILQ_INIT(&env->watchpoints); +#ifndef CONFIG_USER_ONLY + cpu->thread_id = qemu_get_thread_id(); +#endif + *penv = env; +#if defined(CONFIG_USER_ONLY) + cpu_list_unlock(); +#endif + vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu); +#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY) + register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION, + cpu_save, cpu_load, env); + assert(cc->vmsd == NULL); +#endif + if (cc->vmsd != NULL) { + vmstate_register(NULL, cpu_index, cc->vmsd, cpu); + } } #if defined(TARGET_HAS_ICE) @@ -1476,21 +321,6 @@ static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) tb_invalidate_phys_page_range(pc, pc + 1, 0); } #else -void tb_invalidate_phys_addr(hwaddr addr) -{ - ram_addr_t ram_addr; - MemoryRegionSection *section; - - section = phys_page_find(address_space_memory.dispatch, addr >> TARGET_PAGE_BITS); - if (!(memory_region_is_ram(section->mr) - || (section->mr->rom_device && section->mr->readable))) { - return; - } - ram_addr = 
(memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK) - + memory_region_section_addr(section, addr); - tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0); -} - static void breakpoint_invalidate(CPUArchState *env, target_ulong pc) { tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) | @@ -1672,76 +502,12 @@ void cpu_single_step(CPUArchState *env, int enabled) #endif } -static void cpu_unlink_tb(CPUArchState *env) -{ - /* FIXME: TB unchaining isn't SMP safe. For now just ignore the - problem and hope the cpu will stop of its own accord. For userspace - emulation this often isn't actually as bad as it sounds. Often - signals are used primarily to interrupt blocking syscalls. */ - TranslationBlock *tb; - static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED; - - spin_lock(&interrupt_lock); - tb = env->current_tb; - /* if the cpu is currently executing code, we must unlink it and - all the potentially executing TB */ - if (tb) { - env->current_tb = NULL; - tb_reset_jump_recursive(tb); - } - spin_unlock(&interrupt_lock); -} - -#ifndef CONFIG_USER_ONLY -/* mask must never be zero, except for A20 change call */ -static void tcg_handle_interrupt(CPUArchState *env, int mask) +void cpu_exit(CPUArchState *env) { CPUState *cpu = ENV_GET_CPU(env); - int old_mask; - - old_mask = env->interrupt_request; - env->interrupt_request |= mask; - - /* - * If called from iothread context, wake the target cpu in - * case its halted. - */ - if (!qemu_cpu_is_self(cpu)) { - qemu_cpu_kick(cpu); - return; - } - - if (use_icount) { - env->icount_decr.u16.high = 0xffff; - if (!can_do_io(env) - && (mask & ~old_mask) != 0) { - cpu_abort(env, "Raised interrupt while not in I/O function"); - } - } else { - cpu_unlink_tb(env); - } -} - -CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt; - -#else /* CONFIG_USER_ONLY */ - -void cpu_interrupt(CPUArchState *env, int mask) -{ - env->interrupt_request |= mask; - cpu_unlink_tb(env); -} -#endif /* CONFIG_USER_ONLY */ -void cpu_reset_interrupt(CPUArchState *env, int mask) -{ - env->interrupt_request &= ~mask; -} - -void cpu_exit(CPUArchState *env) -{ - env->exit_request = 1; - cpu_unlink_tb(env); + cpu->exit_request = 1; + cpu->tcg_exit_req = 1; } void cpu_abort(CPUArchState *env, const char *fmt, ...) @@ -1780,7 +546,6 @@ CPUArchState *cpu_copy(CPUArchState *env) { CPUArchState *new_env = cpu_init(env->cpu_model_str); CPUArchState *next_cpu = new_env->next_cpu; - int cpu_index = new_env->cpu_index; #if defined(TARGET_HAS_ICE) CPUBreakpoint *bp; CPUWatchpoint *wp; @@ -1788,9 +553,8 @@ CPUArchState *cpu_copy(CPUArchState *env) memcpy(new_env, env, sizeof(CPUArchState)); - /* Preserve chaining and index. */ + /* Preserve chaining. */ new_env->next_cpu = next_cpu; - new_env->cpu_index = cpu_index; /* Clone all break/watchpoints. Note: Once we support ptrace with hw-debug register access, make sure @@ -1811,21 +575,6 @@ CPUArchState *cpu_copy(CPUArchState *env) } #if !defined(CONFIG_USER_ONLY) -void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr) -{ - unsigned int i; - - /* Discard jump cache entries for any tb which might potentially - overlap the flushed page. 
*/ - i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE); - memset (&env->tb_jmp_cache[i], 0, - TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); - - i = tb_jmp_cache_hash_page(addr); - memset (&env->tb_jmp_cache[i], 0, - TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *)); -} - static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end, uintptr_t length) { @@ -1863,7 +612,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end, } } -int cpu_physical_memory_set_dirty_tracking(int enable) +static int cpu_physical_memory_set_dirty_tracking(int enable) { int ret = 0; in_migration = enable; @@ -1915,264 +664,6 @@ hwaddr memory_region_section_get_iotlb(CPUArchState *env, return iotlb; } - -#else -/* - * Walks guest process memory "regions" one by one - * and calls callback function 'fn' for each region. - */ - -struct walk_memory_regions_data -{ - walk_memory_regions_fn fn; - void *priv; - uintptr_t start; - int prot; -}; - -static int walk_memory_regions_end(struct walk_memory_regions_data *data, - abi_ulong end, int new_prot) -{ - if (data->start != -1ul) { - int rc = data->fn(data->priv, data->start, end, data->prot); - if (rc != 0) { - return rc; - } - } - - data->start = (new_prot ? end : -1ul); - data->prot = new_prot; - - return 0; -} - -static int walk_memory_regions_1(struct walk_memory_regions_data *data, - abi_ulong base, int level, void **lp) -{ - abi_ulong pa; - int i, rc; - - if (*lp == NULL) { - return walk_memory_regions_end(data, base, 0); - } - - if (level == 0) { - PageDesc *pd = *lp; - for (i = 0; i < L2_SIZE; ++i) { - int prot = pd[i].flags; - - pa = base | (i << TARGET_PAGE_BITS); - if (prot != data->prot) { - rc = walk_memory_regions_end(data, pa, prot); - if (rc != 0) { - return rc; - } - } - } - } else { - void **pp = *lp; - for (i = 0; i < L2_SIZE; ++i) { - pa = base | ((abi_ulong)i << - (TARGET_PAGE_BITS + L2_BITS * level)); - rc = walk_memory_regions_1(data, pa, level - 1, pp + i); - if (rc != 0) { - return rc; - } - } - } - - return 0; -} - -int walk_memory_regions(void *priv, walk_memory_regions_fn fn) -{ - struct walk_memory_regions_data data; - uintptr_t i; - - data.fn = fn; - data.priv = priv; - data.start = -1ul; - data.prot = 0; - - for (i = 0; i < V_L1_SIZE; i++) { - int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT, - V_L1_SHIFT / L2_BITS - 1, l1_map + i); - if (rc != 0) { - return rc; - } - } - - return walk_memory_regions_end(&data, 0, 0); -} - -static int dump_region(void *priv, abi_ulong start, - abi_ulong end, unsigned long prot) -{ - FILE *f = (FILE *)priv; - - (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx - " "TARGET_ABI_FMT_lx" %c%c%c\n", - start, end, end - start, - ((prot & PAGE_READ) ? 'r' : '-'), - ((prot & PAGE_WRITE) ? 'w' : '-'), - ((prot & PAGE_EXEC) ? 'x' : '-')); - - return (0); -} - -/* dump memory mappings */ -void page_dump(FILE *f) -{ - (void) fprintf(f, "%-8s %-8s %-8s %s\n", - "start", "end", "size", "prot"); - walk_memory_regions(f, dump_region); -} - -int page_get_flags(target_ulong address) -{ - PageDesc *p; - - p = page_find(address >> TARGET_PAGE_BITS); - if (!p) - return 0; - return p->flags; -} - -/* Modify the flags of a page and invalidate the code if necessary. - The flag PAGE_WRITE_ORG is positioned automatically depending - on PAGE_WRITE. The mmap_lock should already be held. */ -void page_set_flags(target_ulong start, target_ulong end, int flags) -{ - target_ulong addr, len; - - /* This function should never be called with addresses outside the - guest address space. 
If this assert fires, it probably indicates - a missing call to h2g_valid. */ -#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS - assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); -#endif - assert(start < end); - - start = start & TARGET_PAGE_MASK; - end = TARGET_PAGE_ALIGN(end); - - if (flags & PAGE_WRITE) { - flags |= PAGE_WRITE_ORG; - } - - for (addr = start, len = end - start; - len != 0; - len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { - PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1); - - /* If the write protection bit is set, then we invalidate - the code inside. */ - if (!(p->flags & PAGE_WRITE) && - (flags & PAGE_WRITE) && - p->first_tb) { - tb_invalidate_phys_page(addr, 0, NULL); - } - p->flags = flags; - } -} - -int page_check_range(target_ulong start, target_ulong len, int flags) -{ - PageDesc *p; - target_ulong end; - target_ulong addr; - - /* This function should never be called with addresses outside the - guest address space. If this assert fires, it probably indicates - a missing call to h2g_valid. */ -#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS - assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS)); -#endif - - if (len == 0) { - return 0; - } - if (start + len - 1 < start) { - /* We've wrapped around. */ - return -1; - } - - end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */ - start = start & TARGET_PAGE_MASK; - - for (addr = start, len = end - start; - len != 0; - len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) { - p = page_find(addr >> TARGET_PAGE_BITS); - if( !p ) - return -1; - if( !(p->flags & PAGE_VALID) ) - return -1; - - if ((flags & PAGE_READ) && !(p->flags & PAGE_READ)) - return -1; - if (flags & PAGE_WRITE) { - if (!(p->flags & PAGE_WRITE_ORG)) - return -1; - /* unprotect the page if it was put read-only because it - contains translated code */ - if (!(p->flags & PAGE_WRITE)) { - if (!page_unprotect(addr, 0, NULL)) - return -1; - } - return 0; - } - } - return 0; -} - -/* called from signal handler: invalidate the code and unprotect the - page. Return TRUE if the fault was successfully handled. */ -int page_unprotect(target_ulong address, uintptr_t pc, void *puc) -{ - unsigned int prot; - PageDesc *p; - target_ulong host_start, host_end, addr; - - /* Technically this isn't safe inside a signal handler. However we - know this only ever happens in a synchronous SEGV handler, so in - practice it seems to be ok. */ - mmap_lock(); - - p = page_find(address >> TARGET_PAGE_BITS); - if (!p) { - mmap_unlock(); - return 0; - } - - /* if the page was really writable, then we change its - protection back to writable */ - if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) { - host_start = address & qemu_host_page_mask; - host_end = host_start + qemu_host_page_size; - - prot = 0; - for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) { - p = page_find(addr >> TARGET_PAGE_BITS); - p->flags |= PAGE_WRITE; - prot |= p->flags; - - /* and since the content will be modified, we must invalidate - the corresponding translated code. 
*/ - tb_invalidate_phys_page(addr, pc, puc); -#ifdef DEBUG_TB_CHECK - tb_invalidate_check(addr); -#endif - } - mprotect((void *)g2h(host_start), qemu_host_page_size, - prot & PAGE_BITS); - - mmap_unlock(); - return 1; - } - mmap_unlock(); - return 0; -} #endif /* defined(CONFIG_USER_ONLY) */ #if !defined(CONFIG_USER_ONLY) @@ -2324,6 +815,16 @@ void qemu_flush_coalesced_mmio_buffer(void) kvm_flush_coalesced_mmio_buffer(); } +void qemu_mutex_lock_ramlist(void) +{ + qemu_mutex_lock(&ram_list.mutex); +} + +void qemu_mutex_unlock_ramlist(void) +{ + qemu_mutex_unlock(&ram_list.mutex); +} + #if defined(__linux__) && !defined(TARGET_S390X) #include @@ -2355,6 +856,8 @@ static void *file_ram_alloc(RAMBlock *block, const char *path) { char *filename; + char *sanitized_name; + char *c; void *area; int fd; #ifdef MAP_POPULATE @@ -2376,18 +879,25 @@ static void *file_ram_alloc(RAMBlock *block, return NULL; } - if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) { - return NULL; + /* Make name safe to use with mkstemp by replacing '/' with '_'. */ + sanitized_name = g_strdup(block->mr->name); + for (c = sanitized_name; *c != '\0'; c++) { + if (*c == '/') + *c = '_'; } + filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path, + sanitized_name); + g_free(sanitized_name); + fd = mkstemp(filename); if (fd < 0) { perror("unable to create backing store for hugepages"); - free(filename); + g_free(filename); return NULL; } unlink(filename); - free(filename); + g_free(filename); memory = (memory+hpagesize-1) & ~(hpagesize-1); @@ -2425,15 +935,17 @@ static ram_addr_t find_ram_offset(ram_addr_t size) RAMBlock *block, *next_block; ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX; - if (QLIST_EMPTY(&ram_list.blocks)) + assert(size != 0); /* it would hand out same offset multiple times */ + + if (QTAILQ_EMPTY(&ram_list.blocks)) return 0; - QLIST_FOREACH(block, &ram_list.blocks, next) { + QTAILQ_FOREACH(block, &ram_list.blocks, next) { ram_addr_t end, next = RAM_ADDR_MAX; end = block->offset + block->length; - QLIST_FOREACH(next_block, &ram_list.blocks, next) { + QTAILQ_FOREACH(next_block, &ram_list.blocks, next) { if (next_block->offset >= end) { next = MIN(next, next_block->offset); } @@ -2458,7 +970,7 @@ ram_addr_t last_ram_offset(void) RAMBlock *block; ram_addr_t last = 0; - QLIST_FOREACH(block, &ram_list.blocks, next) + QTAILQ_FOREACH(block, &ram_list.blocks, next) last = MAX(last, block->offset + block->length); return last; @@ -2487,7 +999,7 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev) RAMBlock *new_block, *block; new_block = NULL; - QLIST_FOREACH(block, &ram_list.blocks, next) { + QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (block->offset == addr) { new_block = block; break; @@ -2505,13 +1017,16 @@ void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev) } pstrcat(new_block->idstr, sizeof(new_block->idstr), name); - QLIST_FOREACH(block, &ram_list.blocks, next) { + /* This assumes the iothread lock is taken here too. 
*/ + qemu_mutex_lock_ramlist(); + QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (block != new_block && !strcmp(block->idstr, new_block->idstr)) { fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n", new_block->idstr); abort(); } } + qemu_mutex_unlock_ramlist(); } static int memory_try_enable_merging(void *addr, size_t len) @@ -2530,11 +1045,13 @@ static int memory_try_enable_merging(void *addr, size_t len) ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, MemoryRegion *mr) { - RAMBlock *new_block; + RAMBlock *block, *new_block; size = TARGET_PAGE_ALIGN(size); new_block = g_malloc0(sizeof(*new_block)); + /* This assumes the iothread lock is taken here too. */ + qemu_mutex_lock_ramlist(); new_block->mr = mr; new_block->offset = find_ram_offset(size); if (host) { @@ -2545,7 +1062,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, #if defined (__linux__) && !defined(TARGET_S390X) new_block->host = file_ram_alloc(new_block, size, mem_path); if (!new_block->host) { - new_block->host = qemu_vmalloc(size); + new_block->host = qemu_anon_ram_alloc(size); memory_try_enable_merging(new_block->host, size); } #else @@ -2557,16 +1074,30 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host, xen_ram_alloc(new_block->offset, size, mr); } else if (kvm_enabled()) { /* some s390/kvm configurations have special constraints */ - new_block->host = kvm_vmalloc(size); + new_block->host = kvm_ram_alloc(size); } else { - new_block->host = qemu_vmalloc(size); + new_block->host = qemu_anon_ram_alloc(size); } memory_try_enable_merging(new_block->host, size); } } new_block->length = size; - QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next); + /* Keep the list sorted from biggest to smallest block. */ + QTAILQ_FOREACH(block, &ram_list.blocks, next) { + if (block->length < new_block->length) { + break; + } + } + if (block) { + QTAILQ_INSERT_BEFORE(block, new_block, next); + } else { + QTAILQ_INSERT_TAIL(&ram_list.blocks, new_block, next); + } + ram_list.mru_block = NULL; + + ram_list.version++; + qemu_mutex_unlock_ramlist(); ram_list.phys_dirty = g_realloc(ram_list.phys_dirty, last_ram_offset() >> TARGET_PAGE_BITS); @@ -2592,22 +1123,31 @@ void qemu_ram_free_from_ptr(ram_addr_t addr) { RAMBlock *block; - QLIST_FOREACH(block, &ram_list.blocks, next) { + /* This assumes the iothread lock is taken here too. */ + qemu_mutex_lock_ramlist(); + QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (addr == block->offset) { - QLIST_REMOVE(block, next); + QTAILQ_REMOVE(&ram_list.blocks, block, next); + ram_list.mru_block = NULL; + ram_list.version++; g_free(block); - return; + break; } } + qemu_mutex_unlock_ramlist(); } void qemu_ram_free(ram_addr_t addr) { RAMBlock *block; - QLIST_FOREACH(block, &ram_list.blocks, next) { + /* This assumes the iothread lock is taken here too. 
*/ + qemu_mutex_lock_ramlist(); + QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (addr == block->offset) { - QLIST_REMOVE(block, next); + QTAILQ_REMOVE(&ram_list.blocks, block, next); + ram_list.mru_block = NULL; + ram_list.version++; if (block->flags & RAM_PREALLOC_MASK) { ; } else if (mem_path) { @@ -2616,26 +1156,23 @@ void qemu_ram_free(ram_addr_t addr) munmap(block->host, block->length); close(block->fd); } else { - qemu_vfree(block->host); + qemu_anon_ram_free(block->host, block->length); } #else abort(); #endif } else { -#if defined(TARGET_S390X) && defined(CONFIG_KVM) - munmap(block->host, block->length); -#else if (xen_enabled()) { xen_invalidate_map_cache_entry(block->host); } else { - qemu_vfree(block->host); + qemu_anon_ram_free(block->host, block->length); } -#endif } g_free(block); - return; + break; } } + qemu_mutex_unlock_ramlist(); } @@ -2647,7 +1184,7 @@ void qemu_ram_remap(ram_addr_t addr, ram_addr_t length) int flags; void *area, *vaddr; - QLIST_FOREACH(block, &ram_list.blocks, next) { + QTAILQ_FOREACH(block, &ram_list.blocks, next) { offset = addr - block->offset; if (offset < block->length) { vaddr = block->host + offset; @@ -2713,43 +1250,48 @@ void *qemu_get_ram_ptr(ram_addr_t addr) { RAMBlock *block; - QLIST_FOREACH(block, &ram_list.blocks, next) { + /* The list is protected by the iothread lock here. */ + block = ram_list.mru_block; + if (block && addr - block->offset < block->length) { + goto found; + } + QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (addr - block->offset < block->length) { - /* Move this entry to to start of the list. */ - if (block != QLIST_FIRST(&ram_list.blocks)) { - QLIST_REMOVE(block, next); - QLIST_INSERT_HEAD(&ram_list.blocks, block, next); - } - if (xen_enabled()) { - /* We need to check if the requested address is in the RAM - * because we don't want to map the entire memory in QEMU. - * In that case just map until the end of the page. - */ - if (block->offset == 0) { - return xen_map_cache(addr, 0, 0); - } else if (block->host == NULL) { - block->host = - xen_map_cache(block->offset, block->length, 1); - } - } - return block->host + (addr - block->offset); + goto found; } } fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr); abort(); - return NULL; +found: + ram_list.mru_block = block; + if (xen_enabled()) { + /* We need to check if the requested address is in the RAM + * because we don't want to map the entire memory in QEMU. + * In that case just map until the end of the page. + */ + if (block->offset == 0) { + return xen_map_cache(addr, 0, 0); + } else if (block->host == NULL) { + block->host = + xen_map_cache(block->offset, block->length, 1); + } + } + return block->host + (addr - block->offset); } -/* Return a host pointer to ram allocated with qemu_ram_alloc. - * Same as qemu_get_ram_ptr but avoid reordering ramblocks. +/* Return a host pointer to ram allocated with qemu_ram_alloc. Same as + * qemu_get_ram_ptr but do not touch ram_list.mru_block. + * + * ??? Is this still necessary? */ -void *qemu_safe_ram_ptr(ram_addr_t addr) +static void *qemu_safe_ram_ptr(ram_addr_t addr) { RAMBlock *block; - QLIST_FOREACH(block, &ram_list.blocks, next) { + /* The list is protected by the iothread lock here. */ + QTAILQ_FOREACH(block, &ram_list.blocks, next) { if (addr - block->offset < block->length) { if (xen_enabled()) { /* We need to check if the requested address is in the RAM @@ -2775,7 +1317,7 @@ void *qemu_safe_ram_ptr(ram_addr_t addr) /* Return a host pointer to guest's ram. 
  * but takes a size argument */
-void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
+static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
 {
     if (*size == 0) {
         return NULL;
@@ -2785,7 +1327,7 @@ void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
     } else {
         RAMBlock *block;
 
-        QLIST_FOREACH(block, &ram_list.blocks, next) {
+        QTAILQ_FOREACH(block, &ram_list.blocks, next) {
             if (addr - block->offset < block->length) {
                 if (addr - block->offset + *size > block->length)
                     *size = block->length - addr + block->offset;
@@ -2813,7 +1355,7 @@ int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
         return 0;
     }
 
-    QLIST_FOREACH(block, &ram_list.blocks, next) {
+    QTAILQ_FOREACH(block, &ram_list.blocks, next) {
         /* This case append when the block is not mapped.  */
         if (block->host == NULL) {
             continue;
         }
@@ -2936,7 +1478,6 @@ static void check_watchpoint(int offset, int len_mask, int flags)
 {
     CPUArchState *env = cpu_single_env;
     target_ulong pc, cs_base;
-    TranslationBlock *tb;
     target_ulong vaddr;
     CPUWatchpoint *wp;
     int cpu_flags;
@@ -2945,7 +1486,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
         /* We re-entered the check after replacing the TB. Now raise
          * the debug interrupt so that is will trigger after the
          * current instruction. */
-        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
+        cpu_interrupt(ENV_GET_CPU(env), CPU_INTERRUPT_DEBUG);
         return;
     }
     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
@@ -2955,13 +1496,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
             wp->flags |= BP_WATCHPOINT_HIT;
             if (!env->watchpoint_hit) {
                 env->watchpoint_hit = wp;
-                tb = tb_find_pc(env->mem_io_pc);
-                if (!tb) {
-                    cpu_abort(env, "check_watchpoint: could not find TB for "
-                              "pc=%p", (void *)env->mem_io_pc);
-                }
-                cpu_restore_state(tb, env, env->mem_io_pc);
-                tb_phys_invalidate(tb, -1);
+                tb_check_watchpoint(env);
                 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                     env->exception_index = EXCP_DEBUG;
                     cpu_loop_exit(env);
@@ -3280,6 +1815,9 @@ static void memory_map_init(void)
     memory_listener_register(&core_memory_listener, &address_space_memory);
     memory_listener_register(&io_memory_listener, &address_space_io);
     memory_listener_register(&tcg_memory_listener, &address_space_memory);
+
+    dma_context_init(&dma_context_memory, &address_space_memory,
+                     NULL, NULL, NULL);
 }
 
 MemoryRegion *get_system_memory(void)
@@ -3523,7 +2061,7 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
     return client;
 }
 
-void cpu_unregister_map_client(void *_client)
+static void cpu_unregister_map_client(void *_client)
 {
     MapClient *client = (MapClient *)_client;
 
@@ -4069,119 +2607,8 @@ int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
 }
 #endif
 
-/* in deterministic execution mode, instructions doing device I/Os
-   must be at the end of the TB */
-void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
-{
-    TranslationBlock *tb;
-    uint32_t n, cflags;
-    target_ulong pc, cs_base;
-    uint64_t flags;
-
-    tb = tb_find_pc(retaddr);
-    if (!tb) {
-        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
-                  (void *)retaddr);
-    }
-    n = env->icount_decr.u16.low + tb->icount;
-    cpu_restore_state(tb, env, retaddr);
-    /* Calculate how many instructions had been executed before the fault
-       occurred.  */
-    n = n - env->icount_decr.u16.low;
-    /* Generate a new TB ending on the I/O insn.  */
-    n++;
-    /* On MIPS and SH, delay slot instructions can only be restarted if
-       they were already the first instruction in the TB.  If this is not
-       the first instruction in a TB then re-execute the preceding
-       branch.  */
-#if defined(TARGET_MIPS)
-    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
-        env->active_tc.PC -= 4;
-        env->icount_decr.u16.low++;
-        env->hflags &= ~MIPS_HFLAG_BMASK;
-    }
-#elif defined(TARGET_SH4)
-    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
-            && n > 1) {
-        env->pc -= 2;
-        env->icount_decr.u16.low++;
-        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
-    }
-#endif
-    /* This should never happen.  */
-    if (n > CF_COUNT_MASK)
-        cpu_abort(env, "TB too big during recompile");
-
-    cflags = n | CF_LAST_IO;
-    pc = tb->pc;
-    cs_base = tb->cs_base;
-    flags = tb->flags;
-    tb_phys_invalidate(tb, -1);
-    /* FIXME: In theory this could raise an exception.  In practice
-       we have already translated the block once so it's probably ok.  */
-    tb_gen_code(env, pc, cs_base, flags, cflags);
-    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
-       the first in the TB) then we end up generating a whole new TB and
-       repeating the fault, which is horribly inefficient.
-       Better would be to execute just this insn uncached, or generate a
-       second new TB.  */
-    cpu_resume_from_signal(env, NULL);
-}
-
 #if !defined(CONFIG_USER_ONLY)
 
-void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
-{
-    int i, target_code_size, max_target_code_size;
-    int direct_jmp_count, direct_jmp2_count, cross_page;
-    TranslationBlock *tb;
-
-    target_code_size = 0;
-    max_target_code_size = 0;
-    cross_page = 0;
-    direct_jmp_count = 0;
-    direct_jmp2_count = 0;
-    for(i = 0; i < nb_tbs; i++) {
-        tb = &tbs[i];
-        target_code_size += tb->size;
-        if (tb->size > max_target_code_size)
-            max_target_code_size = tb->size;
-        if (tb->page_addr[1] != -1)
-            cross_page++;
-        if (tb->tb_next_offset[0] != 0xffff) {
-            direct_jmp_count++;
-            if (tb->tb_next_offset[1] != 0xffff) {
-                direct_jmp2_count++;
-            }
-        }
-    }
-    /* XXX: avoid using doubles ?  */
-    cpu_fprintf(f, "Translation buffer state:\n");
-    cpu_fprintf(f, "gen code size       %td/%zd\n",
-                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
-    cpu_fprintf(f, "TB count            %d/%d\n",
-                nb_tbs, code_gen_max_blocks);
-    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
-                nb_tbs ? target_code_size / nb_tbs : 0,
-                max_target_code_size);
-    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
-                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
-                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
-    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
-                cross_page,
-                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
-    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
-                direct_jmp_count,
-                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
-                direct_jmp2_count,
-                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
-    cpu_fprintf(f, "\nStatistics:\n");
-    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
-    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
-    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
-    tcg_dump_info(f, cpu_fprintf);
-}
-
 /*
  * A helper function for the _utterly broken_ virtio device model to find out if
  * it's running on a big endian machine. Don't do this at home kids!