#include "cpu.h"
#include "exec-all.h"
#include "qemu-common.h"
+#include "tcg.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#endif
#undef DEBUG_TB_CHECK
#endif
-/* threshold to flush the translated code buffer */
-#define CODE_GEN_BUFFER_MAX_SIZE (CODE_GEN_BUFFER_SIZE - code_gen_max_block_size())
-
#define SMC_BITMAP_USE_THRESHOLD 10
#define MMAP_AREA_START 0x00000000
#elif defined(TARGET_PPC64)
#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_X86_64) && !defined(USE_KQEMU)
-#define TARGET_PHYS_ADDR_SPACE_BITS 40
+#define TARGET_PHYS_ADDR_SPACE_BITS 42
#elif defined(TARGET_I386) && !defined(USE_KQEMU)
#define TARGET_PHYS_ADDR_SPACE_BITS 36
#else
#define TARGET_PHYS_ADDR_SPACE_BITS 32
#endif
-TranslationBlock tbs[CODE_GEN_MAX_BLOCKS];
+TranslationBlock *tbs;
+int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
-uint8_t code_gen_buffer[CODE_GEN_BUFFER_SIZE] __attribute__((aligned (32)));
+uint8_t code_gen_prologue[1024] __attribute__((aligned (32)));
+uint8_t *code_gen_buffer;
+unsigned long code_gen_buffer_size;
+/* threshold to flush the translated code buffer */
+unsigned long code_gen_buffer_max_size;
uint8_t *code_gen_ptr;
ram_addr_t phys_ram_size;
*/
#define L1_BITS (TARGET_VIRT_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
#else
-#define L1_BITS (TARGET_PHYS_ADDR_SPACE_BITS - L2_BITS - TARGET_PAGE_BITS)
+#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#endif
-#undef L1_BITS
-#undef L2_BITS
-#define L1_BITS 13
-#define L2_BITS 13
-
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
void *opaque[TARGET_PAGE_SIZE][2][4];
} subpage_t;
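+/* Make a host memory range writable and executable, as needed for the
+   buffers holding generated code. */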
+#ifdef _WIN32
+static void map_exec(void *addr, long size)
+{
+    DWORD old_protect;
+    VirtualProtect(addr, size,
+                   PAGE_EXECUTE_READWRITE, &old_protect);
+}
+#else
+static void map_exec(void *addr, long size)
+{
+ unsigned long start, end, page_size;
+
+ page_size = getpagesize();
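+    /* mprotect() requires page-aligned bounds: round the start down
+       and the end up to host page boundaries. */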
+ start = (unsigned long)addr;
+ start &= ~(page_size - 1);
+
+ end = (unsigned long)addr + size;
+ end += page_size - 1;
+ end &= ~(page_size - 1);
+
+ mprotect((void *)start, end - start,
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+}
+#endif
+
static void page_init(void)
{
/* NOTE: we can always suppose that qemu_host_page_size >=
GetSystemInfo(&system_info);
qemu_real_host_page_size = system_info.dwPageSize;
-
- VirtualProtect(code_gen_buffer, sizeof(code_gen_buffer),
- PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
qemu_real_host_page_size = getpagesize();
- {
- unsigned long start, end;
-
- start = (unsigned long)code_gen_buffer;
- start &= ~(qemu_real_host_page_size - 1);
-
- end = (unsigned long)code_gen_buffer + sizeof(code_gen_buffer);
- end += qemu_real_host_page_size - 1;
- end &= ~(qemu_real_host_page_size - 1);
-
- mprotect((void *)start, end - start,
- PROT_READ | PROT_WRITE | PROT_EXEC);
- }
#endif
-
if (qemu_host_page_size == 0)
qemu_host_page_size = qemu_real_host_page_size;
if (qemu_host_page_size < TARGET_PAGE_SIZE)
FILE *f;
int n;
+ mmap_lock();
+ last_brk = (unsigned long)sbrk(0);
f = fopen("/proc/self/maps", "r");
if (f) {
do {
n = fscanf (f, "%llx-%llx %*[^\n]\n", &startaddr, &endaddr);
if (n == 2) {
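+            /* Clamp host addresses to the range the guest page
+               tables can represent. */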
- page_set_flags(TARGET_PAGE_ALIGN(startaddr),
+ startaddr = MIN(startaddr,
+ (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
+ endaddr = MIN(endaddr,
+ (1ULL << TARGET_PHYS_ADDR_SPACE_BITS) - 1);
+ page_set_flags(startaddr & TARGET_PAGE_MASK,
TARGET_PAGE_ALIGN(endaddr),
PAGE_RESERVED);
}
} while (!feof(f));
fclose(f);
}
+ mmap_unlock();
}
#endif
}
PhysPageDesc *pd;
p = (void **)l1_phys_map;
-#if 0
#if TARGET_PHYS_ADDR_SPACE_BITS > 32
#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
memset(p, 0, sizeof(void *) * L1_SIZE);
*lp = p;
}
-#endif
#endif
lp = p + ((index >> L2_BITS) & (L1_SIZE - 1));
pd = *lp;
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr);
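+/* Softmmu does not emulate a guest mmap(), so the mmap lock degrades
+   to a no-op outside user-mode emulation. */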
+#define mmap_lock() do { } while(0)
+#define mmap_unlock() do { } while(0)
+#endif
+
+#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
+
+#if defined(CONFIG_USER_ONLY)
+/* Currently it is not recommended to allocate big chunks of data in
+   user mode. It will change when a dedicated libc is used. */
+#define USE_STATIC_CODE_GEN_BUFFER
+#endif
+
+#ifdef USE_STATIC_CODE_GEN_BUFFER
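+/* The static buffer lives in .bss, so it still has to be remapped
+   executable by code_gen_alloc() below. */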
+static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE];
+#endif
+
+void code_gen_alloc(unsigned long tb_size)
+{
+#ifdef USE_STATIC_CODE_GEN_BUFFER
+ code_gen_buffer = static_code_gen_buffer;
+ code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
+ map_exec(code_gen_buffer, code_gen_buffer_size);
+#else
+ code_gen_buffer_size = tb_size;
+ if (code_gen_buffer_size == 0) {
+#if defined(CONFIG_USER_ONLY)
+ /* in user mode, phys_ram_size is not meaningful */
+ code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
+#else
+        /* XXX: needs adjustments */
+ code_gen_buffer_size = (int)(phys_ram_size / 4);
#endif
+ }
+ if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
+ code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
+ /* The code gen buffer location may have constraints depending on
+ the host cpu and OS */
+#if defined(__linux__)
+ {
+ int flags;
+ flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#if defined(__x86_64__)
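+        /* Keep the buffer in the low 2GB of the address space so the
+           translated code stays within reach of 32-bit direct jumps
+           and calls. */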
+ flags |= MAP_32BIT;
+ /* Cannot map more than that */
+ if (code_gen_buffer_size > (800 * 1024 * 1024))
+ code_gen_buffer_size = (800 * 1024 * 1024);
+#endif
+ code_gen_buffer = mmap(NULL, code_gen_buffer_size,
+ PROT_WRITE | PROT_READ | PROT_EXEC,
+ flags, -1, 0);
+ if (code_gen_buffer == MAP_FAILED) {
+ fprintf(stderr, "Could not allocate dynamic translator buffer\n");
+ exit(1);
+ }
+ }
+#else
+ code_gen_buffer = qemu_malloc(code_gen_buffer_size);
+ if (!code_gen_buffer) {
+ fprintf(stderr, "Could not allocate dynamic translator buffer\n");
+ exit(1);
+ }
+ map_exec(code_gen_buffer, code_gen_buffer_size);
+#endif
+#endif /* !USE_STATIC_CODE_GEN_BUFFER */
+ map_exec(code_gen_prologue, sizeof(code_gen_prologue));
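+    /* Keep one worst-case translation block of headroom so tb_alloc()
+       triggers a flush before generated code can overrun the buffer. */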
+ code_gen_buffer_max_size = code_gen_buffer_size -
+ code_gen_max_block_size();
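+    /* CODE_GEN_AVG_BLOCK_SIZE is a heuristic: size the TB array so it
+       fills at roughly the same rate as the code buffer itself. */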
+ code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
+ tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
+}
+
+/* Must be called before using the QEMU cpus. 'tb_size' is the size
+ (in bytes) allocated to the translation buffer. Zero means default
+ size. */
+void cpu_exec_init_all(unsigned long tb_size)
+{
+ cpu_gen_init();
+ code_gen_alloc(tb_size);
+ code_gen_ptr = code_gen_buffer;
+ page_init();
+ io_mem_init();
+}
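+
+/* Usage sketch (the call site is an assumption, not part of this
+   patch): the frontend calls cpu_exec_init_all(0) once at startup,
+   before the first cpu_exec_init(); zero picks the default
+   translation buffer size computed in code_gen_alloc(). */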
void cpu_exec_init(CPUState *env)
{
CPUState **penv;
int cpu_index;
- if (!code_gen_ptr) {
- cpu_gen_init();
- code_gen_ptr = code_gen_buffer;
- page_init();
- io_mem_init();
- }
env->next_cpu = NULL;
penv = &first_cpu;
cpu_index = 0;
nb_tbs, nb_tbs > 0 ?
((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
- if ((unsigned long)(code_gen_ptr - code_gen_buffer) > CODE_GEN_BUFFER_SIZE)
+ if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
cpu_abort(env1, "Internal error: code buffer overflow\n");
nb_tbs = 0;
{
TranslationBlock *tb;
- if (nb_tbs >= CODE_GEN_MAX_BLOCKS ||
- (code_gen_ptr - code_gen_buffer) >= CODE_GEN_BUFFER_MAX_SIZE)
+ if (nb_tbs >= code_gen_max_blocks ||
+ (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
return NULL;
tb = &tbs[nb_tbs++];
tb->pc = pc;
unsigned int h;
TranslationBlock **ptb;
+ /* Grab the mmap lock to stop another thread invalidating this TB
+ before we are done. */
+ mmap_lock();
/* add in the physical hash table */
h = tb_phys_hash_func(phys_pc);
ptb = &tb_phys_hash[h];
#ifdef DEBUG_TB_CHECK
tb_page_check();
#endif
+ mmap_unlock();
}
/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
return -1;
}
+/* Remove all watchpoints. */
+void cpu_watchpoint_remove_all(CPUState *env)
+{
+ int i;
+
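+    /* Flushing the page from the TLB drops the watchpoint hook on the
+       next refill. */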
+ for (i = 0; i < env->nb_watchpoints; i++) {
+ tlb_flush_page(env, env->watchpoint[i].vaddr);
+ }
+ env->nb_watchpoints = 0;
+}
+
/* add a breakpoint. EXCP_DEBUG is returned by the CPU loop if a
breakpoint is reached */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc)
#endif
}
+/* remove all breakpoints */
+void cpu_breakpoint_remove_all(CPUState *env)
+{
+#if defined(TARGET_HAS_ICE)
+ int i;
+ for(i = 0; i < env->nb_breakpoints; i++) {
+ breakpoint_invalidate(env, env->breakpoints[i]);
+ }
+ env->nb_breakpoints = 0;
+#endif
+}
+
/* remove a breakpoint */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc)
{
vfprintf(stderr, fmt, ap);
fprintf(stderr, "\n");
#ifdef TARGET_I386
- if(env->intercept & INTERCEPT_SVM_MASK) {
- /* most probably the virtual machine should not
- be shut down but rather caught by the VMM */
- vmexit(SVM_EXIT_SHUTDOWN, 0);
- }
cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
cpu_dump_state(env, stderr, fprintf, 0);
#if !defined(CONFIG_USER_ONLY)
+static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
+{
+ unsigned int i;
+
+    /* Discard jump cache entries for any tb which might potentially
+       overlap the flushed page. A TB may straddle a page boundary, so
+       entries hashed from the preceding page must be cleared as well. */
+ i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
+ memset (&env->tb_jmp_cache[i], 0,
+ TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
+
+ i = tb_jmp_cache_hash_page(addr);
+ memset (&env->tb_jmp_cache[i], 0,
+ TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
+}
+
/* NOTE: if flush_global is true, also flush global entries (not
implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
void tlb_flush_page(CPUState *env, target_ulong addr)
{
int i;
- TranslationBlock *tb;
#if defined(DEBUG_TLB)
printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
#endif
- /* Discard jump cache entries for any tb which might potentially
- overlap the flushed page. */
- i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
- memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
-
- i = tb_jmp_cache_hash_page(addr);
- memset (&env->tb_jmp_cache[i], 0, TB_JMP_PAGE_SIZE * sizeof(tb));
+ tlb_flush_jmp_cache(env, addr);
#if !defined(CONFIG_SOFTMMU)
if (addr < MMAP_AREA_END)
} else {
te->addr_read = -1;
}
+
if (prot & PAGE_EXEC) {
te->addr_code = address;
} else {
PageDesc *p;
target_ulong addr;
+ /* mmap_lock should already be held. */
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
if (flags & PAGE_WRITE)
PageDesc *p, *p1;
target_ulong host_start, host_end, addr;
+    /* Technically this isn't safe inside a signal handler. However, we
+       know this only ever happens in a synchronous SEGV handler, so in
+       practice it seems to be ok. */
+ mmap_lock();
+
host_start = address & qemu_host_page_mask;
page_index = host_start >> TARGET_PAGE_BITS;
p1 = page_find(page_index);
- if (!p1)
+ if (!p1) {
+ mmap_unlock();
return 0;
+ }
host_end = host_start + qemu_host_page_size;
p = p1;
prot = 0;
#ifdef DEBUG_TB_CHECK
tb_invalidate_check(address);
#endif
+ mmap_unlock();
return 1;
}
}
+ mmap_unlock();
return 0;
}
#endif /* defined(CONFIG_USER_ONLY) */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- int memory);
+ ram_addr_t memory);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- int orig_memory);
+ ram_addr_t orig_memory);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
need_subpage) \
do { \
ram_addr_t orig_size = size;
void *subpage;
+#ifdef USE_KQEMU
+ /* XXX: should not depend on cpu context */
+ env = first_cpu;
+ if (env->kqemu_enabled) {
+ kqemu_set_phys_mem(start_addr, size, phys_offset);
+ }
+#endif
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
ram_addr_t addr;
- if ((phys_ram_alloc_offset + size) >= phys_ram_size) {
- fprintf(stderr, "Not enough memory (requested_size = %lu, max memory = %" PRIu64 ")\n",
- size, (uint64_t)phys_ram_size);
+ if ((phys_ram_alloc_offset + size) > phys_ram_size) {
+        fprintf(stderr, "Not enough memory (requested_size = %" PRIu64 ", max memory = %" PRIu64 ")\n",
+ (uint64_t)size, (uint64_t)phys_ram_size);
abort();
}
addr = phys_ram_alloc_offset;
};
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
- int memory)
+ ram_addr_t memory)
{
int idx, eidx;
unsigned int i;
}
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
- int orig_memory)
+ ram_addr_t orig_memory)
{
subpage_t *mmio;
int subpage_memory;
if (!(flags & PAGE_WRITE))
return;
/* XXX: this code should not depend on lock_user */
- if (!(p = lock_user(VERIFY_WRITE, addr, len, 0)))
+ if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
/* FIXME - should this return an error rather than just fail? */
return;
- memcpy(p, buf, len);
- unlock_user(p, addr, len);
+ memcpy(p, buf, l);
+ unlock_user(p, addr, l);
} else {
if (!(flags & PAGE_READ))
return;
/* XXX: this code should not depend on lock_user */
- if (!(p = lock_user(VERIFY_READ, addr, len, 1)))
+ if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
/* FIXME - should this return an error rather than just fail? */
return;
- memcpy(buf, p, len);
+ memcpy(buf, p, l);
unlock_user(p, addr, 0);
}
len -= l;
}
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
- cpu_fprintf(f, "TB count %d\n", nb_tbs);
+ cpu_fprintf(f, "gen code size %ld/%ld\n",
+ code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
+ cpu_fprintf(f, "TB count %d/%d\n",
+ nb_tbs, code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
nb_tbs ? target_code_size / nb_tbs : 0,
max_target_code_size);
cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
-#ifdef CONFIG_PROFILER
- {
- int64_t tot;
- tot = dyngen_interm_time + dyngen_code_time;
- cpu_fprintf(f, "JIT cycles %" PRId64 " (%0.3f s at 2.4 GHz)\n",
- tot, tot / 2.4e9);
- cpu_fprintf(f, "translated TBs %" PRId64 " (aborted=%" PRId64 " %0.1f%%)\n",
- dyngen_tb_count,
- dyngen_tb_count1 - dyngen_tb_count,
- dyngen_tb_count1 ? (double)(dyngen_tb_count1 - dyngen_tb_count) / dyngen_tb_count1 * 100.0 : 0);
- cpu_fprintf(f, "avg ops/TB %0.1f max=%d\n",
- dyngen_tb_count ? (double)dyngen_op_count / dyngen_tb_count : 0, dyngen_op_count_max);
- cpu_fprintf(f, "old ops/total ops %0.1f%%\n",
- dyngen_op_count ? (double)dyngen_old_op_count / dyngen_op_count * 100.0 : 0);
- cpu_fprintf(f, "deleted ops/TB %0.2f\n",
- dyngen_tb_count ?
- (double)dyngen_tcg_del_op_count / dyngen_tb_count : 0);
- cpu_fprintf(f, "cycles/op %0.1f\n",
- dyngen_op_count ? (double)tot / dyngen_op_count : 0);
- cpu_fprintf(f, "cycles/in byte %0.1f\n",
- dyngen_code_in_len ? (double)tot / dyngen_code_in_len : 0);
- cpu_fprintf(f, "cycles/out byte %0.1f\n",
- dyngen_code_out_len ? (double)tot / dyngen_code_out_len : 0);
- if (tot == 0)
- tot = 1;
- cpu_fprintf(f, " gen_interm time %0.1f%%\n",
- (double)dyngen_interm_time / tot * 100.0);
- cpu_fprintf(f, " gen_code time %0.1f%%\n",
- (double)dyngen_code_time / tot * 100.0);
- cpu_fprintf(f, "cpu_restore count %" PRId64 "\n",
- dyngen_restore_count);
- cpu_fprintf(f, " avg cycles %0.1f\n",
- dyngen_restore_count ? (double)dyngen_restore_time / dyngen_restore_count : 0);
- {
- extern void dump_op_count(void);
- dump_op_count();
- }
- }
-#endif
+ tcg_dump_info(f, cpu_fprintf);
}
#if !defined(CONFIG_USER_ONLY)