#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
+#include <signal.h>
#endif
//#define DEBUG_TB_INVALIDATE
/* XXX: for system emulation, it could just be an array */
static PageDesc *l1_map[L1_SIZE];
-static PhysPageDesc **l1_phys_map;
#if !defined(CONFIG_USER_ONLY)
+static PhysPageDesc **l1_phys_map;
+
static void io_mem_init(void);
/* io memory support */
#endif
/* log support */
+#ifdef _WIN32
+static const char *logfilename = "qemu.log";
+#else
static const char *logfilename = "/tmp/qemu.log";
+#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
static int tb_flush_count;
static int tb_phys_invalidate_count;
-#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
-typedef struct subpage_t {
- target_phys_addr_t base;
- CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
- CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
- void *opaque[TARGET_PAGE_SIZE][2][4];
- ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
-} subpage_t;
-
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
while ((1 << qemu_host_page_bits) < qemu_host_page_size)
qemu_host_page_bits++;
qemu_host_page_mask = ~(qemu_host_page_size - 1);
+#if !defined(CONFIG_USER_ONLY)
l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
+#endif
#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
{
return p + (index & (L2_SIZE - 1));
}
+#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
void **lp, **p;
return phys_page_find_alloc(index, 0);
}
-#if !defined(CONFIG_USER_ONLY)
static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr);
exit(1);
}
}
-#elif defined(__FreeBSD__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
{
int flags;
void *addr = NULL;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
-static void cpu_common_pre_save(const void *opaque)
+static void cpu_common_pre_save(void *opaque)
{
- CPUState *env = (void *)opaque;
+ CPUState *env = opaque;
cpu_synchronize_state(env);
}
return 0;
}
-static int cpu_common_post_load(void *opaque)
+static int cpu_common_post_load(void *opaque, int version_id)
{
CPUState *env = opaque;
}
env->cpu_index = cpu_index;
env->numa_node = 0;
- TAILQ_INIT(&env->breakpoints);
- TAILQ_INIT(&env->watchpoints);
+ QTAILQ_INIT(&env->breakpoints);
+ QTAILQ_INIT(&env->watchpoints);
*penv = env;
#if defined(CONFIG_USER_ONLY)
cpu_list_unlock();
}
#if defined(TARGET_HAS_ICE)
+#if defined(CONFIG_USER_ONLY)
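+/* In user-mode emulation guest addresses map straight onto host memory,
+   so the TB covering 'pc' can be invalidated directly, without the
+   guest-physical lookup the system-mode variant below performs. */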
+static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+{
+ tb_invalidate_phys_page_range(pc, pc + 1, 0);
+}
+#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
target_phys_addr_t addr;
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
+#endif /* TARGET_HAS_ICE */
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
/* keep all GDB-injected watchpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+ QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
else
- TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+ QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
tlb_flush_page(env, addr);
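/* A minimal usage sketch for the watchpoint API above: plant a 4-byte,
   GDB-style write watchpoint and drop it again by reference.  The flag
   combination is illustrative; it mirrors the BP_GDB/BP_MEM_* values used
   by callers elsewhere in the tree. */
static int example_watch(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;
    int ret;

    ret = cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp);
    if (ret < 0)
        return ret;
    /* ... run the guest; a hit sets BP_WATCHPOINT_HIT in wp->flags ... */
    cpu_watchpoint_remove_by_ref(env, wp);
    return 0;
}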
target_ulong len_mask = ~(len - 1);
CPUWatchpoint *wp;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if (addr == wp->vaddr && len_mask == wp->len_mask
&& flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
cpu_watchpoint_remove_by_ref(env, wp);
/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
- TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+ QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
tlb_flush_page(env, watchpoint->vaddr);
{
CPUWatchpoint *wp, *next;
- TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+ QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
if (wp->flags & mask)
cpu_watchpoint_remove_by_ref(env, wp);
}
/* keep all GDB-injected breakpoints in front */
if (flags & BP_GDB)
- TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+ QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
else
- TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+ QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
breakpoint_invalidate(env, pc);
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp;
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
if (bp->pc == pc && bp->flags == flags) {
cpu_breakpoint_remove_by_ref(env, bp);
return 0;
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
- TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
+ QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
breakpoint_invalidate(env, breakpoint->pc);
#if defined(TARGET_HAS_ICE)
CPUBreakpoint *bp, *next;
- TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+ QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
if (bp->flags & mask)
cpu_breakpoint_remove_by_ref(env, bp);
}
static void cpu_unlink_tb(CPUState *env)
{
-#if defined(CONFIG_USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
emulation this often isn't actually as bad as it sounds. Often
signals are used primarily to interrupt blocking syscalls. */
-#else
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
+ spin_lock(&interrupt_lock);
tb = env->current_tb;
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
- if (tb && !testandset(&interrupt_lock)) {
+ if (tb) {
env->current_tb = NULL;
tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
}
-#endif
+ spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
{ 0, NULL, NULL },
};
+#ifndef CONFIG_USER_ONLY
+static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
+ = QLIST_HEAD_INITIALIZER(memory_client_list);
+
+static void cpu_notify_set_memory(target_phys_addr_t start_addr,
+ ram_addr_t size,
+ ram_addr_t phys_offset)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ client->set_memory(client, start_addr, size, phys_offset);
+ }
+}
+
+static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
+ target_phys_addr_t end)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ int r = client->sync_dirty_bitmap(client, start, end);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+}
+
+static int cpu_notify_migration_log(int enable)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ int r = client->migration_log(client, enable);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+}
+
+static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
+ CPUPhysMemoryClient *client)
+{
+ PhysPageDesc *pd;
+ int l1, l2;
+
+ for (l1 = 0; l1 < L1_SIZE; ++l1) {
+ pd = phys_map[l1];
+ if (!pd) {
+ continue;
+ }
+ for (l2 = 0; l2 < L2_SIZE; ++l2) {
+ if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
+ continue;
+ }
+ client->set_memory(client, pd[l2].region_offset,
+ TARGET_PAGE_SIZE, pd[l2].phys_offset);
+ }
+ }
+}
+
+static void phys_page_for_each(CPUPhysMemoryClient *client)
+{
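+    /* When the physical address space exceeds 32 bits, l1_phys_map holds an
+       extra level of indirection; walk it here and hand each populated L1
+       table to phys_page_for_each_in_l1_map() above. */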
+#if TARGET_PHYS_ADDR_SPACE_BITS > 32
+
+#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
+#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+ void **phys_map = (void **)l1_phys_map;
+ int l1;
+ if (!l1_phys_map) {
+ return;
+ }
+ for (l1 = 0; l1 < L1_SIZE; ++l1) {
+ if (phys_map[l1]) {
+ phys_page_for_each_in_l1_map(phys_map[l1], client);
+ }
+ }
+#else
+ if (!l1_phys_map) {
+ return;
+ }
+ phys_page_for_each_in_l1_map(l1_phys_map, client);
+#endif
+}
+
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
+{
+ QLIST_INSERT_HEAD(&memory_client_list, client, list);
+ phys_page_for_each(client);
+}
+
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
+{
+ QLIST_REMOVE(client, list);
+}
+#endif
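/* A sketch of a CPUPhysMemoryClient user, assuming the struct declares the
   set_memory/sync_dirty_bitmap/migration_log callbacks plus the QLIST_ENTRY
   named 'list' that the notifiers above rely on.  A real client, such as an
   accelerator keeping its own slot table, would do its bookkeeping in these
   hooks. */
static void example_set_memory(struct CPUPhysMemoryClient *client,
                               target_phys_addr_t start_addr,
                               ram_addr_t size, ram_addr_t phys_offset)
{
    /* Record the new guest-physical mapping here. */
}

static int example_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                     target_phys_addr_t start,
                                     target_phys_addr_t end)
{
    return 0;   /* nothing to fetch in this stub */
}

static int example_migration_log(struct CPUPhysMemoryClient *client,
                                 int enable)
{
    return 0;   /* dirty logging toggled; nothing to do in this stub */
}

static CPUPhysMemoryClient example_client = {
    .set_memory = example_set_memory,
    .sync_dirty_bitmap = example_sync_dirty_bitmap,
    .migration_log = example_migration_log,
};

/* Registering the client replays every existing mapping through its
   set_memory hook:  cpu_register_phys_memory_client(&example_client);  */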
+
static int cmp1(const char *s1, int n, const char *s2)
{
if (strlen(s2) != n)
}
va_end(ap2);
va_end(ap);
+#if defined(CONFIG_USER_ONLY)
+ {
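+        /* Reset SIGABRT to its default action so the abort() below is not
+           intercepted by a handler the guest may have installed through the
+           signal emulation layer. */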
+ struct sigaction act;
+ sigfillset(&act.sa_mask);
+ act.sa_handler = SIG_DFL;
+ sigaction(SIGABRT, &act, NULL);
+ }
+#endif
abort();
}
/* Clone all break/watchpoints.
Note: Once we support ptrace with hw-debug register access, make sure
BP_CPU break/watchpoints are handled correctly on clone. */
- TAILQ_INIT(&env->breakpoints);
- TAILQ_INIT(&env->watchpoints);
+ QTAILQ_INIT(&env->breakpoints);
+ QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
- TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+ QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
}
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
wp->flags, NULL);
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
+ int ret = 0;
in_migration = enable;
- if (kvm_enabled()) {
- return kvm_set_migration_log(enable);
- }
- return 0;
+ ret = cpu_notify_migration_log(!!enable);
+ return ret;
}
int cpu_physical_memory_get_dirty_tracking(void)
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
target_phys_addr_t end_addr)
{
- int ret = 0;
+ int ret;
- if (kvm_enabled())
- ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+ ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
return ret;
}
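/* Sketch of how migration-style code might drive the two helpers above,
   assuming the usual TARGET_PHYS_ADDR_MAX constant spans the whole
   guest-physical space: */
static int example_start_dirty_logging(void)
{
    int ret = cpu_physical_memory_set_dirty_tracking(1);
    if (ret < 0)
        return ret;
    /* Pull an initial dirty snapshot from all registered clients. */
    return cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX);
}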
code_address = address;
/* Make accesses to pages with watchpoints go via the
watchpoint trap routines. */
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
iotlb = io_mem_watch + paddr;
/* TODO: The memory case can be optimized by not trapping
#if !defined(CONFIG_USER_ONLY)
+#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
+typedef struct subpage_t {
+ target_phys_addr_t base;
+ CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
+ CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
+ void *opaque[TARGET_PAGE_SIZE][2][4];
+ ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
+} subpage_t;
+
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
ram_addr_t memory, ram_addr_t region_offset);
static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
} \
} while (0)
-/* register physical memory. 'size' must be a multiple of the target
- page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+/* register physical memory.
+ For RAM, 'size' must be a multiple of the target page size.
+ If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page. The address used when calling the IO function is
the offset from the start of the region, plus region_offset. Both
start_addr and region_offset are rounded down to a page boundary
ram_addr_t orig_size = size;
void *subpage;
- if (kvm_enabled())
- kvm_set_phys_mem(start_addr, size, phys_offset);
+ cpu_notify_set_memory(start_addr, size, phys_offset);
if (phys_offset == IO_MEM_UNASSIGNED) {
region_offset = start_addr;
kvm_uncoalesce_mmio_region(addr, size);
}
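/* Usage sketch for the registration API described above, assuming the
   conventional cpu_register_physical_memory() wrapper and the IO_MEM_RAM
   marker: allocate host-backed RAM, then map it at guest-physical 0. */
static void example_map_ram(void)
{
    ram_addr_t ram_off = qemu_ram_alloc(0x800000);      /* 8 MB */

    cpu_register_physical_memory(0x00000000, 0x800000,
                                 ram_off | IO_MEM_RAM);
}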
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+ if (kvm_enabled())
+ kvm_flush_coalesced_mmio_buffer();
+}
+
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
RAMBlock *new_block;
size = TARGET_PAGE_ALIGN(size);
new_block = qemu_malloc(sizeof(*new_block));
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+    /* XXX: S390 KVM requires the topmost vma of the RAM to be < 256GB */
+ new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+#else
new_block->host = qemu_vmalloc(size);
+#endif
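+    /* Where the host kernel provides MADV_MERGEABLE, opt the new RAM block
+       into KSM so identical guest pages can be merged. */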
+#ifdef MADV_MERGEABLE
+ madvise(new_block->host, size, MADV_MERGEABLE);
+#endif
new_block->offset = last_ram_offset;
new_block->length = size;
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
RAMBlock *prev;
- RAMBlock **prevp;
RAMBlock *block;
uint8_t *host = ptr;
prev = NULL;
- prevp = &ram_blocks;
block = ram_blocks;
while (block && (block->host > host
|| block->host + block->length <= host)) {
- if (prev)
- prevp = &prev->next;
prev = block;
block = block->next;
}
return;
}
vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
- TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+ QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
if ((vaddr == (wp->vaddr & len_mask) ||
(vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
wp->flags |= BP_WATCHPOINT_HIT;
io_mem_used[i] = 1;
return i;
}
-
+    fprintf(stderr, "Ran out of io_mem_idx, max %d!\n", IO_MEM_NB_ENTRIES);
return -1;
}
/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
- int len, int is_write)
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+ uint8_t *buf, int len, int is_write)
{
int l, flags;
target_ulong page;
l = len;
flags = page_get_flags(page);
if (!(flags & PAGE_VALID))
- return;
+ return -1;
if (is_write) {
if (!(flags & PAGE_WRITE))
- return;
+ return -1;
/* XXX: this code should not depend on lock_user */
if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
- /* FIXME - should this return an error rather than just fail? */
- return;
+ return -1;
memcpy(p, buf, l);
unlock_user(p, addr, l);
} else {
if (!(flags & PAGE_READ))
- return;
+ return -1;
/* XXX: this code should not depend on lock_user */
if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
- /* FIXME - should this return an error rather than just fail? */
- return;
+ return -1;
memcpy(buf, p, l);
unlock_user(p, addr, 0);
}
buf += l;
addr += l;
}
+ return 0;
}
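/* Typical gdbstub-style caller of the function above: read guest memory and
   treat a negative return as an unmapped or protected page. */
static int example_read_guest(CPUState *env, target_ulong addr)
{
    uint8_t buf[4];

    if (cpu_memory_rw_debug(env, addr, buf, sizeof(buf), 0) < 0)
        return -1;      /* page invalid or not readable */
    /* ... use buf ... */
    return 0;
}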
#else
typedef struct MapClient {
void *opaque;
void (*callback)(void *opaque);
- LIST_ENTRY(MapClient) link;
+ QLIST_ENTRY(MapClient) link;
} MapClient;
-static LIST_HEAD(map_client_list, MapClient) map_client_list
- = LIST_HEAD_INITIALIZER(map_client_list);
+static QLIST_HEAD(map_client_list, MapClient) map_client_list
+ = QLIST_HEAD_INITIALIZER(map_client_list);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
client->opaque = opaque;
client->callback = callback;
- LIST_INSERT_HEAD(&map_client_list, client, link);
+ QLIST_INSERT_HEAD(&map_client_list, client, link);
return client;
}
{
MapClient *client = (MapClient *)_client;
- LIST_REMOVE(client, link);
+ QLIST_REMOVE(client, link);
qemu_free(client);
}
{
MapClient *client;
- while (!LIST_EMPTY(&map_client_list)) {
- client = LIST_FIRST(&map_client_list);
+ while (!QLIST_EMPTY(&map_client_list)) {
+ client = QLIST_FIRST(&map_client_list);
client->callback(client->opaque);
cpu_unregister_map_client(client);
}
if (is_write) {
cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
}
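    /* bounce.buffer is obtained with qemu_memalign() when the map call has
       to bounce, so release it with qemu_vfree() rather than qemu_free(). */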
- qemu_free(bounce.buffer);
+ qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
cpu_notify_map_clients();
}
cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
}
-#endif
-
/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
uint8_t *buf, int len, int is_write)
if (l > len)
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
-#if !defined(CONFIG_USER_ONLY)
if (is_write)
cpu_physical_memory_write_rom(phys_addr, buf, l);
else
-#endif
cpu_physical_memory_rw(phys_addr, buf, l, is_write);
len -= l;
buf += l;
}
return 0;
}
+#endif
/* in deterministic execution mode, instructions doing device I/Os
must be at the end of the TB */