#include "kvm.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
+#include <signal.h>
#endif
//#define DEBUG_TB_INVALIDATE
#endif
/* log support */
+#ifdef _WIN32
+static const char *logfilename = "qemu.log";
+#else
static const char *logfilename = "/tmp/qemu.log";
+#endif
FILE *logfile;
int loglevel;
static int log_append = 0;
exit(1);
}
}
-#elif defined(__FreeBSD__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
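+/* GNU/kFreeBSD defines __FreeBSD_kernel__ but not __FreeBSD__, and needs
+   the same mmap path as FreeBSD proper. */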
{
int flags;
void *addr = NULL;
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
-static void cpu_common_pre_save(const void *opaque)
+static void cpu_common_pre_save(void *opaque)
{
- CPUState *env = (void *)opaque;
+ CPUState *env = opaque;
cpu_synchronize_state(env);
}
}
#if defined(TARGET_HAS_ICE)
+#if defined(CONFIG_USER_ONLY)
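+/* In user-mode emulation the TB cache is keyed by guest virtual address,
+   so the pc can be invalidated directly. */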
+static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+{
+ tb_invalidate_phys_page_range(pc, pc + 1, 0);
+}
+#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
target_phys_addr_t addr;
tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
+#endif /* TARGET_HAS_ICE */
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
static void cpu_unlink_tb(CPUState *env)
{
-#if defined(CONFIG_USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
emulation this often isn't actually as bad as it sounds. Often
signals are used primarily to interrupt blocking syscalls. */
-#else
TranslationBlock *tb;
static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
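+ /* Always take the lock: the old testandset() try-lock silently skipped
+    the unlink when another CPU already held it. */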
+ spin_lock(&interrupt_lock);
tb = env->current_tb;
/* if the cpu is currently executing code, we must unlink it and
all the potentially executing TB */
- if (tb && !testandset(&interrupt_lock)) {
+ if (tb) {
env->current_tb = NULL;
tb_reset_jump_recursive(tb);
- resetlock(&interrupt_lock);
}
-#endif
+ spin_unlock(&interrupt_lock);
}
/* mask must never be zero, except for A20 change call */
{ 0, NULL, NULL },
};
+#ifndef CONFIG_USER_ONLY
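+/* Clients (e.g. the KVM backend) that need to track changes to the guest
+   physical memory map register themselves here. */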
+static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
+ = QLIST_HEAD_INITIALIZER(memory_client_list);
+
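+/* Tell every registered client that [start_addr, start_addr + size) now
+   maps to phys_offset. */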
+static void cpu_notify_set_memory(target_phys_addr_t start_addr,
+ ram_addr_t size,
+ ram_addr_t phys_offset)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ client->set_memory(client, start_addr, size, phys_offset);
+ }
+}
+
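+/* Have each client sync its dirty bitmap for [start, end); the first
+   failure aborts the walk. */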
+static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
+ target_phys_addr_t end)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ int r = client->sync_dirty_bitmap(client, start, end);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+}
+
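+/* Propagate a migration-log enable/disable request to every client. */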
+static int cpu_notify_migration_log(int enable)
+{
+ CPUPhysMemoryClient *client;
+ QLIST_FOREACH(client, &memory_client_list, list) {
+ int r = client->migration_log(client, enable);
+ if (r < 0)
+ return r;
+ }
+ return 0;
+}
+
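+/* Walk one L1 map and replay every assigned page to the client, one
+   TARGET_PAGE_SIZE entry at a time. */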
+static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
+ CPUPhysMemoryClient *client)
+{
+ PhysPageDesc *pd;
+ int l1, l2;
+
+ for (l1 = 0; l1 < L1_SIZE; ++l1) {
+ pd = phys_map[l1];
+ if (!pd) {
+ continue;
+ }
+ for (l2 = 0; l2 < L2_SIZE; ++l2) {
+ if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
+ continue;
+ }
+ client->set_memory(client, pd[l2].region_offset,
+ TARGET_PAGE_SIZE, pd[l2].phys_offset);
+ }
+ }
+}
+
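+/* Replay the whole current memory map to a single client.  Physical
+   address spaces wider than 32 bits carry one extra level of indirection
+   above the L1 map. */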
+static void phys_page_for_each(CPUPhysMemoryClient *client)
+{
+#if TARGET_PHYS_ADDR_SPACE_BITS > 32
+
+#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
+#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+ void **phys_map = (void **)l1_phys_map;
+ int l1;
+ if (!l1_phys_map) {
+ return;
+ }
+ for (l1 = 0; l1 < L1_SIZE; ++l1) {
+ if (phys_map[l1]) {
+ phys_page_for_each_in_l1_map(phys_map[l1], client);
+ }
+ }
+#else
+ if (!l1_phys_map) {
+ return;
+ }
+ phys_page_for_each_in_l1_map(l1_phys_map, client);
+#endif
+}
+
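+/* Register a client and immediately feed it the existing mappings, so a
+   client that registers after RAM has been set up still sees every page. */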
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
+{
+ QLIST_INSERT_HEAD(&memory_client_list, client, list);
+ phys_page_for_each(client);
+}
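+
+/* A minimal client is a struct wiring up the three callbacks used above,
+   roughly (sketch only; the dummy_* handlers are placeholders, not part
+   of this patch):
+
+       static CPUPhysMemoryClient dummy_client = {
+           .set_memory = dummy_set_memory,
+           .sync_dirty_bitmap = dummy_sync_dirty_bitmap,
+           .migration_log = dummy_migration_log,
+       };
+       cpu_register_phys_memory_client(&dummy_client);
+*/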
+
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
+{
+ QLIST_REMOVE(client, list);
+}
+#endif
+
static int cmp1(const char *s1, int n, const char *s2)
{
if (strlen(s2) != n)
}
va_end(ap2);
va_end(ap);
+#if defined(CONFIG_USER_ONLY)
+ {
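+ /* The guest may have installed its own SIGABRT handler; restore the
+    default action so the abort() below really terminates qemu. */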
+ struct sigaction act;
+ sigfillset(&act.sa_mask);
+ act.sa_handler = SIG_DFL;
+ sigaction(SIGABRT, &act, NULL);
+ }
+#endif
abort();
}
int cpu_physical_memory_set_dirty_tracking(int enable)
{
+ int ret = 0;
in_migration = enable;
- if (kvm_enabled()) {
- return kvm_set_migration_log(enable);
- }
- return 0;
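+ /* Clients take a normalized 0/1 flag. */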
+ ret = cpu_notify_migration_log(!!enable);
+ return ret;
}
int cpu_physical_memory_get_dirty_tracking(void)
int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
target_phys_addr_t end_addr)
{
- int ret = 0;
+ int ret;
- if (kvm_enabled())
- ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+ ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
return ret;
}
} \
} while (0)
-/* register physical memory. 'size' must be a multiple of the target
- page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+/* register physical memory.
+ For RAM, 'size' must be a multiple of the target page size.
+ If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
io memory page. The address used when calling the IO function is
the offset from the start of the region, plus region_offset. Both
start_addr and region_offset are rounded down to a page boundary
before calculating this offset. This should not be a problem unless
the low bits of start_addr and region_offset differ. */
ram_addr_t orig_size = size;
void *subpage;
- if (kvm_enabled())
- kvm_set_phys_mem(start_addr, size, phys_offset);
+ cpu_notify_set_memory(start_addr, size, phys_offset);
if (phys_offset == IO_MEM_UNASSIGNED) {
region_offset = start_addr;
kvm_uncoalesce_mmio_region(addr, size);
}
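+
+/* Drain any writes batched in KVM's coalesced-MMIO ring; a no-op when
+   KVM is not in use. */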
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+ if (kvm_enabled())
+ kvm_flush_coalesced_mmio_buffer();
+}
+
ram_addr_t qemu_ram_alloc(ram_addr_t size)
{
RAMBlock *new_block;
size = TARGET_PAGE_ALIGN(size);
new_block = qemu_malloc(sizeof(*new_block));
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+ new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+#else
new_block->host = qemu_vmalloc(size);
+#endif
+#ifdef MADV_MERGEABLE
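+ /* Allow Linux KSM to merge identical pages of this RAM block. */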
+ madvise(new_block->host, size, MADV_MERGEABLE);
+#endif
new_block->offset = last_ram_offset;
new_block->length = size;
ram_addr_t qemu_ram_addr_from_host(void *ptr)
{
RAMBlock *prev;
- RAMBlock **prevp;
RAMBlock *block;
uint8_t *host = ptr;
prev = NULL;
- prevp = &ram_blocks;
block = ram_blocks;
while (block && (block->host > host
|| block->host + block->length <= host)) {
- if (prev)
- prevp = &prev->next;
prev = block;
block = block->next;
}
io_mem_used[i] = 1;
return i;
}
-
+ fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
return -1;
}
if (is_write) {
cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
}
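+ /* bounce.buffer comes from qemu_memalign(), so it must be released with
+    qemu_vfree() rather than qemu_free(). */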
- qemu_free(bounce.buffer);
+ qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
cpu_notify_map_clients();
}