Move subpage definitions
diff --git a/exec.c b/exec.c
index 6a62a8a054487f1c0ed73c07acb4463c4efff8e6..3276a0874faeebe6521ad7dd157760411025e4b7 100644
--- a/exec.c
+++ b/exec.c
@@ -40,6 +40,7 @@
 #include "kvm.h"
 #if defined(CONFIG_USER_ONLY)
 #include <qemu.h>
+#include <signal.h>
 #endif
 
 //#define DEBUG_TB_INVALIDATE
@@ -178,9 +179,10 @@ unsigned long qemu_host_page_mask;
 
 /* XXX: for system emulation, it could just be an array */
 static PageDesc *l1_map[L1_SIZE];
-static PhysPageDesc **l1_phys_map;
 
 #if !defined(CONFIG_USER_ONLY)
+static PhysPageDesc **l1_phys_map;
+
 static void io_mem_init(void);
 
 /* io memory support */
@@ -192,7 +194,11 @@ static int io_mem_watch;
 #endif
 
 /* log support */
+#ifdef WIN32
+static const char *logfilename = "qemu.log";
+#else
 static const char *logfilename = "/tmp/qemu.log";
+#endif
 FILE *logfile;
 int loglevel;
 static int log_append = 0;
@@ -202,15 +208,6 @@ static int tlb_flush_count;
 static int tb_flush_count;
 static int tb_phys_invalidate_count;
 
-#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
-typedef struct subpage_t {
-    target_phys_addr_t base;
-    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
-    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
-    void *opaque[TARGET_PAGE_SIZE][2][4];
-    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
-} subpage_t;
-
 #ifdef _WIN32
 static void map_exec(void *addr, long size)
 {
@@ -259,8 +256,10 @@ static void page_init(void)
     while ((1 << qemu_host_page_bits) < qemu_host_page_size)
         qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
+#if !defined(CONFIG_USER_ONLY)
     l1_phys_map = qemu_vmalloc(L1_SIZE * sizeof(void *));
     memset(l1_phys_map, 0, L1_SIZE * sizeof(void *));
+#endif
 
 #if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
     {
@@ -346,6 +345,7 @@ static inline PageDesc *page_find(target_ulong index)
     return p + (index & (L2_SIZE - 1));
 }
 
+#if !defined(CONFIG_USER_ONLY)
 static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
 {
     void **lp, **p;
@@ -390,7 +390,6 @@ static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
     return phys_page_find_alloc(index, 0);
 }
 
-#if !defined(CONFIG_USER_ONLY)
 static void tlb_protect_code(ram_addr_t ram_addr);
 static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                     target_ulong vaddr);
@@ -463,7 +462,7 @@ static void code_gen_alloc(unsigned long tb_size)
             exit(1);
         }
     }
-#elif defined(__FreeBSD__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
     {
         int flags;
         void *addr = NULL;
@@ -513,9 +512,9 @@ void cpu_exec_init_all(unsigned long tb_size)
 
 #if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
 
-static void cpu_common_pre_save(const void *opaque)
+static void cpu_common_pre_save(void *opaque)
 {
-    CPUState *env = (void *)opaque;
+    CPUState *env = opaque;
 
     cpu_synchronize_state(env);
 }
@@ -528,7 +527,7 @@ static int cpu_common_pre_load(void *opaque)
     return 0;
 }
 
-static int cpu_common_post_load(void *opaque)
+static int cpu_common_post_load(void *opaque, int version_id)
 {
     CPUState *env = opaque;
 
@@ -586,8 +585,8 @@ void cpu_exec_init(CPUState *env)
     }
     env->cpu_index = cpu_index;
     env->numa_node = 0;
-    TAILQ_INIT(&env->breakpoints);
-    TAILQ_INIT(&env->watchpoints);
+    QTAILQ_INIT(&env->breakpoints);
+    QTAILQ_INIT(&env->watchpoints);
     *penv = env;
 #if defined(CONFIG_USER_ONLY)
     cpu_list_unlock();
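
The TAILQ_* to QTAILQ_* renames here, and throughout the rest of this diff, track QEMU's switch to its own copy of the BSD queue macros (qemu-queue.h), avoiding collisions with the host's <sys/queue.h>. The semantics are unchanged. A minimal standalone sketch of the same list pattern, using the system TAILQ macros that QTAILQ mirrors:

/* Standalone sketch of the env->breakpoints list pattern.
 * QTAILQ_* in QEMU mirrors the BSD TAILQ_* macros used here. */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct breakpoint {
    unsigned long pc;
    TAILQ_ENTRY(breakpoint) entry;              /* QTAILQ_ENTRY in QEMU */
};

TAILQ_HEAD(bp_list, breakpoint);

int main(void)
{
    struct bp_list breakpoints;
    struct breakpoint *bp;

    TAILQ_INIT(&breakpoints);                   /* QTAILQ_INIT */

    bp = malloc(sizeof(*bp));
    bp->pc = 0x1000;
    TAILQ_INSERT_TAIL(&breakpoints, bp, entry); /* QTAILQ_INSERT_TAIL */

    TAILQ_FOREACH(bp, &breakpoints, entry)      /* QTAILQ_FOREACH */
        printf("breakpoint at 0x%lx\n", bp->pc);

    while ((bp = TAILQ_FIRST(&breakpoints)) != NULL) {
        TAILQ_REMOVE(&breakpoints, bp, entry);  /* QTAILQ_REMOVE */
        free(bp);
    }
    return 0;
}

(QTAILQ_FOREACH_SAFE, used in the removal loops further down, additionally tolerates unlinking the current element mid-iteration; glibc's <sys/queue.h> lacks that variant, hence the TAILQ_FIRST loop above.)
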
@@ -1308,6 +1307,12 @@ static void tb_reset_jump_recursive(TranslationBlock *tb)
 }
 
 #if defined(TARGET_HAS_ICE)
+#if defined(CONFIG_USER_ONLY)
+static void breakpoint_invalidate(CPUState *env, target_ulong pc)
+{
+    tb_invalidate_phys_page_range(pc, pc + 1, 0);
+}
+#else
 static void breakpoint_invalidate(CPUState *env, target_ulong pc)
 {
     target_phys_addr_t addr;
@@ -1326,6 +1331,7 @@ static void breakpoint_invalidate(CPUState *env, target_ulong pc)
     tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
 }
 #endif
+#endif /* TARGET_HAS_ICE */
 
 /* Add a watchpoint.  */
 int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
@@ -1348,9 +1354,9 @@ int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
 
     /* keep all GDB-injected watchpoints in front */
     if (flags & BP_GDB)
-        TAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
+        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
     else
-        TAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
+        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
 
     tlb_flush_page(env, addr);
 
@@ -1366,7 +1372,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
     target_ulong len_mask = ~(len - 1);
     CPUWatchpoint *wp;
 
-    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
         if (addr == wp->vaddr && len_mask == wp->len_mask
                 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
             cpu_watchpoint_remove_by_ref(env, wp);
@@ -1379,7 +1385,7 @@ int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
 /* Remove a specific watchpoint by reference.  */
 void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
 {
-    TAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
+    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
 
     tlb_flush_page(env, watchpoint->vaddr);
 
@@ -1391,7 +1397,7 @@ void cpu_watchpoint_remove_all(CPUState *env, int mask)
 {
     CPUWatchpoint *wp, *next;
 
-    TAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
+    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
         if (wp->flags & mask)
             cpu_watchpoint_remove_by_ref(env, wp);
     }
@@ -1411,9 +1417,9 @@ int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
 
     /* keep all GDB-injected breakpoints in front */
     if (flags & BP_GDB)
-        TAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
+        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
     else
-        TAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
+        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
 
     breakpoint_invalidate(env, pc);
 
@@ -1431,7 +1437,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp;
 
-    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
         if (bp->pc == pc && bp->flags == flags) {
             cpu_breakpoint_remove_by_ref(env, bp);
             return 0;
@@ -1447,7 +1453,7 @@ int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
 void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
 {
 #if defined(TARGET_HAS_ICE)
-    TAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
+    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
 
     breakpoint_invalidate(env, breakpoint->pc);
 
@@ -1461,7 +1467,7 @@ void cpu_breakpoint_remove_all(CPUState *env, int mask)
 #if defined(TARGET_HAS_ICE)
     CPUBreakpoint *bp, *next;
 
-    TAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
+    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
         if (bp->flags & mask)
             cpu_breakpoint_remove_by_ref(env, bp);
     }
@@ -1526,24 +1532,22 @@ void cpu_set_log_filename(const char *filename)
 
 static void cpu_unlink_tb(CPUState *env)
 {
-#if defined(CONFIG_USE_NPTL)
     /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
        problem and hope the cpu will stop of its own accord.  For userspace
        emulation this often isn't actually as bad as it sounds.  Often
        signals are used primarily to interrupt blocking syscalls.  */
-#else
     TranslationBlock *tb;
     static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
 
+    spin_lock(&interrupt_lock);
     tb = env->current_tb;
     /* if the cpu is currently executing code, we must unlink it and
        all the potentially executing TB */
-    if (tb && !testandset(&interrupt_lock)) {
+    if (tb) {
         env->current_tb = NULL;
         tb_reset_jump_recursive(tb);
-        resetlock(&interrupt_lock);
     }
-#endif
+    spin_unlock(&interrupt_lock);
 }
 
 /* mask must never be zero, except for A20 change call */
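
In cpu_unlink_tb() the opportunistic testandset()/resetlock() pair becomes an unconditional spin_lock()/spin_unlock(), so the unchaining can no longer be silently skipped when the lock is contended, and the same code now builds with and without CONFIG_USE_NPTL. A standalone sketch of the before/after locking patterns, using GCC's __sync builtins as a stand-in for QEMU's spinlock_t (an assumption; QEMU's actual spinlock differs in detail):

static volatile int lock_word = 0;

static int testandset(volatile int *p)      /* old pattern: may fail */
{
    return __sync_lock_test_and_set(p, 1);  /* returns previous value */
}

static void spin_lock(volatile int *p)      /* new pattern: always enters */
{
    while (__sync_lock_test_and_set(p, 1))
        ;                                   /* busy-wait for the holder */
}

static void spin_unlock(volatile int *p)
{
    __sync_lock_release(p);
}

void unlink_old(void)
{
    if (!testandset(&lock_word)) {          /* skipped when contended */
        /* ... tb_reset_jump_recursive(tb) ... */
        spin_unlock(&lock_word);
    }
}

void unlink_new(void)
{
    spin_lock(&lock_word);                  /* always performs the unlink */
    /* ... tb_reset_jump_recursive(tb) ... */
    spin_unlock(&lock_word);
}

int main(void) { unlink_new(); return 0; }
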
@@ -1621,6 +1625,101 @@ const CPULogItem cpu_log_items[] = {
     { 0, NULL, NULL },
 };
 
+#ifndef CONFIG_USER_ONLY
+static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
+    = QLIST_HEAD_INITIALIZER(memory_client_list);
+
+static void cpu_notify_set_memory(target_phys_addr_t start_addr,
+                                 ram_addr_t size,
+                                 ram_addr_t phys_offset)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        client->set_memory(client, start_addr, size, phys_offset);
+    }
+}
+
+static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
+                                       target_phys_addr_t end)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->sync_dirty_bitmap(client, start, end);
+        if (r < 0)
+            return r;
+    }
+    return 0;
+}
+
+static int cpu_notify_migration_log(int enable)
+{
+    CPUPhysMemoryClient *client;
+    QLIST_FOREACH(client, &memory_client_list, list) {
+        int r = client->migration_log(client, enable);
+        if (r < 0)
+            return r;
+    }
+    return 0;
+}
+
+static void phys_page_for_each_in_l1_map(PhysPageDesc **phys_map,
+                                         CPUPhysMemoryClient *client)
+{
+    PhysPageDesc *pd;
+    int l1, l2;
+
+    for (l1 = 0; l1 < L1_SIZE; ++l1) {
+        pd = phys_map[l1];
+        if (!pd) {
+            continue;
+        }
+        for (l2 = 0; l2 < L2_SIZE; ++l2) {
+            if (pd[l2].phys_offset == IO_MEM_UNASSIGNED) {
+                continue;
+            }
+            client->set_memory(client, pd[l2].region_offset,
+                               TARGET_PAGE_SIZE, pd[l2].phys_offset);
+        }
+    }
+}
+
+static void phys_page_for_each(CPUPhysMemoryClient *client)
+{
+#if TARGET_PHYS_ADDR_SPACE_BITS > 32
+
+#if TARGET_PHYS_ADDR_SPACE_BITS > (32 + L1_BITS)
+#error unsupported TARGET_PHYS_ADDR_SPACE_BITS
+#endif
+    void **phys_map = (void **)l1_phys_map;
+    int l1;
+    if (!l1_phys_map) {
+        return;
+    }
+    for (l1 = 0; l1 < L1_SIZE; ++l1) {
+        if (phys_map[l1]) {
+            phys_page_for_each_in_l1_map(phys_map[l1], client);
+        }
+    }
+#else
+    if (!l1_phys_map) {
+        return;
+    }
+    phys_page_for_each_in_l1_map(l1_phys_map, client);
+#endif
+}
+
+void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
+{
+    QLIST_INSERT_HEAD(&memory_client_list, client, list);
+    phys_page_for_each(client);
+}
+
+void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
+{
+    QLIST_REMOVE(client, list);
+}
+#endif
+
 static int cmp1(const char *s1, int n, const char *s2)
 {
     if (strlen(s2) != n)
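
This new CPUPhysMemoryClient machinery generalizes what used to be hard-coded KVM calls (kvm_set_phys_mem(), kvm_physical_sync_dirty_bitmap(), kvm_set_migration_log(), all dropped further down in this diff) into a notifier list: a client registers three callbacks, and registration immediately replays the currently mapped physical pages through set_memory() via phys_page_for_each(). A minimal sketch of a client, assuming CPUPhysMemoryClient declares exactly the callbacks and the QLIST_ENTRY that the code above uses (its real declaration lives in a header not shown here):

/* Hypothetical client; the callback signatures are inferred from the
 * call sites in cpu_notify_*() above. */
static void my_set_memory(CPUPhysMemoryClient *client,
                          target_phys_addr_t start_addr,
                          ram_addr_t size, ram_addr_t phys_offset)
{
    /* mirror the new mapping, e.g. into a hypervisor slot table */
}

static int my_sync_dirty_bitmap(CPUPhysMemoryClient *client,
                                target_phys_addr_t start,
                                target_phys_addr_t end)
{
    return 0;   /* pull the dirty log for [start, end) */
}

static int my_migration_log(CPUPhysMemoryClient *client, int enable)
{
    return 0;   /* toggle global dirty logging */
}

static CPUPhysMemoryClient my_client = {
    .set_memory        = my_set_memory,
    .sync_dirty_bitmap = my_sync_dirty_bitmap,
    .migration_log     = my_migration_log,
};

void my_client_init(void)
{
    /* replays every already-mapped page through my_set_memory() */
    cpu_register_phys_memory_client(&my_client);
}
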
@@ -1690,6 +1789,14 @@ void cpu_abort(CPUState *env, const char *fmt, ...)
     }
     va_end(ap2);
     va_end(ap);
+#if defined(CONFIG_USER_ONLY)
+    {
+        struct sigaction act;
+        sigfillset(&act.sa_mask);
+        act.sa_handler = SIG_DFL;
+        sigaction(SIGABRT, &act, NULL);
+    }
+#endif
     abort();
 }
 
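
The new CONFIG_USER_ONLY block restores the default SIGABRT disposition before abort(). In user-mode emulation the guest may have installed its own SIGABRT handler through QEMU's signal layer; resetting to SIG_DFL makes sure cpu_abort() really terminates with a core dump. The same idiom, standalone:

#include <signal.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    struct sigaction act;

    memset(&act, 0, sizeof(act));   /* note: the hunk above leaves
                                       sa_flags uninitialized */
    sigfillset(&act.sa_mask);       /* block other signals during delivery */
    act.sa_handler = SIG_DFL;
    sigaction(SIGABRT, &act, NULL);

    abort();                        /* takes the default, core-dumping path */
}
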
@@ -1712,13 +1819,13 @@ CPUState *cpu_copy(CPUState *env)
     /* Clone all break/watchpoints.
        Note: Once we support ptrace with hw-debug register access, make sure
        BP_CPU break/watchpoints are handled correctly on clone. */
-    TAILQ_INIT(&env->breakpoints);
-    TAILQ_INIT(&env->watchpoints);
+    QTAILQ_INIT(&env->breakpoints);
+    QTAILQ_INIT(&env->watchpoints);
 #if defined(TARGET_HAS_ICE)
-    TAILQ_FOREACH(bp, &env->breakpoints, entry) {
+    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
         cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
     }
-    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
         cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                               wp->flags, NULL);
     }
@@ -1880,11 +1987,10 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 
 int cpu_physical_memory_set_dirty_tracking(int enable)
 {
+    int ret = 0;
     in_migration = enable;
-    if (kvm_enabled()) {
-        return kvm_set_migration_log(enable);
-    }
-    return 0;
+    ret = cpu_notify_migration_log(!!enable);
+    return ret;
 }
 
 int cpu_physical_memory_get_dirty_tracking(void)
@@ -1895,10 +2001,9 @@ int cpu_physical_memory_get_dirty_tracking(void)
 int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                    target_phys_addr_t end_addr)
 {
-    int ret = 0;
+    int ret;
 
-    if (kvm_enabled())
-        ret = kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
+    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
     return ret;
 }
 
@@ -2009,7 +2114,7 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
     code_address = address;
     /* Make accesses to pages with watchpoints go via the
        watchpoint trap routines.  */
-    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
         if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
             iotlb = io_mem_watch + paddr;
             /* TODO: The memory case can be optimized by not trapping
@@ -2267,6 +2372,15 @@ static inline void tlb_set_dirty(CPUState *env,
 
 #if !defined(CONFIG_USER_ONLY)
 
+#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
+typedef struct subpage_t {
+    target_phys_addr_t base;
+    CPUReadMemoryFunc * const *mem_read[TARGET_PAGE_SIZE][4];
+    CPUWriteMemoryFunc * const *mem_write[TARGET_PAGE_SIZE][4];
+    void *opaque[TARGET_PAGE_SIZE][2][4];
+    ram_addr_t region_offset[TARGET_PAGE_SIZE][2][4];
+} subpage_t;
+
 static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                              ram_addr_t memory, ram_addr_t region_offset);
 static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
@@ -2291,8 +2405,9 @@ static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
         }                                                               \
     } while (0)
 
-/* register physical memory. 'size' must be a multiple of the target
-   page size. If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
+/* register physical memory.
+   For RAM, 'size' must be a multiple of the target page size.
+   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
    io memory page.  The address used when calling the IO function is
    the offset from the start of the region, plus region_offset.  Both
    start_addr and region_offset are rounded down to a page boundary
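
The reworded comment pins down the contract: the page-multiple requirement on 'size' applies to RAM, while a phys_offset with non-zero low bits selects an I/O handler rather than RAM. A hedged sketch of the two call shapes (illustrative addresses and helpers, not taken from this diff; the read/write handler tables are elided):

/* RAM: phys_offset is a page-aligned ram_addr_t (IO_MEM_RAM == 0). */
ram_addr_t ram_off = qemu_ram_alloc(0x100000);             /* 1 MiB */
cpu_register_physical_memory(0x00000000, 0x100000, ram_off | IO_MEM_RAM);

/* MMIO: the io-memory index lives in the low bits of phys_offset. */
int io_index = cpu_register_io_memory(my_mem_read, my_mem_write, opaque);
cpu_register_physical_memory(0xfe000000, 0x1000, io_index);
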
@@ -2309,8 +2424,7 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
     ram_addr_t orig_size = size;
     void *subpage;
 
-    if (kvm_enabled())
-        kvm_set_phys_mem(start_addr, size, phys_offset);
+    cpu_notify_set_memory(start_addr, size, phys_offset);
 
     if (phys_offset == IO_MEM_UNASSIGNED) {
         region_offset = start_addr;
@@ -2403,6 +2517,12 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
         kvm_uncoalesce_mmio_region(addr, size);
 }
 
+void qemu_flush_coalesced_mmio_buffer(void)
+{
+    if (kvm_enabled())
+        kvm_flush_coalesced_mmio_buffer();
+}
+
 ram_addr_t qemu_ram_alloc(ram_addr_t size)
 {
     RAMBlock *new_block;
@@ -2410,7 +2530,16 @@ ram_addr_t qemu_ram_alloc(ram_addr_t size)
     size = TARGET_PAGE_ALIGN(size);
     new_block = qemu_malloc(sizeof(*new_block));
 
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+    /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+    new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
+                           MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+#else
     new_block->host = qemu_vmalloc(size);
+#endif
+#ifdef MADV_MERGEABLE
+    madvise(new_block->host, size, MADV_MERGEABLE);
+#endif
     new_block->offset = last_ram_offset;
     new_block->length = size;
 
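
Two independent changes land in qemu_ram_alloc() here: an S390 KVM workaround that mmap()s guest RAM at a low address hint so the topmost RAM vma stays below 256GB, and an madvise(MADV_MERGEABLE) call marking the block as a candidate for Linux kernel samepage merging (KSM), which lets identical guest pages be deduplicated across VM instances. A standalone Linux sketch of the allocate-then-madvise pattern:

#include <sys/mman.h>
#include <stdio.h>

int main(void)
{
    size_t size = 64 << 20;                  /* 64 MiB of "guest RAM" */

    void *host = mmap(NULL, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (host == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

#ifdef MADV_MERGEABLE
    /* Same guard as above: builds even where the libc headers predate
       KSM. Only effective when /sys/kernel/mm/ksm/run is 1. */
    if (madvise(host, size, MADV_MERGEABLE) < 0)
        perror("madvise(MADV_MERGEABLE)");   /* non-fatal */
#endif

    printf("allocated %zu mergeable bytes at %p\n", size, host);
    munmap(host, size);
    return 0;
}
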
@@ -2477,17 +2606,13 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
 ram_addr_t qemu_ram_addr_from_host(void *ptr)
 {
     RAMBlock *prev;
-    RAMBlock **prevp;
     RAMBlock *block;
     uint8_t *host = ptr;
 
     prev = NULL;
-    prevp = &ram_blocks;
     block = ram_blocks;
     while (block && (block->host > host
                      || block->host + block->length <= host)) {
-        if (prev)
-          prevp = &prev->next;
         prev = block;
         block = block->next;
     }
@@ -2663,7 +2788,7 @@ static void check_watchpoint(int offset, int len_mask, int flags)
         return;
     }
     vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
-    TAILQ_FOREACH(wp, &env->watchpoints, entry) {
+    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
         if ((vaddr == (wp->vaddr & len_mask) ||
              (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
             wp->flags |= BP_WATCHPOINT_HIT;
@@ -2905,7 +3030,7 @@ static int get_free_io_mem_idx(void)
             io_mem_used[i] = 1;
             return i;
         }
-
+    fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
     return -1;
 }
 
@@ -2981,8 +3106,8 @@ static void io_mem_init(void)
 
 /* physical memory access (slow version, mainly for debug) */
 #if defined(CONFIG_USER_ONLY)
-void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
-                            int len, int is_write)
+int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
+                        uint8_t *buf, int len, int is_write)
 {
     int l, flags;
     target_ulong page;
@@ -2995,23 +3120,21 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
             l = len;
         flags = page_get_flags(page);
         if (!(flags & PAGE_VALID))
-            return;
+            return -1;
         if (is_write) {
             if (!(flags & PAGE_WRITE))
-                return;
+                return -1;
             /* XXX: this code should not depend on lock_user */
             if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
-                /* FIXME - should this return an error rather than just fail? */
-                return;
+                return -1;
             memcpy(p, buf, l);
             unlock_user(p, addr, l);
         } else {
             if (!(flags & PAGE_READ))
-                return;
+                return -1;
             /* XXX: this code should not depend on lock_user */
             if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
-                /* FIXME - should this return an error rather than just fail? */
-                return;
+                return -1;
             memcpy(buf, p, l);
             unlock_user(p, addr, 0);
         }
@@ -3019,6 +3142,7 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
         buf += l;
         addr += l;
     }
+    return 0;
 }
 
 #else
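
In the user-only build, the old void cpu_physical_memory_rw() is renamed to cpu_memory_rw_debug() and finally reports failure: each silent return on an unmapped or protected page becomes return -1, with 0 on full success, matching the system-mode implementation whose #endif moves to the end of the file below. A sketch of a caller honoring the new error code (the surrounding context is hypothetical):

uint8_t buf[64];

if (cpu_memory_rw_debug(env, guest_addr, buf, sizeof(buf), 0) < 0) {
    /* page invalid or not readable: report instead of consuming
       whatever happened to be left in buf */
    fprintf(stderr, "cannot access guest memory at 0x" TARGET_FMT_lx "\n",
            guest_addr);
}
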
@@ -3169,11 +3293,11 @@ static BounceBuffer bounce;
 typedef struct MapClient {
     void *opaque;
     void (*callback)(void *opaque);
-    LIST_ENTRY(MapClient) link;
+    QLIST_ENTRY(MapClient) link;
 } MapClient;
 
-static LIST_HEAD(map_client_list, MapClient) map_client_list
-    = LIST_HEAD_INITIALIZER(map_client_list);
+static QLIST_HEAD(map_client_list, MapClient) map_client_list
+    = QLIST_HEAD_INITIALIZER(map_client_list);
 
 void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
 {
@@ -3181,7 +3305,7 @@ void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
 
     client->opaque = opaque;
     client->callback = callback;
-    LIST_INSERT_HEAD(&map_client_list, client, link);
+    QLIST_INSERT_HEAD(&map_client_list, client, link);
     return client;
 }
 
@@ -3189,7 +3313,7 @@ void cpu_unregister_map_client(void *_client)
 {
     MapClient *client = (MapClient *)_client;
 
-    LIST_REMOVE(client, link);
+    QLIST_REMOVE(client, link);
     qemu_free(client);
 }
 
@@ -3197,8 +3321,8 @@ static void cpu_notify_map_clients(void)
 {
     MapClient *client;
 
-    while (!LIST_EMPTY(&map_client_list)) {
-        client = LIST_FIRST(&map_client_list);
+    while (!QLIST_EMPTY(&map_client_list)) {
+        client = QLIST_FIRST(&map_client_list);
         client->callback(client->opaque);
         cpu_unregister_map_client(client);
     }
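
The LIST_* to QLIST_* changes here are the same queue-macro rename as above, applied to the map-client list that backs cpu_physical_memory_map(): when the single bounce buffer is busy, mapping can fail, and a caller registers a callback to retry once cpu_notify_map_clients() fires from the unmap path. A sketch of that calling pattern (the DMA helpers are hypothetical):

/* Hypothetical device DMA helper using the map-client protocol. */
void start_dma(void *req);

static void retry_dma(void *opaque)
{
    start_dma(opaque);                 /* bounce buffer freed: try again */
}

void start_dma(void *req)
{
    target_phys_addr_t len = dma_len(req);

    void *p = cpu_physical_memory_map(dma_addr(req), &len, 1 /* write */);
    if (!p) {
        /* no direct mapping and the bounce buffer is in use: queue a
           callback; it runs from cpu_notify_map_clients() above */
        cpu_register_map_client(req, retry_dma);
        return;
    }
    /* ... fill p with len bytes, then release the mapping ... */
    cpu_physical_memory_unmap(p, len, 1, len);
}
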
@@ -3297,7 +3421,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
     if (is_write) {
         cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
     }
-    qemu_free(bounce.buffer);
+    qemu_vfree(bounce.buffer);
     bounce.buffer = NULL;
     cpu_notify_map_clients();
 }
@@ -3516,8 +3640,6 @@ void stq_phys(target_phys_addr_t addr, uint64_t val)
     cpu_physical_memory_write(addr, (const uint8_t *)&val, 8);
 }
 
-#endif
-
 /* virtual memory access for debug (includes writing to ROM) */
 int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                         uint8_t *buf, int len, int is_write)
@@ -3536,11 +3658,9 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
         if (l > len)
             l = len;
         phys_addr += (addr & ~TARGET_PAGE_MASK);
-#if !defined(CONFIG_USER_ONLY)
         if (is_write)
             cpu_physical_memory_write_rom(phys_addr, buf, l);
         else
-#endif
             cpu_physical_memory_rw(phys_addr, buf, l, is_write);
         len -= l;
         buf += l;
@@ -3548,6 +3668,7 @@ int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
     }
     return 0;
 }
+#endif
 
 /* in deterministic execution mode, instructions doing device I/Os
    must be at the end of the TB */