X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=kvm-all.c;h=a3fb8de2688410d38654ebe120ada51aa019f436;hb=9a305c8fc7bb567e9f754ddfe5fb102aaeeb770a;hp=eee294bdf9b15465d4a21a4fd5b89cd8978cbcab;hpb=dd1750d7981cf9e38985c9dfa474dcdbbe236270;p=mirror_qemu.git diff --git a/kvm-all.c b/kvm-all.c index eee294bdf9..a3fb8de268 100644 --- a/kvm-all.c +++ b/kvm-all.c @@ -31,6 +31,7 @@ #include "sysemu/kvm.h" #include "qemu/bswap.h" #include "exec/memory.h" +#include "exec/ram_addr.h" #include "exec/address-spaces.h" #include "qemu/event_notifier.h" #include "trace.h" @@ -72,7 +73,8 @@ typedef struct kvm_dirty_log KVMDirtyLog; struct KVMState { - KVMSlot slots[32]; + KVMSlot *slots; + int nr_slots; int fd; int vmfd; int coalesced_mmio; @@ -107,9 +109,11 @@ struct KVMState KVMState *kvm_state; bool kvm_kernel_irqchip; bool kvm_async_interrupts_allowed; +bool kvm_halt_in_kernel_allowed; bool kvm_irqfds_allowed; bool kvm_msi_via_irqfd_allowed; bool kvm_gsi_routing_allowed; +bool kvm_gsi_direct_mapping; bool kvm_allowed; bool kvm_readonly_mem_allowed; @@ -123,7 +127,7 @@ static KVMSlot *kvm_alloc_slot(KVMState *s) { int i; - for (i = 0; i < ARRAY_SIZE(s->slots); i++) { + for (i = 0; i < s->nr_slots; i++) { if (s->slots[i].memory_size == 0) { return &s->slots[i]; } @@ -139,7 +143,7 @@ static KVMSlot *kvm_lookup_matching_slot(KVMState *s, { int i; - for (i = 0; i < ARRAY_SIZE(s->slots); i++) { + for (i = 0; i < s->nr_slots; i++) { KVMSlot *mem = &s->slots[i]; if (start_addr == mem->start_addr && @@ -161,7 +165,7 @@ static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s, KVMSlot *found = NULL; int i; - for (i = 0; i < ARRAY_SIZE(s->slots); i++) { + for (i = 0; i < s->nr_slots; i++) { KVMSlot *mem = &s->slots[i]; if (mem->memory_size == 0 || @@ -183,7 +187,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram, { int i; - for (i = 0; i < ARRAY_SIZE(s->slots); i++) { + for (i = 0; i < s->nr_slots; i++) { KVMSlot *mem = &s->slots[i]; if (ram >= mem->ram && ram < mem->ram + mem->memory_size) { @@ -355,7 +359,7 @@ static int kvm_set_migration_log(int enable) s->migration_log = enable; - for (i = 0; i < ARRAY_SIZE(s->slots); i++) { + for (i = 0; i < s->nr_slots; i++) { mem = &s->slots[i]; if (!mem->memory_size) { @@ -376,31 +380,10 @@ static int kvm_set_migration_log(int enable) static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section, unsigned long *bitmap) { - unsigned int i, j; - unsigned long page_number, c; - hwaddr addr, addr1; - unsigned int pages = int128_get64(section->size) / getpagesize(); - unsigned int len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS; - unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE; + ram_addr_t start = section->offset_within_region + section->mr->ram_addr; + ram_addr_t pages = int128_get64(section->size) / getpagesize(); - /* - * bitmap-traveling is faster than memory-traveling (for addr...) - * especially when most of the memory is not dirty. 
- */ - for (i = 0; i < len; i++) { - if (bitmap[i] != 0) { - c = leul_to_cpu(bitmap[i]); - do { - j = ffsl(c) - 1; - c &= ~(1ul << j); - page_number = (i * HOST_LONG_BITS + j) * hpratio; - addr1 = page_number * TARGET_PAGE_SIZE; - addr = section->offset_within_region + addr1; - memory_region_set_dirty(section->mr, addr, - TARGET_PAGE_SIZE * hpratio); - } while (c != 0); - } - } + cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages); return 0; } @@ -516,7 +499,7 @@ int kvm_check_extension(KVMState *s, unsigned int extension) return ret; } -static int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val, +static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val, bool assign, uint32_t size, bool datamatch) { int ret; @@ -788,6 +771,7 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add) static void kvm_region_add(MemoryListener *listener, MemoryRegionSection *section) { + memory_region_ref(section->mr); kvm_set_phys_mem(section, true); } @@ -795,6 +779,7 @@ static void kvm_region_del(MemoryListener *listener, MemoryRegionSection *section) { kvm_set_phys_mem(section, false); + memory_region_unref(section->mr); } static void kvm_log_sync(MemoryListener *listener, @@ -836,6 +821,8 @@ static void kvm_mem_ioeventfd_add(MemoryListener *listener, data, true, int128_get64(section->size), match_data); if (r < 0) { + fprintf(stderr, "%s: error adding ioeventfd: %s\n", + __func__, strerror(-r)); abort(); } } @@ -868,6 +855,8 @@ static void kvm_io_ioeventfd_add(MemoryListener *listener, data, true, int128_get64(section->size), match_data); if (r < 0) { + fprintf(stderr, "%s: error adding ioeventfd: %s\n", + __func__, strerror(-r)); abort(); } } @@ -953,7 +942,7 @@ static void clear_gsi(KVMState *s, unsigned int gsi) s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32)); } -static void kvm_init_irq_routing(KVMState *s) +void kvm_init_irq_routing(KVMState *s) { int gsi_count, i; @@ -984,7 +973,7 @@ static void kvm_init_irq_routing(KVMState *s) kvm_arch_init_irq_routing(s); } -static void kvm_irqchip_commit_routes(KVMState *s) +void kvm_irqchip_commit_routes(KVMState *s) { int ret; @@ -1011,15 +1000,10 @@ static void kvm_add_routing_entry(KVMState *s, } n = s->irq_routes->nr++; new = &s->irq_routes->entries[n]; - memset(new, 0, sizeof(*new)); - new->gsi = entry->gsi; - new->type = entry->type; - new->flags = entry->flags; - new->u = entry->u; - set_gsi(s, entry->gsi); + *new = *entry; - kvm_irqchip_commit_routes(s); + set_gsi(s, entry->gsi); } static int kvm_update_routing_entry(KVMState *s, @@ -1034,9 +1018,11 @@ static int kvm_update_routing_entry(KVMState *s, continue; } - entry->type = new_entry->type; - entry->flags = new_entry->flags; - entry->u = new_entry->u; + if(!memcmp(entry, new_entry, sizeof *entry)) { + return 0; + } + + *entry = *new_entry; kvm_irqchip_commit_routes(s); @@ -1048,7 +1034,7 @@ static int kvm_update_routing_entry(KVMState *s, void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin) { - struct kvm_irq_routing_entry e; + struct kvm_irq_routing_entry e = {}; assert(pin < s->gsi_count); @@ -1065,6 +1051,10 @@ void kvm_irqchip_release_virq(KVMState *s, int virq) struct kvm_irq_routing_entry *e; int i; + if (kvm_gsi_direct_mapping()) { + return; + } + for (i = 0; i < s->irq_routes->nr; i++) { e = &s->irq_routes->entries[i]; if (e->gsi == virq) { @@ -1130,7 +1120,7 @@ static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg) QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) { if (route->kroute.u.msi.address_lo == 
(uint32_t)msg.address && route->kroute.u.msi.address_hi == (msg.address >> 32) && - route->kroute.u.msi.data == msg.data) { + route->kroute.u.msi.data == le32_to_cpu(msg.data)) { return route; } } @@ -1145,7 +1135,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) if (s->direct_msi) { msi.address_lo = (uint32_t)msg.address; msi.address_hi = msg.address >> 32; - msi.data = msg.data; + msi.data = le32_to_cpu(msg.data); msi.flags = 0; memset(msi.pad, 0, sizeof(msi.pad)); @@ -1161,15 +1151,16 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) return virq; } - route = g_malloc(sizeof(KVMMSIRoute)); + route = g_malloc0(sizeof(KVMMSIRoute)); route->kroute.gsi = virq; route->kroute.type = KVM_IRQ_ROUTING_MSI; route->kroute.flags = 0; route->kroute.u.msi.address_lo = (uint32_t)msg.address; route->kroute.u.msi.address_hi = msg.address >> 32; - route->kroute.u.msi.data = msg.data; + route->kroute.u.msi.data = le32_to_cpu(msg.data); kvm_add_routing_entry(s, &route->kroute); + kvm_irqchip_commit_routes(s); QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route, entry); @@ -1182,9 +1173,13 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg) int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg) { - struct kvm_irq_routing_entry kroute; + struct kvm_irq_routing_entry kroute = {}; int virq; + if (kvm_gsi_direct_mapping()) { + return msg.data & 0xffff; + } + if (!kvm_gsi_routing_enabled()) { return -ENOSYS; } @@ -1199,16 +1194,21 @@ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg) kroute.flags = 0; kroute.u.msi.address_lo = (uint32_t)msg.address; kroute.u.msi.address_hi = msg.address >> 32; - kroute.u.msi.data = msg.data; + kroute.u.msi.data = le32_to_cpu(msg.data); kvm_add_routing_entry(s, &kroute); + kvm_irqchip_commit_routes(s); return virq; } int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg) { - struct kvm_irq_routing_entry kroute; + struct kvm_irq_routing_entry kroute = {}; + + if (kvm_gsi_direct_mapping()) { + return 0; + } if (!kvm_irqchip_in_kernel()) { return -ENOSYS; @@ -1219,12 +1219,13 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg) kroute.flags = 0; kroute.u.msi.address_lo = (uint32_t)msg.address; kroute.u.msi.address_hi = msg.address >> 32; - kroute.u.msi.data = msg.data; + kroute.u.msi.data = le32_to_cpu(msg.data); return kvm_update_routing_entry(s, &kroute); } -static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign) +static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq, + bool assign) { struct kvm_irqfd irqfd = { .fd = fd, @@ -1232,6 +1233,11 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign) .flags = assign ? 
0 : KVM_IRQFD_FLAG_DEASSIGN, }; + if (rfd != -1) { + irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE; + irqfd.resamplefd = rfd; + } + if (!kvm_irqfds_enabled()) { return -ENOSYS; } @@ -1241,7 +1247,7 @@ static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign) #else /* !KVM_CAP_IRQ_ROUTING */ -static void kvm_init_irq_routing(KVMState *s) +void kvm_init_irq_routing(KVMState *s) { } @@ -1270,24 +1276,24 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg) } #endif /* !KVM_CAP_IRQ_ROUTING */ -int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq) +int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, + EventNotifier *rn, int virq) { - return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true); + return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), + rn ? event_notifier_get_fd(rn) : -1, virq, true); } int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq) { - return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false); + return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq, + false); } static int kvm_irqchip_create(KVMState *s) { - QemuOptsList *list = qemu_find_opts("machine"); int ret; - if (QTAILQ_EMPTY(&list->head) || - !qemu_opt_get_bool(QTAILQ_FIRST(&list->head), - "kernel_irqchip", true) || + if (!qemu_opt_get_bool(qemu_get_machine_opts(), "kernel_irqchip", true) || !kvm_check_extension(s, KVM_CAP_IRQCHIP)) { return 0; } @@ -1303,30 +1309,27 @@ static int kvm_irqchip_create(KVMState *s) * interrupt delivery (though the reverse is not necessarily true) */ kvm_async_interrupts_allowed = true; + kvm_halt_in_kernel_allowed = true; kvm_init_irq_routing(s); return 0; } -static int kvm_max_vcpus(KVMState *s) +/* Find number of supported CPUs using the recommended + * procedure from the kernel API documentation to cope with + * older kernels that may be missing capabilities. + */ +static int kvm_recommended_vcpus(KVMState *s) { - int ret; - - /* Find number of supported CPUs using the recommended - * procedure from the kernel API documentation to cope with - * older kernels that may be missing capabilities. - */ - ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS); - if (ret) { - return ret; - } - ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS); - if (ret) { - return ret; - } + int ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS); + return (ret) ? ret : 4; +} - return 4; +static int kvm_max_vcpus(KVMState *s) +{ + int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS); + return (ret) ? 
ret : kvm_recommended_vcpus(s); } int kvm_init(void) @@ -1334,11 +1337,19 @@ int kvm_init(void) static const char upgrade_note[] = "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n" "(see http://sourceforge.net/projects/kvm).\n"; + struct { + const char *name; + int num; + } num_cpus[] = { + { "SMP", smp_cpus }, + { "hotpluggable", max_cpus }, + { NULL, } + }, *nc = num_cpus; + int soft_vcpus_limit, hard_vcpus_limit; KVMState *s; const KVMCapabilityInfo *missing_cap; int ret; int i; - int max_vcpus; s = g_malloc0(sizeof(KVMState)); @@ -1353,9 +1364,6 @@ int kvm_init(void) #ifdef KVM_CAP_SET_GUEST_DEBUG QTAILQ_INIT(&s->kvm_sw_breakpoints); #endif - for (i = 0; i < ARRAY_SIZE(s->slots); i++) { - s->slots[i].slot = i; - } s->vmfd = -1; s->fd = qemu_open("/dev/kvm", O_RDWR); if (s->fd == -1) { @@ -1379,24 +1387,57 @@ int kvm_init(void) goto err; } - max_vcpus = kvm_max_vcpus(s); - if (smp_cpus > max_vcpus) { - ret = -EINVAL; - fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus " - "supported by KVM (%d)\n", smp_cpus, max_vcpus); - goto err; + s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS); + + /* If unspecified, use the default value */ + if (!s->nr_slots) { + s->nr_slots = 32; + } + + s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot)); + + for (i = 0; i < s->nr_slots; i++) { + s->slots[i].slot = i; } - s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0); - if (s->vmfd < 0) { + /* check the vcpu limits */ + soft_vcpus_limit = kvm_recommended_vcpus(s); + hard_vcpus_limit = kvm_max_vcpus(s); + + while (nc->name) { + if (nc->num > soft_vcpus_limit) { + fprintf(stderr, + "Warning: Number of %s cpus requested (%d) exceeds " + "the recommended cpus supported by KVM (%d)\n", + nc->name, nc->num, soft_vcpus_limit); + + if (nc->num > hard_vcpus_limit) { + ret = -EINVAL; + fprintf(stderr, "Number of %s cpus requested (%d) exceeds " + "the maximum cpus supported by KVM (%d)\n", + nc->name, nc->num, hard_vcpus_limit); + goto err; + } + } + nc++; + } + + do { + ret = kvm_ioctl(s, KVM_CREATE_VM, 0); + } while (ret == -EINTR); + + if (ret < 0) { + fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -s->vmfd, + strerror(-ret)); + #ifdef TARGET_S390X fprintf(stderr, "Please add the 'switch_amode' kernel parameter to " "your host kernel command line\n"); #endif - ret = s->vmfd; goto err; } + s->vmfd = ret; missing_cap = kvm_check_extension_list(s, kvm_required_capabilites); if (!missing_cap) { missing_cap = @@ -1483,6 +1524,7 @@ err: if (s->fd != -1) { close(s->fd); } + g_free(s->slots); g_free(s); return ret; @@ -1495,40 +1537,14 @@ static void kvm_handle_io(uint16_t port, void *data, int direction, int size, uint8_t *ptr = data; for (i = 0; i < count; i++) { - if (direction == KVM_EXIT_IO_IN) { - switch (size) { - case 1: - stb_p(ptr, cpu_inb(port)); - break; - case 2: - stw_p(ptr, cpu_inw(port)); - break; - case 4: - stl_p(ptr, cpu_inl(port)); - break; - } - } else { - switch (size) { - case 1: - cpu_outb(port, ldub_p(ptr)); - break; - case 2: - cpu_outw(port, lduw_p(ptr)); - break; - case 4: - cpu_outl(port, ldl_p(ptr)); - break; - } - } - + address_space_rw(&address_space_io, port, ptr, size, + direction == KVM_EXIT_IO_OUT); ptr += size; } } -static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run) +static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run) { - CPUState *cpu = ENV_GET_CPU(env); - fprintf(stderr, "KVM internal error."); if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) { int i; @@ -1544,7 +1560,7 @@ static 
int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run) if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) { fprintf(stderr, "emulation failure\n"); if (!kvm_arch_stop_on_emulation_error(cpu)) { - cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE); + cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE); return EXCP_INTERRUPT; } } @@ -1609,9 +1625,8 @@ void kvm_cpu_synchronize_post_init(CPUState *cpu) cpu->kvm_vcpu_dirty = false; } -int kvm_cpu_exec(CPUArchState *env) +int kvm_cpu_exec(CPUState *cpu) { - CPUState *cpu = ENV_GET_CPU(env); struct kvm_run *run = cpu->kvm_run; int ret, run_ret; @@ -1690,7 +1705,7 @@ int kvm_cpu_exec(CPUArchState *env) ret = -1; break; case KVM_EXIT_INTERNAL_ERROR: - ret = kvm_handle_internal_error(env, run); + ret = kvm_handle_internal_error(cpu, run); break; default: DPRINTF("kvm_arch_handle_exit\n"); @@ -1700,7 +1715,7 @@ int kvm_cpu_exec(CPUArchState *env) } while (ret == 0); if (ret < 0) { - cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE); + cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE); vm_stop(RUN_STATE_INTERNAL_ERROR); } @@ -1819,19 +1834,6 @@ int kvm_has_intx_set_mask(void) return kvm_state->intx_set_mask; } -void *kvm_ram_alloc(ram_addr_t size) -{ -#ifdef TARGET_S390X - void *mem; - - mem = kvm_arch_ram_alloc(size); - if (mem) { - return mem; - } -#endif - return qemu_anon_ram_alloc(size); -} - void kvm_setup_guest_memory(void *start, size_t size) { #ifdef CONFIG_VALGRIND_H @@ -1882,14 +1884,13 @@ static void kvm_invoke_set_guest_debug(void *data) &dbg_data->dbg); } -int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap) +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) { - CPUState *cpu = ENV_GET_CPU(env); struct kvm_set_guest_debug_data data; data.dbg.control = reinject_trap; - if (env->singlestep_enabled) { + if (cpu->singlestep_enabled) { data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP; } kvm_arch_update_guest_debug(cpu, &data.dbg); @@ -1899,16 +1900,14 @@ int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap) return data.err; } -int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len, int type) { - CPUState *current_cpu = ENV_GET_CPU(current_env); struct kvm_sw_breakpoint *bp; - CPUArchState *env; int err; if (type == GDB_BREAKPOINT_SW) { - bp = kvm_find_sw_breakpoint(current_cpu, addr); + bp = kvm_find_sw_breakpoint(cpu, addr); if (bp) { bp->use_count++; return 0; @@ -1921,14 +1920,13 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, bp->pc = addr; bp->use_count = 1; - err = kvm_arch_insert_sw_breakpoint(current_cpu, bp); + err = kvm_arch_insert_sw_breakpoint(cpu, bp); if (err) { g_free(bp); return err; } - QTAILQ_INSERT_HEAD(¤t_cpu->kvm_state->kvm_sw_breakpoints, - bp, entry); + QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); } else { err = kvm_arch_insert_hw_breakpoint(addr, len, type); if (err) { @@ -1936,8 +1934,8 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, } } - for (env = first_cpu; env != NULL; env = env->next_cpu) { - err = kvm_update_guest_debug(env, 0); + CPU_FOREACH(cpu) { + err = kvm_update_guest_debug(cpu, 0); if (err) { return err; } @@ -1945,16 +1943,14 @@ int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, return 0; } -int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, +int kvm_remove_breakpoint(CPUState *cpu, 
target_ulong addr, target_ulong len, int type) { - CPUState *current_cpu = ENV_GET_CPU(current_env); struct kvm_sw_breakpoint *bp; - CPUArchState *env; int err; if (type == GDB_BREAKPOINT_SW) { - bp = kvm_find_sw_breakpoint(current_cpu, addr); + bp = kvm_find_sw_breakpoint(cpu, addr); if (!bp) { return -ENOENT; } @@ -1964,12 +1960,12 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, return 0; } - err = kvm_arch_remove_sw_breakpoint(current_cpu, bp); + err = kvm_arch_remove_sw_breakpoint(cpu, bp); if (err) { return err; } - QTAILQ_REMOVE(¤t_cpu->kvm_state->kvm_sw_breakpoints, bp, entry); + QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry); g_free(bp); } else { err = kvm_arch_remove_hw_breakpoint(addr, len, type); @@ -1978,8 +1974,8 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, } } - for (env = first_cpu; env != NULL; env = env->next_cpu) { - err = kvm_update_guest_debug(env, 0); + CPU_FOREACH(cpu) { + err = kvm_update_guest_debug(cpu, 0); if (err) { return err; } @@ -1987,19 +1983,15 @@ int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, return 0; } -void kvm_remove_all_breakpoints(CPUArchState *current_env) +void kvm_remove_all_breakpoints(CPUState *cpu) { - CPUState *current_cpu = ENV_GET_CPU(current_env); struct kvm_sw_breakpoint *bp, *next; - KVMState *s = current_cpu->kvm_state; - CPUArchState *env; - CPUState *cpu; + KVMState *s = cpu->kvm_state; QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) { - if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) { + if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) { /* Try harder to find a CPU that currently sees the breakpoint. */ - for (env = first_cpu; env != NULL; env = env->next_cpu) { - cpu = ENV_GET_CPU(env); + CPU_FOREACH(cpu) { if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) { break; } @@ -2010,38 +2002,37 @@ void kvm_remove_all_breakpoints(CPUArchState *current_env) } kvm_arch_remove_all_hw_breakpoints(); - for (env = first_cpu; env != NULL; env = env->next_cpu) { - kvm_update_guest_debug(env, 0); + CPU_FOREACH(cpu) { + kvm_update_guest_debug(cpu, 0); } } #else /* !KVM_CAP_SET_GUEST_DEBUG */ -int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap) +int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap) { return -EINVAL; } -int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr, +int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len, int type) { return -EINVAL; } -int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr, +int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr, target_ulong len, int type) { return -EINVAL; } -void kvm_remove_all_breakpoints(CPUArchState *current_env) +void kvm_remove_all_breakpoints(CPUState *cpu) { } #endif /* !KVM_CAP_SET_GUEST_DEBUG */ -int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset) +int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset) { - CPUState *cpu = ENV_GET_CPU(env); struct kvm_signal_mask *sigmask; int r;
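
The slots[32] to nr_slots conversion running through kvm_init() and the lookup helpers above boils down to one capability probe. A minimal standalone sketch of that probe, using only /dev/kvm and the kernel's KVM_CHECK_EXTENSION ioctl (the KVMSlot bookkeeping is QEMU-side and omitted here):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0) {
        perror("open /dev/kvm");
        return 1;
    }

    /* Returns the slot count, or 0 on kernels without KVM_CAP_NR_MEMSLOTS. */
    int nr_slots = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
    if (nr_slots <= 0) {
        nr_slots = 32;   /* the patch keeps the old fixed size as fallback */
    }
    printf("usable memory slots: %d\n", nr_slots);
    return 0;
}

With the count known only at runtime, the patch allocates s->slots with g_malloc0() instead of a fixed array, which is why every ARRAY_SIZE(s->slots) loop bound in the hunks above becomes s->nr_slots.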
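kvm_get_dirty_pages_log_range() drops its open-coded "bitmap-traveling" loop in favor of cpu_physical_memory_set_dirty_lebitmap() from the newly included exec/ram_addr.h. A reduced sketch of the idiom the removed loop used: skip zero words entirely, then jump from set bit to set bit with ffsl() instead of testing every page. mark_page_dirty() is a hypothetical stand-in for memory_region_set_dirty(); note the removed loop also ran each word through leul_to_cpu() first, matching the little-endian bitmap layout the "lebitmap" in the new helper's name refers to.

#define _GNU_SOURCE          /* for ffsl() in <string.h> on glibc */
#include <limits.h>
#include <stdio.h>
#include <string.h>

#define HOST_LONG_BITS (sizeof(unsigned long) * CHAR_BIT)

static void mark_page_dirty(unsigned long page_number)
{
    printf("page %lu dirty\n", page_number);   /* placeholder action */
}

static void walk_dirty_bitmap(const unsigned long *bitmap, unsigned long pages)
{
    unsigned long len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;

    for (unsigned long i = 0; i < len; i++) {
        unsigned long c = bitmap[i];   /* host-endian here; QEMU swaps */

        while (c != 0) {
            int j = ffsl(c) - 1;       /* index of the lowest set bit */
            c &= ~(1ul << j);
            mark_page_dirty(i * HOST_LONG_BITS + j);
        }
    }
}

int main(void)
{
    unsigned long bitmap[2] = { 0x5ul, 0x8ul };  /* pages 0, 2, HOST_LONG_BITS+3 */
    walk_dirty_bitmap(bitmap, 2 * HOST_LONG_BITS);
    return 0;
}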
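The kvm_set_ioeventfd_mmio() prototype widens addr from uint32_t to hwaddr, since MMIO ioeventfds can sit above 4 GiB and the old type silently truncated them. Underneath, both the MMIO and PIO variants end at the KVM_IOEVENTFD ioctl; a hedged sketch of that registration, where vm_fd is assumed to come from KVM_CREATE_VM and add_mmio_ioeventfd() is an illustrative name, not QEMU's helper:

#include <stdint.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int add_mmio_ioeventfd(int vm_fd, uint64_t addr, uint64_t val,
                       uint32_t size, int datamatch)
{
    int efd = eventfd(0, EFD_CLOEXEC);
    if (efd < 0) {
        return -1;
    }

    struct kvm_ioeventfd ioev;
    memset(&ioev, 0, sizeof(ioev));
    ioev.addr = addr;                 /* full 64-bit guest-physical address */
    ioev.len  = size;                 /* 1, 2, 4 or 8 bytes */
    ioev.fd   = efd;
    if (datamatch) {
        ioev.datamatch = val;
        ioev.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }

    /* On success the kernel signals efd instead of exiting to userspace
     * whenever the guest writes (the matching value) to addr. */
    return ioctl(vm_fd, KVM_IOEVENTFD, &ioev) < 0 ? -1 : efd;
}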
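Several MSI paths above (kvm_lookup_msi_route(), kvm_irqchip_send_msi(), kvm_irqchip_add_msi_route(), kvm_irqchip_update_msi_route()) now pass msg.data through le32_to_cpu(): the data word is treated as little-endian, i.e. as a PCI device would write it, while the kernel's routing entry wants a CPU-native value, so the change only has an effect on big-endian hosts. What le32_to_cpu() amounts to, as a freestanding sketch rather than QEMU's qemu/bswap.h implementation:

#include <stdint.h>

static inline uint32_t le32_to_cpu_sketch(uint32_t le)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    return __builtin_bswap32(le);   /* big-endian host: swap bytes */
#else
    return le;                      /* little-endian host: identity */
#endif
}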
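kvm_irqchip_assign_irqfd() grows an rfd parameter and sets KVM_IRQFD_FLAG_RESAMPLE whenever it is not -1. That is the kernel's level-triggered irqfd mode: the trigger eventfd asserts the GSI, and on guest EOI the kernel de-asserts the line and signals the resample eventfd so userspace (typically a VFIO-style device model) can re-poll the device. A sketch of the raw wiring, assuming vm_fd came from KVM_CREATE_VM and the kernel supports KVM_CAP_IRQFD_RESAMPLE:

#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int assign_resample_irqfd(int vm_fd, int virq,
                          int *trigger_out, int *resample_out)
{
    int trigger  = eventfd(0, EFD_CLOEXEC);   /* written to inject the IRQ */
    int resample = eventfd(0, EFD_CLOEXEC);   /* readable after guest EOI */
    if (trigger < 0 || resample < 0) {
        return -1;
    }

    struct kvm_irqfd irqfd;
    memset(&irqfd, 0, sizeof(irqfd));
    irqfd.fd         = trigger;
    irqfd.gsi        = virq;
    irqfd.flags      = KVM_IRQFD_FLAG_RESAMPLE;
    irqfd.resamplefd = resample;

    if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0) {
        return -1;
    }
    *trigger_out  = trigger;
    *resample_out = resample;
    return 0;
}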
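The kvm_recommended_vcpus()/kvm_max_vcpus() split mirrors the procedure in the kernel API documentation: KVM_CAP_NR_VCPUS reports the recommended (soft) limit, KVM_CAP_MAX_VCPUS the hard one, and either probe can return 0 on older kernels, hence the fallbacks to 4 and then to the soft limit. The same probe, reduced to a standalone program:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0) {
        perror("open /dev/kvm");
        return 1;
    }

    int soft = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
    soft = soft > 0 ? soft : 4;       /* documented default when absent */
    int hard = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MAX_VCPUS);
    hard = hard > 0 ? hard : soft;    /* fall back to the soft limit */

    printf("recommended vcpus: %d, maximum vcpus: %d\n", soft, hard);
    return 0;
}

This is also why the patch warns, but continues, when the requested SMP or hotpluggable count exceeds the soft limit, and only fails with -EINVAL past the hard one.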
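kvm_init() now loops on KVM_CREATE_VM while it fails with EINTR, since the VM creation can be interrupted by a signal. One caveat in the patched error path: s->vmfd is still -1 when the fprintf() runs, so the "-s->vmfd" it prints is always 1; the informative number is -ret, which is what strerror() already receives. A raw-ioctl version of the retry for reference (QEMU's kvm_ioctl() wrapper returns -errno, hence the ret == -EINTR test in the hunk above; a plain ioctl() caller checks errno instead):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int create_vm(int kvm_fd)
{
    int ret;

    do {
        ret = ioctl(kvm_fd, KVM_CREATE_VM, 0);   /* 0 = default machine type */
    } while (ret < 0 && errno == EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n",
                errno, strerror(errno));
        return -1;
    }
    return ret;   /* fd for the new VM */
}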