X-Git-Url: https://git.proxmox.com/?a=blobdiff_plain;f=accel%2Fkvm%2Fkvm-all.c;h=439a4efe526327b97d6215936f2a390389282ebc;hb=9e264985ff0bc86927b44b334bd504687f78659d;hp=524c4ddfbd0fc4af27a3f8dd1cff05dd2dfb2be8;hpb=3284aa128153750f14a61e8a96fd085e6f2999b6;p=mirror_qemu.git

diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 524c4ddfbd..439a4efe52 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -18,28 +18,32 @@

 #include <linux/kvm.h>

-#include "qemu-common.h"
 #include "qemu/atomic.h"
 #include "qemu/option.h"
 #include "qemu/config-file.h"
 #include "qemu/error-report.h"
 #include "qapi/error.h"
-#include "hw/hw.h"
 #include "hw/pci/msi.h"
 #include "hw/pci/msix.h"
 #include "hw/s390x/adapter.h"
 #include "exec/gdbstub.h"
 #include "sysemu/kvm_int.h"
+#include "sysemu/runstate.h"
 #include "sysemu/cpus.h"
+#include "sysemu/sysemu.h"
 #include "qemu/bswap.h"
 #include "exec/memory.h"
 #include "exec/ram_addr.h"
 #include "exec/address-spaces.h"
 #include "qemu/event_notifier.h"
+#include "qemu/main-loop.h"
 #include "trace.h"
 #include "hw/irq.h"
 #include "sysemu/sev.h"
 #include "sysemu/balloon.h"
+#include "qapi/visitor.h"
+#include "qapi/qapi-types-common.h"
+#include "qapi/qapi-visit-common.h"

 #include "hw/boards.h"

@@ -51,7 +55,7 @@
 /* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
  * need to use the real host PAGE_SIZE, as that's what KVM will use.
  */
-#define PAGE_SIZE getpagesize()
+#define PAGE_SIZE qemu_real_host_page_size

 //#define DEBUG_KVM
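Two different page sizes are in play in this file: TARGET_PAGE_SIZE is the guest's, while KVM sizes structures such as the coalesced-MMIO ring by the host's. qemu_real_host_page_size is simply the host value cached once at startup instead of a libc getpagesize() call per use. A standalone sketch of that caching idea (not QEMU's actual implementation):

    #include <stdio.h>
    #include <unistd.h>

    /* Cache the host page size once instead of querying
     * getpagesize()/sysconf() on every use. */
    static long real_host_page_size;

    static void page_size_init(void)
    {
        real_host_page_size = sysconf(_SC_PAGESIZE);
    }

    int main(void)
    {
        page_size_init();
        printf("host page size: %ld\n", real_host_page_size);
        return 0;
    }
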
@@ -88,9 +92,15 @@ struct KVMState
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
 #endif
+    int max_nested_state_len;
     int many_ioeventfds;
     int intx_set_mask;
+    int kvm_shadow_mem;
+    bool kernel_irqchip_allowed;
+    bool kernel_irqchip_required;
+    OnOffAuto kernel_irqchip_split;
     bool sync_mmu;
+    bool manual_dirty_log_protect;
     /* The man page (and posix) say ioctl numbers are signed int, but
      * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
      * unsigned, and treating them as signed here can break things */
@@ -110,6 +120,13 @@ struct KVMState
     /* memory encryption */
     void *memcrypt_handle;
     int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
+
+    /* For "info mtree -f" to tell if an MR is registered in KVM */
+    int nr_as;
+    struct KVMAs {
+        KVMMemoryListener *ml;
+        AddressSpace *as;
+    } *as;
 };

 KVMState *kvm_state;
@@ -130,6 +147,7 @@ bool kvm_direct_msi_allowed;
 bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;
 static bool kvm_immediate_exit;
+static hwaddr kvm_max_slot_size = ~0;

 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),
@@ -138,9 +156,15 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_LAST_INFO
 };

+static NotifierList kvm_irqchip_change_notifiers =
+    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);
+
+#define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
+#define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)
+
 int kvm_get_max_memslots(void)
 {
-    KVMState *s = KVM_STATE(current_machine->accelerator);
+    KVMState *s = KVM_STATE(current_accel());

     return s->nr_slots;
 }
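The new slots_lock protects the slot array against concurrent use by vCPU threads and the memory listener. The convention used throughout the rest of the patch is that public entry points take the lock, while helpers marked "Called with ... slots_lock held" assume the caller did. A standalone sketch of that split with stand-in types (compare kvm_has_free_slot() and kvm_get_free_slot() below; this is not QEMU code):

    #include <pthread.h>
    #include <stdbool.h>

    typedef struct {
        pthread_mutex_t slots_lock;
        int free_slots;
    } Listener;

    /* Called with slots_lock held */
    static bool get_free_slot(Listener *l)
    {
        return l->free_slots > 0;
    }

    /* Public wrapper takes the lock itself */
    bool has_free_slot(Listener *l)
    {
        bool result;

        pthread_mutex_lock(&l->slots_lock);
        result = get_free_slot(l);
        pthread_mutex_unlock(&l->slots_lock);
        return result;
    }

    int main(void)
    {
        Listener l = { PTHREAD_MUTEX_INITIALIZER, 1 };

        return has_free_slot(&l) ? 0 : 1;
    }
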
@@ -165,6 +189,7 @@ int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
     return 1;
 }

+/* Called with KVMMemoryListener.slots_lock held */
 static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 {
     KVMState *s = kvm_state;
@@ -182,10 +207,17 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 bool kvm_has_free_slot(MachineState *ms)
 {
     KVMState *s = KVM_STATE(ms->accelerator);
+    bool result;
+    KVMMemoryListener *kml = &s->memory_listener;

-    return kvm_get_free_slot(&s->memory_listener);
+    kvm_slots_lock(kml);
+    result = !!kvm_get_free_slot(kml);
+    kvm_slots_unlock(kml);
+
+    return result;
 }

+/* Called with KVMMemoryListener.slots_lock held */
 static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 {
     KVMSlot *slot = kvm_get_free_slot(kml);
@@ -244,18 +276,21 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                        hwaddr *phys_addr)
 {
     KVMMemoryListener *kml = &s->memory_listener;
-    int i;
+    int i, ret = 0;

+    kvm_slots_lock(kml);
     for (i = 0; i < s->nr_slots; i++) {
         KVMSlot *mem = &kml->slots[i];

         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
             *phys_addr = mem->start_addr + (ram - mem->ram);
-            return 1;
+            ret = 1;
+            break;
         }
     }
+    kvm_slots_unlock(kml);

-    return 0;
+    return ret;
 }

 static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
@@ -273,13 +308,23 @@ static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, boo
         /* Set the slot size to 0 before setting the slot to the desired
          * value. This is needed based on KVM commit 75d61fbc. */
         mem.memory_size = 0;
-        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
+        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
+        if (ret < 0) {
+            goto err;
+        }
     }
     mem.memory_size = slot->memory_size;
     ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
     slot->old_flags = mem.flags;
+err:
     trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
                               mem.memory_size, mem.userspace_addr, ret);
+    if (ret < 0) {
+        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
+                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
+                     __func__, mem.slot, slot->start_addr,
+                     (uint64_t)mem.memory_size, strerror(errno));
+    }
     return ret;
 }
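KVM cannot change the flags of a live slot in one step, so the function deletes the slot (memory_size = 0) and re-creates it; the hunk above makes the previously ignored first ioctl checked. At the kernel interface the sequence looks roughly like this (a sketch, not QEMU code; vm_fd is an open KVM VM descriptor and the caller is assumed to have filled slot, guest_phys_addr and userspace_addr):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int update_slot_flags(int vm_fd,
                          struct kvm_userspace_memory_region *mem,
                          __u64 size, __u32 new_flags)
    {
        int ret;

        mem->memory_size = 0;     /* step 1: delete the old slot */
        ret = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, mem);
        if (ret < 0) {
            return ret;           /* now reported instead of ignored */
        }

        mem->flags = new_flags;   /* step 2: re-create with new flags */
        mem->memory_size = size;
        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, mem);
    }
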
@@ -292,6 +337,11 @@ int kvm_destroy_vcpu(CPUState *cpu)

     DPRINTF("kvm_destroy_vcpu\n");

+    ret = kvm_arch_destroy_vcpu(cpu);
+    if (ret < 0) {
+        goto err;
+    }
+
     mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
     if (mmap_size < 0) {
         ret = mmap_size;
@@ -391,6 +441,7 @@ static int kvm_mem_flags(MemoryRegion *mr)
     return flags;
 }

+/* Called with KVMMemoryListener.slots_lock held */
 static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                  MemoryRegion *mr)
 {
@@ -407,21 +458,33 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
                                     MemoryRegionSection *section)
 {
-    hwaddr start_addr, size;
+    hwaddr start_addr, size, slot_size;
     KVMSlot *mem;
+    int ret = 0;

     size = kvm_align_section(section, &start_addr);
     if (!size) {
         return 0;
     }

-    mem = kvm_lookup_matching_slot(kml, start_addr, size);
-    if (!mem) {
-        /* We don't have a slot if we want to trap every access. */
-        return 0;
+    kvm_slots_lock(kml);
+
+    while (size && !ret) {
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
+        if (!mem) {
+            /* We don't have a slot if we want to trap every access. */
+            goto out;
+        }
+
+        ret = kvm_slot_update_flags(kml, mem, section->mr);
+        start_addr += slot_size;
+        size -= slot_size;
     }

-    return kvm_slot_update_flags(kml, mem, section->mr);
+out:
+    kvm_slots_unlock(kml);
+    return ret;
 }

 static void kvm_log_start(MemoryListener *listener,
@@ -464,7 +527,7 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
 {
     ram_addr_t start = section->offset_within_region +
                        memory_region_get_ram_addr(section->mr);
-    ram_addr_t pages = int128_get64(section->size) / getpagesize();
+    ram_addr_t pages = int128_get64(section->size) / qemu_real_host_page_size;

     cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
     return 0;
@@ -472,14 +535,37 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,

 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))

+/* Allocate the dirty bitmap for a slot */
+static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
+{
+    /*
+     * XXX bad kernel interface alert
+     * For dirty bitmap, kernel allocates array of size aligned to
+     * bits-per-long.  But for case when the kernel is 64bits and
+     * the userspace is 32bits, userspace can't align to the same
+     * bits-per-long, since sizeof(long) is different between kernel
+     * and user space.  This way, userspace will provide buffer which
+     * may be 4 bytes less than the kernel will use, resulting in
+     * userspace memory corruption (which is not detectable by valgrind
+     * too, in most cases).
+     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
+     * a hope that sizeof(long) won't become >8 any time soon.
+     */
+    hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
+                                        /*HOST_LONG_BITS*/ 64) / 8;
+    mem->dirty_bmap = g_malloc0(bitmap_size);
+}
+
 /**
- * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
- * This function updates qemu's dirty bitmap using
- * memory_region_set_dirty(). This means all bits are set
- * to dirty.
+ * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
+ *
+ * This function will first try to fetch dirty bitmap from the kernel,
+ * and then updates qemu's dirty bitmap.
  *
- * @start_add: start of logged region.
- * @end_addr: end of logged region.
+ * NOTE: caller must be with kml->slots_lock held.
+ *
+ * @kml: the KVM memory listener object
+ * @section: the memory section to sync the dirty bitmap with
  */
 static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                           MemoryRegionSection *section)
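The sizing arithmetic is easy to check by hand: one bit per target page, rounded up to a multiple of 64 bits. For a 1 GiB slot with 4 KiB pages (TARGET_PAGE_BITS == 12, assumed for the example) that is 262144 bits, i.e. 32768 bytes. A standalone check:

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN(x, y) (((x) + (y) - 1) & ~((y) - 1))

    int main(void)
    {
        uint64_t memory_size = 1ULL << 30;               /* 1 GiB slot */
        uint64_t bitmap_size = ALIGN(memory_size >> 12, 64) / 8;

        printf("dirty bitmap: %llu bytes\n",
               (unsigned long long)bitmap_size);         /* prints 32768 */
        return 0;
    }
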
@@ -488,43 +574,215 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
     struct kvm_dirty_log d = {};
     KVMSlot *mem;
     hwaddr start_addr, size;
+    hwaddr slot_size, slot_offset = 0;
+    int ret = 0;

     size = kvm_align_section(section, &start_addr);
-    if (size) {
-        mem = kvm_lookup_matching_slot(kml, start_addr, size);
+    while (size) {
+        MemoryRegionSection subsection = *section;
+
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
         if (!mem) {
             /* We don't have a slot if we want to trap every access. */
-            return 0;
+            goto out;
         }

-        /* XXX bad kernel interface alert
-         * For dirty bitmap, kernel allocates array of size aligned to
-         * bits-per-long.  But for case when the kernel is 64bits and
-         * the userspace is 32bits, userspace can't align to the same
-         * bits-per-long, since sizeof(long) is different between kernel
-         * and user space.  This way, userspace will provide buffer which
-         * may be 4 bytes less than the kernel will use, resulting in
-         * userspace memory corruption (which is not detectable by valgrind
-         * too, in most cases).
-         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
-         * a hope that sizeof(long) won't become >8 any time soon.
-         */
-        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
-                     /*HOST_LONG_BITS*/ 64) / 8;
-        d.dirty_bitmap = g_malloc0(size);
+        if (!mem->dirty_bmap) {
+            /* Allocate on the first log_sync, once and for all */
+            kvm_memslot_init_dirty_bitmap(mem);
+        }

+        d.dirty_bitmap = mem->dirty_bmap;
         d.slot = mem->slot | (kml->as_id << 16);
         if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
             DPRINTF("ioctl failed %d\n", errno);
-            g_free(d.dirty_bitmap);
-            return -1;
+            ret = -1;
+            goto out;
         }

-        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
-        g_free(d.dirty_bitmap);
+        subsection.offset_within_region += slot_offset;
+        subsection.size = int128_make64(slot_size);
+        kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);
+
+        slot_offset += slot_size;
+        start_addr += slot_size;
+        size -= slot_size;
     }
+out:
+    return ret;
+}

-    return 0;
+/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
+#define KVM_CLEAR_LOG_SHIFT  6
+#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
+#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
+
+static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
+                                  uint64_t size)
+{
+    KVMState *s = kvm_state;
+    uint64_t end, bmap_start, start_delta, bmap_npages;
+    struct kvm_clear_dirty_log d;
+    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
+    int ret;
+
+    /*
+     * We need to extend either the start or the size or both to
+     * satisfy the KVM interface requirement.  Firstly, do the start
+     * page alignment on 64 host pages
+     */
+    bmap_start = start & KVM_CLEAR_LOG_MASK;
+    start_delta = start - bmap_start;
+    bmap_start /= psize;
+
+    /*
+     * The kernel interface has restriction on the size too, that either:
+     *
+     * (1) the size is 64 host pages aligned (just like the start), or
+     * (2) the size fills up until the end of the KVM memslot.
+     */
+    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
+        << KVM_CLEAR_LOG_SHIFT;
+    end = mem->memory_size / psize;
+    if (bmap_npages > end - bmap_start) {
+        bmap_npages = end - bmap_start;
+    }
+    start_delta /= psize;
+
+    /*
+     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
+     * that we won't clear any unknown dirty bits otherwise we might
+     * accidentally clear some set bits which are not yet synced from
+     * the kernel into QEMU's bitmap, then we'll lose track of the
+     * guest modifications upon those pages (which can directly lead
+     * to guest data loss or panic after migration).
+     *
+     * Layout of the KVMSlot.dirty_bmap:
+     *
+     *                   |<-------- bmap_npages -----------..>|
+     *                                                     [1]
+     *                     start_delta         size
+     *  |----------------|-------------|------------------|------------|
+     *  ^                ^             ^                               ^
+     *  |                |             |                               |
+     * start          bmap_start     (start)                         end
+     * of memslot                                             of memslot
+     *
+     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
+     */
+
+    assert(bmap_start % BITS_PER_LONG == 0);
+    /* We should never do log_clear before log_sync */
+    assert(mem->dirty_bmap);
+    if (start_delta) {
+        /* Slow path - we need to manipulate a temp bitmap */
+        bmap_clear = bitmap_new(bmap_npages);
+        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
+                                    bmap_start, start_delta + size / psize);
+        /*
+         * We need to fill the holes at start because that was not
+         * specified by the caller and we extended the bitmap only for
+         * 64 pages alignment
+         */
+        bitmap_clear(bmap_clear, 0, start_delta);
+        d.dirty_bitmap = bmap_clear;
+    } else {
+        /* Fast path - start address aligns well with BITS_PER_LONG */
+        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
+    }
+
+    d.first_page = bmap_start;
+    /* It should never overflow.  If it happens, say something */
+    assert(bmap_npages <= UINT32_MAX);
+    d.num_pages = bmap_npages;
+    d.slot = mem->slot | (as_id << 16);
+
+    if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
+        ret = -errno;
+        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
+                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
+                     __func__, d.slot, (uint64_t)d.first_page,
+                     (uint32_t)d.num_pages, ret);
+    } else {
+        ret = 0;
+        trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
+    }
+
+    /*
+     * After we have updated the remote dirty bitmap, we update the
+     * cached bitmap as well for the memslot, then if another user
+     * clears the same region we know we shouldn't clear it again on
+     * the remote otherwise it's data loss as well.
+     */
+    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
+                 size / psize);
+    /* This handles the NULL case well */
+    g_free(bmap_clear);
+    return ret;
+}
+
+
+/**
+ * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
+ *
+ * NOTE: this will be a no-op if we haven't enabled manual dirty log
+ * protection in the host kernel because in that case this operation
+ * will be done within log_sync().
+ *
+ * @kml:     the kvm memory listener
+ * @section: the memory range to clear dirty bitmap
+ */
+static int kvm_physical_log_clear(KVMMemoryListener *kml,
+                                  MemoryRegionSection *section)
+{
+    KVMState *s = kvm_state;
+    uint64_t start, size, offset, count;
+    KVMSlot *mem;
+    int ret = 0, i;
+
+    if (!s->manual_dirty_log_protect) {
+        /* No need to do explicit clear */
+        return ret;
+    }
+
+    start = section->offset_within_address_space;
+    size = int128_get64(section->size);
+
+    if (!size) {
+        /* Nothing more we can do... */
+        return ret;
+    }
+
+    kvm_slots_lock(kml);
+
+    for (i = 0; i < s->nr_slots; i++) {
+        mem = &kml->slots[i];
+        /* Discard slots that are empty or do not overlap the section */
+        if (!mem->memory_size ||
+            mem->start_addr > start + size - 1 ||
+            start > mem->start_addr + mem->memory_size - 1) {
+            continue;
+        }
+
+        if (start >= mem->start_addr) {
+            /* The slot starts before section or is aligned to it.  */
+            offset = start - mem->start_addr;
+            count = MIN(mem->memory_size - offset, size);
+        } else {
+            /* The slot starts after section.  */
+            offset = 0;
+            count = MIN(mem->memory_size, size - (mem->start_addr - start));
+        }
+        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
+        if (ret < 0) {
+            break;
+        }
+    }
+
+    kvm_slots_unlock(kml);
+
+    return ret;
 }

 static void kvm_coalesce_mmio_region(MemoryListener *listener,
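The 64-page granularity of KVM_CLEAR_DIRTY_LOG is where most of the complexity above comes from. Working the numbers for an unaligned request, assuming 4 KiB host pages (so KVM_CLEAR_LOG_ALIGN is 256 KiB): clearing 1 MiB starting at offset 0x21000 into a slot gives first_page 0, a 33-page start_delta (which forces the slow path with a temporary bitmap), and 320 pages to clear. A standalone check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t psize = 4096;                   /* host page size */
        uint64_t align = psize << 6;             /* 64 host pages */
        uint64_t start = 0x21000, size = 1ULL << 20;

        uint64_t bmap_start  = start & ~(align - 1);
        uint64_t start_delta = start - bmap_start;
        uint64_t bmap_npages =
            ((size + start_delta + align - 1) / align) << 6;

        printf("first_page=%llu delta_pages=%llu num_pages=%llu\n",
               (unsigned long long)(bmap_start / psize),
               (unsigned long long)(start_delta / psize),
               (unsigned long long)bmap_npages);  /* 0, 33, 320 */
        return 0;
    }
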
@@ -757,6 +1015,14 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
     return NULL;
 }

+void kvm_set_max_memslot_size(hwaddr max_slot_size)
+{
+    g_assert(
+        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
+    );
+    kvm_max_slot_size = max_slot_size;
+}
+
 static void kvm_set_phys_mem(KVMMemoryListener *kml,
                              MemoryRegionSection *section, bool add)
 {
@@ -764,7 +1030,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
     int err;
     MemoryRegion *mr = section->mr;
     bool writeable = !mr->readonly && !mr->rom_device;
-    hwaddr start_addr, size;
+    hwaddr start_addr, size, slot_size;
     void *ram;

     if (!memory_region_is_ram(mr)) {
@@ -786,40 +1052,65 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
     ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
           (start_addr - section->offset_within_address_space);

+    kvm_slots_lock(kml);
+
     if (!add) {
-        mem = kvm_lookup_matching_slot(kml, start_addr, size);
-        if (!mem) {
-            return;
-        }
+        do {
+            slot_size = MIN(kvm_max_slot_size, size);
+            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
+            if (!mem) {
+                goto out;
+            }
+            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+                kvm_physical_sync_dirty_bitmap(kml, section);
+            }
+
+            /* unregister the slot */
+            g_free(mem->dirty_bmap);
+            mem->dirty_bmap = NULL;
+            mem->memory_size = 0;
+            mem->flags = 0;
+            err = kvm_set_user_memory_region(kml, mem, false);
+            if (err) {
+                fprintf(stderr, "%s: error unregistering slot: %s\n",
+                        __func__, strerror(-err));
+                abort();
+            }
+            start_addr += slot_size;
+            size -= slot_size;
+        } while (size);
+        goto out;
+    }
+
+    /* register the new slot */
+    do {
+        slot_size = MIN(kvm_max_slot_size, size);
+        mem = kvm_alloc_slot(kml);
+        mem->memory_size = slot_size;
+        mem->start_addr = start_addr;
+        mem->ram = ram;
+        mem->flags = kvm_mem_flags(mr);
+
         if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
-            kvm_physical_sync_dirty_bitmap(kml, section);
+            /*
+             * Reallocate the bmap; it means it doesn't disappear in
+             * middle of a migrate.
+             */
+            kvm_memslot_init_dirty_bitmap(mem);
         }
-
-        /* unregister the slot */
-        mem->memory_size = 0;
-        mem->flags = 0;
-        err = kvm_set_user_memory_region(kml, mem, false);
+        err = kvm_set_user_memory_region(kml, mem, true);
         if (err) {
-            fprintf(stderr, "%s: error unregistering slot: %s\n",
-                    __func__, strerror(-err));
+            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
+                    strerror(-err));
             abort();
         }
-        return;
-    }
-
-    /* register the new slot */
-    mem = kvm_alloc_slot(kml);
-    mem->memory_size = size;
-    mem->start_addr = start_addr;
-    mem->ram = ram;
-    mem->flags = kvm_mem_flags(mr);
+        start_addr += slot_size;
+        ram += slot_size;
+        size -= slot_size;
+    } while (size);

-    err = kvm_set_user_memory_region(kml, mem, true);
-    if (err) {
-        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
-                strerror(-err));
-        abort();
-    }
+out:
+    kvm_slots_unlock(kml);
 }

 static void kvm_region_add(MemoryListener *listener,
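Both loops cut a section into pieces of at most kvm_max_slot_size. With a hypothetical 4 GiB cap (the real value is whatever a target passes to kvm_set_max_memslot_size(); it stays ~0, i.e. unlimited, unless set), a 10 GiB RAM section becomes three memslots. A standalone sketch of the same loop:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint64_t max_slot = 4ULL << 30;          /* hypothetical cap */
        uint64_t start = 0, size = 10ULL << 30;

        do {
            uint64_t slot_size = MIN(max_slot, size);
            printf("slot at 0x%llx, size 0x%llx\n",
                   (unsigned long long)start,
                   (unsigned long long)slot_size);
            start += slot_size;
            size -= slot_size;
        } while (size);                          /* 4G + 4G + 2G */
        return 0;
    }
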
@@ -846,12 +1137,30 @@ static void kvm_log_sync(MemoryListener *listener,
     KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;

+    kvm_slots_lock(kml);
     r = kvm_physical_sync_dirty_bitmap(kml, section);
+    kvm_slots_unlock(kml);
     if (r < 0) {
         abort();
     }
 }

+static void kvm_log_clear(MemoryListener *listener,
+                          MemoryRegionSection *section)
+{
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+    int r;
+
+    r = kvm_physical_log_clear(kml, section);
+    if (r < 0) {
+        error_report_once("%s: kvm log clear failed: mr=%s "
+                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
+                          section->mr->name, section->offset_within_region,
+                          int128_get64(section->size));
+        abort();
+    }
+}
+
 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                   MemoryRegionSection *section,
                                   bool match_data, uint64_t data,
@@ -864,8 +1173,8 @@ static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                data, true, int128_get64(section->size),
                                match_data);
     if (r < 0) {
-        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
-                __func__, strerror(-r));
+        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
+                __func__, strerror(-r), -r);
         abort();
     }
 }
@@ -882,6 +1191,8 @@ static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                data, false, int128_get64(section->size),
                                match_data);
     if (r < 0) {
+        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
+                __func__, strerror(-r), -r);
         abort();
     }
 }
@@ -898,8 +1209,8 @@ static void kvm_io_ioeventfd_add(MemoryListener *listener,
                               data, true, int128_get64(section->size),
                               match_data);
     if (r < 0) {
-        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
-                __func__, strerror(-r));
+        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
+                __func__, strerror(-r), -r);
         abort();
     }
 }
@@ -917,6 +1228,8 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener,
                               data, false, int128_get64(section->size),
                               match_data);
     if (r < 0) {
+        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
+                __func__, strerror(-r), -r);
         abort();
     }
 }
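These listener hooks only gained error codes in their messages; the registration they perform is the KVM_IOEVENTFD ioctl. For reference, a minimal sketch of what "adding an ioeventfd" means at the kernel interface (not QEMU code; vm_fd is an open VM descriptor, and error handling of eventfd() is omitted):

    #include <linux/kvm.h>
    #include <sys/eventfd.h>
    #include <sys/ioctl.h>

    /* Ask KVM to signal an eventfd whenever the guest writes `val`
     * to the 4-byte MMIO location at `addr`. */
    int add_mmio_ioeventfd(int vm_fd, __u64 addr, __u64 val)
    {
        struct kvm_ioeventfd iofd = {
            .datamatch = val,
            .addr      = addr,
            .len       = 4,
            .fd        = eventfd(0, 0),
            .flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
        };

        return ioctl(vm_fd, KVM_IOEVENTFD, &iofd);
    }
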
@@ -926,6 +1239,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
 {
     int i;

+    qemu_mutex_init(&kml->slots_lock);
     kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
     kml->as_id = as_id;

@@ -938,9 +1252,18 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
     kml->listener.log_start = kvm_log_start;
     kml->listener.log_stop = kvm_log_stop;
     kml->listener.log_sync = kvm_log_sync;
+    kml->listener.log_clear = kvm_log_clear;
     kml->listener.priority = 10;

     memory_listener_register(&kml->listener, as);
+
+    for (i = 0; i < s->nr_as; ++i) {
+        if (!s->as[i].as) {
+            s->as[i].as = as;
+            s->as[i].ml = kml;
+            break;
+        }
+    }
 }

 static MemoryListener kvm_io_listener = {
@@ -1107,6 +1430,21 @@ void kvm_irqchip_release_virq(KVMState *s, int virq)
     trace_kvm_irqchip_release_virq(virq);
 }

+void kvm_irqchip_add_change_notifier(Notifier *n)
+{
+    notifier_list_add(&kvm_irqchip_change_notifiers, n);
+}
+
+void kvm_irqchip_remove_change_notifier(Notifier *n)
+{
+    notifier_remove(n);
+}
+
+void kvm_irqchip_change_notify(void)
+{
+    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
+}
+
 static unsigned int kvm_hash_msi(uint32_t data)
 {
     /* This is optimized for IA32 MSI layout. However, no other arch shall
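A consumer of the new hooks registers a Notifier whose callback runs whenever kvm_irqchip_change_notify() fires, e.g. after interrupt routes are rebuilt. A hypothetical QEMU-internal consumer, sketched (only the kvm_irqchip_* name comes from this patch; everything else is made up for the example):

    /* A device model that caches GSI routes could invalidate its
     * cache whenever the in-kernel irqchip configuration changes. */
    static void invalidate_route_cache(Notifier *notifier, void *data)
    {
        /* drop any cached routing state here */
    }

    static Notifier route_cache_notifier = {
        .notify = invalidate_route_cache,
    };

    static void my_device_init(void)
    {
        kvm_irqchip_add_change_notifier(&route_cache_notifier);
    }
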
@@ -1451,10 +1789,11 @@ void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
     g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
 }

-static void kvm_irqchip_create(MachineState *machine, KVMState *s)
+static void kvm_irqchip_create(KVMState *s)
 {
     int ret;

+    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
     if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
         ;
     } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
@@ -1469,9 +1808,9 @@ static void kvm_irqchip_create(MachineState *machine, KVMState *s)

     /* First probe and see if there's a arch-specific hook to create the
      * in-kernel irqchip for us */
-    ret = kvm_arch_irqchip_create(machine, s);
+    ret = kvm_arch_irqchip_create(s);
     if (ret == 0) {
-        if (machine_kernel_irqchip_split(machine)) {
+        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
             perror("Split IRQ chip mode not supported.");
             exit(1);
         } else {
@@ -1519,7 +1858,7 @@ static int kvm_max_vcpu_id(KVMState *s)

 bool kvm_vcpu_id_is_valid(int vcpu_id)
 {
-    KVMState *s = KVM_STATE(current_machine->accelerator);
+    KVMState *s = KVM_STATE(current_accel());
     return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
 }

@@ -1533,8 +1872,8 @@ static int kvm_init(MachineState *ms)
         const char *name;
         int num;
     } num_cpus[] = {
-        { "SMP",          smp_cpus },
-        { "hotpluggable", max_cpus },
+        { "SMP",          ms->smp.cpus },
+        { "hotpluggable", ms->smp.max_cpus },
         { NULL, }
     }, *nc = num_cpus;
     int soft_vcpus_limit, hard_vcpus_limit;
@@ -1552,7 +1891,7 @@ static int kvm_init(MachineState *ms)
      * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
      * page size for the system though.
      */
-    assert(TARGET_PAGE_SIZE <= getpagesize());
+    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);

     s->sigmask_len = 8;

@@ -1591,6 +1930,12 @@ static int kvm_init(MachineState *ms)
         s->nr_slots = 32;
     }

+    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
+    if (s->nr_as <= 1) {
+        s->nr_as = 1;
+    }
+    s->as = g_new0(struct KVMAs, s->nr_as);
+
     kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
     if (mc->kvm_type) {
         type = mc->kvm_type(ms, kvm_type);
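KVM_CAP_MULTI_ADDRESS_SPACE reports how many guest address spaces the kernel can manage (2 on x86, where address space 1 holds SMM state), and s->as[] is sized accordingly so that every kvm_memory_listener_register() call can be recorded. A sketch of the kind of second registration this provisions for, loosely modeled on the x86 SMRAM listener (QEMU-internal, wiring simplified and names assumed for the example):

    static KVMMemoryListener smram_listener;

    static void register_smram_listener(KVMState *s, AddressSpace *smram_as)
    {
        /* as_id 1 selects KVM's SMM address space on x86 */
        kvm_memory_listener_register(s, &smram_listener, smram_as, 1);
    }
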
"); + s->manual_dirty_log_protect = false; + } + } + #ifdef KVM_CAP_VCPU_EVENTS s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS); #endif @@ -1673,6 +2029,8 @@ static int kvm_init(MachineState *ms) s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS); #endif + s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE); + #ifdef KVM_CAP_IRQ_ROUTING kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0); #endif @@ -1723,8 +2081,12 @@ static int kvm_init(MachineState *ms) goto err; } - if (machine_kernel_irqchip_allowed(ms)) { - kvm_irqchip_create(ms, s); + if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) { + s->kernel_irqchip_split = mc->default_kernel_irqchip_split ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; + } + + if (s->kernel_irqchip_allowed) { + kvm_irqchip_create(s); } if (kvm_eventfds_allowed) { @@ -1826,9 +2188,9 @@ void kvm_flush_coalesced_mmio_buffer(void) ent = &ring->coalesced_mmio[ring->first]; if (ent->pio == 1) { - address_space_rw(&address_space_io, ent->phys_addr, - MEMTXATTRS_UNSPECIFIED, ent->data, - ent->len, true); + address_space_write(&address_space_io, ent->phys_addr, + MEMTXATTRS_UNSPECIFIED, ent->data, + ent->len); } else { cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len); } @@ -2240,6 +2602,11 @@ int kvm_has_debugregs(void) return kvm_state->debugregs; } +int kvm_max_nested_state_length(void) +{ + return kvm_state->max_nested_state_len; +} + int kvm_has_many_ioeventfds(void) { if (!kvm_enabled()) { @@ -2592,17 +2959,137 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) return r; } +static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, + hwaddr start_addr, hwaddr size) +{ + KVMState *kvm = KVM_STATE(ms->accelerator); + int i; + + for (i = 0; i < kvm->nr_as; ++i) { + if (kvm->as[i].as == as && kvm->as[i].ml) { + size = MIN(kvm_max_slot_size, size); + return NULL != kvm_lookup_matching_slot(kvm->as[i].ml, + start_addr, size); + } + } + + return false; +} + +static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + int64_t value = s->kvm_shadow_mem; + + visit_type_int(v, name, &value, errp); +} + +static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + int64_t value; + + visit_type_int(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->kvm_shadow_mem = value; +} + +static void kvm_set_kernel_irqchip(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + Error *err = NULL; + KVMState *s = KVM_STATE(obj); + OnOffSplit mode; + + visit_type_OnOffSplit(v, name, &mode, &err); + if (err) { + error_propagate(errp, err); + return; + } else { + switch (mode) { + case ON_OFF_SPLIT_ON: + s->kernel_irqchip_allowed = true; + s->kernel_irqchip_required = true; + s->kernel_irqchip_split = ON_OFF_AUTO_OFF; + break; + case ON_OFF_SPLIT_OFF: + s->kernel_irqchip_allowed = false; + s->kernel_irqchip_required = false; + s->kernel_irqchip_split = ON_OFF_AUTO_OFF; + break; + case ON_OFF_SPLIT_SPLIT: + s->kernel_irqchip_allowed = true; + s->kernel_irqchip_required = true; + s->kernel_irqchip_split = ON_OFF_AUTO_ON; + break; + default: + /* The value was checked in visit_type_OnOffSplit() above. If + * we get here, then something is wrong in QEMU. 
+ */ + abort(); + } + } +} + +bool kvm_kernel_irqchip_allowed(void) +{ + return kvm_state->kernel_irqchip_allowed; +} + +bool kvm_kernel_irqchip_required(void) +{ + return kvm_state->kernel_irqchip_required; +} + +bool kvm_kernel_irqchip_split(void) +{ + return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON; +} + +static void kvm_accel_instance_init(Object *obj) +{ + KVMState *s = KVM_STATE(obj); + + s->kvm_shadow_mem = -1; + s->kernel_irqchip_allowed = true; + s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; +} + static void kvm_accel_class_init(ObjectClass *oc, void *data) { AccelClass *ac = ACCEL_CLASS(oc); ac->name = "KVM"; ac->init_machine = kvm_init; + ac->has_memory = kvm_accel_has_memory; ac->allowed = &kvm_allowed; + + object_class_property_add(oc, "kernel-irqchip", "on|off|split", + NULL, kvm_set_kernel_irqchip, + NULL, NULL, &error_abort); + object_class_property_set_description(oc, "kernel-irqchip", + "Configure KVM in-kernel irqchip", &error_abort); + + object_class_property_add(oc, "kvm-shadow-mem", "int", + kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem, + NULL, NULL, &error_abort); + object_class_property_set_description(oc, "kvm-shadow-mem", + "KVM shadow MMU size", &error_abort); } static const TypeInfo kvm_accel_type = { .name = TYPE_KVM_ACCEL, .parent = TYPE_ACCEL, + .instance_init = kvm_accel_instance_init, .class_init = kvm_accel_class_init, .instance_size = sizeof(KVMState), };