/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/cpus.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "sysemu/sev.h"
#include "sysemu/balloon.h"

#include "hw/boards.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#define PAGE_SIZE getpagesize()

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256
struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};
struct KVMState
{
    AccelState parent_obj;

    int nr_slots;
    int fd;
    int vmfd;
    int coalesced_mmio;
    int coalesced_pio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
    int max_nested_state_len;
    int many_ioeventfds;
    int intx_set_mask;
    bool sync_mmu;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
    unsigned int sigmask_len;
    GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    unsigned long *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
    KVMMemoryListener memory_listener;
    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;

    /* memory encryption */
    void *memcrypt_handle;
    int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_direct_msi_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
static bool kvm_immediate_exit;
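/* A concrete instance of the signedness issue described in the comment on
 * irq_set_ioctl above (an illustrative sketch, using the standard Linux
 * _IOC encoding): any ioctl request with the _IOC_READ direction bit has
 * its top bit set, e.g. KVM_IRQ_LINE_STATUS encodes to 0xc008ae67. Read
 * into a signed int that is a negative number, so code like
 * "if (request > 0)" would silently misclassify it; hence the unsigned
 * type for irq_set_ioctl.
 */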
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_LAST_INFO
};
int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return s->nr_slots;
}

bool kvm_memcrypt_enabled(void)
{
    if (kvm_state && kvm_state->memcrypt_handle) {
        return true;
    }

    return false;
}

int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
{
    if (kvm_state->memcrypt_handle &&
        kvm_state->memcrypt_encrypt_data) {
        return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
                                                ptr, len);
    }

    return 1;
}
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

bool kvm_has_free_slot(MachineState *ms)
{
    KVMState *s = KVM_STATE(ms->accelerator);

    return kvm_get_free_slot(&s->memory_listener);
}

static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size);
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask;
}
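/* Worked example for kvm_align_section(), assuming a 4 KiB host page size:
 * a section with offset_within_address_space 0x1234 and size 0x3000 gives
 * aligned = 0x2000 and delta = 0xdcc, so *start becomes 0x2000 and the
 * returned size is (0x3000 - 0xdcc) & ~0xfff = 0x2000, i.e. exactly the
 * two host pages fully covered by the section. A section smaller than one
 * page yields 0, which callers treat as "nothing to map".
 */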
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    mem.memory_size = slot->memory_size;
    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    slot->old_flags = mem.flags;
    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
                              mem.memory_size, mem.userspace_addr, ret);
    return ret;
}
int kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    DPRINTF("kvm_destroy_vcpu\n");

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);

err:
    return ret;
}

static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}
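/* Note on the parking scheme used by the two functions above: KVM provides
 * no ioctl to destroy a vcpu, so kvm_destroy_vcpu() keeps the file
 * descriptor on the kvm_parked_vcpus list instead of closing it, and
 * kvm_get_vcpu() hands a parked fd back out when the same vcpu_id is
 * created again, only falling back to KVM_CREATE_VCPU for ids never seen
 * before. This is what lets cpu hot-unplug followed by re-plug of the
 * same id work.
 */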
int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
err:
    return ret;
}
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(kml, mem, false);
}
static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size;
    KVMSlot *mem;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    mem = kvm_lookup_matching_slot(kml, start_addr, size);
    if (!mem) {
        /* We don't have a slot if we want to trap every access. */
        return 0;
    }

    return kvm_slot_update_flags(kml, mem, section->mr);
}
static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    ram_addr_t start = section->offset_within_region +
                       memory_region_get_ram_addr(section->mr);
    ram_addr_t pages = int128_get64(section->size) / getpagesize();

    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 *
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty_lebitmap(), i.e. all bits in the synced
 * range are marked dirty.
 *
 * @kml: the KVM memory listener containing the slots
 * @section: the memory region section to sync the dirty bitmap for
 */
static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                          MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    struct kvm_dirty_log d = {};
    KVMSlot *mem;
    hwaddr start_addr, size;

    size = kvm_align_section(section, &start_addr);
    if (size) {
        mem = kvm_lookup_matching_slot(kml, start_addr, size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            return 0;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * too, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * a hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        d.dirty_bitmap = g_malloc0(size);

        d.slot = mem->slot | (kml->as_id << 16);
        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            g_free(d.dirty_bitmap);
            return -1;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        g_free(d.dirty_bitmap);
    }

    return 0;
}
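/* Worked example for the sizing above: a 640 KiB slot with 4 KiB target
 * pages needs 160 bitmap bits. A 32-bit userspace aligning to its own
 * sizeof(long) would allocate ALIGN(160, 32) / 8 = 20 bytes, but a 64-bit
 * kernel writes ALIGN(160, 64) / 8 = 24 bytes, overrunning the buffer by
 * 4 bytes; aligning to 64 on the userspace side, as done here, avoids the
 * mismatch.
 */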
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static MemoryListener kvm_coalesced_pio_listener = {
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
};
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}
static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
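/* Illustration of the swap above (a sketch, assuming the usual bswap16/32
 * helpers from qemu/bswap.h): with a big-endian target on a little-endian
 * host, a two-byte datamatch value of 0x1234 arrives in target byte order
 * and is turned into bswap16(0x1234) == 0x3412, which is the bit pattern
 * the kernel will actually compare against the bytes the guest wrote.
 */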
static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}
static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}
static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;

    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
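/* The probe above is deliberately empirical: it registers several dummy
 * ioeventfds at PIO address 0 with distinct datamatch values, and only if
 * every registration succeeds (i.e. the kernel's io bus limit is above the
 * historical 6-device cap) does it report that many ioeventfds are usable.
 * Everything is deassigned and closed again before returning, so the probe
 * leaves no state behind.
 */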
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writeable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* use aligned delta to align the ram address */
    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
          (start_addr - section->offset_within_address_space);

    if (!add) {
        mem = kvm_lookup_matching_slot(kml, start_addr, size);
        if (!mem) {
            return;
        }
        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(kml, section);
        }

        /* unregister the slot */
        mem->memory_size = 0;
        mem->flags = 0;
        err = kvm_set_user_memory_region(kml, mem, false);
        if (err) {
            fprintf(stderr, "%s: error unregistering slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }
        return;
    }

    /* register the new slot */
    mem = kvm_alloc_slot(kml);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(mr);

    err = kvm_set_user_memory_region(kml, mem, true);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    memory_region_ref(section->mr);
    kvm_set_phys_mem(kml, section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_set_phys_mem(kml, section, false);
    memory_region_unref(section->mr);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_sync_dirty_bitmap(kml, section);
    if (r < 0) {
        abort();
    }
}
static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}
void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id)
{
    int i;

    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.log_sync = kvm_log_sync;
    kml->listener.priority = 10;

    memory_listener_register(&kml->listener, as);
}

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}
void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!kvm_direct_msi_allowed) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}
void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}
static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}
void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}
void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}
static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}
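/* On IA32 the low byte of the MSI data register is the interrupt vector,
 * so this hash simply buckets routes by vector: e.g. MSI data 0x4041
 * lands in msi_hashtab[0x41]. With KVM_MSI_HASHTAB_SIZE == 256 the hash
 * is collision-free for that layout.
 */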
static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}
static int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /*
     * PIC and IOAPIC share the first 16 GSI numbers, thus the available
     * GSI numbers are more than the number of IRQ route. Allocating a GSI
     * number can succeed even though a new route entry cannot be added.
     * When this happens, flush dynamic MSI entries to free IRQ route entries.
     */
    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
        kvm_flush_dynamic_msi_routes(s);
    }

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}
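/* Allocation example: with gsi_count == 24 and GSIs 0-15 already marked in
 * used_gsi_bitmap, find_first_zero_bit() returns 16, which becomes the
 * virq handed to the caller; once all gsi_count bits are set the function
 * reports -ENOSPC until kvm_irqchip_release_virq() clears a bit again.
 */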
static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
            return route;
        }
    }
    return NULL;
}
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (kvm_direct_msi_allowed) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = le32_to_cpu(msg.data);
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc0(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = le32_to_cpu(msg.data);

        kvm_add_routing_entry(s, &route->kroute);
        kvm_irqchip_commit_routes(s);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}
int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    MSIMessage msg = {0, 0};

    if (pci_available && dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
                                    vector, virq);

    kvm_add_routing_entry(s, &kroute);
    kvm_arch_add_msi_route_post(&kroute, vector, dev);
    kvm_irqchip_commit_routes(s);

    return virq;
}
*s
, int virq
, MSIMessage msg
,
1273 struct kvm_irq_routing_entry kroute
= {};
1275 if (kvm_gsi_direct_mapping()) {
1279 if (!kvm_irqchip_in_kernel()) {
1284 kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1286 kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1287 kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1288 kroute
.u
.msi
.data
= le32_to_cpu(msg
.data
);
1289 if (pci_available
&& kvm_msi_devid_required()) {
1290 kroute
.flags
= KVM_MSI_VALID_DEVID
;
1291 kroute
.u
.msi
.devid
= pci_requester_id(dev
);
1293 if (kvm_arch_fixup_msi_route(&kroute
, msg
.address
, msg
.data
, dev
)) {
1297 trace_kvm_irqchip_update_msi_route(virq
);
1299 return kvm_update_routing_entry(s
, &kroute
);
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
                                    bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
        irqfd.resamplefd = rfd;
    }

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}
int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}
#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}

#endif /* !KVM_CAP_IRQ_ROUTING */
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
                                    rn ? event_notifier_get_fd(rn) : -1, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
                                    false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}
static void kvm_irqchip_create(MachineState *machine, KVMState *s)
{
    int ret;

    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(machine, s);
    if (ret == 0) {
        if (machine_kernel_irqchip_split(machine)) {
            perror("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}
/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}
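/* The fallback chain above mirrors the kernel API documentation: a kernel
 * too old for KVM_CAP_NR_VCPUS is assumed to support 4 vcpus; if
 * KVM_CAP_MAX_VCPUS is missing, the recommended count doubles as the hard
 * limit; and if KVM_CAP_MAX_VCPU_ID is missing, the hard limit also bounds
 * the vcpu ids. For example, a kernel reporting NR_VCPUS = 160 and nothing
 * else yields a soft limit, hard limit and max id of 160.
 */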
bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}
static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP",          ms->smp.cpus },
        { "hotpluggable", ms->smp.max_cpus },
        { NULL, }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int type = 0;
    const char *kvm_type;

    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

    s->sigmask_len = 8;

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    QLIST_INIT(&s->kvm_parked_vcpus);
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
    if (mc->kvm_type) {
        type = mc->kvm_type(ms, kvm_type);
    } else if (kvm_type) {
        ret = -EINVAL;
        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
        goto err;
    }

    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "Host kernel setup problem detected. Please verify:\n");
            fprintf(stderr, "- for kernels supporting the switch_amode or"
                    " user_mode parameters, whether\n");
            fprintf(stderr,
                    "  user space is running in primary address space\n");
            fprintf(stderr,
                    "- for kernels supporting the vm.allocate_pgste sysctl, "
                    "whether it is enabled\n");
        }
#endif
        goto err;
    }

    s->vmfd = ret;

    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            warn_report("Number of %s cpus requested (%d) exceeds "
                        "the recommended cpus supported by KVM (%d)",
                        nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_pio = s->coalesced_mmio &&
                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);

#ifdef KVM_CAP_IRQ_ROUTING
    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

    kvm_eventfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);

    kvm_irqfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

    kvm_ioeventfd_any_length_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);

    kvm_state = s;

    /*
     * if memory encryption object is specified then initialize the memory
     * encryption context.
     */
    if (ms->memory_encryption) {
        kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
        if (!kvm_state->memcrypt_handle) {
            ret = -1;
            goto err;
        }

        kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
    }

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (machine_kernel_irqchip_allowed(ms)) {
        kvm_irqchip_create(ms, s);
    }

    if (kvm_eventfds_allowed) {
        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    }
    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0);
    memory_listener_register(&kvm_io_listener,
                             &address_space_io);
    memory_listener_register(&kvm_coalesced_pio_listener,
                             &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
    if (!s->sync_mmu) {
        qemu_balloon_inhibit(true);
    }

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->memory_listener.slots);

    return ret;
}
void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}
static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
                          int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}
*cpu
, struct kvm_run
*run
)
1798 fprintf(stderr
, "KVM internal error. Suberror: %d\n",
1799 run
->internal
.suberror
);
1801 if (kvm_check_extension(kvm_state
, KVM_CAP_INTERNAL_ERROR_DATA
)) {
1804 for (i
= 0; i
< run
->internal
.ndata
; ++i
) {
1805 fprintf(stderr
, "extra data[%d]: %"PRIx64
"\n",
1806 i
, (uint64_t)run
->internal
.data
[i
]);
1809 if (run
->internal
.suberror
== KVM_INTERNAL_ERROR_EMULATION
) {
1810 fprintf(stderr
, "emulation failure\n");
1811 if (!kvm_arch_stop_on_emulation_error(cpu
)) {
1812 cpu_dump_state(cpu
, stderr
, CPU_DUMP_CODE
);
1813 return EXCP_INTERRUPT
;
1816 /* FIXME: Should trigger a qmp message to let management know
1817 * something went wrong.
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            if (ent->pio == 1) {
                address_space_rw(&address_space_io, ent->phys_addr,
                                 MEMTXATTRS_UNSPECIFIED, ent->data,
                                 ent->len, true);
            } else {
                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            }
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
*cpu
, run_on_cpu_data arg
)
1856 if (!cpu
->vcpu_dirty
) {
1857 kvm_arch_get_registers(cpu
);
1858 cpu
->vcpu_dirty
= true;
1862 void kvm_cpu_synchronize_state(CPUState
*cpu
)
1864 if (!cpu
->vcpu_dirty
) {
1865 run_on_cpu(cpu
, do_kvm_cpu_synchronize_state
, RUN_ON_CPU_NULL
);
1869 static void do_kvm_cpu_synchronize_post_reset(CPUState
*cpu
, run_on_cpu_data arg
)
1871 kvm_arch_put_registers(cpu
, KVM_PUT_RESET_STATE
);
1872 cpu
->vcpu_dirty
= false;
1875 void kvm_cpu_synchronize_post_reset(CPUState
*cpu
)
1877 run_on_cpu(cpu
, do_kvm_cpu_synchronize_post_reset
, RUN_ON_CPU_NULL
);
1880 static void do_kvm_cpu_synchronize_post_init(CPUState
*cpu
, run_on_cpu_data arg
)
1882 kvm_arch_put_registers(cpu
, KVM_PUT_FULL_STATE
);
1883 cpu
->vcpu_dirty
= false;
1886 void kvm_cpu_synchronize_post_init(CPUState
*cpu
)
1888 run_on_cpu(cpu
, do_kvm_cpu_synchronize_post_init
, RUN_ON_CPU_NULL
);
1891 static void do_kvm_cpu_synchronize_pre_loadvm(CPUState
*cpu
, run_on_cpu_data arg
)
1893 cpu
->vcpu_dirty
= true;
1896 void kvm_cpu_synchronize_pre_loadvm(CPUState
*cpu
)
1898 run_on_cpu(cpu
, do_kvm_cpu_synchronize_pre_loadvm
, RUN_ON_CPU_NULL
);
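/* The synchronize helpers above implement a simple ownership protocol for
 * register state: vcpu_dirty == true means QEMU's CPUState copy is
 * authoritative and must be pushed with kvm_arch_put_registers() before
 * the next KVM_RUN (kvm_cpu_exec() does this); vcpu_dirty == false means
 * the kernel's copy is authoritative and must be fetched with
 * kvm_arch_get_registers() before QEMU inspects it. All transitions are
 * funnelled through run_on_cpu() so they happen on the vcpu's own thread.
 */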
#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
static __thread bool have_sigbus_pending;
#endif

static void kvm_cpu_kick(CPUState *cpu)
{
    atomic_set(&cpu->kvm_run->immediate_exit, 1);
}

static void kvm_cpu_kick_self(void)
{
    if (kvm_immediate_exit) {
        kvm_cpu_kick(current_cpu);
    } else {
        qemu_cpu_kick_self();
    }
}
static void kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    if (kvm_immediate_exit) {
        atomic_set(&cpu->kvm_run->immediate_exit, 0);
        /* Write kvm_run->immediate_exit before the cpu->exit_request
         * write in kvm_cpu_exec.
         */
        smp_wmb();
        return;
    }

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI));
}
int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        atomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (atomic_read(&cpu->exit_request)) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            qemu_mutex_lock_iothread();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            qemu_mutex_unlock_iothread();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                qemu_mutex_lock_iothread();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                qemu_mutex_unlock_iothread();
                ret = 0;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    atomic_set(&cpu->exit_request, 0);
    return ret;
}
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    ret = ioctl(fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}
int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}
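/* Typical call sequence, sketched (group/attr values are illustrative, not
 * any specific device's interface):
 *
 *     uint64_t val;
 *     Error *err = NULL;
 *     kvm_device_access(dev_fd, group, attr, &val, false, &err);  // get
 *     val |= flag;
 *     kvm_device_access(dev_fd, group, attr, &val, true, &err);   // set
 *
 * Depending on the write flag, the kernel either fills or consumes the
 * buffer that kvmattr.addr points at.
 */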
bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
}

#endif /* !KVM_CAP_SET_GUEST_DEBUG */
static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}

void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}
/* Called asynchronously in VCPU thread.  */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    atomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}
/* Called synchronously (via signalfd) in main thread.  */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* Action required MCE kills the process if SIGBUS is blocked.  Because
     * that's what happens in the I/O thread, where we handle MCE via signalfd,
     * we can only get action optional here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}
int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}
bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}
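/* Typical use, sketched; the register id here is illustrative (ids encode
 * architecture, size and register number per the KVM_[GS]ET_ONE_REG ABI,
 * and the caller's buffer must match the size encoded in the id):
 *
 *     uint64_t val;
 *     if (!kvm_get_one_reg(cs, id, &val)) {
 *         val |= some_bit;
 *         kvm_set_one_reg(cs, id, &val);
 *     }
 */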
static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->allowed = &kvm_allowed;
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);