/*
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "sysemu/sysemu.h"
#include "hw/pci/msi.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

#ifdef CONFIG_VALGRIND_H
#include <valgrind/memcheck.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256
typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_readonly_mem_allowed;
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         hwaddr start_addr,
                                         hwaddr end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            hwaddr start_addr,
                                            hwaddr end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
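
/*
 * Illustration (not part of the original file; host_buf stands for an
 * assumed page-aligned host allocation): registering 4 KiB of guest
 * RAM at guest-physical 1 MiB would look like:
 *
 *   KVMSlot slot = {
 *       .slot        = 0,
 *       .start_addr  = 0x100000,    guest-physical base
 *       .memory_size = 0x1000,      one 4 KiB page
 *       .ram         = host_buf,    host virtual backing
 *       .flags       = 0,
 *   };
 *   kvm_set_user_memory_region(s, &slot);
 */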
static void kvm_reset_vcpu(void *opaque)
{
    CPUState *cpu = opaque;

    kvm_arch_reset_vcpu(cpu);
}
int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->kvm_vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, cpu);
        kvm_arch_reset_vcpu(cpu);
    }
err:
    return ret;
}
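
/*
 * Layout sketch of the per-vcpu mapping set up above (offsets are
 * derived from the code, not from kernel headers):
 *
 *   cpu->kvm_run + 0                              -> struct kvm_run
 *   cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE  -> coalesced MMIO ring
 *
 * s->coalesced_mmio holds the page offset reported by
 * KVM_CAP_COALESCED_MMIO, so a value of 1 places the ring one page into
 * the same mmap'ed region.
 */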
/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}
static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}
static int kvm_dirty_pages_log_change(hwaddr phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (hwaddr)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}
static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}
static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    hwaddr addr, addr1;
    unsigned int len = ((section->size / getpagesize()) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);

            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr,
                                        TARGET_PAGE_SIZE * hpratio);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
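
/*
 * Example (illustrative): ALIGN(x, y) rounds x up to a multiple of the
 * power-of-two y, e.g. ALIGN(5, 4) == 8 and ALIGN(8, 4) == 8.  The
 * dirty-bitmap sizing below uses it to round a page count up to a
 * whole number of 64-bit words.
 */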
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    hwaddr start_addr = section->offset_within_address_space;
    hwaddr end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide a buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * too, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * a hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}
static int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = datamatch ? val : 0;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = 0;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}
static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? val : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}
static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot.  We have to maintain the existing fragmentation.  Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one.  If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
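
/*
 * Worked example for the prefix/suffix split above (illustrative):
 * deleting the 4 KiB range [0x3000, 0x4000) from an old slot
 * [0x1000, 0x6000) leaves a prefix slot [0x1000, 0x3000) and a suffix
 * slot [0x4000, 0x6000), with mem->ram advanced by the same size_delta
 * as mem->start_addr.
 */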
static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}
static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}
static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, section->size, match_data);
    if (r < 0) {
        abort();
    }
}
static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_mem_ioeventfd_add,
    .eventfd_del = kvm_mem_ioeventfd_del,
    .coalesced_mmio_add = kvm_coalesce_mmio_region,
    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
};

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
};
static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
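
/*
 * Note on the return value above: with plain KVM_IRQ_LINE the kernel
 * reports no delivery status, so a constant 1 ("delivered") is
 * returned; with KVM_IRQ_LINE_STATUS the kernel fills in event.status
 * (0 indicating the interrupt was coalesced).  The exact status
 * encoding is a kernel detail; see the KVM API documentation.
 */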
#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;
static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}
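
/*
 * Example (illustrative): GSI 37 lives in word 37 / 32 == 1 at bit
 * position 37 % 32 == 5, so set_gsi() ORs used_gsi_bitmap[1] with
 * 1U << 5 and clear_gsi() masks it back out.
 */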
static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}
static void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    kvm_irqchip_commit_routes(s);
}
static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        entry->type = new_entry->type;
        entry->flags = new_entry->flags;
        entry->u = new_entry->u;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}
void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}
void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
}
static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}
static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}
*s
)
1076 uint32_t *word
= s
->used_gsi_bitmap
;
1077 int max_words
= ALIGN(s
->gsi_count
, 32) / 32;
1082 /* Return the lowest unused GSI in the bitmap */
1083 for (i
= 0; i
< max_words
; i
++) {
1084 bit
= ffs(~word
[i
]);
1089 return bit
- 1 + i
* 32;
1091 if (!s
->direct_msi
&& retry
) {
1093 kvm_flush_dynamic_msi_routes(s
);
1100 static KVMMSIRoute
*kvm_lookup_msi_route(KVMState
*s
, MSIMessage msg
)
1102 unsigned int hash
= kvm_hash_msi(msg
.data
);
1105 QTAILQ_FOREACH(route
, &s
->msi_hashtab
[hash
], entry
) {
1106 if (route
->kroute
.u
.msi
.address_lo
== (uint32_t)msg
.address
&&
1107 route
->kroute
.u
.msi
.address_hi
== (msg
.address
>> 32) &&
1108 route
->kroute
.u
.msi
.data
== msg
.data
) {
1115 int kvm_irqchip_send_msi(KVMState
*s
, MSIMessage msg
)
1120 if (s
->direct_msi
) {
1121 msi
.address_lo
= (uint32_t)msg
.address
;
1122 msi
.address_hi
= msg
.address
>> 32;
1123 msi
.data
= msg
.data
;
1125 memset(msi
.pad
, 0, sizeof(msi
.pad
));
1127 return kvm_vm_ioctl(s
, KVM_SIGNAL_MSI
, &msi
);
1130 route
= kvm_lookup_msi_route(s
, msg
);
1134 virq
= kvm_irqchip_get_virq(s
);
1139 route
= g_malloc(sizeof(KVMMSIRoute
));
1140 route
->kroute
.gsi
= virq
;
1141 route
->kroute
.type
= KVM_IRQ_ROUTING_MSI
;
1142 route
->kroute
.flags
= 0;
1143 route
->kroute
.u
.msi
.address_lo
= (uint32_t)msg
.address
;
1144 route
->kroute
.u
.msi
.address_hi
= msg
.address
>> 32;
1145 route
->kroute
.u
.msi
.data
= msg
.data
;
1147 kvm_add_routing_entry(s
, &route
->kroute
);
1149 QTAILQ_INSERT_TAIL(&s
->msi_hashtab
[kvm_hash_msi(msg
.data
)], route
,
1153 assert(route
->kroute
.type
== KVM_IRQ_ROUTING_MSI
);
1155 return kvm_set_irq(s
, route
->kroute
.gsi
, 1);
int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    return kvm_update_routing_entry(s, &kroute);
}
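
/*
 * Example (illustrative values): an MSI message addressed to
 * 0x00000000fee01004 splits into u.msi.address_lo == 0xfee01004 and
 * u.msi.address_hi == 0x0, while the payload travels unchanged in
 * u.msi.data.
 */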
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */
int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true);
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false);
}
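
/*
 * Illustration (hypothetical caller, e.g. a device backend; 'msg' and
 * 'notifier' are assumed to be its MSIMessage and EventNotifier):
 *
 *   int virq = kvm_irqchip_add_msi_route(kvm_state, msg);
 *   if (virq >= 0) {
 *       kvm_irqchip_add_irqfd_notifier(kvm_state, &notifier, virq);
 *   }
 *
 * From then on the kernel injects the MSI routed to virq whenever the
 * notifier fires, with no exit to userspace.
 */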
static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", true) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}
static int kvm_max_vcpus(KVMState *s)
{
    int ret;

    /* Find number of supported CPUs using the recommended
     * procedure from the kernel API documentation to cope with
     * older kernels that may be missing capabilities.
     */
    ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    if (ret) {
        return ret;
    }
    ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    if (ret) {
        return ret;
    }

    return 4;
}
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;
    int max_vcpus;

    s = g_malloc0(sizeof(KVMState));

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    max_vcpus = kvm_max_vcpus(s);
    if (smp_cpus > max_vcpus) {
        ret = -EINVAL;
        fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
                "supported by KVM (%d)\n", smp_cpus, max_vcpus);
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = -errno;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

#ifdef KVM_CAP_READONLY_MEM
    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, &address_space_memory);
    memory_listener_register(&kvm_io_listener, &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s);

    return ret;
}
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}
*env
, struct kvm_run
*run
)
1505 CPUState
*cpu
= ENV_GET_CPU(env
);
1507 fprintf(stderr
, "KVM internal error.");
1508 if (kvm_check_extension(kvm_state
, KVM_CAP_INTERNAL_ERROR_DATA
)) {
1511 fprintf(stderr
, " Suberror: %d\n", run
->internal
.suberror
);
1512 for (i
= 0; i
< run
->internal
.ndata
; ++i
) {
1513 fprintf(stderr
, "extra data[%d]: %"PRIx64
"\n",
1514 i
, (uint64_t)run
->internal
.data
[i
]);
1517 fprintf(stderr
, "\n");
1519 if (run
->internal
.suberror
== KVM_INTERNAL_ERROR_EMULATION
) {
1520 fprintf(stderr
, "emulation failure\n");
1521 if (!kvm_arch_stop_on_emulation_error(cpu
)) {
1522 cpu_dump_state(env
, stderr
, fprintf
, CPU_DUMP_CODE
);
1523 return EXCP_INTERRUPT
;
1526 /* FIXME: Should trigger a qmp message to let management know
1527 * something went wrong.
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
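
/*
 * Design note (inferred from the code above): the ring is filled by the
 * kernel ('last' is the producer cursor) and drained here ('first' is
 * the consumer cursor); the write barrier keeps the replayed write
 * visible before the updated 'first' lets the kernel reuse the entry.
 */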
static void do_kvm_cpu_synchronize_state(void *arg)
{
    CPUState *cpu = arg;

    if (!cpu->kvm_vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->kvm_vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!cpu->kvm_vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->kvm_vcpu_dirty = false;
}
int kvm_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        cpu->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->kvm_vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (cpu->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(cpu, run);

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->exit_request = 0;
    return ret;
}
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
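
/*
 * Usage note (illustrative): the three wrappers mirror KVM's fd
 * hierarchy - system fd (s->fd), VM fd (s->vmfd), and vcpu fd
 * (cpu->kvm_fd).  E.g. KVM_GET_API_VERSION goes through kvm_ioctl(),
 * KVM_CREATE_VCPU through kvm_vm_ioctl(), and KVM_RUN through
 * kvm_vcpu_ioctl().
 */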
int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

void *kvm_ram_alloc(ram_addr_t size)
{
#ifdef TARGET_S390X
    void *mem;

    mem = kvm_arch_ram_alloc(size);
    if (mem) {
        return mem;
    }
#endif
    return qemu_anon_ram_alloc(size);
}
void kvm_setup_guest_memory(void *start, size_t size)
{
#ifdef CONFIG_VALGRIND_H
    VALGRIND_MAKE_MEM_DEFINED(start, size);
#endif
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}
{
1849 struct kvm_guest_debug dbg
;
1854 static void kvm_invoke_set_guest_debug(void *data
)
1856 struct kvm_set_guest_debug_data
*dbg_data
= data
;
1858 dbg_data
->err
= kvm_vcpu_ioctl(dbg_data
->cpu
, KVM_SET_GUEST_DEBUG
,
1862 int kvm_update_guest_debug(CPUArchState
*env
, unsigned long reinject_trap
)
1864 CPUState
*cpu
= ENV_GET_CPU(env
);
1865 struct kvm_set_guest_debug_data data
;
1867 data
.dbg
.control
= reinject_trap
;
1869 if (env
->singlestep_enabled
) {
1870 data
.dbg
.control
|= KVM_GUESTDBG_ENABLE
| KVM_GUESTDBG_SINGLESTEP
;
1872 kvm_arch_update_guest_debug(cpu
, &data
.dbg
);
1875 run_on_cpu(cpu
, kvm_invoke_set_guest_debug
, &data
);
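
/*
 * Design note: vcpu ioctls such as KVM_SET_GUEST_DEBUG must be issued
 * from the thread that runs the vcpu, so the call is bounced through
 * run_on_cpu() and its result is carried back in the stack-allocated
 * data.err.
 */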
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_cpu->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_cpu->kvm_state;
    CPUArchState *env;
    CPUState *cpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                cpu = ENV_GET_CPU(env);
                if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}
1997 int kvm_update_guest_debug(CPUArchState
*env
, unsigned long reinject_trap
)
2002 int kvm_insert_breakpoint(CPUArchState
*current_env
, target_ulong addr
,
2003 target_ulong len
, int type
)
2008 int kvm_remove_breakpoint(CPUArchState
*current_env
, target_ulong addr
,
2009 target_ulong len
, int type
)
2014 void kvm_remove_all_breakpoints(CPUArchState
*current_env
)
2017 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}