qemu.git / kvm-all.c (blob at commit "KVM: MSI: Swap payload to native endianness")

/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "sysemu/sysemu.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "trace.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

#ifdef CONFIG_VALGRIND_H
#include <valgrind/memcheck.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256

typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and POSIX) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things. */
    unsigned irq_set_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;

static const KVMCapabilityInfo kvm_required_capabilities[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         hwaddr start_addr,
                                         hwaddr end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            hwaddr start_addr,
                                            hwaddr end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}

static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    mem.memory_size = slot->memory_size;
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *cpu = opaque;

    kvm_arch_reset_vcpu(cpu);
}

int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->kvm_vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, cpu);
        kvm_arch_reset_vcpu(cpu);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
{
    int flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;

    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
    mem->flags = flags;

    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    /* If the flags are effectively unchanged, there is no need to
     * issue the ioctl */
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(hwaddr phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (hwaddr)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   int128_get64(section->size), true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   int128_get64(section->size), false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    hwaddr addr, addr1;
    unsigned int pages = int128_get64(section->size) / getpagesize();
    unsigned int len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr,
                                        TARGET_PAGE_SIZE * hpratio);
            } while (c != 0);
        }
    }
    return 0;
}
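
/*
 * Worked example (editor's illustration, not part of the original source):
 * with 4 KiB host and target pages, hpratio == 1 and, on a 64-bit host,
 * HOST_LONG_BITS == 64.  If bit 5 of bitmap word 2 is set, the dirty page
 * number is (2 * 64 + 5) * 1 == 133, i.e. offset 133 * 4096 within the
 * section, and one 4 KiB range is marked dirty.  With 64 KiB host pages,
 * hpratio == 16 and each set bit marks sixteen 4 KiB target pages at once.
 */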

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
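
/*
 * Editor's note: ALIGN rounds x up to the next multiple of y for
 * power-of-two y, e.g. ALIGN(5, 4) == 8 and ALIGN(8, 4) == 8.
 */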

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 *
 * This function updates QEMU's dirty bitmap using
 * memory_region_set_dirty(), i.e. every page the kernel reports as
 * dirty is flagged in QEMU as well.
 *
 * @section: the memory region section whose logged range is synced
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    hwaddr start_addr = section->offset_within_address_space;
    hwaddr end_addr = start_addr + int128_get64(section->size);

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For the dirty bitmap, the kernel allocates an array of size
         * aligned to bits-per-long.  But when the kernel is 64-bit and
         * userspace is 32-bit, userspace can't align to the same
         * bits-per-long, since sizeof(long) differs between kernel and
         * user space.  Userspace would then provide a buffer that may be
         * 4 bytes smaller than the kernel uses, resulting in userspace
         * memory corruption (which is not detectable by valgrind either,
         * in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here,
         * in the hope that sizeof(long) won't become > 8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
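        /*
         * Numeric example (editor's illustration): a 1 GiB slot with
         * 4 KiB target pages has 262144 pages; ALIGN(262144, 64) / 8
         * yields a 32768-byte bitmap, matching what a 64-bit kernel
         * allocates for such a slot.
         */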
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = datamatch ? val : 0;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = 0;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? val : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
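    /* Editor's note: 7 probes one past the old 6-device bus limit; only a
     * kernel without that limit can register all seven ioeventfds. */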
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    bool writeable = !mr->readonly && !mr->rom_device;
    bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    void *ram = NULL;
    unsigned delta;

    /* KVM works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, not even
         * by unregistering the previous ones and then registering the
         * larger slot.  We have to maintain the existing fragmentation.
         * Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one.  If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so
         * far) - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        abort();
    }
}

static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_mem_ioeventfd_add,
    .eventfd_del = kvm_mem_ioeventfd_del,
    .coalesced_mmio_add = kvm_coalesce_mmio_region,
    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
    .priority = 10,
};

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};

static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}
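
/*
 * Example (editor's illustration): GSI 37 lives in word 37 / 32 == 1 of
 * used_gsi_bitmap, at bit 37 % 32 == 5, so set_gsi(s, 37) ORs in 1U << 5.
 */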

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

static void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    kvm_irqchip_commit_routes(s);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        entry->type = new_entry->type;
        entry->flags = new_entry->flags;
        entry->u = new_entry->u;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}
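
/*
 * Editor's note: on x86 the low byte of the MSI data payload is the
 * interrupt vector, so hashing on data & 0xff spreads distinct vectors
 * across the KVM_MSI_HASHTAB_SIZE (256) buckets.
 */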

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, bit;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }
    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}
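
/*
 * Example (editor's illustration): if word[0] == 0xffffffff and
 * word[1] == 0x00000007, then ~word[1] has bit 3 as its lowest set bit;
 * ffs() returns 4, so the allocated GSI is 4 - 1 + 1 * 32 == 35.
 */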

static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
            return route;
        }
    }
    return NULL;
}

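/*
 * Editor's note: per the commit this blob is shown at ("KVM: MSI: Swap
 * payload to native endianness"), msg.data carries the payload in
 * little-endian bus byte order, while the kernel ABI expects a
 * native-endian value; hence the le32_to_cpu() conversions here and in
 * the routing functions below.
 */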
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (s->direct_msi) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = le32_to_cpu(msg.data);
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = le32_to_cpu(msg.data);

        kvm_add_routing_entry(s, &route->kroute);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true);
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false);
}

static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", true) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret;

    /* Find number of supported CPUs using the recommended
     * procedure from the kernel API documentation to cope with
     * older kernels that may be missing capabilities.
     */
    ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    if (ret) {
        return ret;
    }
    ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    if (ret) {
        return ret;
    }

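    /* Editor's note: the kernel API documentation says to assume a limit
     * of 4 vcpus when neither capability is present. */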
    return 4;
}

int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;
    int max_vcpus;

    s = g_malloc0(sizeof(KVMState));

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    max_vcpus = kvm_max_vcpus(s);
    if (smp_cpus > max_vcpus) {
        ret = -EINVAL;
        fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
                "supported by KVM (%d)\n", smp_cpus, max_vcpus);
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilities);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

#ifdef KVM_CAP_READONLY_MEM
    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, &address_space_memory);
    memory_listener_register(&kvm_io_listener, &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s);

    return ret;
}

static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a QMP message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
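            /* Editor's note: the barrier keeps the consumption of this
             * entry ordered before the ring->first update below, which
             * hands the slot back to the kernel side of the ring. */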
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *arg)
{
    CPUState *cpu = arg;

    if (!cpu->kvm_vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->kvm_vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->kvm_vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->kvm_vcpu_dirty = false;
}

int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        cpu->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->kvm_vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (cpu->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(cpu, run);

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->exit_request = 0;
    return ret;
}

int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

void *kvm_ram_alloc(ram_addr_t size)
{
#ifdef TARGET_S390X
    void *mem;

    mem = kvm_arch_ram_alloc(size);
    if (mem) {
        return mem;
    }
#endif
    return qemu_anon_ram_alloc(size);
}

void kvm_setup_guest_memory(void *start, size_t size)
{
#ifdef CONFIG_VALGRIND_H
    VALGRIND_MAKE_MEM_DEFINED(start, size);
#endif
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *cpu;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);
    data.cpu = cpu;

    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_cpu->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_cpu->kvm_state;
    CPUArchState *env;
    CPUState *cpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                cpu = ENV_GET_CPU(env);
                if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

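    /* Editor's note: 8 is the size in bytes of the kernel's own sigset_t
     * (64 signal bits), which is what the KVM_SET_SIGNAL_MASK ABI expects
     * rather than glibc's larger sigset_t. */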
    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}