/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "trace.h"
#include "hw/irq.h"

#include "hw/boards.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#define PAGE_SIZE getpagesize()

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

struct KVMState
{
    AccelState parent_obj;

    int nr_slots;
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and POSIX) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
    unsigned int sigmask_len;
    GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    unsigned long *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
    KVMMemoryListener memory_listener;
    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_direct_msi_allowed;
bool kvm_ioeventfd_any_length_allowed;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);

    return s->nr_slots;
}

static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

bool kvm_has_free_slot(MachineState *ms)
{
    KVMState *s = KVM_STATE(ms->accelerator);

    return kvm_get_free_slot(&s->memory_listener);
}

static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr end_addr)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMMemoryListener *kml,
                                            hwaddr start_addr,
                                            hwaddr end_addr)
{
    KVMState *s = kvm_state;
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}
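
/* Usage sketch (illustrative, not taken from this file): the x86 MCE code
 * uses this reverse mapping to turn a faulting host virtual address from a
 * SIGBUS siginfo into a guest physical address, roughly
 *
 *     hwaddr gpa;
 *     if (kvm_physical_memory_addr_from_host(kvm_state, siginfo_addr, &gpa)) {
 *         ... inject a machine check for gpa ...
 *     }
 *
 * where 'siginfo_addr' is a hypothetical variable holding the host address.
 */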

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

    if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    mem.memory_size = slot->memory_size;
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

int kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    DPRINTF("kvm_destroy_vcpu\n");

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);
err:
    return ret;
}

static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}

int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->kvm_vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    int old_flags;

    old_flags = mem->flags;
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(kml, mem);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr phys_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    KVMSlot *mem = kvm_lookup_matching_slot(kml, phys_addr, phys_addr + size);

    if (mem == NULL) {
        return 0;
    } else {
        return kvm_slot_update_flags(kml, mem, section->mr);
    }
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    ram_addr_t start = section->offset_within_region +
                       memory_region_get_ram_addr(section->mr);
    ram_addr_t pages = int128_get64(section->size) / getpagesize();

    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab the dirty bitmap from kernel space
 *
 * This function updates QEMU's dirty bitmap from the per-slot bitmaps
 * returned by KVM_GET_DIRTY_LOG for every slot overlapping the section,
 * marking the corresponding guest pages dirty.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section covering the logged region
 */
static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                          MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    struct kvm_dirty_log d = {};
    KVMSlot *mem;
    int ret = 0;
    hwaddr start_addr = section->offset_within_address_space;
    hwaddr end_addr = start_addr + int128_get64(section->size);

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(kml, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For the dirty bitmap, the kernel allocates an array whose size
         * is aligned to bits-per-long. But when the kernel is 64-bit and
         * userspace is 32-bit, userspace can't align to the same
         * bits-per-long, since sizeof(long) differs between kernel and
         * user space. Userspace would then provide a buffer that may be
         * 4 bytes smaller than what the kernel uses, resulting in
         * userspace memory corruption (which is not detectable by
         * valgrind either, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here,
         * in the hope that sizeof(long) won't become >8 any time soon.
         */
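        /* Worked example of the sizing below (illustrative numbers, not
         * from this file): a 1 GiB slot with 4 KiB target pages covers
         * 262144 pages; ALIGN(262144, 64) == 262144 bits, i.e. a
         * 32768-byte bitmap. For a 96-page slot, ALIGN(96, 64) == 128
         * bits (16 bytes), whereas a 32-bit userspace aligning to its own
         * bits-per-long would allocate ALIGN(96, 32) == 96 bits
         * (12 bytes): exactly the 4-byte shortfall described above.
         */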
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot | (kml->as_id << 16);
        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
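
/* Illustrative sketch of the conversion above (made-up values): on a
 * little-endian x86-64 host running a big-endian target, a 2-byte
 * datamatch value of 0x1234 handed over by the memory core in target byte
 * order is swapped to 0x3412 before KVM compares it against the bytes the
 * guest writes. On a same-endianness host/target pair the #if above
 * compiles out and the value passes through unchanged.
 */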

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}


static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool writeable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address up to the next page boundary and truncate the size down
       to the previous one. */
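    /* Worked example with made-up numbers: with a 4 KiB host page size,
     * start_addr == 0x1234 and size == 0x3000 give
     * delta == 0x1000 - 0x234 == 0xdcc, so the region becomes
     * start_addr == 0x2000 and size == (0x3000 - 0xdcc) & ~0xfff ==
     * 0x2000: the partial pages at both ends are dropped.
     */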
    delta = qemu_real_host_page_size - (start_addr & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= qemu_real_host_page_mask;
    if (!size || (start_addr & ~qemu_real_host_page_mask)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(kml, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_update_flags(kml, mem, mr);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(kml, section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(kml, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, not even
         * by unregistering the previous ones and then registering the
         * larger slot. We have to maintain the existing fragmentation.
         * Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so
         * far) - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(kml);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(mr);

            err = kvm_set_user_memory_region(kml, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(kml);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(mr);

            err = kvm_set_user_memory_region(kml, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                        "PAGE_SIZE is too big. Please try to use 4k " \
                        "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(kml);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(mr);

            err = kvm_set_user_memory_region(kml, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(kml);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(mr);

    err = kvm_set_user_memory_region(kml, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
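
/* Worked example for the splitting logic above (made-up addresses): if an
 * existing slot covers [0x100000, 0x500000) and a section covering
 * [0x200000, 0x300000) is deleted, the whole slot is unregistered first,
 * then [0x100000, 0x200000) is re-registered as the prefix slot and
 * [0x300000, 0x500000) as the suffix slot, leaving the middle range
 * unmapped so that guest accesses to it trap to userspace again.
 */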

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    memory_region_ref(section->mr);
    kvm_set_phys_mem(kml, section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_set_phys_mem(kml, section, false);
    memory_region_unref(section->mr);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_sync_dirty_bitmap(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
                __func__, strerror(-r));
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
                __func__, strerror(-r));
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        abort();
    }
}

void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id)
{
    int i;

    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.log_sync = kvm_log_sync;
    kml->listener.priority = 10;

    memory_listener_register(&kml->listener, as);
}

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};

static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
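
/* Usage sketch (illustrative, not called from this file): board code with
 * an in-kernel irqchip pulses a GSI by raising and then lowering the line,
 * e.g.
 *
 *     kvm_set_irq(kvm_state, gsi, 1);
 *     kvm_set_irq(kvm_state, gsi, 0);
 *
 * where 'gsi' is a hypothetical route previously set up with
 * kvm_irqchip_add_irq_route() or one of the MSI helpers below.
 */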

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Track which GSIs are in use with a bitmap */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!kvm_direct_msi_allowed) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /*
     * The PIC and IOAPIC share the first 16 GSI numbers, so there are
     * more available GSI numbers than there are IRQ route entries.
     * Allocating a GSI number can therefore succeed even though a new
     * route entry cannot be added. When this happens, flush the dynamic
     * MSI entries to free up IRQ route entries.
     */
    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
        kvm_flush_dynamic_msi_routes(s);
    }

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}

static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
            return route;
        }
    }
    return NULL;
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (kvm_direct_msi_allowed) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = le32_to_cpu(msg.data);
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc0(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = le32_to_cpu(msg.data);

        kvm_add_routing_entry(s, &route->kroute);
        kvm_irqchip_commit_routes(s);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}

int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    MSIMessage msg = {0, 0};

    if (dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    kvm_add_routing_entry(s, &kroute);
    kvm_arch_add_msi_route_post(&kroute, vector, dev);
    kvm_irqchip_commit_routes(s);

    return virq;
}
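
/* Usage sketch (illustrative; 'vector', 'dev' and 'n' are hypothetical
 * caller state, the functions are the ones defined in this file): device
 * code such as VFIO or vhost typically allocates a route for an MSI vector
 * and then attaches an eventfd to it, so the interrupt is delivered
 * entirely inside the kernel:
 *
 *     int virq = kvm_irqchip_add_msi_route(kvm_state, vector, dev);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &n, NULL, virq);
 *     }
 *
 * Teardown goes through kvm_irqchip_remove_irqfd_notifier_gsi() and
 * kvm_irqchip_release_virq().
 */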

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        return -EINVAL;
    }

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
                                    bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
        irqfd.resamplefd = rfd;
    }

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
                                    bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
                                    rn ? event_notifier_get_fd(rn) : -1,
                                    virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
                                    false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}
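
/* Illustrative sketch of the qemu_irq <-> GSI map above ('board_irq' and
 * the GSI value are hypothetical): platform code that knows the wiring
 * registers the mapping once,
 *
 *     kvm_irqchip_set_qemuirq_gsi(kvm_state, board_irq, 5);
 *
 * after which a device holding only the qemu_irq can call
 * kvm_irqchip_add_irqfd_notifier(kvm_state, &n, NULL, board_irq) to wire
 * its eventfd to the in-kernel irqchip without knowing the GSI itself.
 */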

static void kvm_irqchip_create(MachineState *machine, KVMState *s)
{
    int ret;

    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(machine, s);
    if (ret == 0) {
        if (machine_kernel_irqchip_split(machine)) {
            perror("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}

/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}

bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_machine->accelerator);
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}

static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP",          smp_cpus },
        { "hotpluggable", max_cpus },
        { NULL, }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int type = 0;
    const char *kvm_type;

    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

    s->sigmask_len = 8;

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    QLIST_INIT(&s->kvm_parked_vcpus);
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            fprintf(stderr,
                    "Warning: Number of %s cpus requested (%d) exceeds "
                    "the recommended cpus supported by KVM (%d)\n",
                    nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
    if (mc->kvm_type) {
        type = mc->kvm_type(kvm_type);
    } else if (kvm_type) {
        ret = -EINVAL;
        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
        goto err;
    }

    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "Host kernel setup problem detected. Please verify:\n");
            fprintf(stderr, "- for kernels supporting the switch_amode or"
                    " user_mode parameters, whether\n");
            fprintf(stderr,
                    "  user space is running in primary address space\n");
            fprintf(stderr,
                    "- for kernels supporting the vm.allocate_pgste sysctl, "
                    "whether it is enabled\n");
        }
#endif
        goto err;
    }

    s->vmfd = ret;
    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

#ifdef KVM_CAP_READONLY_MEM
    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif

    kvm_eventfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);

    kvm_irqfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

    kvm_ioeventfd_any_length_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (machine_kernel_irqchip_allowed(ms)) {
        kvm_irqchip_create(ms, s);
    }

    kvm_state = s;

    if (kvm_eventfds_allowed) {
        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    }
    s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0);
    memory_listener_register(&kvm_io_listener,
                             &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->memory_listener.slots);

    return ret;
}

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}

static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data,
                          int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}
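
/* Ring semantics for reference (a reading of the KVM API, stated here as
 * an assumption rather than taken from this file): the kernel produces
 * entries at ring->last and userspace consumes at ring->first, so the ring
 * is empty when first == last. With KVM_COALESCED_MMIO_MAX of, say, 170
 * (its value with 4 KiB pages), first == 168 and last == 1 means three
 * entries are pending, at indices 168, 169 and 0.
 */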

static void do_kvm_cpu_synchronize_state(void *arg)
{
    CPUState *cpu = arg;

    if (!cpu->kvm_vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->kvm_vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->kvm_vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
    }
}

static void do_kvm_cpu_synchronize_post_reset(void *arg)
{
    CPUState *cpu = arg;

    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
}

static void do_kvm_cpu_synchronize_post_init(void *arg)
{
    CPUState *cpu = arg;

    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
}

int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        cpu->exit_request = 0;
        return EXCP_HLT;
    }

    qemu_mutex_unlock_iothread();

    do {
        MemTxAttrs attrs;

        if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->kvm_vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (cpu->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request();
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request();
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                qemu_mutex_lock_iothread();
                qemu_system_guest_panicked();
                qemu_mutex_unlock_iothread();
                ret = 0;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    qemu_mutex_lock_iothread();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->exit_request = 0;
    return ret;
}

int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    ret = ioctl(fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}

void kvm_device_access(int fd, int group, uint64_t attr,
                       void *val, bool write)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_report("KVM_%s_DEVICE_ATTR failed: %s",
                     write ? "SET" : "GET", strerror(-err));
        error_printf("Group %d attr 0x%016" PRIx64, group, attr);
        abort();
    }
}
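
/* Usage sketch (illustrative; the group constant is a real kernel
 * definition, but 'dev_fd' and the values are hypothetical): a device
 * created via KVM_CREATE_DEVICE, such as the ARM VGIC, reads and writes
 * its attributes through the helper above:
 *
 *     uint32_t num_irq;
 *     kvm_device_access(dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
 *                       &num_irq, false);    (read)
 *     num_irq = 128;
 *     kvm_device_access(dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0,
 *                       &num_irq, true);     (write)
 *
 * Note the helper aborts on failure, so callers should probe first with
 * kvm_device_check_attr().
 */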
2139
2140 int kvm_has_sync_mmu(void)
2141 {
2142 return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
2143 }
2144
2145 int kvm_has_vcpu_events(void)
2146 {
2147 return kvm_state->vcpu_events;
2148 }
2149
2150 int kvm_has_robust_singlestep(void)
2151 {
2152 return kvm_state->robust_singlestep;
2153 }
2154
2155 int kvm_has_debugregs(void)
2156 {
2157 return kvm_state->debugregs;
2158 }
2159
2160 int kvm_has_many_ioeventfds(void)
2161 {
2162 if (!kvm_enabled()) {
2163 return 0;
2164 }
2165 return kvm_state->many_ioeventfds;
2166 }
2167
2168 int kvm_has_gsi_routing(void)
2169 {
2170 #ifdef KVM_CAP_IRQ_ROUTING
2171 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
2172 #else
2173 return false;
2174 #endif
2175 }
2176
2177 int kvm_has_intx_set_mask(void)
2178 {
2179 return kvm_state->intx_set_mask;
2180 }
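/*
 * Editor's note: most of the kvm_has_*() getters above return results
 * that were probed once at kvm_init() time and cached in KVMState;
 * kvm_has_sync_mmu() and kvm_has_gsi_routing() are the exceptions and
 * re-issue KVM_CHECK_EXTENSION on every call.
 */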
2181
2182 void kvm_setup_guest_memory(void *start, size_t size)
2183 {
2184 if (!kvm_has_sync_mmu()) {
2185 int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);
2186
2187 if (ret) {
2188 perror("qemu_madvise");
2189 fprintf(stderr,
2190 "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
2191 exit(1);
2192 }
2193 }
2194 }
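/*
 * Editor's note: without KVM_CAP_SYNC_MMU (i.e. no MMU notifiers in
 * the host kernel), KVM cannot track changes to the userspace mapping
 * of guest RAM, so a fork() marking those pages copy-on-write could
 * leave KVM operating on stale page translations. QEMU_MADV_DONTFORK
 * keeps guest RAM out of any child process to sidestep that; if even
 * the madvise fails there is no safe way to run the guest, hence the
 * hard exit(1) above.
 */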
2195
2196 #ifdef KVM_CAP_SET_GUEST_DEBUG
2197 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
2198 target_ulong pc)
2199 {
2200 struct kvm_sw_breakpoint *bp;
2201
2202 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
2203 if (bp->pc == pc) {
2204 return bp;
2205 }
2206 }
2207 return NULL;
2208 }
2209
2210 int kvm_sw_breakpoints_active(CPUState *cpu)
2211 {
2212 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
2213 }
2214
2215 struct kvm_set_guest_debug_data {
2216 struct kvm_guest_debug dbg;
2217 CPUState *cpu;
2218 int err;
2219 };
2220
2221 static void kvm_invoke_set_guest_debug(void *data)
2222 {
2223 struct kvm_set_guest_debug_data *dbg_data = data;
2224
2225 dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
2226 &dbg_data->dbg);
2227 }
2228
2229 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2230 {
2231 struct kvm_set_guest_debug_data data;
2232
2233 data.dbg.control = reinject_trap;
2234
2235 if (cpu->singlestep_enabled) {
2236 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2237 }
2238 kvm_arch_update_guest_debug(cpu, &data.dbg);
2239 data.cpu = cpu;
2240
2241 run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
2242 return data.err;
2243 }
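/*
 * Editor's note: vCPU ioctls such as KVM_SET_GUEST_DEBUG are supposed
 * to be issued from the thread that runs the vCPU, which is why the
 * call is wrapped in kvm_invoke_set_guest_debug() and dispatched with
 * run_on_cpu() instead of being made directly. Roughly what happens
 * when the gdbstub requests single-stepping via cpu_single_step()
 * (sketch, not verbatim):
 *
 *     cpu->singlestep_enabled = SSTEP_ENABLE;
 *     kvm_update_guest_debug(cpu, 0);   // pushes the flag into KVM
 */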
2244
2245 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2246 target_ulong len, int type)
2247 {
2248 struct kvm_sw_breakpoint *bp;
2249 int err;
2250
2251 if (type == GDB_BREAKPOINT_SW) {
2252 bp = kvm_find_sw_breakpoint(cpu, addr);
2253 if (bp) {
2254 bp->use_count++;
2255 return 0;
2256 }
2257
2258 bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2259 bp->pc = addr;
2260 bp->use_count = 1;
2261 err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2262 if (err) {
2263 g_free(bp);
2264 return err;
2265 }
2266
2267 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2268 } else {
2269 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2270 if (err) {
2271 return err;
2272 }
2273 }
2274
2275 CPU_FOREACH(cpu) {
2276 err = kvm_update_guest_debug(cpu, 0);
2277 if (err) {
2278 return err;
2279 }
2280 }
2281 return 0;
2282 }
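/*
 * Editor's note: for GDB_BREAKPOINT_SW the arch hook
 * kvm_arch_insert_sw_breakpoint() saves the original instruction and
 * patches a trap into guest memory (e.g. int3 on x86), with use_count
 * refcounting duplicate requests at the same pc. Hardware break- and
 * watchpoints go through kvm_arch_insert_hw_breakpoint() and only
 * reach the debug registers via the KVM_SET_GUEST_DEBUG update that
 * the closing CPU_FOREACH loop broadcasts to every vCPU.
 */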
2283
2284 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2285 target_ulong len, int type)
2286 {
2287 struct kvm_sw_breakpoint *bp;
2288 int err;
2289
2290 if (type == GDB_BREAKPOINT_SW) {
2291 bp = kvm_find_sw_breakpoint(cpu, addr);
2292 if (!bp) {
2293 return -ENOENT;
2294 }
2295
2296 if (bp->use_count > 1) {
2297 bp->use_count--;
2298 return 0;
2299 }
2300
2301 err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2302 if (err) {
2303 return err;
2304 }
2305
2306 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2307 g_free(bp);
2308 } else {
2309 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2310 if (err) {
2311 return err;
2312 }
2313 }
2314
2315 CPU_FOREACH(cpu) {
2316 err = kvm_update_guest_debug(cpu, 0);
2317 if (err) {
2318 return err;
2319 }
2320 }
2321 return 0;
2322 }
2323
2324 void kvm_remove_all_breakpoints(CPUState *cpu)
2325 {
2326 struct kvm_sw_breakpoint *bp, *next;
2327 KVMState *s = cpu->kvm_state;
2328 CPUState *tmpcpu;
2329
2330 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2331 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2332 /* Try harder to find a CPU that currently sees the breakpoint. */
2333 CPU_FOREACH(tmpcpu) {
2334 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2335 break;
2336 }
2337 }
2338 }
2339 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2340 g_free(bp);
2341 }
2342 kvm_arch_remove_all_hw_breakpoints();
2343
2344 CPU_FOREACH(cpu) {
2345 kvm_update_guest_debug(cpu, 0);
2346 }
2347 }
2348
2349 #else /* !KVM_CAP_SET_GUEST_DEBUG */
2350
2351 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2352 {
2353 return -EINVAL;
2354 }
2355
2356 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2357 target_ulong len, int type)
2358 {
2359 return -EINVAL;
2360 }
2361
2362 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2363 target_ulong len, int type)
2364 {
2365 return -EINVAL;
2366 }
2367
2368 void kvm_remove_all_breakpoints(CPUState *cpu)
2369 {
2370 }
2371 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
2372
2373 int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2374 {
2375 KVMState *s = kvm_state;
2376 struct kvm_signal_mask *sigmask;
2377 int r;
2378
2379 if (!sigset) {
2380 return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
2381 }
2382
2383 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2384
2385 sigmask->len = s->sigmask_len;
2386 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2387 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2388 g_free(sigmask);
2389
2390 return r;
2391 }
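/*
 * Editor's note: struct kvm_signal_mask carries an explicit length
 * because glibc's sigset_t is larger than the kernel's; the value
 * cached in s->sigmask_len at init time tells KVM how many bytes of
 * the set are meaningful. A hedged sketch of the usual caller pattern
 * (mirroring kvm_init_cpu_signals() in cpus.c: block everything, then
 * let KVM_RUN be interrupted by the IPI signal):
 *
 *     sigset_t set;
 *     pthread_sigmask(SIG_BLOCK, NULL, &set);   // current mask
 *     sigdelset(&set, SIG_IPI);                 // unblock inside KVM_RUN
 *     if (kvm_set_signal_mask(cpu, &set) < 0) {
 *         perror("kvm_set_signal_mask");
 *     }
 */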

2392 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2393 {
2394 return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
2395 }
2396
2397 int kvm_on_sigbus(int code, void *addr)
2398 {
2399 return kvm_arch_on_sigbus(code, addr);
2400 }
2401
2402 int kvm_create_device(KVMState *s, uint64_t type, bool test)
2403 {
2404 int ret;
2405 struct kvm_create_device create_dev;
2406
2407 create_dev.type = type;
2408 create_dev.fd = -1;
2409 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2410
2411 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2412 return -ENOTSUP;
2413 }
2414
2415 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2416 if (ret) {
2417 return ret;
2418 }
2419
2420 return test ? 0 : create_dev.fd;
2421 }
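/*
 * Editor's note: with test=true this only probes (KVM_CREATE_DEVICE_TEST
 * leaves no device behind) and returns 0 on success; with test=false the
 * return value is the new device fd. A usage sketch (KVM_DEV_TYPE_VFIO is
 * a real device type from the KVM headers; error handling elided):
 *
 *     int dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, false);
 *     if (dev_fd >= 0) {
 *         // configure it with kvm_device_access()/kvm_device_ioctl()
 *     }
 */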
2422
2423 bool kvm_device_supported(int vmfd, uint64_t type)
2424 {
2425 struct kvm_create_device create_dev = {
2426 .type = type,
2427 .fd = -1,
2428 .flags = KVM_CREATE_DEVICE_TEST,
2429 };
2430
2431 if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
2432 return false;
2433 }
2434
2435 return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
2436 }
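/*
 * Editor's note: kvm_device_supported() intentionally uses raw ioctl()
 * on a caller-supplied vmfd rather than kvm_vm_ioctl(), so it works on
 * scratch VMs created before (or without) a fully initialized global
 * kvm_state. Semantically it is kvm_create_device(..., true) reduced
 * to a boolean.
 */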
2437
2438 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2439 {
2440 struct kvm_one_reg reg;
2441 int r;
2442
2443 reg.id = id;
2444 reg.addr = (uintptr_t) source;
2445 r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2446 if (r) {
2447 trace_kvm_failed_reg_set(id, strerror(-r));
2448 }
2449 return r;
2450 }
2451
2452 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2453 {
2454 struct kvm_one_reg reg;
2455 int r;
2456
2457 reg.id = id;
2458 reg.addr = (uintptr_t) target;
2459 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2460 if (r) {
2461 trace_kvm_failed_reg_get(id, strerror(-r));
2462 }
2463 return r;
2464 }
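/*
 * Editor's note: a ONE_REG id is a 64-bit value encoding architecture,
 * register size and register index, so these two entry points cover
 * every register the kernel exposes through KVM_GET/SET_ONE_REG. A
 * hedged sketch for one 64-bit register (KVM_REG_ARM_TIMER_CNT is a
 * real id from the ARM KVM headers):
 *
 *     uint64_t cnt;
 *     if (kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cnt) == 0) {
 *         // cnt now holds the guest's virtual counter value
 *     }
 */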
2465
2466 static void kvm_accel_class_init(ObjectClass *oc, void *data)
2467 {
2468 AccelClass *ac = ACCEL_CLASS(oc);
2469 ac->name = "KVM";
2470 ac->init_machine = kvm_init;
2471 ac->allowed = &kvm_allowed;
2472 }
2473
2474 static const TypeInfo kvm_accel_type = {
2475 .name = TYPE_KVM_ACCEL,
2476 .parent = TYPE_ACCEL,
2477 .class_init = kvm_accel_class_init,
2478 .instance_size = sizeof(KVMState),
2479 };
2480
2481 static void kvm_type_init(void)
2482 {
2483 type_register_static(&kvm_accel_type);
2484 }
2485
2486 type_init(kvm_type_init);
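/*
 * Editor's note: type_init() arranges for kvm_type_init() to run
 * during startup via module_call_init(MODULE_INIT_QOM), registering
 * TYPE_KVM_ACCEL with QOM. Starting QEMU with "-machine accel=kvm"
 * then resolves this AccelClass and calls ac->init_machine, i.e.
 * kvm_init(), while the machine is being created.
 */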