/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "qemu-option.h"
#include "qemu-config.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "hw/msi.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"
#include "memory.h"
#include "exec-memory.h"
#include "event_notifier.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things. */
    unsigned irqchip_inject_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};
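
/* Return the first unused slot in the slot array; a slot with
 * memory_size == 0 counts as free.  Aborts if all 32 slots are in use,
 * since guest memory cannot be mapped without a free slot. */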
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}
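
/* Push one slot's layout to the kernel via KVM_SET_USER_MEMORY_REGION.
 * Dirty logging is forced on while migration logging is active. */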
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUArchState *env = opaque;

    kvm_arch_reset_vcpu(env);
}
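
/* Create the in-kernel vcpu, mmap its shared kvm_run area and hand the
 * rest of the setup to the architecture-specific code. */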
int kvm_init_vcpu(CPUArchState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    target_phys_addr_t addr, addr1;
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) +
                        HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr,
                                        TARGET_PAGE_SIZE * hpratio);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For the dirty bitmap, the kernel allocates an array of size
         * aligned to bits-per-long.  But when the kernel is 64 bits and
         * userspace is 32 bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) differs between kernel and
         * user space.  As a result, userspace may provide a buffer that
         * is 4 bytes smaller than the kernel will use, leading to
         * userspace memory corruption (which is not detectable by
         * valgrind either, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here,
         * in the hope that sizeof(long) won't become > 8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}
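
/* Register a guest-physical range whose MMIO writes the kernel may
 * batch in the coalesced MMIO ring instead of exiting per access. */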
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}
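
/* Query a KVM capability; returns 0 if the extension is unsupported,
 * otherwise the (positive, extension-specific) value reported by
 * KVM_CHECK_EXTENSION. */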
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
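
/* Map a memory region section onto KVM slots.  Overlapping slots are
 * unregistered and, where the old slot extends beyond the new range,
 * re-registered as prefix/suffix slots, since KVM cannot resize or
 * split a slot in place. */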
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_begin(MemoryListener *listener)
{
}

static void kvm_commit(MemoryListener *listener)
{
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_region_nop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size <= 8);

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, section->size);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, section->size);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size == 2);

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, false);
    if (r < 0) {
        abort();
    }
}

static void kvm_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_add(section, match_data, data,
                              event_notifier_get_fd(e));
    } else {
        kvm_io_ioeventfd_add(section, match_data, data,
                             event_notifier_get_fd(e));
    }
}

static void kvm_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_del(section, match_data, data,
                              event_notifier_get_fd(e));
    } else {
        kvm_io_ioeventfd_del(section, match_data, data,
                             event_notifier_get_fd(e));
    }
}

static MemoryListener kvm_memory_listener = {
    .begin = kvm_begin,
    .commit = kvm_commit,
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .region_nop = kvm_region_nop,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_eventfd_add,
    .eventfd_del = kvm_eventfd_del,
    .priority = 10,
};

static void kvm_handle_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}
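
/* Inject an interrupt through the in-kernel irqchip.  Returns 1 when the
 * kernel only supports KVM_IRQ_LINE, otherwise the status value reported
 * by KVM_IRQ_LINE_STATUS. */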
int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_irqchip_in_kernel());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irqchip_line");
        abort();
    }

    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}

static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

static void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    kvm_irqchip_commit_routes(s);
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);

    kvm_irqchip_commit_routes(s);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}
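
/* Return the lowest unused GSI from the allocation bitmap.  When the
 * kernel lacks direct MSI injection, dynamically created MSI routes are
 * flushed once to reclaim GSIs before giving up with -ENOSPC. */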
static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, bit;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }
    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}

static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == msg.data) {
            return route;
        }
    }
    return NULL;
}
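
/* Deliver an MSI message: use KVM_SIGNAL_MSI directly when available,
 * otherwise fall back to a cached (or newly allocated) GSI route that
 * is then triggered like an ordinary irqchip line. */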
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (s->direct_msi) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = msg.data;
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = msg.data;

        kvm_add_routing_entry(s, &route->kroute);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_irqchip_set_irq(s, route->kroute.gsi, 1);
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd(KVMState *s, int fd, int virq)
{
    return kvm_irqchip_assign_irqfd(s, fd, virq, true);
}

int kvm_irqchip_add_irq_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_add_irqfd(s, event_notifier_get_fd(n), virq);
}

int kvm_irqchip_remove_irqfd(KVMState *s, int fd, int virq)
{
    return kvm_irqchip_assign_irqfd(s, fd, virq, false);
}

int kvm_irqchip_remove_irq_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_remove_irqfd(s, event_notifier_get_fd(n), virq);
}

static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", true) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
    kvm_kernel_irqchip = true;

    kvm_init_irq_routing(s);

    return 0;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret;

    /* Find number of supported CPUs using the recommended
     * procedure from the kernel API documentation to cope with
     * older kernels that may be missing capabilities.
     */
    ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    if (ret) {
        return ret;
    }
    ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    if (ret) {
        return ret;
    }

    return 4;
}
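
/* One-time global KVM setup: open /dev/kvm, create the VM, verify
 * required capabilities, probe optional ones, create the in-kernel
 * irqchip if requested and register the memory listener. */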
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;
    int max_vcpus;

    s = g_malloc0(sizeof(KVMState));

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    max_vcpus = kvm_max_vcpus(s);
    if (smp_cpus > max_vcpus) {
        ret = -EINVAL;
        fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
                "supported by KVM (%d)\n", smp_cpus, max_vcpus);
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, NULL);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s) {
        if (s->vmfd >= 0) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    g_free(s);

    return ret;
}
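
/* Complete a KVM_EXIT_IO by replaying each I/O access against QEMU's
 * port handlers; the data buffer lives in the shared kvm_run page. */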
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUArchState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUArchState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUArchState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUArchState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}
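
/* Main vcpu loop: flush dirty register state, enter KVM_RUN without the
 * iothread lock held, then dispatch on run->exit_reason until an exit
 * code other than 0 tells us to leave the loop. */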
int kvm_cpu_exec(CPUArchState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    return ret;
}
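
/* Thin wrappers around ioctl(2) for the system, VM and vcpu file
 * descriptors; all of them return -errno on failure. */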
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUArchState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_allows_irq0_override(void)
{
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

void *kvm_vmalloc(ram_addr_t size)
{
#ifdef TARGET_S390X
    void *mem;

    mem = kvm_arch_vmalloc(size);
    if (mem) {
        return mem;
    }
#endif
    return qemu_vmalloc(size);
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUArchState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUArchState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUArchState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUArchState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
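
/* Bind an eventfd to a datamatch-filtered MMIO address so that guest
 * writes of the given value signal the fd in the kernel, without a
 * round trip through userspace. */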
int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val, bool assign,
                           uint32_t size)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}