/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "sysemu/sysemu.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "trace.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

#ifdef CONFIG_VALGRIND_H
#include <valgrind/memcheck.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE 256

typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_allowed;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         hwaddr start_addr,
                                         hwaddr end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            hwaddr start_addr,
                                            hwaddr end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}

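/*
 * Thin wrapper around the KVM_SET_USER_MEMORY_REGION vm ioctl: translates
 * a QEMU-side KVMSlot into the kernel's kvm_userspace_memory_region.
 * Registering a slot whose memory_size is 0 is how an existing slot is
 * deleted (see the unregister path in kvm_set_phys_mem() below).
 */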
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *cpu = opaque;

    kvm_arch_reset_vcpu(cpu);
}

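/*
 * Create and map a vcpu: KVM_CREATE_VCPU returns a per-vcpu fd, and the
 * kernel/userspace communication area (struct kvm_run) is then mmap'ed
 * from that fd using the size reported by KVM_GET_VCPU_MMAP_SIZE.  The
 * coalesced MMIO ring, if available, lives in the same mapping at the
 * page offset reported by the KVM_CAP_COALESCED_MMIO extension.
 */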
int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->kvm_vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, cpu);
        kvm_arch_reset_vcpu(cpu);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}

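/*
 * Toggle KVM_MEM_LOG_DIRTY_PAGES on a single slot.  The ioctl is skipped
 * when the effective flags (including the global migration_log override)
 * are unchanged.
 */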
static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(hwaddr phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (hwaddr)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

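/*
 * Walk the kernel-provided dirty bitmap word by word, using ffsl() to
 * find set bits, and mark the corresponding target pages dirty.  hpratio
 * accounts for hosts whose page size is larger than TARGET_PAGE_SIZE:
 * one host page then covers hpratio consecutive target pages.
 */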
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    hwaddr addr, addr1;
    unsigned int len = ((section->size / getpagesize()) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr,
                                        TARGET_PAGE_SIZE * hpratio);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    hwaddr start_addr = section->offset_within_address_space;
    hwaddr end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide a buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * too, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * the hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

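/*
 * (Un)register a guest-physical range for MMIO coalescing.  Writes to a
 * coalesced zone are queued by the kernel into the ring consumed by
 * kvm_flush_coalesced_mmio_buffer() instead of causing an immediate exit
 * to userspace for each access.
 */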
static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

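/*
 * Bind an eventfd to a guest MMIO address via KVM_IOEVENTFD, so that a
 * guest write of 'val' (when datamatch is set) signals the fd without a
 * heavyweight exit to userspace.  The PIO variant below differs mainly
 * in setting KVM_IOEVENTFD_FLAG_PIO.
 */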
static int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = datamatch ? val : 0;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = 0;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? val : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

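/*
 * Register or unregister a memory region section as KVM slots.  Because
 * slots may not overlap, any existing overlapping slot is torn down
 * first and, where it extends beyond the new range, re-registered as
 * separate prefix/suffix slots around it.
 */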
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, section->size, match_data);
    if (r < 0) {
        abort();
    }
}

static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_mem_ioeventfd_add,
    .eventfd_del = kvm_mem_ioeventfd_del,
    .coalesced_mmio_add = kvm_coalesce_mmio_region,
    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
    .priority = 10,
};

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};

static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

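/*
 * Assert or deassert an interrupt line of the in-kernel irqchip.  With
 * the plain KVM_IRQ_LINE ioctl the return value is always 1; with
 * KVM_IRQ_LINE_STATUS the kernel-reported delivery status is returned
 * instead.
 */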
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}

static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

static void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    kvm_irqchip_commit_routes(s);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        entry->type = new_entry->type;
        entry->flags = new_entry->flags;
        entry->u = new_entry->u;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

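/*
 * Allocate the lowest unused GSI number from the bitmap.  Without direct
 * MSI injection support, a failed first pass flushes the dynamically
 * allocated MSI routes and retries once before giving up.
 */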
static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, bit;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }
    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}

static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == msg.data) {
            return route;
        }
    }
    return NULL;
}

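/*
 * Inject an MSI into the guest.  With KVM_SIGNAL_MSI support (direct_msi)
 * the message is delivered in a single ioctl; otherwise a GSI route is
 * allocated (and cached in msi_hashtab, keyed on the low data byte) and
 * the interrupt is raised through kvm_set_irq().
 *
 * Illustrative caller sketch (not taken from this file; 'addr' and 'data'
 * are hypothetical values read from a device's MSI registers):
 *
 *     MSIMessage msg = { .address = addr, .data = data };
 *     if (kvm_irqchip_send_msi(kvm_state, msg) < 0) {
 *         ... no free GSI, or MSI routing unavailable ...
 *     }
 */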
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (s->direct_msi) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = msg.data;
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = msg.data;

        kvm_add_routing_entry(s, &route->kroute);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true);
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false);
}

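/*
 * Create the in-kernel irqchip unless it was disabled with the
 * "kernel_irqchip=off" machine option or the KVM_CAP_IRQCHIP extension
 * is missing.  On success, asynchronous interrupt delivery is implied
 * and GSI routing is initialized.
 */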
static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", true) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret;

    /* Find number of supported CPUs using the recommended
     * procedure from the kernel API documentation to cope with
     * older kernels that may be missing capabilities.
     */
    ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    if (ret) {
        return ret;
    }
    ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    if (ret) {
        return ret;
    }

    return 4;
}

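/*
 * Global KVM initialization: open /dev/kvm, check the API version and
 * required capabilities, create the VM fd, probe optional extensions,
 * create the irqchip and register the memory listeners.  Runs once
 * during startup, before any vcpu is created.
 */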
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;
    int max_vcpus;

    s = g_malloc0(sizeof(KVMState));

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    max_vcpus = kvm_max_vcpus(s);
    if (smp_cpus > max_vcpus) {
        ret = -EINVAL;
        fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
                "supported by KVM (%d)\n", smp_cpus, max_vcpus);
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, &address_space_memory);
    memory_listener_register(&kvm_io_listener, &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s);

    return ret;
}

static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
{
    CPUState *cpu = ENV_GET_CPU(env);

    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

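/*
 * Drain the coalesced MMIO ring shared with the kernel, replaying each
 * queued write through cpu_physical_memory_write().  The in-progress
 * flag guards against reentrancy, since replaying a write can itself
 * end up triggering another flush.
 */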
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *arg)
{
    CPUState *cpu = arg;

    if (!cpu->kvm_vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->kvm_vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);

    if (!cpu->kvm_vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->kvm_vcpu_dirty = false;
}

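/*
 * Main vcpu execution loop: push dirty register state into the kernel,
 * issue KVM_RUN with the iothread lock dropped, and dispatch on
 * run->exit_reason until a handler returns non-zero.  Positive return
 * values (EXCP_*) are regular exits; negative ones stop the VM in
 * RUN_STATE_INTERNAL_ERROR.
 */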
int kvm_cpu_exec(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        cpu->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->kvm_vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (cpu->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(cpu, run);

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->exit_request = 0;
    return ret;
}

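/*
 * The three ioctl wrappers below mirror KVM's fd hierarchy: kvm_ioctl()
 * targets the /dev/kvm system fd, kvm_vm_ioctl() the per-VM fd, and
 * kvm_vcpu_ioctl() a single vcpu's fd.  Each converts the -1/errno
 * convention into a negative errno return value.
 */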
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

void *kvm_vmalloc(ram_addr_t size)
{
#ifdef TARGET_S390X
    void *mem;

    mem = kvm_arch_vmalloc(size);
    if (mem) {
        return mem;
    }
#endif
    return qemu_vmalloc(size);
}

void kvm_setup_guest_memory(void *start, size_t size)
{
#ifdef CONFIG_VALGRIND_H
    VALGRIND_MAKE_MEM_DEFINED(start, size);
#endif
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

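/*
 * KVM_SET_GUEST_DEBUG has to be issued on the vcpu's own thread, so the
 * arguments are bundled into a struct and the ioctl is invoked through
 * run_on_cpu().
 */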
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *cpu;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);
    data.cpu = cpu;

    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_cpu->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
    CPUState *current_cpu = ENV_GET_CPU(current_env);
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_cpu->kvm_state;
    CPUArchState *env;
    CPUState *cpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                cpu = ENV_GET_CPU(env);
                if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{
    CPUState *cpu = ENV_GET_CPU(env);
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}