/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "qemu-option.h"
#include "qemu-config.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "hw/msi.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"
#include "memory.h"
#include "exec-memory.h"
#include "event_notifier.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

#ifdef CONFIG_VALGRIND_H
#include <valgrind/memcheck.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

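/*
 * Illustrative note: each KVMSlot mirrors one kernel memory slot, mapping
 * the guest-physical range [start_addr, start_addr + memory_size) onto
 * host memory at 'ram'.  E.g. (made-up values) start_addr = 0x100000,
 * memory_size = 0x200000, ram = 0x7f0000000000 tells KVM that guest
 * physical 1MB..3MB lives at that host virtual address.
 */
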
typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irqchip_inject_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}

static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUArchState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_init_vcpu(CPUArchState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

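/*
 * Worked example for the host/target page ratio handled below: with a
 * 64K host page size and 4K target pages, hpratio is 16, so each set
 * bit in the kernel's dirty bitmap marks 16 consecutive target pages
 * dirty.  (On x86, where both are 4K, hpratio is simply 1.)
 */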
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    target_phys_addr_t addr, addr1;
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr,
                                        TARGET_PAGE_SIZE * hpratio);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
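/* e.g. ALIGN(9, 8) == 16 and ALIGN(16, 8) == 16: rounds x up to the
 * next multiple of y, for power-of-two y. */
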
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @section: the memory region section covering the logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * too, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * a hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

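/*
 * Callers treat the result as a boolean or a small count; a negative
 * ioctl result is folded into 0 ("not available") above, e.g.:
 *
 *     s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
 */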
static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                        "PAGE_SIZE is too big. Please try to use 4k " \
                        "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_begin(MemoryListener *listener)
{
}

static void kvm_commit(MemoryListener *listener)
{
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_region_nop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size <= 8);

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, section->size);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, section->size);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size == 2);

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, false);
    if (r < 0) {
        abort();
    }
}

static void kvm_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_add(section, match_data, data,
                              event_notifier_get_fd(e));
    } else {
        kvm_io_ioeventfd_add(section, match_data, data,
                             event_notifier_get_fd(e));
    }
}

static void kvm_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_del(section, match_data, data,
                              event_notifier_get_fd(e));
    } else {
        kvm_io_ioeventfd_del(section, match_data, data,
                             event_notifier_get_fd(e));
    }
}

static MemoryListener kvm_memory_listener = {
    .begin = kvm_begin,
    .commit = kvm_commit,
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .region_nop = kvm_region_nop,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_eventfd_add,
    .eventfd_del = kvm_eventfd_del,
    .priority = 10,
};

static void kvm_handle_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}

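/* used_gsi_bitmap is an array of 32-bit words: GSI n is tracked at word
 * n / 32, bit n % 32, so e.g. GSI 37 is bit 5 of word 1. */
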
static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

static void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    kvm_irqchip_commit_routes(s);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        entry->type = new_entry->type;
        entry->flags = new_entry->flags;
        entry->u = new_entry->u;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);

    kvm_irqchip_commit_routes(s);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

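/* On IA32 the low byte of the MSI data word is the interrupt vector, so
 * hashing on it spreads distinct vectors across all KVM_MSI_HASHTAB_SIZE
 * buckets. */
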
static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

1042
1043static int kvm_irqchip_get_virq(KVMState *s)
1044{
1045 uint32_t *word = s->used_gsi_bitmap;
1046 int max_words = ALIGN(s->gsi_count, 32) / 32;
1047 int i, bit;
1048 bool retry = true;
1049
1050again:
1051 /* Return the lowest unused GSI in the bitmap */
1052 for (i = 0; i < max_words; i++) {
1053 bit = ffs(~word[i]);
1054 if (!bit) {
1055 continue;
1056 }
1057
1058 return bit - 1 + i * 32;
1059 }
4a3adebb 1060 if (!s->direct_msi && retry) {
04fa27f5
JK
1061 retry = false;
1062 kvm_flush_dynamic_msi_routes(s);
1063 goto again;
1064 }
1065 return -ENOSPC;
1066
1067}
1068
static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == msg.data) {
            return route;
        }
    }
    return NULL;
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (s->direct_msi) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = msg.data;
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = msg.data;

        kvm_add_routing_entry(s, &route->kroute);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true);
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false);
}

static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", true) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret;

    /* Find number of supported CPUs using the recommended
     * procedure from the kernel API documentation to cope with
     * older kernels that may be missing capabilities.
     */
    ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    if (ret) {
        return ret;
    }
    ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    if (ret) {
        return ret;
    }

    return 4;
}

int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;
    int max_vcpus;

    s = g_malloc0(sizeof(KVMState));

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    max_vcpus = kvm_max_vcpus(s);
    if (smp_cpus > max_vcpus) {
        ret = -EINVAL;
        fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
                "supported by KVM (%d)\n", smp_cpus, max_vcpus);
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, NULL);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    if (s) {
        if (s->vmfd >= 0) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    g_free(s);

    return ret;
}

static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUArchState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUArchState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUArchState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUArchState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

int kvm_cpu_exec(CPUArchState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }

        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    return ret;
}

int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUArchState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

void *kvm_vmalloc(ram_addr_t size)
{
#ifdef TARGET_S390X
    void *mem;

    mem = kvm_arch_vmalloc(size);
    if (mem) {
        return mem;
    }
#endif
    return qemu_vmalloc(size);
}

void kvm_setup_guest_memory(void *start, size_t size)
{
#ifdef CONFIG_VALGRIND_H
    VALGRIND_MAKE_MEM_DEFINED(start, size);
#endif
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUArchState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUArchState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUArchState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                          bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUArchState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val, bool assign,
                           uint32_t size)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

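/*
 * Illustrative use (hypothetical port/index values): a device can bind an
 * eventfd to its PIO doorbell so that a guest write wakes the I/O thread
 * without a heavyweight userspace exit, e.g.
 *
 *     int fd = event_notifier_get_fd(&notifier);
 *     kvm_set_ioeventfd_pio_word(fd, 0xc050, vq_index, true);
 *
 * and unbind it later by calling again with assign == false.
 */
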
int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}