/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "sysemu/sysemu.h"
#include "sysemu/accel.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "trace.h"

#include "hw/boards.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE 256

typedef struct KVMSlot
{
    hwaddr start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    AccelState parent_obj;

    KVMSlot *slots;
    int nr_slots;
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    int intx_set_mask;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
    unsigned int sigmask_len;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")

#define KVM_STATE(obj) \
    OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         hwaddr start_addr,
                                         hwaddr end_addr)
{
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            hwaddr start_addr,
                                            hwaddr end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}

static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    }
    mem.memory_size = slot->memory_size;
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->kvm_vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
{
    int flags = 0;
    flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}
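
/* Illustrative examples of the flag combinations kvm_mem_flags() produces,
 * assuming the host advertises KVM_CAP_READONLY_MEM (so
 * kvm_readonly_mem_allowed is true):
 *
 *   kvm_mem_flags(s, false, false) == 0
 *   kvm_mem_flags(s, true,  false) == KVM_MEM_LOG_DIRTY_PAGES
 *   kvm_mem_flags(s, true,  true)  == KVM_MEM_LOG_DIRTY_PAGES
 *                                     | KVM_MEM_READONLY
 *
 * Without that capability the readonly request is silently dropped. */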

static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

static int kvm_dirty_pages_log_change(hwaddr phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (hwaddr)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   int128_get64(section->size), true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   int128_get64(section->size), false);
    if (r < 0) {
        abort();
    }
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < s->nr_slots; i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    ram_addr_t start = section->offset_within_region + section->mr->ram_addr;
    ram_addr_t pages = int128_get64(section->size) / getpagesize();

    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
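
/* ALIGN() rounds x up to the next multiple of y (y must be a power of two),
 * e.g. ALIGN(100, 64) == 128 and ALIGN(262144, 64) == 262144.  It is used
 * below to size the per-slot dirty bitmaps and the GSI bitmap. */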

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty_lebitmap(), so every page the kernel
 * reports dirty is marked dirty on the qemu side as well.
 *
 * @section: the memory region section spanning the logged region
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    hwaddr start_addr = section->offset_within_address_space;
    hwaddr end_addr = start_addr + int128_get64(section->size);

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For the dirty bitmap, the kernel allocates an array of size
         * aligned to bits-per-long.  But when the kernel is 64 bits and
         * userspace is 32 bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) differs between kernel and
         * user space.  Userspace would then provide a buffer that may be
         * 4 bytes smaller than what the kernel uses, resulting in
         * userspace memory corruption (not even detectable by valgrind
         * in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * the hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}
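
/* Worked example of the bitmap sizing above, assuming 4 KiB target pages:
 * a 1 GiB slot covers 1 GiB >> 12 = 262144 pages, so the buffer is
 * ALIGN(262144, 64) / 8 = 32768 bytes.  A 100-page slot would need only
 * 13 bytes for its bits, but rounding to 64-bit words yields
 * ALIGN(100, 64) / 8 = 16 bytes. */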

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = datamatch ? val : 0;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = 0;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? val : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}
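
/* Usage sketch (hypothetical fd and port): to have 'fd' signalled in the
 * kernel, without a vmexit to userspace, whenever the guest writes the
 * 16-bit value 1 to I/O port 0x510:
 *
 *     kvm_set_ioeventfd_pio(fd, 0x510, 1, true, 2, true);
 *
 * With datamatch == false the eventfd fires on any write of that size to
 * the registered address; passing assign == false tears the binding down
 * again. */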

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    bool writeable = !mr->readonly && !mr->rom_device;
    bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
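
/* Sketch of the prefix/suffix splitting above: if an existing slot covers
 * [old.start, old.start + old.size) and the new section covers an interior
 * range [start, start + size), the old slot is unregistered and re-added as
 * two fragments around the new one:
 *
 *     old:     |----------------- old slot ------------------|
 *     new:                 |---- new section ----|
 *     result:  |- prefix -||---- new section ----||- suffix -|
 *
 * The prefix and suffix inherit old.ram at the matching offsets. */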

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
    memory_region_unref(section->mr);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
                __func__, strerror(-r));
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s\n",
                __func__, strerror(-r));
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        abort();
    }
}

static MemoryListener kvm_memory_listener = {
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_mem_ioeventfd_add,
    .eventfd_del = kvm_mem_ioeventfd_del,
    .coalesced_mmio_add = kvm_coalesce_mmio_region,
    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
    .priority = 10,
};

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};

static void kvm_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        kvm_irqchip_commit_routes(s);

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, bit;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
    }
    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }
    return -ENOSPC;
}
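
/* Example of the ffs() scan above: if word[0] == 0x0000000f (GSIs 0-3 in
 * use), then ~word[0] == 0xfffffff0 and ffs() returns 5 (the 1-based index
 * of the lowest set bit), so the function hands out GSI 5 - 1 + 0 * 32 = 4,
 * the lowest free one. */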

static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
            return route;
        }
    }
    return NULL;
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (s->direct_msi) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = le32_to_cpu(msg.data);
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc0(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = le32_to_cpu(msg.data);

        kvm_add_routing_entry(s, &route->kroute);
        kvm_irqchip_commit_routes(s);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (kvm_gsi_direct_mapping()) {
        return msg.data & 0xffff;
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
                                    bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
        irqfd.resamplefd = rfd;
    }

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
           rn ? event_notifier_get_fd(rn) : -1, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
                                    false);
}

static int kvm_irqchip_create(KVMState *s)
{
    int ret;

    if (!qemu_opt_get_bool(qemu_get_machine_opts(), "kernel_irqchip", true) ||
        (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
         (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
        return 0;
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret < 0) {
        return ret;
    } else if (ret == 0) {
        ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        if (ret < 0) {
            fprintf(stderr, "Create kernel irqchip failed\n");
            return ret;
        }
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}

/* Find number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP", smp_cpus },
        { "hotpluggable", max_cpus },
        { NULL, }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i, type = 0;
    const char *kvm_type;

    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());
    page_size_init();

    s->sigmask_len = 8;

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));

    for (i = 0; i < s->nr_slots; i++) {
        s->slots[i].slot = i;
    }

    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            fprintf(stderr,
                    "Warning: Number of %s cpus requested (%d) exceeds "
                    "the recommended cpus supported by KVM (%d)\n",
                    nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
    if (mc->kvm_type) {
        type = mc->kvm_type(kvm_type);
    } else if (kvm_type) {
        ret = -EINVAL;
        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
        goto err;
    }

    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        goto err;
    }

    s->vmfd = ret;
    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

#ifdef KVM_CAP_READONLY_MEM
    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
#endif

    kvm_eventfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, &address_space_memory);
    memory_listener_register(&kvm_io_listener, &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->slots);

    return ret;
}

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}

static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(void *arg)
{
    CPUState *cpu = arg;

    if (!cpu->kvm_vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->kvm_vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->kvm_vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
    }
}

static void do_kvm_cpu_synchronize_post_reset(void *arg)
{
    CPUState *cpu = arg;

    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
}

static void do_kvm_cpu_synchronize_post_init(void *arg)
{
    CPUState *cpu = arg;

    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->kvm_vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
}

void kvm_cpu_clean_state(CPUState *cpu)
{
    cpu->kvm_vcpu_dirty = false;
}

int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        cpu->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->kvm_vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (cpu->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(cpu, run);

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request();
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request();
                ret = EXCP_INTERRUPT;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    cpu->exit_request = 0;
    return ret;
}

int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    ret = ioctl(fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
1906
int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

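/*
 * Without an in-kernel MMU notifier (KVM_CAP_SYNC_MMU), KVM holds its
 * own references to guest pages, so copy-on-write after a fork()
 * could leave KVM operating on stale pages.  Marking guest RAM
 * MADV_DONTFORK avoids that; if the madvise() fails we cannot run
 * safely and give up.
 */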
void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
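/*
 * Guest debugging support.  Software breakpoints are shared between
 * all vcpus of a VM and tracked in a list hanging off KVMState;
 * hardware breakpoints are managed by the architecture-specific
 * kvm_arch_*_hw_breakpoint() hooks.
 */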
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *cpu;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;

    dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

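/*
 * Like all vcpu ioctls, KVM_SET_GUEST_DEBUG must be issued from the
 * thread that runs the vcpu, so the call is bounced through
 * run_on_cpu() with the arguments packed into a
 * kvm_set_guest_debug_data on the caller's stack.
 */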
int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);
    data.cpu = cpu;

    run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

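/*
 * GDB breakpoint handling.  Software breakpoints are refcounted so
 * the same address can be inserted more than once; the arch hook
 * patches the breakpoint instruction into guest memory.  After any
 * change, the debug state of every vcpu is refreshed so that all of
 * them trap on the currently active addresses.
 */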
int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        /* g_malloc() aborts on allocation failure, so no NULL check
         * is needed here. */
        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

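/*
 * Install the signal mask that the kernel applies in place of the
 * thread's own mask while this vcpu is inside KVM_RUN; passing NULL
 * removes a previously installed mask.  The length is taken from
 * KVMState rather than sizeof(sigset_t), presumably because the
 * sigset size the kernel expects in kvm_signal_mask.len can differ
 * from the (larger) glibc sigset_t.
 */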
int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}

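/*
 * Create an in-kernel device via KVM_CREATE_DEVICE.  With test=true
 * only KVM_CREATE_DEVICE_TEST is issued, which probes whether the
 * device type is supported without instantiating it.  A caller might
 * probe first and then create, e.g. (illustrative only):
 *
 *     if (kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, true) == 0) {
 *         fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_VFIO, false);
 *     }
 */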
int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}

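/*
 * Accessors for the ONE_REG interface.  The 64-bit id encodes
 * architecture, register size and register number (the KVM_REG_*
 * constants in the uapi headers); source/target point to a buffer of
 * the encoded size.  For example, a PPC target might save HIOR with
 * something like (illustrative only):
 *
 *     uint64_t hior = env->spr[SPR_HIOR];
 *     kvm_set_one_reg(cs, KVM_REG_PPC_HIOR, &hior);
 */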
int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        /* r is -errno; negate it before formatting the message */
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        /* r is -errno; negate it before formatting the message */
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}

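/*
 * Register KVM with the QOM accelerator framework: "-machine
 * accel=kvm" resolves to this class, whose init_machine hook
 * (kvm_init) brings up all of the state above.
 */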
static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->allowed = &kvm_allowed;
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);