/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
#include "bswap.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
    int xsave, xcrs;
    int many_ioeventfds;
};

static KVMState *kvm_state;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};

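/* A slot with memory_size == 0 is considered free; kvm_alloc_slot() hands
 * out the first such entry and aborts if the fixed slot array is full. */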
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

int kvm_physical_memory_addr_from_ram(KVMState *s, ram_addr_t ram_addr,
                                      target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram_addr >= mem->phys_offset &&
            ram_addr < mem->phys_offset + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram_addr - mem->phys_offset);
            return 1;
        }
    }

    return 0;
}

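/* Push one slot's current parameters to the kernel via
 * KVM_SET_USER_MEMORY_REGION.  Dirty-page logging is forced on for every
 * slot while migration logging is globally enabled. */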
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}

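/* Create a vcpu for the given CPU state, mmap its shared kvm_run area and
 * let the architecture code perform its per-vcpu setup. */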
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    int old_flags;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | flags;
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size, 0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
                                         unsigned long *bitmap,
                                         unsigned long offset,
                                         unsigned long mem_size)
{
    unsigned int i, j;
    unsigned long page_number, addr, addr1, c;
    ram_addr_t ram_addr;
    unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
        HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = offset + addr1;
                ram_addr = cpu_get_physical_page_desc(addr);
                cpu_physical_memory_set_dirty(ram_addr);
            } while (c != 0);
        }
    }
    return 0;
}

#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty().
 * This means all bits are set to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), HOST_LONG_BITS) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
                                      mem->start_addr, mem->memory_size);
        start_addr = mem->start_addr + mem->memory_size;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}

int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification. This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus. Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD) && defined(CONFIG_IOTHREAD)
    int ioeventfds[7];
    int i, ret = 0;
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

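/* Register, resize or delete the KVM memory slots that back the given
 * physical address range.  Overlapping slots are unregistered first and,
 * where necessary, re-registered as prefix and suffix slots around the
 * new region, since slots must not overlap. */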
static void kvm_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size,
                             ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    size = TARGET_PAGE_ALIGN(size);
    start_addr = TARGET_PAGE_ALIGN(start_addr);

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size, ram_addr_t phys_offset)
{
    kvm_set_phys_mem(start_addr, size, phys_offset);
}

static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}

static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
};

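/* One-time accelerator initialization: open /dev/kvm, create the VM,
 * probe the capabilities cached in KVMState and register the physical
 * memory client so that RAM regions are mirrored into KVM slots. */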
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");
#endif
        goto err;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

    s->vcpu_events = 0;
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep = 0;
#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#endif

    s->debugregs = 0;
#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

    s->xsave = 0;
#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

    s->xcrs = 0;
#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    return 0;

err:
    if (s) {
        if (s->vmfd != -1) {
            close(s->vmfd);
        }
        if (s->fd != -1) {
            close(s->fd);
        }
    }
    qemu_free(s);

    return ret;
}

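/* Complete a KVM_EXIT_IO exit by replaying the batched port accesses
 * against QEMU's emulated I/O port space. */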
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}

#ifdef KVM_CAP_INTERNAL_ERROR_DATA
static int kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return 0;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
#endif

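/* Drain the coalesced MMIO ring shared with the kernel and replay the
 * buffered writes into QEMU's memory model. */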
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;
    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
}

static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

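/* Main vcpu execution loop: write back dirty register state, enter the
 * kernel with KVM_RUN (dropping the iothread lock around the ioctl) and
 * dispatch the resulting exit reason until an exit to userspace is
 * required. */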
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_irqchip_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    cpu_single_env = env;

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        cpu_single_env = NULL;
        qemu_mutex_unlock_iothread();

        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        cpu_single_env = env;
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (ret == -EINTR || ret == -EAGAIN) {
            DPRINTF("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            DPRINTF("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 1;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
#ifdef KVM_CAP_INTERNAL_ERROR_DATA
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
#endif
        case KVM_EXIT_DEBUG:
            DPRINTF("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                ret = EXCP_DEBUG;
                goto out;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(VMSTOP_PANIC);
    }
    ret = EXCP_INTERRUPT;

out:
    env->exit_request = 0;
    cpu_single_env = NULL;
    return ret;
}

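/* Thin wrappers around ioctl() for the three KVM file descriptors
 * (system, VM and vcpu); failures are normalized to a negative errno. */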
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}

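/* gdbstub support: software breakpoints are tracked per KVMState and
 * installed into the guest through the architecture hooks below. */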
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    free(sigmask);

    return r;
}

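/* ioeventfd helpers: bind an eventfd to a 4-byte MMIO location or a
 * 2-byte PIO port so that matching guest writes signal the eventfd
 * instead of causing a heavyweight exit to userspace. */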
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = 4;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
#else
    return -ENOSYS;
#endif
}

int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
#else
    return -ENOSYS;
#endif
}

int kvm_on_sigbus_vcpu(CPUState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}