/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "kvm.h"

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

int kvm_allowed = 0;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
};

static KVMState *kvm_state;

static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (i >= 8 && i < 12)
            continue;
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
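/* Push one slot's parameters to the kernel via KVM_SET_USER_MEMORY_REGION. */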
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;

    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

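/* Create the in-kernel vcpu for @env and mmap the shared kvm_run area that
 * carries exit information between the kernel and QEMU. */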
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

    ret = kvm_arch_init_vcpu(env);

err:
    return ret;
}

int kvm_sync_vcpus(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int ret;

        ret = kvm_arch_put_registers(env);
        if (ret)
            return ret;
    }

    return 0;
}

/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, unsigned flags,
                                      unsigned mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                phys_addr + size - 1);
        return -EINVAL;
    }

    flags = (mem->flags & ~mask) | flags;
    /* Nothing changed, no need to issue ioctl */
    if (flags == mem->flags)
        return 0;

    mem->flags = flags;

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty().
 * This means all bits are set to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                    target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    KVMDirtyLog d;
    KVMSlot *mem = kvm_lookup_matching_slot(s, start_addr, end_addr);
    unsigned long alloc_size;
    ram_addr_t addr;
    target_phys_addr_t phys_addr = start_addr;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr, end_addr - 1);
        return;
    }
    dprintf("sync addr: " TARGET_FMT_lx " into %lx\n", start_addr,
            mem->phys_offset);

    alloc_size = mem->memory_size >> TARGET_PAGE_BITS / sizeof(d.dirty_bitmap);
    d.dirty_bitmap = qemu_mallocz(alloc_size);

    d.slot = mem->slot;
    dprintf("slot %d, phys_addr %llx, uaddr: %llx\n",
            d.slot, mem->start_addr, mem->phys_offset);

    if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
        dprintf("ioctl failed %d\n", errno);
        goto out;
    }

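    /* Walk the slot one target page at a time: bit N of the returned bitmap
     * corresponds to page N of the slot; mark set pages dirty in QEMU. */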
    phys_addr = start_addr;
    for (addr = mem->phys_offset; phys_addr < end_addr;
         phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
        unsigned nr = (phys_addr - start_addr) >> TARGET_PAGE_BITS;
        unsigned word = nr / (sizeof(*bitmap) * 8);
        unsigned bit = nr % (sizeof(*bitmap) * 8);
        if ((bitmap[word] >> bit) & 1)
            cpu_physical_memory_set_dirty(addr);
    }
out:
    qemu_free(d.dirty_bitmap);
}
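/* Ask the kernel to batch writes to this MMIO range in the coalesced-MMIO
 * ring instead of exiting to userspace on every access. */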
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

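/* Query a KVM extension; returns 0 if it is unsupported (or the ioctl fails),
 * otherwise the positive value reported by KVM_CHECK_EXTENSION. */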
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

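/* Global KVM initialization: open /dev/kvm, validate the API version, create
 * the VM and verify the capabilities this code depends on. */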
int kvm_init(int smp_cpus)
{
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1)
        return -EINVAL;

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    TAILQ_INIT(&s->kvm_sw_breakpoints);
#endif

    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0)
        goto err;

    /* Initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this.  Modern versions of KVM
     * just use a user-allocated buffer so we can use regular pages
     * unmodified.  Make sure we have a sufficiently modern version of KVM.
     */
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n");
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevented memory slots from
     * being destroyed properly.  Since we rely on this capability, refuse
     * to work with any kernel without it. */
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
        ret = -EINVAL;
        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION)\n"
                "Please upgrade to at least kvm-81.\n");
        goto err;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
#else
    s->coalesced_mmio = 0;
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}
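/* Complete a KVM_EXIT_IO exit by emulating each element of the (possibly
 * string) port access through the cpu_inX/cpu_outX helpers. */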
static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
                         int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(env, port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(env, port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(env, port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(env, port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(env, port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(env, port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

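/* Drain the coalesced-MMIO ring, which lives s->coalesced_mmio pages past the
 * start of the vcpu's kvm_run mapping, and replay each buffered write. */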
static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring;

        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            /* FIXME smp_wmb() */
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}

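/* Main vcpu execution loop: keep re-entering KVM_RUN and handle exits in
 * place until one of them requires returning to the main loop. */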
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
        kvm_arch_pre_run(env, run);

        if (env->exit_request) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }

        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_run_coalesced_mmio(env, run);

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(env, run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                vm_stop(EXCP_DEBUG);
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}
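/* Map the physical memory range [start_addr, start_addr + size) onto KVM
 * memory slots.  Overlapping slots are unregistered first and re-registered
 * as prefix/suffix slots so the kernel's view stays consistent. */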
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    if (start_addr & ~TARGET_PAGE_MASK) {
        if (flags >= IO_MEM_UNASSIGNED) {
            if (!kvm_lookup_overlapping_slot(s, start_addr,
                                             start_addr + size)) {
                return;
            }
            fprintf(stderr, "Unaligned split of a KVM memory slot\n");
        } else {
            fprintf(stderr, "Only page-aligned memory slots supported\n");
        }
        abort();
    }

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size)
        return;

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
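/* Thin ioctl wrappers for the /dev/kvm, VM and vcpu file descriptors; they
 * return -errno instead of -1 so callers can propagate the error directly. */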
984b5181 | 719 | int kvm_ioctl(KVMState *s, int type, ...) |
05330448 AL |
720 | { |
721 | int ret; | |
984b5181 AL |
722 | void *arg; |
723 | va_list ap; | |
05330448 | 724 | |
984b5181 AL |
725 | va_start(ap, type); |
726 | arg = va_arg(ap, void *); | |
727 | va_end(ap); | |
728 | ||
729 | ret = ioctl(s->fd, type, arg); | |
05330448 AL |
730 | if (ret == -1) |
731 | ret = -errno; | |
732 | ||
733 | return ret; | |
734 | } | |
735 | ||
984b5181 | 736 | int kvm_vm_ioctl(KVMState *s, int type, ...) |
05330448 AL |
737 | { |
738 | int ret; | |
984b5181 AL |
739 | void *arg; |
740 | va_list ap; | |
741 | ||
742 | va_start(ap, type); | |
743 | arg = va_arg(ap, void *); | |
744 | va_end(ap); | |
05330448 | 745 | |
984b5181 | 746 | ret = ioctl(s->vmfd, type, arg); |
05330448 AL |
747 | if (ret == -1) |
748 | ret = -errno; | |
749 | ||
750 | return ret; | |
751 | } | |
752 | ||
984b5181 | 753 | int kvm_vcpu_ioctl(CPUState *env, int type, ...) |
05330448 AL |
754 | { |
755 | int ret; | |
984b5181 AL |
756 | void *arg; |
757 | va_list ap; | |
758 | ||
759 | va_start(ap, type); | |
760 | arg = va_arg(ap, void *); | |
761 | va_end(ap); | |
05330448 | 762 | |
984b5181 | 763 | ret = ioctl(env->kvm_fd, type, arg); |
05330448 AL |
764 | if (ret == -1) |
765 | ret = -errno; | |
766 | ||
767 | return ret; | |
768 | } | |
int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);
#else
    return 0;
#endif
}

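/* Without a synchronous MMU, guest RAM must not be shared with a forked
 * child, so mark it MADV_DONTFORK (or refuse to run if that is missing). */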
void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
#ifdef MADV_DONTFORK
        int ret = madvise(start, size, MADV_DONTFORK);

        if (ret) {
            perror("madvise");
            exit(1);
        }
#else
        fprintf(stderr,
                "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
        exit(1);
#endif
    }
}

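/* Guest debugging support: software breakpoints are tracked per VM in
 * kvm_sw_breakpoints; hardware breakpoints are handled by the arch code. */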
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    TAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !TAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_guest_debug dbg;

    dbg.control = 0;
    if (env->singlestep_enabled)
        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &dbg);
    dbg.control |= reinject_trap;

    return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg);
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            free(bp);
            return err;
        }

        TAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                          bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        TAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    TAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */