/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Glauber Costa <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "sysemu.h"
#include "gdbstub.h"
#include "kvm.h"

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

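/* Per-slot bookkeeping kept on the QEMU side: the guest-physical start
 * address and size of the region, the ram_addr_t offset backing it, the
 * slot index used when talking to the kernel, and the region flags
 * (e.g. KVM_MEM_LOG_DIRTY_PAGES). */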
typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

int kvm_allowed = 0;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
};

static KVMState *kvm_state;

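/* Return the first unused slot (memory_size == 0), skipping the range
 * reserved below as KVM private memory slots; running out of slots is
 * treated as a fatal error. */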
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (i >= 8 && i < 12)
            continue;
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

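/* Return the slot that covers exactly [start_addr, end_addr), or NULL if no
 * slot matches these bounds precisely. */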
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

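/* Return the slot containing start_addr, or NULL if the address does not
 * fall inside any registered slot. */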
static KVMSlot *kvm_lookup_slot(KVMState *s, target_phys_addr_t start_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr >= mem->start_addr &&
            start_addr < (mem->start_addr + mem->memory_size))
            return mem;
    }

    return NULL;
}

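/* Push one slot to the kernel, translating the qemu phys_offset into a host
 * virtual address.  Callers unregister a slot by setting its memory_size to
 * 0 before calling this. */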
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;

    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}


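/* Create a vcpu for 'env', remember its fd and map the shared kvm_run area
 * used by the kernel to report exit information, then let the architecture
 * specific code finish the setup. */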
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

    ret = kvm_arch_init_vcpu(env);

err:
    return ret;
}

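/* Write the register state of every CPU back to the kernel via
 * kvm_arch_put_registers(). */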
int kvm_sync_vcpus(void)
{
    CPUState *env;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        int ret;

        ret = kvm_arch_put_registers(env);
        if (ret)
            return ret;
    }

    return 0;
}

/*
 * dirty pages logging control
 */
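/* Update the flags of the slot covering exactly [phys_addr, phys_addr + size):
 * clear the bits in 'mask', set the bits in 'flags', and only issue the ioctl
 * if something actually changed. */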
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, unsigned flags,
                                      unsigned mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                phys_addr + size - 1);
        return -EINVAL;
    }

    flags = (mem->flags & ~mask) | flags;
    /* Nothing changed, no need to issue ioctl */
    if (flags == mem->flags)
        return 0;

    mem->flags = flags;

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

/**
 * kvm_physical_sync_dirty_bitmap - Grab the dirty bitmap from kernel space
 * Every page that the kernel reports as dirty is marked dirty in qemu's
 * bitmap via cpu_physical_memory_set_dirty(), i.e. with all dirty flags set.
 *
 * @start_addr: start of the logged region.
 * @end_addr: end of the logged region.
 */
void kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                    target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    KVMDirtyLog d;
    KVMSlot *mem = kvm_lookup_matching_slot(s, start_addr, end_addr);
    unsigned long alloc_size;
    ram_addr_t addr;
    target_phys_addr_t phys_addr = start_addr;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr, end_addr - 1);
        return;
    }
    dprintf("sync addr: " TARGET_FMT_lx " into %lx\n", start_addr,
            mem->phys_offset);

    /* the kernel fills in one bit per page, padded out to whole longs */
    alloc_size = ((mem->memory_size >> TARGET_PAGE_BITS) +
                  sizeof(unsigned long) * 8 - 1) /
                 (sizeof(unsigned long) * 8) * sizeof(unsigned long);
    d.dirty_bitmap = qemu_mallocz(alloc_size);

    d.slot = mem->slot;
    dprintf("slot %d, phys_addr %llx, uaddr: %llx\n",
            d.slot, mem->start_addr, mem->phys_offset);

    if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) < 0) {
        dprintf("ioctl failed %d\n", errno);
        goto out;
    }

    phys_addr = start_addr;
    for (addr = mem->phys_offset; phys_addr < end_addr;
         phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        unsigned long *bitmap = (unsigned long *)d.dirty_bitmap;
        unsigned nr = (phys_addr - start_addr) >> TARGET_PAGE_BITS;
        unsigned word = nr / (sizeof(*bitmap) * 8);
        unsigned bit = nr % (sizeof(*bitmap) * 8);

        if ((bitmap[word] >> bit) & 1)
            cpu_physical_memory_set_dirty(addr);
    }
out:
    qemu_free(d.dirty_bitmap);
}

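/* Register/unregister a zone in which the kernel may coalesce MMIO writes
 * instead of exiting for each access (per the KVM_CAP_COALESCED_MMIO API);
 * the deferred writes are drained in kvm_run_coalesced_mmio() below.  Only
 * attempted if the capability was detected at init time. */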
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

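/* One-time initialization: open /dev/kvm, check the API version and the
 * capabilities we depend on, create the VM, probe for coalesced MMIO and
 * finally run the architecture-specific setup.  SMP guests are not supported
 * yet, so more than one CPU is rejected up front. */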
int kvm_init(int smp_cpus)
{
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1)
        return -EINVAL;

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    TAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
        ret = s->vmfd;
        goto err;
    }

    /* initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this. Modern versions of KVM
     * just use a user allocated buffer so we can use regular pages
     * unmodified. Make sure we have a sufficiently modern version of KVM.
     */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
    if (ret <= 0) {
        if (ret == 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n");
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevented memory slots from
     * being destroyed properly. Since we rely on this capability, refuse
     * to work with any kernel that does not expose it. */
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION,
                    KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
    if (ret <= 0) {
        if (ret == 0)
            ret = -EINVAL;

        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION)\n"
                "Please upgrade to at least kvm-81.\n");
        goto err;
    }

    s->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_COALESCED_MMIO);
    if (ret > 0)
        s->coalesced_mmio = ret;
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}

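/* Handle a KVM_EXIT_IO exit: replay 'count' port accesses of 'size' bytes
 * against the emulated I/O ports, reading from or writing to the data area
 * the kernel placed alongside the kvm_run structure. */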
static int kvm_handle_io(CPUState *env, uint16_t port, void *data,
                         int direction, int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(env, port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(env, port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(env, port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(env, port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(env, port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(env, port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

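/* Drain the coalesced MMIO ring.  s->coalesced_mmio is the page offset of the
 * ring within the vcpu mapping (the value returned by KVM_CHECK_EXTENSION in
 * kvm_init()); every pending entry is replayed as an ordinary physical memory
 * write. */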
static void kvm_run_coalesced_mmio(CPUState *env, struct kvm_run *run)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_ring *ring;

        ring = (void *)run + (s->coalesced_mmio * TARGET_PAGE_SIZE);
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            /* FIXME smp_wmb() */
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}

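/* Main vcpu execution loop: re-enter the guest via KVM_RUN and dispatch on
 * run->exit_reason until a handler leaves ret at 0 (return to the generic
 * qemu loop) or an unrecoverable error occurs; a positive ret means "resume
 * the guest". */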
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
        kvm_arch_pre_run(env, run);

        if (env->exit_request) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }

        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_run_coalesced_mmio(env, run);

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(env, run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                vm_stop(EXCP_DEBUG);
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}

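/* Register, update or remove the slot for a guest-physical range.  A range
 * that overlaps an existing slot is either dropped (unassigned memory), left
 * alone (already mapped at the same offset), or the old slot is split into a
 * prefix, the new region and a suffix; overlaps that cannot be expressed this
 * way are fatal. */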
void kvm_set_phys_mem(target_phys_addr_t start_addr,
                      ram_addr_t size,
                      ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem;

    if (start_addr & ~TARGET_PAGE_MASK) {
        fprintf(stderr, "Only page-aligned memory slots supported\n");
        abort();
    }

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    mem = kvm_lookup_slot(s, start_addr);
    if (mem) {
        if (flags >= IO_MEM_UNASSIGNED) {
            mem->memory_size = 0;
            mem->start_addr = start_addr;
            mem->phys_offset = 0;
            mem->flags = 0;

            kvm_set_user_memory_region(s, mem);
        } else if (start_addr >= mem->start_addr &&
                   (start_addr + size) <= (mem->start_addr +
                                           mem->memory_size)) {
            KVMSlot slot;
            target_phys_addr_t mem_start;
            ram_addr_t mem_size, mem_offset;

            /* Not splitting */
            if ((phys_offset - (start_addr - mem->start_addr)) ==
                mem->phys_offset)
                return;

            /* unregister whole slot */
            memcpy(&slot, mem, sizeof(slot));
            mem->memory_size = 0;
            kvm_set_user_memory_region(s, mem);

            /* register prefix slot */
            mem_start = slot.start_addr;
            mem_size = start_addr - slot.start_addr;
            mem_offset = slot.phys_offset;
            if (mem_size)
                kvm_set_phys_mem(mem_start, mem_size, mem_offset);

            /* register new slot */
            kvm_set_phys_mem(start_addr, size, phys_offset);

            /* register suffix slot */
            mem_start = start_addr + size;
            mem_offset += mem_size + size;
            mem_size = slot.memory_size - mem_size - size;
            if (mem_size)
                kvm_set_phys_mem(mem_start, mem_size, mem_offset);

            return;
        } else {
            fprintf(stderr, "Registering overlapping slot\n");
            abort();
        }
    }
    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    kvm_set_user_memory_region(s, mem);
    /* FIXME deal with errors */
}

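/* Thin wrappers around ioctl() for the three KVM fd levels (system, VM and
 * vcpu).  Each takes a single optional pointer argument and converts the
 * -1/errno convention into a negative errno return value. */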
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

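/* Query KVM_CAP_SYNC_MMU, which (per the KVM API) indicates that the kernel
 * keeps guest mappings in sync with changes to the user address space.
 * Returns 0 when the capability is absent or the query fails. */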
int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    if (kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_SYNC_MMU) > 0)
        return 1;
#endif

    return 0;
}

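/* Guest debugging support for the gdbstub: software breakpoints are kept in a
 * per-VM list so they can be reference-counted and removed again later. */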
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    TAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !TAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

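/* Push the current debug configuration for one vcpu to the kernel:
 * single-stepping, architecture-specific breakpoint state and an optional
 * trap to reinject. */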
int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_guest_debug dbg;

    dbg.control = 0;
    if (env->singlestep_enabled)
        dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

    kvm_arch_update_guest_debug(env, &dbg);
    dbg.control |= reinject_trap;

    return kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg);
}

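/* Insert a breakpoint on behalf of the gdbstub.  Software breakpoints are
 * reference-counted and planted by the architecture code; hardware
 * breakpoints are delegated to it entirely.  The debug state of every vcpu
 * is refreshed afterwards. */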
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            qemu_free(bp);
            return err;
        }

        TAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                          bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

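/* Remove a breakpoint previously inserted by kvm_insert_breakpoint(),
 * dropping a software breakpoint only when its use count reaches zero, and
 * refresh the debug state of every vcpu. */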
int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        TAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

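/* Remove every software and hardware breakpoint.  Removing a software
 * breakpoint may only succeed from a CPU that still sees the breakpoint
 * instruction, hence the fallback loop over all CPUs. */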
void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    TAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */