// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#define _GNU_SOURCE /* for program_invocation_name */
#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "processor.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_MIN_PFN	2

static int vcpu_mmap_sz(void);

/* Aligns x up to the next multiple of size. Size must be a power of 2. */
static void *align(void *x, size_t size)
{
        size_t mask = size - 1;
        TEST_ASSERT(size != 0 && !(size & (size - 1)),
                    "size not a power of 2: %lu", size);
        return (void *) (((size_t) x + mask) & ~mask);
}
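
/*
 * Worked example (illustrative only): with size = 0x1000, mask = 0xfff;
 * an input of 0x2345 becomes (0x2345 + 0xfff) & ~0xfff = 0x3000, while
 * an already aligned 0x2000 stays 0x2000, since adding the mask never
 * carries past an alignment boundary.
 */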

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap. On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
int kvm_check_cap(long cap)
{
        int ret;
        int kvm_fd;

        kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
        if (kvm_fd < 0)
                exit(KSFT_SKIP);

        ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
        TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
                    "  rc: %i errno: %i", ret, errno);

        close(kvm_fd);

        return ret;
}
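
/*
 * Illustrative usage sketch: gating a test on a capability before
 * relying on it (the capability named here is just an example).
 *
 *	if (!kvm_check_cap(KVM_CAP_DIRTY_LOG_RING)) {
 *		print_skip("KVM_CAP_DIRTY_LOG_RING not available");
 *		exit(KSFT_SKIP);
 *	}
 */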

/* VM Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VM.
 */
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
{
        int ret;

        ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
        TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
                    "  rc: %i errno: %i", ret, errno);

        return ret;
}

/* VCPU Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu_id - VCPU
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VCPU.
 */
int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
                    struct kvm_enable_cap *cap)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
        int r;

        TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);

        r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
        TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
                    "  rc: %i, errno: %i", r, errno);

        return r;
}
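
/*
 * Illustrative usage sketch: enabling a VM-wide capability that takes
 * an argument (the capability chosen is only an example).
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_HALT_POLL,
 *		.args[0] = 0,
 *	};
 *
 *	vm_enable_cap(vm, &cap);
 */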

void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
{
        struct kvm_enable_cap cap = { 0 };

        cap.cap = KVM_CAP_DIRTY_LOG_RING;
        cap.args[0] = ring_size;
        vm_enable_cap(vm, &cap);
        vm->dirty_ring_size = ring_size;
}

static void vm_open(struct kvm_vm *vm, int perm)
{
        vm->kvm_fd = open(KVM_DEV_PATH, perm);
        if (vm->kvm_fd < 0)
                exit(KSFT_SKIP);

        if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
                print_skip("immediate_exit not available");
                exit(KSFT_SKIP);
        }

        vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
        TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
                    "rc: %i errno: %i", vm->fd, errno);
}

const char *vm_guest_mode_string(uint32_t i)
{
        static const char * const strings[] = {
                [VM_MODE_P52V48_4K]  = "PA-bits:52, VA-bits:48, 4K pages",
                [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
                [VM_MODE_P48V48_4K]  = "PA-bits:48, VA-bits:48, 4K pages",
                [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
                [VM_MODE_P40V48_4K]  = "PA-bits:40, VA-bits:48, 4K pages",
                [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
                [VM_MODE_PXXV48_4K]  = "PA-bits:ANY, VA-bits:48, 4K pages",
        };
        _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
                       "Missing new mode strings?");

        TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);

        return strings[i];
}

const struct vm_guest_mode_params vm_guest_mode_params[] = {
        { 52, 48, 0x1000, 12 },
        { 52, 48, 0x10000, 16 },
        { 48, 48, 0x1000, 12 },
        { 48, 48, 0x10000, 16 },
        { 40, 48, 0x1000, 12 },
        { 40, 48, 0x10000, 16 },
        { 0, 0, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
               "Missing new mode params?");

/*
 * VM Create
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   phy_pages - Physical memory pages
 *   perm - permission
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * When phy_pages is non-zero, a memory region of phy_pages physical pages
 * is created and mapped starting at guest physical address 0. The file
 * descriptor to control the created VM is created with the permissions
 * given by perm (e.g. O_RDWR).
 */
struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
        struct kvm_vm *vm;

        pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
                 vm_guest_mode_string(mode), phy_pages, perm);

        vm = calloc(1, sizeof(*vm));
        TEST_ASSERT(vm != NULL, "Insufficient Memory");

        INIT_LIST_HEAD(&vm->vcpus);
        INIT_LIST_HEAD(&vm->userspace_mem_regions);

        vm->mode = mode;
        vm->type = 0;

        vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
        vm->va_bits = vm_guest_mode_params[mode].va_bits;
        vm->page_size = vm_guest_mode_params[mode].page_size;
        vm->page_shift = vm_guest_mode_params[mode].page_shift;

        /* Setup mode specific traits. */
        switch (vm->mode) {
        case VM_MODE_P52V48_4K:
                vm->pgtable_levels = 4;
                break;
        case VM_MODE_P52V48_64K:
                vm->pgtable_levels = 3;
                break;
        case VM_MODE_P48V48_4K:
                vm->pgtable_levels = 4;
                break;
        case VM_MODE_P48V48_64K:
                vm->pgtable_levels = 3;
                break;
        case VM_MODE_P40V48_4K:
                vm->pgtable_levels = 4;
                break;
        case VM_MODE_P40V48_64K:
                vm->pgtable_levels = 3;
                break;
        case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
                kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
                /*
                 * Ignore KVM support for 5-level paging (vm->va_bits == 57),
                 * it doesn't take effect unless CR4.LA57 is set, which it
                 * isn't for this VM_MODE.
                 */
                TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
                            "Linear address width (%d bits) not supported",
                            vm->va_bits);
                pr_debug("Guest physical address width detected: %d\n",
                         vm->pa_bits);
                vm->pgtable_levels = 4;
                vm->va_bits = 48;
#else
                TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
                break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
        }

#ifdef __aarch64__
        if (vm->pa_bits != 40)
                vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

        vm_open(vm, perm);

        /* Limit to VA-bits canonical virtual addresses. */
        vm->vpages_valid = sparsebit_alloc();
        sparsebit_set_num(vm->vpages_valid,
                0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
        sparsebit_set_num(vm->vpages_valid,
                (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
                (1ULL << (vm->va_bits - 1)) >> vm->page_shift);

        /* Limit physical addresses to PA-bits. */
        vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

        /* Allocate and setup memory for guest. */
        vm->vpages_mapped = sparsebit_alloc();
        if (phy_pages != 0)
                vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                            0, 0, phy_pages, 0);

        return vm;
}
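
/*
 * Illustrative note on the two sparsebit_set_num() calls above: with
 * va_bits = 48 and 4K pages, the first call marks the low canonical half
 * (virtual pages for addresses 0 through 2^47 - 1) valid, and the second
 * marks the high canonical half starting at 0xffff800000000000, matching
 * the sign-extended layout the hardware requires.
 */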

struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
                                    uint64_t extra_mem_pages, uint32_t num_percpu_pages,
                                    void *guest_code, uint32_t vcpuids[])
{
        /* The maximum page table size for a memory region will be when the
         * smallest pages are used. Considering each page contains x page
         * table descriptors, the total extra size for page tables (for extra
         * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller
         * than N/x*2.
         */
        uint64_t vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
        uint64_t extra_pg_pages = (extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
        uint64_t pages = DEFAULT_GUEST_PHY_PAGES + extra_mem_pages + vcpu_pages + extra_pg_pages;
        struct kvm_vm *vm;
        int i;

        TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
                    "nr_vcpus = %d too large for host, max-vcpus = %d",
                    nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));

        pages = vm_adjust_num_guest_pages(mode, pages);
        vm = vm_create(mode, pages, O_RDWR);

        kvm_vm_elf_load(vm, program_invocation_name, 0, 0);

#ifdef __x86_64__
        vm_create_irqchip(vm);
#endif

        for (i = 0; i < nr_vcpus; ++i) {
                uint32_t vcpuid = vcpuids ? vcpuids[i] : i;

                vm_vcpu_add_default(vm, vcpuid, guest_code);

#ifdef __x86_64__
                vcpu_set_cpuid(vm, vcpuid, kvm_get_supported_cpuid());
#endif
        }

        return vm;
}
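
/*
 * Worked example for the page table bound used above (illustrative):
 * with 4K pages and 8-byte descriptors, PTES_PER_MIN_PAGE is 512, so
 * mapping N extra pages needs at most N/512 + N/512^2 + ... pages of
 * tables, which the geometric series bounds by 2*N/512; that is exactly
 * what the extra_pg_pages computation reserves.
 */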

struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
                                            uint32_t num_percpu_pages, void *guest_code,
                                            uint32_t vcpuids[])
{
        return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, extra_mem_pages,
                                    num_percpu_pages, guest_code, vcpuids);
}

struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
                                 void *guest_code)
{
        return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
                                            (uint32_t []){ vcpuid });
}

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *   perm - permission
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
        struct userspace_mem_region *region;

        vm_open(vmp, perm);
        if (vmp->has_irqchip)
                vm_create_irqchip(vmp);

        list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
                int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
                TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                            "  rc: %i errno: %i\n"
                            "  slot: %u flags: 0x%x\n"
                            "  guest_phys_addr: 0x%llx size: 0x%llx",
                            ret, errno, region->region.slot,
                            region->region.flags,
                            region->region.guest_phys_addr,
                            region->region.memory_size);
        }
}

void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
        struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
        int ret;

        ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
        TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
                    __func__, strerror(errno));
}

void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
                            uint64_t first_page, uint32_t num_pages)
{
        struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
                                            .first_page = first_page,
                                            .num_pages = num_pages };
        int ret;

        ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
        TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
                    __func__, strerror(errno));
}

uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
{
        return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
}
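
/*
 * Illustrative sketch: fetching and clearing the dirty bitmap for a
 * slot. The caller supplies a bitmap with one bit per page of the
 * region; bitmap_alloc() and the slot/page-count names here are
 * assumptions for the example.
 *
 *	unsigned long *bmap = bitmap_alloc(host_num_pages);
 *
 *	kvm_vm_get_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap);
 *	kvm_vm_clear_dirty_log(vm, TEST_MEM_SLOT_INDEX, bmap, 0,
 *			       host_num_pages);
 */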

/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive. If multiple overlapping regions exist, a pointer to any
 * of the regions is returned. Null is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
        struct userspace_mem_region *region;

        list_for_each_entry(region, &vm->userspace_mem_regions, list) {
                uint64_t existing_start = region->region.guest_phys_addr;
                uint64_t existing_end = region->region.guest_phys_addr
                        + region->region.memory_size - 1;
                if (start <= existing_end && end >= existing_start)
                        return region;
        }

        return NULL;
}

/*
 * KVM Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Public interface to userspace_mem_region_find. Allows tests to look up
 * the memslot data structure for a given range of guest physical memory.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
                                 uint64_t end)
{
        struct userspace_mem_region *region;

        region = userspace_mem_region_find(vm, start, end);
        if (!region)
                return NULL;

        return &region->region;
}

/*
 * VCPU Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to VCPU structure
 *
 * Locates a vcpu structure that describes the VCPU specified by vcpuid and
 * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU
 * for the specified vcpuid.
 */
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu;

        list_for_each_entry(vcpu, &vm->vcpus, list) {
                if (vcpu->id == vcpuid)
                        return vcpu;
        }

        return NULL;
}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpu - VCPU to remove
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Removes a vCPU from a VM and frees its resources.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
{
        int ret;

        if (vcpu->dirty_gfns) {
                ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
                TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
                            "rc: %i errno: %i", ret, errno);
                vcpu->dirty_gfns = NULL;
        }

        ret = munmap(vcpu->state, vcpu_mmap_sz());
        TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
                    "errno: %i", ret, errno);
        ret = close(vcpu->fd);
        TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
                    "errno: %i", ret, errno);

        list_del(&vcpu->list);
        free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
        struct vcpu *vcpu, *tmp;
        int ret;

        list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
                vm_vcpu_rm(vmp, vcpu);

        ret = close(vmp->fd);
        TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
                    "  vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);

        ret = close(vmp->kvm_fd);
        TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
                    "  vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
                                   struct userspace_mem_region *region)
{
        int ret;

        list_del(&region->list);

        region->region.memory_size = 0;
        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
        TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
                    "rc: %i errno: %i", ret, errno);

        sparsebit_free(&region->unused_phy_pages);
        ret = munmap(region->mmap_start, region->mmap_size);
        TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);

        free(region);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
        struct userspace_mem_region *region, *tmp;

        if (vmp == NULL)
                return;

        /* Free userspace_mem_regions. */
        list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
                __vm_mem_region_delete(vmp, region);

        /* Free sparsebit arrays. */
        sparsebit_free(&vmp->vpages_valid);
        sparsebit_free(&vmp->vpages_mapped);

        kvm_vm_release(vmp);

        /* Free the structure describing the VM. */
        free(vmp);
}

/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Input/Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest virtual bytes starting at gva. Returns
 *   a value < 0, if bytes at hva are less than those at gva.
 *   Otherwise a value > 0 is returned.
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
        size_t amt;

        /*
         * Compare a batch of bytes until either a mismatch is found
         * or all the bytes have been compared.
         */
        for (uintptr_t offset = 0; offset < len; offset += amt) {
                uintptr_t ptr1 = (uintptr_t)hva + offset;

                /*
                 * Determine host address for guest virtual address
                 * at offset.
                 */
                uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

                /*
                 * Determine amount to compare on this pass.
                 * Don't allow the comparison to cross a page boundary.
                 */
                amt = len - offset;
                if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
                        amt = vm->page_size - (ptr1 % vm->page_size);
                if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
                        amt = vm->page_size - (ptr2 % vm->page_size);

                assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
                assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

                /*
                 * Perform the comparison. If there is a difference
                 * return that result to the caller, otherwise need
                 * to continue on looking for a mismatch.
                 */
                int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
                if (ret != 0)
                        return ret;
        }

        /*
         * No mismatch found. Let the caller know the two memory
         * areas are equal.
         */
        return 0;
}

/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region (e.g. VM_MEM_SRC_ANONYMOUS)
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr. The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
        enum vm_mem_backing_src_type src_type,
        uint64_t guest_paddr, uint32_t slot, uint64_t npages,
        uint32_t flags)
{
        int ret;
        struct userspace_mem_region *region;
        size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
        size_t alignment;

        TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
                    "Number of guest pages is not compatible with the host. "
                    "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

        TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
                    "address not on a page boundary.\n"
                    "  guest_paddr: 0x%lx vm->page_size: 0x%x",
                    guest_paddr, vm->page_size);
        TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
                    <= vm->max_gfn, "Physical range beyond maximum "
                    "supported physical address,\n"
                    "  guest_paddr: 0x%lx npages: 0x%lx\n"
                    "  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                    guest_paddr, npages, vm->max_gfn, vm->page_size);

        /*
         * Confirm a mem region with an overlapping address doesn't
         * already exist.
         */
        region = (struct userspace_mem_region *) userspace_mem_region_find(
                vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
        if (region != NULL)
                TEST_FAIL("overlapping userspace_mem_region already "
                          "exists\n"
                          "  requested guest_paddr: 0x%lx npages: 0x%lx "
                          "page_size: 0x%x\n"
                          "  existing guest_paddr: 0x%lx size: 0x%lx",
                          guest_paddr, npages, vm->page_size,
                          (uint64_t) region->region.guest_phys_addr,
                          (uint64_t) region->region.memory_size);

        /* Confirm no region with the requested slot already exists. */
        list_for_each_entry(region, &vm->userspace_mem_regions, list) {
                if (region->region.slot != slot)
                        continue;

                TEST_FAIL("A mem region with the requested slot "
                          "already exists.\n"
                          "  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
                          "  existing slot: %u paddr: 0x%lx size: 0x%lx",
                          slot, guest_paddr, npages,
                          region->region.slot,
                          (uint64_t) region->region.guest_phys_addr,
                          (uint64_t) region->region.memory_size);
        }

        /* Allocate and initialize new mem region structure. */
        region = calloc(1, sizeof(*region));
        TEST_ASSERT(region != NULL, "Insufficient Memory");
        region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
        /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */
        alignment = 0x100000;
#else
        alignment = 1;
#endif

        if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
                alignment = max(backing_src_pagesz, alignment);

        /* Add enough memory to align up if necessary */
        if (alignment > 1)
                region->mmap_size += alignment;

        region->mmap_start = mmap(NULL, region->mmap_size,
                                  PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS
                                  | vm_mem_backing_src_alias(src_type)->flag,
                                  -1, 0);
        TEST_ASSERT(region->mmap_start != MAP_FAILED,
                    "test_malloc failed, mmap_start: %p errno: %i",
                    region->mmap_start, errno);

        /* Align host address */
        region->host_mem = align(region->mmap_start, alignment);

        /* As needed perform madvise */
        if ((src_type == VM_MEM_SRC_ANONYMOUS ||
             src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
                ret = madvise(region->host_mem, npages * vm->page_size,
                              src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
                TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
                            region->host_mem, npages * vm->page_size,
                            vm_mem_backing_src_alias(src_type)->name);
        }

        region->unused_phy_pages = sparsebit_alloc();
        sparsebit_set_num(region->unused_phy_pages,
                guest_paddr >> vm->page_shift, npages);
        region->region.slot = slot;
        region->region.flags = flags;
        region->region.guest_phys_addr = guest_paddr;
        region->region.memory_size = npages * vm->page_size;
        region->region.userspace_addr = (uintptr_t) region->host_mem;
        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
        TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                    "  rc: %i errno: %i\n"
                    "  slot: %u flags: 0x%x\n"
                    "  guest_phys_addr: 0x%lx size: 0x%lx",
                    ret, errno, slot, flags,
                    guest_paddr, (uint64_t) region->region.memory_size);

        /* Add to linked-list of memory regions. */
        list_add(&region->list, &vm->userspace_mem_regions);
}
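
/*
 * Illustrative usage sketch: adding a 64 MiB test region in slot 1 at
 * 4 GiB guest physical, assuming a 4K guest page size (all values are
 * arbitrary examples):
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x100000000ULL, 1, 0x4000,
 *				    KVM_MEM_LOG_DIRTY_PAGES);
 *
 * where 0x4000 pages at 4K per page gives the 64 MiB region size.
 */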

/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to memory region structure that describes the memory region
 *   using kvm memory slot ID given by memslot. TEST_ASSERT failure
 *   on error (e.g. currently no memory region using memslot as a KVM
 *   memory slot ID).
 */
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
        struct userspace_mem_region *region;

        list_for_each_entry(region, &vm->userspace_mem_regions, list) {
                if (region->region.slot == memslot)
                        return region;
        }

        fprintf(stderr, "No mem region with the requested slot found,\n"
                "  requested slot: %u\n", memslot);
        fputs("---- vm dump ----\n", stderr);
        vm_dump(stderr, vm, 2);
        TEST_FAIL("Mem region not found");
        return NULL;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to set flags for
 *   flags - Flags to set (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot,
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
        int ret;
        struct userspace_mem_region *region;

        region = memslot2region(vm, slot);

        region->region.flags = flags;

        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

        TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
                    "  rc: %i errno: %i slot: %u flags: 0x%x",
                    ret, errno, slot, flags);
}

/*
 * VM Memory Region Move
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to move
 *   new_gpa - Starting guest physical address
 *
 * Output Args: None
 *
 * Return: None
 *
 * Change the gpa of a memory region.
 */
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
        struct userspace_mem_region *region;
        int ret;

        region = memslot2region(vm, slot);

        region->region.guest_phys_addr = new_gpa;

        ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

        TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
                    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
                    ret, errno, slot, new_gpa);
}

/*
 * VM Memory Region Delete
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - Slot of the memory region to delete
 *
 * Output Args: None
 *
 * Return: None
 *
 * Delete a memory region.
 */
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
        __vm_mem_region_delete(vm, memslot2region(vm, slot));
}

/*
 * VCPU mmap Size
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return:
 *   Size of VCPU state
 *
 * Returns the size of the structure pointed to by the return value
 * of vcpu_state().
 */
static int vcpu_mmap_sz(void)
{
        int dev_fd, ret;

        dev_fd = open(KVM_DEV_PATH, O_RDONLY);
        if (dev_fd < 0)
                exit(KSFT_SKIP);

        ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
        TEST_ASSERT(ret >= sizeof(struct kvm_run),
                    "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
                    __func__, ret, errno);

        close(dev_fd);

        return ret;
}

/*
 * VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
 * No additional VCPU setup is done.
 */
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu;

        /* Confirm a vcpu with the specified id doesn't already exist. */
        vcpu = vcpu_find(vm, vcpuid);
        if (vcpu != NULL)
                TEST_FAIL("vcpu with the specified id "
                          "already exists,\n"
                          "  requested vcpuid: %u\n"
                          "  existing vcpuid: %u state: %p",
                          vcpuid, vcpu->id, vcpu->state);

        /* Allocate and initialize new vcpu structure. */
        vcpu = calloc(1, sizeof(*vcpu));
        TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
        vcpu->id = vcpuid;
        vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
        TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
                    vcpu->fd, errno);

        TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
                    "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
                    vcpu_mmap_sz(), sizeof(*vcpu->state));
        vcpu->state = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
                PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
        TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
                    "vcpu id: %u errno: %i", vcpuid, errno);

        /* Add to linked-list of VCPUs. */
        list_add(&vcpu->list, &vm->vcpus);
}

/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes. TEST_ASSERT failure if no area of at least
 *   size sz is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that has at least sz unallocated bytes. A
 * TEST_ASSERT failure occurs for invalid input or no area of at least
 * sz unallocated bytes >= vaddr_min is available.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
                                      vm_vaddr_t vaddr_min)
{
        uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

        /* Determine lowest permitted virtual page index. */
        uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
        if ((pgidx_start * vm->page_size) < vaddr_min)
                goto no_va_found;

        /* Loop over section with enough valid virtual page indexes. */
        if (!sparsebit_is_set_num(vm->vpages_valid,
                pgidx_start, pages))
                pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
                        pgidx_start, pages);
        do {
                /*
                 * Are there enough unused virtual pages available at
                 * the currently proposed starting virtual page index?
                 * If not, adjust proposed starting index to next
                 * possible.
                 */
                if (sparsebit_is_clear_num(vm->vpages_mapped,
                        pgidx_start, pages))
                        goto va_found;
                pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
                        pgidx_start, pages);
                if (pgidx_start == 0)
                        goto no_va_found;

                /*
                 * If needed, adjust proposed starting virtual address,
                 * to next range of valid virtual addresses.
                 */
                if (!sparsebit_is_set_num(vm->vpages_valid,
                        pgidx_start, pages)) {
                        pgidx_start = sparsebit_next_set_num(
                                vm->vpages_valid, pgidx_start, pages);
                        if (pgidx_start == 0)
                                goto no_va_found;
                }
        } while (pgidx_start != 0);

no_va_found:
        TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

        /* NOT REACHED */
        return -1;

va_found:
        TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
                pgidx_start, pages),
                "Unexpected, invalid virtual page index range,\n"
                "  pgidx_start: 0x%lx\n"
                "  pages: 0x%lx",
                pgidx_start, pages);
        TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
                pgidx_start, pages),
                "Unexpected, pages already mapped,\n"
                "  pgidx_start: 0x%lx\n"
                "  pages: 0x%lx",
                pgidx_start, pages);

        return pgidx_start * vm->page_size;
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *   data_memslot - Memory region slot for data pages
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the vm
 * given by vm. The allocated bytes are mapped to a virtual address >=
 * the address given by vaddr_min. Note that each allocation uses a
 * unique set of pages, with the minimum real allocation being at least
 * a page.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
                          uint32_t data_memslot, uint32_t pgd_memslot)
{
        uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

        virt_pgd_alloc(vm, pgd_memslot);

        /*
         * Find an unused range of virtual page addresses of at least
         * pages in length.
         */
        vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

        /* Map the virtual pages. */
        for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
             pages--, vaddr += vm->page_size) {
                vm_paddr_t paddr;

                paddr = vm_phy_page_alloc(vm,
                        KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);

                virt_pg_map(vm, vaddr, paddr, pgd_memslot);

                sparsebit_set(vm->vpages_mapped,
                        vaddr >> vm->page_shift);
        }

        return vaddr_start;
}
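
/*
 * Illustrative usage sketch: allocating one 4K page of guest virtual
 * memory for test data, with both data and page tables in memslot 0
 * (0x1000 and the memslot choices are example values):
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, 0x1000, KVM_UTIL_MIN_VADDR,
 *					0, 0);
 *
 *	memset(addr_gva2hva(vm, gva), 0xaa, 0x1000);
 */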

/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   npages - The number of pages to map
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by @vm, creates a virtual translation for
 * @npages starting at @vaddr to the page range starting at @paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
              unsigned int npages, uint32_t pgd_memslot)
{
        size_t page_size = vm->page_size;
        size_t size = npages * page_size;

        TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
        TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

        while (npages--) {
                virt_pg_map(vm, vaddr, paddr, pgd_memslot);
                vaddr += page_size;
                paddr += page_size;
        }
}

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm. When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
        struct userspace_mem_region *region;

        list_for_each_entry(region, &vm->userspace_mem_regions, list) {
                if ((gpa >= region->region.guest_phys_addr)
                        && (gpa <= (region->region.guest_phys_addr
                                + region->region.memory_size - 1)))
                        return (void *) ((uintptr_t) region->host_mem
                                + (gpa - region->region.guest_phys_addr));
        }

        TEST_FAIL("No vm physical memory at 0x%lx", gpa);
        return NULL;
}

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm. When found, the equivalent
 * VM physical address is returned. A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
        struct userspace_mem_region *region;

        list_for_each_entry(region, &vm->userspace_mem_regions, list) {
                if ((hva >= region->host_mem)
                        && (hva <= (region->host_mem
                                + region->region.memory_size - 1)))
                        return (vm_paddr_t) ((uintptr_t)
                                region->region.guest_phys_addr
                                + (hva - (uintptr_t) region->host_mem));
        }

        TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
        return -1;
}

/*
 * VM Create IRQ Chip
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: None
 *
 * Creates an interrupt controller chip for the VM specified by vm.
 */
void vm_create_irqchip(struct kvm_vm *vm)
{
        int ret;

        ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
        TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
                    "rc: %i errno: %i", ret, errno);

        vm->has_irqchip = true;
}

/*
 * VM VCPU State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to structure that describes the state of the VCPU.
 *
 * Locates and returns a pointer to a structure that describes the
 * state of the VCPU with the given vcpuid.
 */
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        return vcpu->state;
}

/*
 * VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Switch to executing the code for the VCPU given by vcpuid, within the VM
 * given by vm.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
        int ret = _vcpu_run(vm, vcpuid);
        TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
                    "rc: %i errno: %i", ret, errno);
}

int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int rc;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
        do {
                rc = ioctl(vcpu->fd, KVM_RUN, NULL);
        } while (rc == -1 && errno == EINTR);

        assert_on_unhandled_exception(vm, vcpuid);

        return rc;
}
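
/*
 * Illustrative run-loop sketch built on vcpu_run()/vcpu_state()
 * (VCPU_ID is a hypothetical test-defined constant):
 *
 *	for (;;) {
 *		vcpu_run(vm, VCPU_ID);
 *		if (vcpu_state(vm, VCPU_ID)->exit_reason != KVM_EXIT_IO)
 *			break;
 *		... handle the guest's port IO / ucall here ...
 *	}
 */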

int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        return vcpu->fd;
}

void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        vcpu->state->immediate_exit = 1;
        ret = ioctl(vcpu->fd, KVM_RUN, NULL);
        vcpu->state->immediate_exit = 0;

        TEST_ASSERT(ret == -1 && errno == EINTR,
                    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
                    ret, errno);
}

void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
                          struct kvm_guest_debug *debug)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);

        TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
}
eabe7881
AJ
1342/*
1343 * VM VCPU Set MP State
783e9e51
PB
1344 *
1345 * Input Args:
1346 * vm - Virtual Machine
1347 * vcpuid - VCPU ID
1348 * mp_state - mp_state to be set
1349 *
1350 * Output Args: None
1351 *
1352 * Return: None
1353 *
1354 * Sets the MP state of the VCPU given by vcpuid, to the state given
1355 * by mp_state.
1356 */
1357void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
eabe7881 1358 struct kvm_mp_state *mp_state)
783e9e51
PB
1359{
1360 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1361 int ret;
1362
1363 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1364
1365 ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
1366 TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
1367 "rc: %i errno: %i", ret, errno);
1368}
1369
fd02029a
AJ
1370/*
1371 * VM VCPU Get Reg List
1372 *
1373 * Input Args:
1374 * vm - Virtual Machine
1375 * vcpuid - VCPU ID
1376 *
1377 * Output Args:
1378 * None
1379 *
1380 * Return:
1381 * A pointer to an allocated struct kvm_reg_list
1382 *
1383 * Get the list of guest registers which are supported for
1384 * KVM_GET_ONE_REG/KVM_SET_ONE_REG calls
1385 */
1386struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
1387{
1388 struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
1389 int ret;
1390
1391 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
1392 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
1393 reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
1394 reg_list->n = reg_list_n.n;
1395 vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
1396 return reg_list;
1397}
1398
eabe7881
AJ
1399/*
1400 * VM VCPU Regs Get
783e9e51
PB
1401 *
1402 * Input Args:
1403 * vm - Virtual Machine
1404 * vcpuid - VCPU ID
1405 *
1406 * Output Args:
1407 * regs - current state of VCPU regs
1408 *
1409 * Return: None
1410 *
1411 * Obtains the current register state for the VCPU specified by vcpuid
1412 * and stores it at the location given by regs.
1413 */
eabe7881 1414void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
783e9e51
PB
1415{
1416 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1417 int ret;
1418
1419 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1420
783e9e51
PB
1421 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
1422 TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
1423 ret, errno);
1424}
1425
eabe7881
AJ
1426/*
1427 * VM VCPU Regs Set
783e9e51
PB
1428 *
1429 * Input Args:
1430 * vm - Virtual Machine
1431 * vcpuid - VCPU ID
1432 * regs - Values to set VCPU regs to
1433 *
1434 * Output Args: None
1435 *
1436 * Return: None
1437 *
1438 * Sets the regs of the VCPU specified by vcpuid to the values
1439 * given by regs.
1440 */
eabe7881 1441void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
783e9e51
PB
1442{
1443 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1444 int ret;
1445
1446 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1447
783e9e51
PB
1448 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
1449 TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
1450 ret, errno);
1451}

#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
                     struct kvm_vcpu_events *events)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
        TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS failed, rc: %i errno: %i",
                    ret, errno);
}

void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
                     struct kvm_vcpu_events *events)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
        TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS failed, rc: %i errno: %i",
                    ret, errno);
}
#endif

#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
                           struct kvm_nested_state *state)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
        TEST_ASSERT(ret == 0,
                    "KVM_GET_NESTED_STATE failed, ret: %i errno: %i",
                    ret, errno);
}

int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
                          struct kvm_nested_state *state, bool ignore_error)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
        if (!ignore_error) {
                TEST_ASSERT(ret == 0,
                            "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
                            ret, errno);
        }

        return ret;
}
#endif

/*
 * VM VCPU System Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   sregs - current state of VCPU system regs
 *
 * Return: None
 *
 * Obtains the current system register state for the VCPU specified by
 * vcpuid and stores it at the location given by sregs.
 */
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
        TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
                    ret, errno);
}

/*
 * VM VCPU System Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   sregs - Values to set VCPU system regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the system regs of the VCPU specified by vcpuid to the values
 * given by sregs.
 */
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
        int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
        TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
                    "rc: %i errno: %i", ret, errno);
}

int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}

void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
        int ret;

        ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
        TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
                    ret, errno, strerror(errno));
}

void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
        int ret;

        ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
        TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
                    ret, errno, strerror(errno));
}

void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
        int ret;

        ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
        TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
                    ret, errno, strerror(errno));
}

void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
        int ret;

        ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
        TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
                    ret, errno, strerror(errno));
}

/*
 * VCPU Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VCPU fd.
 */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
                unsigned long cmd, void *arg)
{
        int ret;

        ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
        TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
                    cmd, ret, errno, strerror(errno));
}

int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
                unsigned long cmd, void *arg)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int ret;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        ret = ioctl(vcpu->fd, cmd, arg);

        return ret;
}

void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu;
        uint32_t size = vm->dirty_ring_size;

        TEST_ASSERT(size > 0, "Should enable dirty ring first");

        vcpu = vcpu_find(vm, vcpuid);

        TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);

        if (!vcpu->dirty_gfns) {
                void *addr;

                addr = mmap(NULL, size, PROT_READ,
                            MAP_PRIVATE, vcpu->fd,
                            vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
                TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");

                addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
                            MAP_PRIVATE, vcpu->fd,
                            vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
                TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");

                addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
                            MAP_SHARED, vcpu->fd,
                            vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
                TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");

                vcpu->dirty_gfns = addr;
                vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
        }

        return vcpu->dirty_gfns;
}
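
/*
 * Note on the failing mmap() calls above: they intentionally verify
 * that KVM rejects private and executable mappings of the dirty ring;
 * only the final shared read-write mapping is expected to succeed. A
 * consumer then polls the returned entries, e.g. (illustrative sketch,
 * VCPU_ID hypothetical):
 *
 *	struct kvm_dirty_gfn *gfns = vcpu_map_dirty_ring(vm, VCPU_ID);
 *	... read gfns[i].slot and gfns[i].offset as entries become
 *	    ready, then harvest them and call kvm_vm_reset_dirty_ring() ...
 */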

/*
 * VM Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VM fd.
 */
void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
        int ret;

        ret = _vm_ioctl(vm, cmd, arg);
        TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
                    cmd, ret, errno, strerror(errno));
}

int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
        return ioctl(vm->fd, cmd, arg);
}

/*
 * KVM system ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a KVM fd.
 */
void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
        int ret;

        ret = ioctl(vm->kvm_fd, cmd, arg);
        TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
                    cmd, ret, errno, strerror(errno));
}

int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
        return ioctl(vm->kvm_fd, cmd, arg);
}

/*
 * Device Ioctl
 */

int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
        struct kvm_device_attr attribute = {
                .group = group,
                .attr = attr,
                .flags = 0,
        };

        return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
        int ret = _kvm_device_check_attr(dev_fd, group, attr);

        TEST_ASSERT(ret >= 0, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
        return ret;
}

int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd)
{
        struct kvm_create_device create_dev;
        int ret;

        create_dev.type = type;
        create_dev.fd = -1;
        create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
        ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev);
        *fd = create_dev.fd;
        return ret;
}

int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test)
{
        int fd, ret;

        ret = _kvm_create_device(vm, type, test, &fd);

        if (!test) {
                TEST_ASSERT(ret >= 0,
                            "KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno);
                return fd;
        }
        return ret;
}

int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
                       void *val, bool write)
{
        struct kvm_device_attr kvmattr = {
                .group = group,
                .attr = attr,
                .flags = 0,
                .addr = (uintptr_t)val,
        };
        int ret;

        ret = ioctl(dev_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                    &kvmattr);
        return ret;
}

int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
                      void *val, bool write)
{
        int ret = _kvm_device_access(dev_fd, group, attr, val, write);

        TEST_ASSERT(ret >= 0, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
        return ret;
}
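
/*
 * Illustrative usage sketch (the ARM vGIC is only an example device
 * type; the group/attr constants depend on the device being driven):
 *
 *	int gic_fd = kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3,
 *				       false);
 *
 *	kvm_device_access(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
 *			  KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);
 */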

/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        struct userspace_mem_region *region;
        struct vcpu *vcpu;

        fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
        fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
        fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
        fprintf(stream, "%*sMem Regions:\n", indent, "");
        list_for_each_entry(region, &vm->userspace_mem_regions, list) {
                fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
                        "host_virt: %p\n", indent + 2, "",
                        (uint64_t) region->region.guest_phys_addr,
                        (uint64_t) region->region.memory_size,
                        region->host_mem);
                fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
                sparsebit_dump(stream, region->unused_phy_pages, 0);
        }
        fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
        sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
        fprintf(stream, "%*spgd_created: %u\n", indent, "",
                vm->pgd_created);
        if (vm->pgd_created) {
                fprintf(stream, "%*sVirtual Translation Tables:\n",
                        indent + 2, "");
                virt_dump(stream, vm, indent + 4);
        }
        fprintf(stream, "%*sVCPUs:\n", indent, "");
        list_for_each_entry(vcpu, &vm->vcpus, list)
                vcpu_dump(stream, vm, vcpu->id, indent + 2);
}
1854
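/*
 * Illustrative sketch, not part of the upstream file: tests usually
 * call vm_dump() to annotate a failure, as vm_phy_pages_alloc() below
 * does before aborting:
 *
 *	fputs("---- vm dump ----\n", stderr);
 *	vm_dump(stderr, vm, 2);
 */
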
/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
	{KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
	{KVM_EXIT_X86_RDMSR, "RDMSR"},
	{KVM_EXIT_X86_WRMSR, "WRMSR"},
	{KVM_EXIT_XEN, "XEN"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason.  If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}

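/*
 * Illustrative sketch, not part of the upstream file: exit_reason_str()
 * mostly shows up in assertion messages after KVM_RUN, e.g. when a test
 * expects the guest to exit with an I/O access:
 */
static inline void example_expect_io_exit(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct kvm_run *run = vcpu_state(vm, vcpuid);

	vcpu_run(vm, vcpuid);
	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason, exit_reason_str(run->exit_reason));
}
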
/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min.  If found, the pages are marked as in use
 * and their base address is returned.  The test aborts, after dumping
 * the VM state, if a contiguous range of num free pages cannot be found
 * at or above paddr_min.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}

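/*
 * Illustrative sketch, not part of the upstream file: the scan above is
 * first-fit; e.g. reserving four contiguous pages above 1 MiB in
 * memslot 0 looks like:
 *
 *	vm_paddr_t base = vm_phy_pages_alloc(vm, 4, 0x100000, 0);
 */
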
vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}

/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}

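/*
 * Illustrative sketch, not part of the upstream file: host-side test
 * code commonly patches guest memory through this translation.
 * Assuming gva is a guest virtual address the test has already mapped:
 *
 *	*(uint32_t *)addr_gva2hva(vm, gva) = 1;
 */
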
/*
 * Is Unrestricted Guest
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: True if the kvm_intel module's unrestricted_guest parameter
 *   is 'Y', otherwise false.
 *
 * Check if the unrestricted guest flag is enabled.
 */
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
	char val = 'N';
	size_t count;
	FILE *f;

	if (vm == NULL) {
		/* Ensure that the KVM vendor-specific module is loaded. */
		f = fopen(KVM_DEV_PATH, "r");
		TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
			    errno);
		fclose(f);
	}

	f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
	if (f) {
		count = fread(&val, sizeof(char), 1, f);
		TEST_ASSERT(count == 1, "Unable to read from param file.");
		fclose(f);
	}

	return val == 'Y';
}

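/*
 * Illustrative sketch, not part of the upstream file: x86 tests that
 * depend on unrestricted guest support typically skip when it is
 * unavailable:
 *
 *	if (!vm_is_unrestricted_guest(NULL))
 *		exit(KSFT_SKIP);
 */
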
unsigned int vm_get_page_size(struct kvm_vm *vm)
{
	return vm->page_size;
}

unsigned int vm_get_page_shift(struct kvm_vm *vm)
{
	return vm->page_shift;
}

unsigned int vm_get_max_gfn(struct kvm_vm *vm)
{
	return vm->max_gfn;
}

int vm_get_fd(struct kvm_vm *vm)
{
	return vm->fd;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n = 1 << (new_page_shift - page_shift);

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	return num_pages / n + !!(ceil && num_pages % n);
}

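/*
 * Worked example, not part of the upstream file: converting 5 guest
 * pages of 64 KiB (page_shift 16) to 4 KiB host pages (page_shift 12)
 * multiplies by 1 << (16 - 12) = 16, giving 80 pages.  The reverse
 * direction divides by 16, with ceil == true rounding any remainder
 * up: 80 host pages -> 5 guest pages, but 81 -> 6.
 */
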
static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;
	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}
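
/*
 * Illustrative sketch, not part of the upstream file: sizing a memslot
 * from a byte count rather than a page count, e.g. 3 MiB of guest
 * memory under the chosen mode's page size:
 *
 *	unsigned int npages = vm_calc_num_guest_pages(mode, 3 << 20);
 */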