]>
Commit | Line | Data |
---|---|---|
783e9e51 PB |
1 | /* |
2 | * tools/testing/selftests/kvm/lib/kvm_util.c | |
3 | * | |
4 | * Copyright (C) 2018, Google LLC. | |
5 | * | |
6 | * This work is licensed under the terms of the GNU GPL, version 2. | |
7 | */ | |
8 | ||
9 | #include "test_util.h" | |
10 | #include "kvm_util.h" | |
11 | #include "kvm_util_internal.h" | |
12 | ||
13 | #include <assert.h> | |
14 | #include <sys/mman.h> | |
15 | #include <sys/types.h> | |
16 | #include <sys/stat.h> | |
bc8eb2fe | 17 | #include <linux/kernel.h> |
783e9e51 | 18 | |
783e9e51 | 19 | #define KVM_UTIL_PGS_PER_HUGEPG 512 |
81d1cca0 | 20 | #define KVM_UTIL_MIN_PFN 2 |
783e9e51 PB |
21 | |
/*
 * Aligns x up to the next multiple of size. Size must be a power of 2.
 *
 * Asserts (TEST_ASSERT) if size is zero or not a power of 2.
 */
static void *align(void *x, size_t size)
{
	size_t mask = size - 1;
	/* %zu is the correct conversion for size_t (fix: was %lu). */
	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %zu", size);
	/* uintptr_t is the portable type for pointer<->integer round-trips. */
	return (void *) (((uintptr_t) x + mask) & ~mask);
}
30 | ||
eabe7881 AJ |
31 | /* |
32 | * Capability | |
783e9e51 PB |
33 | * |
34 | * Input Args: | |
35 | * cap - Capability | |
36 | * | |
37 | * Output Args: None | |
38 | * | |
39 | * Return: | |
40 | * On success, the Value corresponding to the capability (KVM_CAP_*) | |
41 | * specified by the value of cap. On failure a TEST_ASSERT failure | |
42 | * is produced. | |
43 | * | |
44 | * Looks up and returns the value corresponding to the capability | |
45 | * (KVM_CAP_*) given by cap. | |
46 | */ | |
47 | int kvm_check_cap(long cap) | |
48 | { | |
49 | int ret; | |
50 | int kvm_fd; | |
51 | ||
52 | kvm_fd = open(KVM_DEV_PATH, O_RDONLY); | |
bcb2b94a PB |
53 | if (kvm_fd < 0) |
54 | exit(KSFT_SKIP); | |
783e9e51 PB |
55 | |
56 | ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap); | |
57 | TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n" | |
58 | " rc: %i errno: %i", ret, errno); | |
59 | ||
60 | close(kvm_fd); | |
61 | ||
62 | return ret; | |
63 | } | |
64 | ||
8b56ee91 DS |
65 | /* VM Enable Capability |
66 | * | |
67 | * Input Args: | |
68 | * vm - Virtual Machine | |
69 | * cap - Capability | |
70 | * | |
71 | * Output Args: None | |
72 | * | |
73 | * Return: On success, 0. On failure a TEST_ASSERT failure is produced. | |
74 | * | |
75 | * Enables a capability (KVM_CAP_*) on the VM. | |
76 | */ | |
77 | int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap) | |
78 | { | |
79 | int ret; | |
80 | ||
81 | ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap); | |
82 | TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n" | |
83 | " rc: %i errno: %i", ret, errno); | |
84 | ||
85 | return ret; | |
86 | } | |
87 | ||
8cee5816 | 88 | static void vm_open(struct kvm_vm *vm, int perm, unsigned long type) |
fa3899ad PB |
89 | { |
90 | vm->kvm_fd = open(KVM_DEV_PATH, perm); | |
91 | if (vm->kvm_fd < 0) | |
92 | exit(KSFT_SKIP); | |
93 | ||
8cee5816 | 94 | vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type); |
fa3899ad PB |
95 | TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, " |
96 | "rc: %i errno: %i", vm->fd, errno); | |
97 | } | |
98 | ||
81d1cca0 AJ |
99 | const char * const vm_guest_mode_string[] = { |
100 | "PA-bits:52, VA-bits:48, 4K pages", | |
101 | "PA-bits:52, VA-bits:48, 64K pages", | |
cdbd2428 AJ |
102 | "PA-bits:48, VA-bits:48, 4K pages", |
103 | "PA-bits:48, VA-bits:48, 64K pages", | |
e28934e6 AJ |
104 | "PA-bits:40, VA-bits:48, 4K pages", |
105 | "PA-bits:40, VA-bits:48, 64K pages", | |
81d1cca0 | 106 | }; |
cdbd2428 AJ |
107 | _Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES, |
108 | "Missing new mode strings?"); | |
81d1cca0 | 109 | |
eabe7881 AJ |
110 | /* |
111 | * VM Create | |
783e9e51 PB |
112 | * |
113 | * Input Args: | |
81d1cca0 | 114 | * mode - VM Mode (e.g. VM_MODE_P52V48_4K) |
783e9e51 PB |
115 | * phy_pages - Physical memory pages |
116 | * perm - permission | |
117 | * | |
118 | * Output Args: None | |
119 | * | |
120 | * Return: | |
121 | * Pointer to opaque structure that describes the created VM. | |
122 | * | |
81d1cca0 | 123 | * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K). |
783e9e51 PB |
124 | * When phy_pages is non-zero, a memory region of phy_pages physical pages |
125 | * is created and mapped starting at guest physical address 0. The file | |
126 | * descriptor to control the created VM is created with the permissions | |
127 | * given by perm (e.g. O_RDWR). | |
128 | */ | |
8cee5816 AJ |
129 | struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, |
130 | int perm, unsigned long type) | |
783e9e51 PB |
131 | { |
132 | struct kvm_vm *vm; | |
133 | int kvm_fd; | |
134 | ||
783e9e51 | 135 | vm = calloc(1, sizeof(*vm)); |
717da97e | 136 | TEST_ASSERT(vm != NULL, "Insufficient Memory"); |
783e9e51 PB |
137 | |
138 | vm->mode = mode; | |
8cee5816 AJ |
139 | vm->type = type; |
140 | vm_open(vm, perm, type); | |
783e9e51 PB |
141 | |
142 | /* Setup mode specific traits. */ | |
143 | switch (vm->mode) { | |
81d1cca0 | 144 | case VM_MODE_P52V48_4K: |
7a6629ef | 145 | vm->pgtable_levels = 4; |
cdbd2428 AJ |
146 | vm->pa_bits = 52; |
147 | vm->va_bits = 48; | |
783e9e51 PB |
148 | vm->page_size = 0x1000; |
149 | vm->page_shift = 12; | |
783e9e51 | 150 | break; |
81d1cca0 AJ |
151 | case VM_MODE_P52V48_64K: |
152 | vm->pgtable_levels = 3; | |
153 | vm->pa_bits = 52; | |
cdbd2428 | 154 | vm->va_bits = 48; |
81d1cca0 AJ |
155 | vm->page_size = 0x10000; |
156 | vm->page_shift = 16; | |
cdbd2428 AJ |
157 | break; |
158 | case VM_MODE_P48V48_4K: | |
159 | vm->pgtable_levels = 4; | |
160 | vm->pa_bits = 48; | |
81d1cca0 | 161 | vm->va_bits = 48; |
cdbd2428 AJ |
162 | vm->page_size = 0x1000; |
163 | vm->page_shift = 12; | |
164 | break; | |
165 | case VM_MODE_P48V48_64K: | |
166 | vm->pgtable_levels = 3; | |
167 | vm->pa_bits = 48; | |
168 | vm->va_bits = 48; | |
169 | vm->page_size = 0x10000; | |
170 | vm->page_shift = 16; | |
81d1cca0 | 171 | break; |
e28934e6 AJ |
172 | case VM_MODE_P40V48_4K: |
173 | vm->pgtable_levels = 4; | |
174 | vm->pa_bits = 40; | |
175 | vm->va_bits = 48; | |
176 | vm->page_size = 0x1000; | |
177 | vm->page_shift = 12; | |
178 | break; | |
179 | case VM_MODE_P40V48_64K: | |
180 | vm->pgtable_levels = 3; | |
181 | vm->pa_bits = 40; | |
182 | vm->va_bits = 48; | |
183 | vm->page_size = 0x10000; | |
184 | vm->page_shift = 16; | |
185 | break; | |
783e9e51 PB |
186 | default: |
187 | TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode); | |
188 | } | |
189 | ||
81d1cca0 AJ |
190 | /* Limit to VA-bit canonical virtual addresses. */ |
191 | vm->vpages_valid = sparsebit_alloc(); | |
192 | sparsebit_set_num(vm->vpages_valid, | |
193 | 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); | |
194 | sparsebit_set_num(vm->vpages_valid, | |
195 | (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, | |
196 | (1ULL << (vm->va_bits - 1)) >> vm->page_shift); | |
197 | ||
198 | /* Limit physical addresses to PA-bits. */ | |
199 | vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; | |
200 | ||
783e9e51 PB |
201 | /* Allocate and setup memory for guest. */ |
202 | vm->vpages_mapped = sparsebit_alloc(); | |
203 | if (phy_pages != 0) | |
204 | vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, | |
205 | 0, 0, phy_pages, 0); | |
206 | ||
207 | return vm; | |
208 | } | |
209 | ||
8cee5816 AJ |
210 | struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm) |
211 | { | |
212 | return _vm_create(mode, phy_pages, perm, 0); | |
213 | } | |
214 | ||
eabe7881 AJ |
215 | /* |
216 | * VM Restart | |
fa3899ad PB |
217 | * |
218 | * Input Args: | |
219 | * vm - VM that has been released before | |
220 | * perm - permission | |
221 | * | |
222 | * Output Args: None | |
223 | * | |
224 | * Reopens the file descriptors associated to the VM and reinstates the | |
225 | * global state, such as the irqchip and the memory regions that are mapped | |
226 | * into the guest. | |
227 | */ | |
228 | void kvm_vm_restart(struct kvm_vm *vmp, int perm) | |
229 | { | |
230 | struct userspace_mem_region *region; | |
231 | ||
8cee5816 | 232 | vm_open(vmp, perm, vmp->type); |
fa3899ad PB |
233 | if (vmp->has_irqchip) |
234 | vm_create_irqchip(vmp); | |
235 | ||
236 | for (region = vmp->userspace_mem_region_head; region; | |
237 | region = region->next) { | |
238 | int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); | |
239 | TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" | |
240 | " rc: %i errno: %i\n" | |
241 | " slot: %u flags: 0x%x\n" | |
242 | " guest_phys_addr: 0x%lx size: 0x%lx", | |
eabe7881 AJ |
243 | ret, errno, region->region.slot, |
244 | region->region.flags, | |
fa3899ad PB |
245 | region->region.guest_phys_addr, |
246 | region->region.memory_size); | |
247 | } | |
248 | } | |
249 | ||
3b4cd0ff PX |
250 | void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) |
251 | { | |
252 | struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot }; | |
253 | int ret; | |
254 | ||
255 | ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args); | |
256 | TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s", | |
257 | strerror(-ret)); | |
258 | } | |
259 | ||
2a31b9db PB |
260 | void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, |
261 | uint64_t first_page, uint32_t num_pages) | |
262 | { | |
263 | struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot, | |
264 | .first_page = first_page, | |
265 | .num_pages = num_pages }; | |
266 | int ret; | |
267 | ||
268 | ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args); | |
269 | TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s", | |
270 | strerror(-ret)); | |
271 | } | |
272 | ||
eabe7881 AJ |
273 | /* |
274 | * Userspace Memory Region Find | |
783e9e51 PB |
275 | * |
276 | * Input Args: | |
277 | * vm - Virtual Machine | |
278 | * start - Starting VM physical address | |
279 | * end - Ending VM physical address, inclusive. | |
280 | * | |
281 | * Output Args: None | |
282 | * | |
283 | * Return: | |
284 | * Pointer to overlapping region, NULL if no such region. | |
285 | * | |
286 | * Searches for a region with any physical memory that overlaps with | |
287 | * any portion of the guest physical addresses from start to end | |
288 | * inclusive. If multiple overlapping regions exist, a pointer to any | |
289 | * of the regions is returned. Null is returned only when no overlapping | |
290 | * region exists. | |
291 | */ | |
eabe7881 AJ |
292 | static struct userspace_mem_region * |
293 | userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) | |
783e9e51 PB |
294 | { |
295 | struct userspace_mem_region *region; | |
296 | ||
297 | for (region = vm->userspace_mem_region_head; region; | |
298 | region = region->next) { | |
299 | uint64_t existing_start = region->region.guest_phys_addr; | |
300 | uint64_t existing_end = region->region.guest_phys_addr | |
301 | + region->region.memory_size - 1; | |
302 | if (start <= existing_end && end >= existing_start) | |
303 | return region; | |
304 | } | |
305 | ||
306 | return NULL; | |
307 | } | |
308 | ||
eabe7881 AJ |
309 | /* |
310 | * KVM Userspace Memory Region Find | |
783e9e51 PB |
311 | * |
312 | * Input Args: | |
313 | * vm - Virtual Machine | |
314 | * start - Starting VM physical address | |
315 | * end - Ending VM physical address, inclusive. | |
316 | * | |
317 | * Output Args: None | |
318 | * | |
319 | * Return: | |
320 | * Pointer to overlapping region, NULL if no such region. | |
321 | * | |
322 | * Public interface to userspace_mem_region_find. Allows tests to look up | |
323 | * the memslot datastructure for a given range of guest physical memory. | |
324 | */ | |
325 | struct kvm_userspace_memory_region * | |
326 | kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, | |
327 | uint64_t end) | |
328 | { | |
329 | struct userspace_mem_region *region; | |
330 | ||
331 | region = userspace_mem_region_find(vm, start, end); | |
332 | if (!region) | |
333 | return NULL; | |
334 | ||
335 | return ®ion->region; | |
336 | } | |
337 | ||
eabe7881 AJ |
338 | /* |
339 | * VCPU Find | |
783e9e51 PB |
340 | * |
341 | * Input Args: | |
342 | * vm - Virtual Machine | |
343 | * vcpuid - VCPU ID | |
344 | * | |
345 | * Output Args: None | |
346 | * | |
347 | * Return: | |
348 | * Pointer to VCPU structure | |
349 | * | |
350 | * Locates a vcpu structure that describes the VCPU specified by vcpuid and | |
351 | * returns a pointer to it. Returns NULL if the VM doesn't contain a VCPU | |
352 | * for the specified vcpuid. | |
353 | */ | |
eabe7881 | 354 | struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid) |
783e9e51 PB |
355 | { |
356 | struct vcpu *vcpup; | |
357 | ||
358 | for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) { | |
359 | if (vcpup->id == vcpuid) | |
360 | return vcpup; | |
361 | } | |
362 | ||
363 | return NULL; | |
364 | } | |
365 | ||
eabe7881 AJ |
366 | /* |
367 | * VM VCPU Remove | |
783e9e51 PB |
368 | * |
369 | * Input Args: | |
370 | * vm - Virtual Machine | |
371 | * vcpuid - VCPU ID | |
372 | * | |
373 | * Output Args: None | |
374 | * | |
375 | * Return: None, TEST_ASSERT failures for all error conditions | |
376 | * | |
377 | * Within the VM specified by vm, removes the VCPU given by vcpuid. | |
378 | */ | |
379 | static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid) | |
380 | { | |
381 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
0a505fe6 | 382 | int ret; |
783e9e51 | 383 | |
0a505fe6 PB |
384 | ret = munmap(vcpu->state, sizeof(*vcpu->state)); |
385 | TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i " | |
386 | "errno: %i", ret, errno); | |
387 | close(vcpu->fd); | |
783e9e51 PB |
388 | TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i " |
389 | "errno: %i", ret, errno); | |
390 | ||
391 | if (vcpu->next) | |
392 | vcpu->next->prev = vcpu->prev; | |
393 | if (vcpu->prev) | |
394 | vcpu->prev->next = vcpu->next; | |
395 | else | |
396 | vm->vcpu_head = vcpu->next; | |
397 | free(vcpu); | |
398 | } | |
399 | ||
fa3899ad PB |
400 | void kvm_vm_release(struct kvm_vm *vmp) |
401 | { | |
402 | int ret; | |
403 | ||
fa3899ad PB |
404 | while (vmp->vcpu_head) |
405 | vm_vcpu_rm(vmp, vmp->vcpu_head->id); | |
406 | ||
fa3899ad PB |
407 | ret = close(vmp->fd); |
408 | TEST_ASSERT(ret == 0, "Close of vm fd failed,\n" | |
409 | " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno); | |
410 | ||
411 | close(vmp->kvm_fd); | |
412 | TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n" | |
413 | " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno); | |
414 | } | |
783e9e51 | 415 | |
eabe7881 AJ |
416 | /* |
417 | * Destroys and frees the VM pointed to by vmp. | |
783e9e51 PB |
418 | */ |
419 | void kvm_vm_free(struct kvm_vm *vmp) | |
420 | { | |
421 | int ret; | |
422 | ||
423 | if (vmp == NULL) | |
424 | return; | |
425 | ||
426 | /* Free userspace_mem_regions. */ | |
427 | while (vmp->userspace_mem_region_head) { | |
428 | struct userspace_mem_region *region | |
429 | = vmp->userspace_mem_region_head; | |
430 | ||
431 | region->region.memory_size = 0; | |
432 | ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, | |
433 | ®ion->region); | |
434 | TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, " | |
435 | "rc: %i errno: %i", ret, errno); | |
436 | ||
437 | vmp->userspace_mem_region_head = region->next; | |
438 | sparsebit_free(®ion->unused_phy_pages); | |
439 | ret = munmap(region->mmap_start, region->mmap_size); | |
440 | TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", | |
441 | ret, errno); | |
442 | ||
443 | free(region); | |
444 | } | |
445 | ||
783e9e51 PB |
446 | /* Free sparsebit arrays. */ |
447 | sparsebit_free(&vmp->vpages_valid); | |
448 | sparsebit_free(&vmp->vpages_mapped); | |
449 | ||
fa3899ad | 450 | kvm_vm_release(vmp); |
783e9e51 PB |
451 | |
452 | /* Free the structure describing the VM. */ | |
453 | free(vmp); | |
454 | } | |
455 | ||
eabe7881 AJ |
456 | /* |
457 | * Memory Compare, host virtual to guest virtual | |
783e9e51 PB |
458 | * |
459 | * Input Args: | |
460 | * hva - Starting host virtual address | |
461 | * vm - Virtual Machine | |
462 | * gva - Starting guest virtual address | |
463 | * len - number of bytes to compare | |
464 | * | |
465 | * Output Args: None | |
466 | * | |
467 | * Input/Output Args: None | |
468 | * | |
469 | * Return: | |
470 | * Returns 0 if the bytes starting at hva for a length of len | |
471 | * are equal the guest virtual bytes starting at gva. Returns | |
472 | * a value < 0, if bytes at hva are less than those at gva. | |
473 | * Otherwise a value > 0 is returned. | |
474 | * | |
475 | * Compares the bytes starting at the host virtual address hva, for | |
476 | * a length of len, to the guest bytes starting at the guest virtual | |
477 | * address given by gva. | |
478 | */ | |
eabe7881 | 479 | int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) |
783e9e51 PB |
480 | { |
481 | size_t amt; | |
482 | ||
eabe7881 AJ |
483 | /* |
484 | * Compare a batch of bytes until either a match is found | |
783e9e51 PB |
485 | * or all the bytes have been compared. |
486 | */ | |
487 | for (uintptr_t offset = 0; offset < len; offset += amt) { | |
488 | uintptr_t ptr1 = (uintptr_t)hva + offset; | |
489 | ||
eabe7881 AJ |
490 | /* |
491 | * Determine host address for guest virtual address | |
783e9e51 PB |
492 | * at offset. |
493 | */ | |
494 | uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); | |
495 | ||
eabe7881 AJ |
496 | /* |
497 | * Determine amount to compare on this pass. | |
783e9e51 PB |
498 | * Don't allow the comparsion to cross a page boundary. |
499 | */ | |
500 | amt = len - offset; | |
501 | if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) | |
502 | amt = vm->page_size - (ptr1 % vm->page_size); | |
503 | if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) | |
504 | amt = vm->page_size - (ptr2 % vm->page_size); | |
505 | ||
506 | assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); | |
507 | assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); | |
508 | ||
eabe7881 AJ |
509 | /* |
510 | * Perform the comparison. If there is a difference | |
783e9e51 PB |
511 | * return that result to the caller, otherwise need |
512 | * to continue on looking for a mismatch. | |
513 | */ | |
514 | int ret = memcmp((void *)ptr1, (void *)ptr2, amt); | |
515 | if (ret != 0) | |
516 | return ret; | |
517 | } | |
518 | ||
eabe7881 AJ |
519 | /* |
520 | * No mismatch found. Let the caller know the two memory | |
783e9e51 PB |
521 | * areas are equal. |
522 | */ | |
523 | return 0; | |
524 | } | |
525 | ||
eabe7881 AJ |
526 | /* |
527 | * VM Userspace Memory Region Add | |
783e9e51 PB |
528 | * |
529 | * Input Args: | |
530 | * vm - Virtual Machine | |
531 | * backing_src - Storage source for this region. | |
532 | * NULL to use anonymous memory. | |
533 | * guest_paddr - Starting guest physical address | |
534 | * slot - KVM region slot | |
535 | * npages - Number of physical pages | |
536 | * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES) | |
537 | * | |
538 | * Output Args: None | |
539 | * | |
540 | * Return: None | |
541 | * | |
542 | * Allocates a memory area of the number of pages specified by npages | |
543 | * and maps it to the VM specified by vm, at a starting physical address | |
544 | * given by guest_paddr. The region is created with a KVM region slot | |
545 | * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The | |
546 | * region is created with the flags given by flags. | |
547 | */ | |
548 | void vm_userspace_mem_region_add(struct kvm_vm *vm, | |
549 | enum vm_mem_backing_src_type src_type, | |
550 | uint64_t guest_paddr, uint32_t slot, uint64_t npages, | |
551 | uint32_t flags) | |
552 | { | |
553 | int ret; | |
554 | unsigned long pmem_size = 0; | |
555 | struct userspace_mem_region *region; | |
556 | size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size; | |
557 | ||
558 | TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " | |
559 | "address not on a page boundary.\n" | |
560 | " guest_paddr: 0x%lx vm->page_size: 0x%x", | |
561 | guest_paddr, vm->page_size); | |
562 | TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) | |
563 | <= vm->max_gfn, "Physical range beyond maximum " | |
564 | "supported physical address,\n" | |
565 | " guest_paddr: 0x%lx npages: 0x%lx\n" | |
566 | " vm->max_gfn: 0x%lx vm->page_size: 0x%x", | |
567 | guest_paddr, npages, vm->max_gfn, vm->page_size); | |
568 | ||
eabe7881 AJ |
569 | /* |
570 | * Confirm a mem region with an overlapping address doesn't | |
783e9e51 PB |
571 | * already exist. |
572 | */ | |
573 | region = (struct userspace_mem_region *) userspace_mem_region_find( | |
94a980c3 | 574 | vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); |
783e9e51 PB |
575 | if (region != NULL) |
576 | TEST_ASSERT(false, "overlapping userspace_mem_region already " | |
577 | "exists\n" | |
578 | " requested guest_paddr: 0x%lx npages: 0x%lx " | |
579 | "page_size: 0x%x\n" | |
580 | " existing guest_paddr: 0x%lx size: 0x%lx", | |
581 | guest_paddr, npages, vm->page_size, | |
582 | (uint64_t) region->region.guest_phys_addr, | |
583 | (uint64_t) region->region.memory_size); | |
584 | ||
585 | /* Confirm no region with the requested slot already exists. */ | |
586 | for (region = vm->userspace_mem_region_head; region; | |
587 | region = region->next) { | |
588 | if (region->region.slot == slot) | |
589 | break; | |
783e9e51 PB |
590 | } |
591 | if (region != NULL) | |
592 | TEST_ASSERT(false, "A mem region with the requested slot " | |
94a980c3 | 593 | "already exists.\n" |
783e9e51 PB |
594 | " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" |
595 | " existing slot: %u paddr: 0x%lx size: 0x%lx", | |
596 | slot, guest_paddr, npages, | |
597 | region->region.slot, | |
598 | (uint64_t) region->region.guest_phys_addr, | |
599 | (uint64_t) region->region.memory_size); | |
600 | ||
601 | /* Allocate and initialize new mem region structure. */ | |
602 | region = calloc(1, sizeof(*region)); | |
603 | TEST_ASSERT(region != NULL, "Insufficient Memory"); | |
604 | region->mmap_size = npages * vm->page_size; | |
605 | ||
606 | /* Enough memory to align up to a huge page. */ | |
607 | if (src_type == VM_MEM_SRC_ANONYMOUS_THP) | |
608 | region->mmap_size += huge_page_size; | |
609 | region->mmap_start = mmap(NULL, region->mmap_size, | |
610 | PROT_READ | PROT_WRITE, | |
611 | MAP_PRIVATE | MAP_ANONYMOUS | |
612 | | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0), | |
613 | -1, 0); | |
614 | TEST_ASSERT(region->mmap_start != MAP_FAILED, | |
615 | "test_malloc failed, mmap_start: %p errno: %i", | |
616 | region->mmap_start, errno); | |
617 | ||
618 | /* Align THP allocation up to start of a huge page. */ | |
619 | region->host_mem = align(region->mmap_start, | |
620 | src_type == VM_MEM_SRC_ANONYMOUS_THP ? huge_page_size : 1); | |
621 | ||
622 | /* As needed perform madvise */ | |
623 | if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) { | |
624 | ret = madvise(region->host_mem, npages * vm->page_size, | |
625 | src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE); | |
626 | TEST_ASSERT(ret == 0, "madvise failed,\n" | |
627 | " addr: %p\n" | |
628 | " length: 0x%lx\n" | |
629 | " src_type: %x", | |
630 | region->host_mem, npages * vm->page_size, src_type); | |
631 | } | |
632 | ||
633 | region->unused_phy_pages = sparsebit_alloc(); | |
634 | sparsebit_set_num(region->unused_phy_pages, | |
635 | guest_paddr >> vm->page_shift, npages); | |
636 | region->region.slot = slot; | |
637 | region->region.flags = flags; | |
638 | region->region.guest_phys_addr = guest_paddr; | |
639 | region->region.memory_size = npages * vm->page_size; | |
640 | region->region.userspace_addr = (uintptr_t) region->host_mem; | |
641 | ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); | |
642 | TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" | |
643 | " rc: %i errno: %i\n" | |
644 | " slot: %u flags: 0x%x\n" | |
645 | " guest_phys_addr: 0x%lx size: 0x%lx", | |
646 | ret, errno, slot, flags, | |
647 | guest_paddr, (uint64_t) region->region.memory_size); | |
648 | ||
649 | /* Add to linked-list of memory regions. */ | |
650 | if (vm->userspace_mem_region_head) | |
651 | vm->userspace_mem_region_head->prev = region; | |
652 | region->next = vm->userspace_mem_region_head; | |
653 | vm->userspace_mem_region_head = region; | |
654 | } | |
655 | ||
eabe7881 AJ |
656 | /* |
657 | * Memslot to region | |
783e9e51 PB |
658 | * |
659 | * Input Args: | |
660 | * vm - Virtual Machine | |
661 | * memslot - KVM memory slot ID | |
662 | * | |
663 | * Output Args: None | |
664 | * | |
665 | * Return: | |
666 | * Pointer to memory region structure that describe memory region | |
667 | * using kvm memory slot ID given by memslot. TEST_ASSERT failure | |
668 | * on error (e.g. currently no memory region using memslot as a KVM | |
669 | * memory slot ID). | |
670 | */ | |
eabe7881 AJ |
671 | static struct userspace_mem_region * |
672 | memslot2region(struct kvm_vm *vm, uint32_t memslot) | |
783e9e51 PB |
673 | { |
674 | struct userspace_mem_region *region; | |
675 | ||
676 | for (region = vm->userspace_mem_region_head; region; | |
677 | region = region->next) { | |
678 | if (region->region.slot == memslot) | |
679 | break; | |
680 | } | |
681 | if (region == NULL) { | |
682 | fprintf(stderr, "No mem region with the requested slot found,\n" | |
683 | " requested slot: %u\n", memslot); | |
684 | fputs("---- vm dump ----\n", stderr); | |
685 | vm_dump(stderr, vm, 2); | |
686 | TEST_ASSERT(false, "Mem region not found"); | |
687 | } | |
688 | ||
689 | return region; | |
690 | } | |
691 | ||
eabe7881 AJ |
692 | /* |
693 | * VM Memory Region Flags Set | |
783e9e51 PB |
694 | * |
695 | * Input Args: | |
696 | * vm - Virtual Machine | |
697 | * flags - Starting guest physical address | |
698 | * | |
699 | * Output Args: None | |
700 | * | |
701 | * Return: None | |
702 | * | |
703 | * Sets the flags of the memory region specified by the value of slot, | |
704 | * to the values given by flags. | |
705 | */ | |
706 | void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) | |
707 | { | |
708 | int ret; | |
709 | struct userspace_mem_region *region; | |
710 | ||
783e9e51 PB |
711 | region = memslot2region(vm, slot); |
712 | ||
713 | region->region.flags = flags; | |
714 | ||
715 | ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); | |
716 | ||
717 | TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" | |
718 | " rc: %i errno: %i slot: %u flags: 0x%x", | |
719 | ret, errno, slot, flags); | |
720 | } | |
721 | ||
eabe7881 AJ |
722 | /* |
723 | * VCPU mmap Size | |
783e9e51 PB |
724 | * |
725 | * Input Args: None | |
726 | * | |
727 | * Output Args: None | |
728 | * | |
729 | * Return: | |
730 | * Size of VCPU state | |
731 | * | |
732 | * Returns the size of the structure pointed to by the return value | |
733 | * of vcpu_state(). | |
734 | */ | |
735 | static int vcpu_mmap_sz(void) | |
736 | { | |
737 | int dev_fd, ret; | |
738 | ||
739 | dev_fd = open(KVM_DEV_PATH, O_RDONLY); | |
bcb2b94a PB |
740 | if (dev_fd < 0) |
741 | exit(KSFT_SKIP); | |
783e9e51 PB |
742 | |
743 | ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); | |
744 | TEST_ASSERT(ret >= sizeof(struct kvm_run), | |
745 | "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i", | |
746 | __func__, ret, errno); | |
747 | ||
748 | close(dev_fd); | |
749 | ||
750 | return ret; | |
751 | } | |
752 | ||
eabe7881 AJ |
753 | /* |
754 | * VM VCPU Add | |
783e9e51 PB |
755 | * |
756 | * Input Args: | |
757 | * vm - Virtual Machine | |
758 | * vcpuid - VCPU ID | |
759 | * | |
760 | * Output Args: None | |
761 | * | |
762 | * Return: None | |
763 | * | |
764 | * Creates and adds to the VM specified by vm and virtual CPU with | |
765 | * the ID given by vcpuid. | |
766 | */ | |
eabe7881 AJ |
767 | void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot, |
768 | int gdt_memslot) | |
783e9e51 PB |
769 | { |
770 | struct vcpu *vcpu; | |
771 | ||
772 | /* Confirm a vcpu with the specified id doesn't already exist. */ | |
773 | vcpu = vcpu_find(vm, vcpuid); | |
774 | if (vcpu != NULL) | |
775 | TEST_ASSERT(false, "vcpu with the specified id " | |
776 | "already exists,\n" | |
777 | " requested vcpuid: %u\n" | |
778 | " existing vcpuid: %u state: %p", | |
779 | vcpuid, vcpu->id, vcpu->state); | |
780 | ||
781 | /* Allocate and initialize new vcpu structure. */ | |
782 | vcpu = calloc(1, sizeof(*vcpu)); | |
783 | TEST_ASSERT(vcpu != NULL, "Insufficient Memory"); | |
784 | vcpu->id = vcpuid; | |
785 | vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid); | |
786 | TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i", | |
787 | vcpu->fd, errno); | |
788 | ||
789 | TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size " | |
790 | "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi", | |
791 | vcpu_mmap_sz(), sizeof(*vcpu->state)); | |
792 | vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state), | |
793 | PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); | |
794 | TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, " | |
795 | "vcpu id: %u errno: %i", vcpuid, errno); | |
796 | ||
797 | /* Add to linked-list of VCPUs. */ | |
798 | if (vm->vcpu_head) | |
799 | vm->vcpu_head->prev = vcpu; | |
800 | vcpu->next = vm->vcpu_head; | |
801 | vm->vcpu_head = vcpu; | |
802 | ||
2305339e | 803 | vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot); |
783e9e51 PB |
804 | } |
805 | ||
eabe7881 AJ |
806 | /* |
807 | * VM Virtual Address Unused Gap | |
783e9e51 PB |
808 | * |
809 | * Input Args: | |
810 | * vm - Virtual Machine | |
811 | * sz - Size (bytes) | |
812 | * vaddr_min - Minimum Virtual Address | |
813 | * | |
814 | * Output Args: None | |
815 | * | |
816 | * Return: | |
817 | * Lowest virtual address at or below vaddr_min, with at least | |
818 | * sz unused bytes. TEST_ASSERT failure if no area of at least | |
819 | * size sz is available. | |
820 | * | |
821 | * Within the VM specified by vm, locates the lowest starting virtual | |
822 | * address >= vaddr_min, that has at least sz unallocated bytes. A | |
823 | * TEST_ASSERT failure occurs for invalid input or no area of at least | |
824 | * sz unallocated bytes >= vaddr_min is available. | |
825 | */ | |
826 | static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, | |
eabe7881 | 827 | vm_vaddr_t vaddr_min) |
783e9e51 PB |
828 | { |
829 | uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; | |
830 | ||
831 | /* Determine lowest permitted virtual page index. */ | |
832 | uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; | |
833 | if ((pgidx_start * vm->page_size) < vaddr_min) | |
eabe7881 | 834 | goto no_va_found; |
783e9e51 PB |
835 | |
836 | /* Loop over section with enough valid virtual page indexes. */ | |
837 | if (!sparsebit_is_set_num(vm->vpages_valid, | |
838 | pgidx_start, pages)) | |
839 | pgidx_start = sparsebit_next_set_num(vm->vpages_valid, | |
840 | pgidx_start, pages); | |
841 | do { | |
842 | /* | |
843 | * Are there enough unused virtual pages available at | |
844 | * the currently proposed starting virtual page index. | |
845 | * If not, adjust proposed starting index to next | |
846 | * possible. | |
847 | */ | |
848 | if (sparsebit_is_clear_num(vm->vpages_mapped, | |
849 | pgidx_start, pages)) | |
850 | goto va_found; | |
851 | pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, | |
852 | pgidx_start, pages); | |
853 | if (pgidx_start == 0) | |
854 | goto no_va_found; | |
855 | ||
856 | /* | |
857 | * If needed, adjust proposed starting virtual address, | |
858 | * to next range of valid virtual addresses. | |
859 | */ | |
860 | if (!sparsebit_is_set_num(vm->vpages_valid, | |
861 | pgidx_start, pages)) { | |
862 | pgidx_start = sparsebit_next_set_num( | |
863 | vm->vpages_valid, pgidx_start, pages); | |
864 | if (pgidx_start == 0) | |
865 | goto no_va_found; | |
866 | } | |
867 | } while (pgidx_start != 0); | |
868 | ||
869 | no_va_found: | |
870 | TEST_ASSERT(false, "No vaddr of specified pages available, " | |
871 | "pages: 0x%lx", pages); | |
872 | ||
873 | /* NOT REACHED */ | |
874 | return -1; | |
875 | ||
876 | va_found: | |
877 | TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, | |
878 | pgidx_start, pages), | |
879 | "Unexpected, invalid virtual page index range,\n" | |
880 | " pgidx_start: 0x%lx\n" | |
881 | " pages: 0x%lx", | |
882 | pgidx_start, pages); | |
883 | TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, | |
884 | pgidx_start, pages), | |
885 | "Unexpected, pages already mapped,\n" | |
886 | " pgidx_start: 0x%lx\n" | |
887 | " pages: 0x%lx", | |
888 | pgidx_start, pages); | |
889 | ||
890 | return pgidx_start * vm->page_size; | |
891 | } | |
892 | ||
eabe7881 AJ |
893 | /* |
894 | * VM Virtual Address Allocate | |
783e9e51 PB |
895 | * |
896 | * Input Args: | |
897 | * vm - Virtual Machine | |
898 | * sz - Size in bytes | |
899 | * vaddr_min - Minimum starting virtual address | |
900 | * data_memslot - Memory region slot for data pages | |
901 | * pgd_memslot - Memory region slot for new virtual translation tables | |
902 | * | |
903 | * Output Args: None | |
904 | * | |
905 | * Return: | |
906 | * Starting guest virtual address | |
907 | * | |
908 | * Allocates at least sz bytes within the virtual address space of the vm | |
909 | * given by vm. The allocated bytes are mapped to a virtual address >= | |
910 | * the address given by vaddr_min. Note that each allocation uses a | |
911 | * a unique set of pages, with the minimum real allocation being at least | |
912 | * a page. | |
913 | */ | |
914 | vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, | |
eabe7881 | 915 | uint32_t data_memslot, uint32_t pgd_memslot) |
783e9e51 PB |
916 | { |
917 | uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); | |
918 | ||
919 | virt_pgd_alloc(vm, pgd_memslot); | |
920 | ||
eabe7881 AJ |
921 | /* |
922 | * Find an unused range of virtual page addresses of at least | |
783e9e51 PB |
923 | * pages in length. |
924 | */ | |
925 | vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); | |
926 | ||
927 | /* Map the virtual pages. */ | |
928 | for (vm_vaddr_t vaddr = vaddr_start; pages > 0; | |
929 | pages--, vaddr += vm->page_size) { | |
930 | vm_paddr_t paddr; | |
931 | ||
81d1cca0 AJ |
932 | paddr = vm_phy_page_alloc(vm, |
933 | KVM_UTIL_MIN_PFN * vm->page_size, data_memslot); | |
783e9e51 PB |
934 | |
935 | virt_pg_map(vm, vaddr, paddr, pgd_memslot); | |
936 | ||
937 | sparsebit_set(vm->vpages_mapped, | |
938 | vaddr >> vm->page_shift); | |
939 | } | |
940 | ||
941 | return vaddr_start; | |
942 | } | |
943 | ||
3b4cd0ff PX |
944 | /* |
945 | * Map a range of VM virtual address to the VM's physical address | |
946 | * | |
947 | * Input Args: | |
948 | * vm - Virtual Machine | |
949 | * vaddr - Virtuall address to map | |
950 | * paddr - VM Physical Address | |
951 | * size - The size of the range to map | |
952 | * pgd_memslot - Memory region slot for new virtual translation tables | |
953 | * | |
954 | * Output Args: None | |
955 | * | |
956 | * Return: None | |
957 | * | |
958 | * Within the VM given by vm, creates a virtual translation for the | |
959 | * page range starting at vaddr to the page range starting at paddr. | |
960 | */ | |
961 | void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, | |
962 | size_t size, uint32_t pgd_memslot) | |
963 | { | |
964 | size_t page_size = vm->page_size; | |
965 | size_t npages = size / page_size; | |
966 | ||
967 | TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); | |
968 | TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); | |
969 | ||
970 | while (npages--) { | |
971 | virt_pg_map(vm, vaddr, paddr, pgd_memslot); | |
972 | vaddr += page_size; | |
973 | paddr += page_size; | |
974 | } | |
975 | } | |
976 | ||
eabe7881 AJ |
977 | /* |
978 | * Address VM Physical to Host Virtual | |
783e9e51 PB |
979 | * |
980 | * Input Args: | |
981 | * vm - Virtual Machine | |
982 | * gpa - VM physical address | |
983 | * | |
984 | * Output Args: None | |
985 | * | |
986 | * Return: | |
987 | * Equivalent host virtual address | |
988 | * | |
989 | * Locates the memory region containing the VM physical address given | |
990 | * by gpa, within the VM given by vm. When found, the host virtual | |
991 | * address providing the memory to the vm physical address is returned. | |
992 | * A TEST_ASSERT failure occurs if no region containing gpa exists. | |
993 | */ | |
994 | void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) | |
995 | { | |
996 | struct userspace_mem_region *region; | |
997 | for (region = vm->userspace_mem_region_head; region; | |
998 | region = region->next) { | |
999 | if ((gpa >= region->region.guest_phys_addr) | |
1000 | && (gpa <= (region->region.guest_phys_addr | |
1001 | + region->region.memory_size - 1))) | |
1002 | return (void *) ((uintptr_t) region->host_mem | |
1003 | + (gpa - region->region.guest_phys_addr)); | |
1004 | } | |
1005 | ||
1006 | TEST_ASSERT(false, "No vm physical memory at 0x%lx", gpa); | |
1007 | return NULL; | |
1008 | } | |
1009 | ||
eabe7881 AJ |
1010 | /* |
1011 | * Address Host Virtual to VM Physical | |
783e9e51 PB |
1012 | * |
1013 | * Input Args: | |
1014 | * vm - Virtual Machine | |
1015 | * hva - Host virtual address | |
1016 | * | |
1017 | * Output Args: None | |
1018 | * | |
1019 | * Return: | |
1020 | * Equivalent VM physical address | |
1021 | * | |
1022 | * Locates the memory region containing the host virtual address given | |
1023 | * by hva, within the VM given by vm. When found, the equivalent | |
1024 | * VM physical address is returned. A TEST_ASSERT failure occurs if no | |
1025 | * region containing hva exists. | |
1026 | */ | |
1027 | vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) | |
1028 | { | |
1029 | struct userspace_mem_region *region; | |
1030 | for (region = vm->userspace_mem_region_head; region; | |
1031 | region = region->next) { | |
1032 | if ((hva >= region->host_mem) | |
1033 | && (hva <= (region->host_mem | |
1034 | + region->region.memory_size - 1))) | |
1035 | return (vm_paddr_t) ((uintptr_t) | |
1036 | region->region.guest_phys_addr | |
1037 | + (hva - (uintptr_t) region->host_mem)); | |
1038 | } | |
1039 | ||
1040 | TEST_ASSERT(false, "No mapping to a guest physical address, " | |
1041 | "hva: %p", hva); | |
1042 | return -1; | |
1043 | } | |
1044 | ||
eabe7881 AJ |
1045 | /* |
1046 | * VM Create IRQ Chip | |
783e9e51 PB |
1047 | * |
1048 | * Input Args: | |
1049 | * vm - Virtual Machine | |
1050 | * | |
1051 | * Output Args: None | |
1052 | * | |
1053 | * Return: None | |
1054 | * | |
1055 | * Creates an interrupt controller chip for the VM specified by vm. | |
1056 | */ | |
1057 | void vm_create_irqchip(struct kvm_vm *vm) | |
1058 | { | |
1059 | int ret; | |
1060 | ||
1061 | ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0); | |
1062 | TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, " | |
1063 | "rc: %i errno: %i", ret, errno); | |
fa3899ad PB |
1064 | |
1065 | vm->has_irqchip = true; | |
783e9e51 PB |
1066 | } |
1067 | ||
eabe7881 AJ |
1068 | /* |
1069 | * VM VCPU State | |
783e9e51 PB |
1070 | * |
1071 | * Input Args: | |
1072 | * vm - Virtual Machine | |
1073 | * vcpuid - VCPU ID | |
1074 | * | |
1075 | * Output Args: None | |
1076 | * | |
1077 | * Return: | |
1078 | * Pointer to structure that describes the state of the VCPU. | |
1079 | * | |
1080 | * Locates and returns a pointer to a structure that describes the | |
1081 | * state of the VCPU with the given vcpuid. | |
1082 | */ | |
1083 | struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid) | |
1084 | { | |
1085 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1086 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1087 | ||
1088 | return vcpu->state; | |
1089 | } | |
1090 | ||
/*
 * VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Runs the VCPU given by vcpuid within vm, asserting that KVM_RUN
 * completed successfully.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int rc = _vcpu_run(vm, vcpuid);

	TEST_ASSERT(rc == 0, "KVM_RUN IOCTL failed, "
		"rc: %i errno: %i", rc, errno);
}
1111 | ||
1112 | int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid) | |
1113 | { | |
1114 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1115 | int rc; | |
1116 | ||
1117 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
eabe7881 | 1118 | do { |
783e9e51 PB |
1119 | rc = ioctl(vcpu->fd, KVM_RUN, NULL); |
1120 | } while (rc == -1 && errno == EINTR); | |
1121 | return rc; | |
1122 | } | |
1123 | ||
/*
 * Completes any pending I/O for the VCPU given by vcpuid without entering
 * the guest: KVM_RUN is issued with immediate_exit set, so KVM finishes
 * the outstanding mmio/pio emulation and then returns -EINTR immediately.
 * A TEST_ASSERT failure occurs if KVM_RUN does not exit with EINTR.
 */
void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	/* immediate_exit must bracket exactly this one KVM_RUN call. */
	vcpu->state->immediate_exit = 1;
	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
	vcpu->state->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}
1139 | ||
eabe7881 AJ |
1140 | /* |
1141 | * VM VCPU Set MP State | |
783e9e51 PB |
1142 | * |
1143 | * Input Args: | |
1144 | * vm - Virtual Machine | |
1145 | * vcpuid - VCPU ID | |
1146 | * mp_state - mp_state to be set | |
1147 | * | |
1148 | * Output Args: None | |
1149 | * | |
1150 | * Return: None | |
1151 | * | |
1152 | * Sets the MP state of the VCPU given by vcpuid, to the state given | |
1153 | * by mp_state. | |
1154 | */ | |
1155 | void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid, | |
eabe7881 | 1156 | struct kvm_mp_state *mp_state) |
783e9e51 PB |
1157 | { |
1158 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1159 | int ret; | |
1160 | ||
1161 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1162 | ||
1163 | ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state); | |
1164 | TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, " | |
1165 | "rc: %i errno: %i", ret, errno); | |
1166 | } | |
1167 | ||
eabe7881 AJ |
1168 | /* |
1169 | * VM VCPU Regs Get | |
783e9e51 PB |
1170 | * |
1171 | * Input Args: | |
1172 | * vm - Virtual Machine | |
1173 | * vcpuid - VCPU ID | |
1174 | * | |
1175 | * Output Args: | |
1176 | * regs - current state of VCPU regs | |
1177 | * | |
1178 | * Return: None | |
1179 | * | |
1180 | * Obtains the current register state for the VCPU specified by vcpuid | |
1181 | * and stores it at the location given by regs. | |
1182 | */ | |
eabe7881 | 1183 | void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) |
783e9e51 PB |
1184 | { |
1185 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1186 | int ret; | |
1187 | ||
1188 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1189 | ||
783e9e51 PB |
1190 | ret = ioctl(vcpu->fd, KVM_GET_REGS, regs); |
1191 | TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i", | |
1192 | ret, errno); | |
1193 | } | |
1194 | ||
eabe7881 AJ |
1195 | /* |
1196 | * VM VCPU Regs Set | |
783e9e51 PB |
1197 | * |
1198 | * Input Args: | |
1199 | * vm - Virtual Machine | |
1200 | * vcpuid - VCPU ID | |
1201 | * regs - Values to set VCPU regs to | |
1202 | * | |
1203 | * Output Args: None | |
1204 | * | |
1205 | * Return: None | |
1206 | * | |
1207 | * Sets the regs of the VCPU specified by vcpuid to the values | |
1208 | * given by regs. | |
1209 | */ | |
eabe7881 | 1210 | void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs) |
783e9e51 PB |
1211 | { |
1212 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1213 | int ret; | |
1214 | ||
1215 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1216 | ||
783e9e51 PB |
1217 | ret = ioctl(vcpu->fd, KVM_SET_REGS, regs); |
1218 | TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i", | |
1219 | ret, errno); | |
1220 | } | |
1221 | ||
1222 | void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid, | |
eabe7881 | 1223 | struct kvm_vcpu_events *events) |
783e9e51 PB |
1224 | { |
1225 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1226 | int ret; | |
1227 | ||
1228 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1229 | ||
783e9e51 PB |
1230 | ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events); |
1231 | TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i", | |
1232 | ret, errno); | |
1233 | } | |
1234 | ||
1235 | void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid, | |
eabe7881 | 1236 | struct kvm_vcpu_events *events) |
783e9e51 PB |
1237 | { |
1238 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1239 | int ret; | |
1240 | ||
1241 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1242 | ||
783e9e51 PB |
1243 | ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events); |
1244 | TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i", | |
1245 | ret, errno); | |
1246 | } | |
1247 | ||
eabe7881 AJ |
1248 | /* |
1249 | * VM VCPU System Regs Get | |
783e9e51 PB |
1250 | * |
1251 | * Input Args: | |
1252 | * vm - Virtual Machine | |
1253 | * vcpuid - VCPU ID | |
1254 | * | |
1255 | * Output Args: | |
1256 | * sregs - current state of VCPU system regs | |
1257 | * | |
1258 | * Return: None | |
1259 | * | |
1260 | * Obtains the current system register state for the VCPU specified by | |
1261 | * vcpuid and stores it at the location given by sregs. | |
1262 | */ | |
eabe7881 | 1263 | void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) |
783e9e51 PB |
1264 | { |
1265 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1266 | int ret; | |
1267 | ||
1268 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1269 | ||
783e9e51 PB |
1270 | ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs); |
1271 | TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i", | |
1272 | ret, errno); | |
1273 | } | |
1274 | ||
/*
 * VM VCPU System Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   sregs - Values to set VCPU system regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the system regs of the VCPU specified by vcpuid to the values
 * given by sregs.
 */
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
	/*
	 * Report the actual failing ioctl; the message previously claimed
	 * KVM_RUN failed, which is misleading when debugging.
	 */
	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}
1296 | ||
eabe7881 | 1297 | int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs) |
783e9e51 PB |
1298 | { |
1299 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1300 | int ret; | |
1301 | ||
1302 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1303 | ||
783e9e51 PB |
1304 | return ioctl(vcpu->fd, KVM_SET_SREGS, sregs); |
1305 | } | |
1306 | ||
/*
 * VCPU Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VCPU fd, asserting success.
 */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	int rc = _vcpu_ioctl(vm, vcpuid, cmd, arg);

	TEST_ASSERT(rc == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, rc, errno, strerror(errno));
}
1329 | ||
1330 | int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid, | |
1331 | unsigned long cmd, void *arg) | |
783e9e51 PB |
1332 | { |
1333 | struct vcpu *vcpu = vcpu_find(vm, vcpuid); | |
1334 | int ret; | |
1335 | ||
1336 | TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid); | |
1337 | ||
1338 | ret = ioctl(vcpu->fd, cmd, arg); | |
7e50c424 VK |
1339 | |
1340 | return ret; | |
783e9e51 PB |
1341 | } |
1342 | ||
eabe7881 AJ |
1343 | /* |
1344 | * VM Ioctl | |
783e9e51 PB |
1345 | * |
1346 | * Input Args: | |
1347 | * vm - Virtual Machine | |
1348 | * cmd - Ioctl number | |
1349 | * arg - Argument to pass to the ioctl | |
1350 | * | |
1351 | * Return: None | |
1352 | * | |
1353 | * Issues an arbitrary ioctl on a VM fd. | |
1354 | */ | |
1355 | void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg) | |
1356 | { | |
1357 | int ret; | |
1358 | ||
1359 | ret = ioctl(vm->fd, cmd, arg); | |
1360 | TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)", | |
1361 | cmd, ret, errno, strerror(errno)); | |
1362 | } | |
1363 | ||
/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream: mode, fd, page size, each memory region with its
 * unused physical pages, the mapped virtual pages, the page tables
 * (if created), and every VCPU.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	/* One entry per userspace memory region, indented one level. */
	for (region = vm->userspace_mem_region_head; region;
		region = region->next) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	/* Page tables only exist once virt_pgd_alloc() has run. */
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}
1411 | ||
/*
 * Known KVM exit reasons: maps KVM_EXIT_* codes to printable names.
 * Codes missing from this table are rendered as "Unknown" by
 * exit_reason_str().
 */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
1441 | ||
eabe7881 AJ |
1442 | /* |
1443 | * Exit Reason String | |
783e9e51 PB |
1444 | * |
1445 | * Input Args: | |
1446 | * exit_reason - Exit reason | |
1447 | * | |
1448 | * Output Args: None | |
1449 | * | |
1450 | * Return: | |
1451 | * Constant string pointer describing the exit reason. | |
1452 | * | |
1453 | * Locates and returns a constant string that describes the KVM exit | |
1454 | * reason given by exit_reason. If no such string is found, a constant | |
1455 | * string of "Unknown" is returned. | |
1456 | */ | |
1457 | const char *exit_reason_str(unsigned int exit_reason) | |
1458 | { | |
1459 | unsigned int n1; | |
1460 | ||
1461 | for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) { | |
1462 | if (exit_reason == exit_reasons_known[n1].reason) | |
1463 | return exit_reasons_known[n1].name; | |
1464 | } | |
1465 | ||
1466 | return "Unknown"; | |
1467 | } | |
1468 | ||
/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of num contiguous
 * unused physical pages at or above paddr_min in the given memslot.
 * The pages are marked as used and the base address is returned.
 * Aborts (with a VM dump to aid debugging) if no such range exists.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	/*
	 * Scan for num consecutive set (unused) bits starting at base.
	 * On the first clear bit, restart the window at the next set bit.
	 * sparsebit_next_set() returns 0 when no further set bit exists,
	 * which terminates the outer loop.
	 */
	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	/* Claim the range: clear the bits so the pages read as in use. */
	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}
783e9e51 | 1527 | |
d5106539 AJ |
1528 | vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, |
1529 | uint32_t memslot) | |
1530 | { | |
1531 | return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); | |
783e9e51 PB |
1532 | } |
1533 | ||
eabe7881 AJ |
1534 | /* |
1535 | * Address Guest Virtual to Host Virtual | |
783e9e51 PB |
1536 | * |
1537 | * Input Args: | |
1538 | * vm - Virtual Machine | |
1539 | * gva - VM virtual address | |
1540 | * | |
1541 | * Output Args: None | |
1542 | * | |
1543 | * Return: | |
1544 | * Equivalent host virtual address | |
1545 | */ | |
1546 | void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) | |
1547 | { | |
1548 | return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); | |
1549 | } |