/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"
#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

#include "vhost/vhost.c"
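
/*
 * vhost.c is #included above (rather than linked) so these tests can
 * exercise its static helpers, e.g. vhost_vring_desc_to_iov() and
 * vhost_vq_avail_ring_get().  The DEFINE_STUB() macros below mock out
 * the DPDK rte_vhost entry points and other externals that vhost.c
 * calls into.
 */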

DEFINE_STUB(rte_vhost_set_vring_base, int, (int vid, uint16_t queue_id,
                uint16_t last_avail_idx, uint16_t last_used_idx), 0);
DEFINE_STUB(rte_vhost_get_vring_base, int, (int vid, uint16_t queue_id,
                uint16_t *last_avail_idx, uint16_t *last_used_idx), 0);
DEFINE_STUB_V(vhost_session_install_rte_compat_hooks,
              (struct spdk_vhost_session *vsession));
DEFINE_STUB(vhost_register_unix_socket, int, (const char *path, const char *name,
                uint64_t virtio_features, uint64_t disabled_features, uint64_t protocol_features), 0);
DEFINE_STUB(vhost_driver_unregister, int, (const char *path), 0);
DEFINE_STUB(spdk_mem_register, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(spdk_mem_unregister, int, (void *vaddr, size_t len), 0);
DEFINE_STUB(rte_vhost_vring_call, int, (int vid, uint16_t vring_idx), 0);
DEFINE_STUB_V(rte_vhost_log_used_vring, (int vid, uint16_t vring_idx,
                uint64_t offset, uint64_t len));

DEFINE_STUB(rte_vhost_get_mem_table, int, (int vid, struct rte_vhost_memory **mem), 0);
DEFINE_STUB(rte_vhost_get_negotiated_features, int, (int vid, uint64_t *features), 0);
DEFINE_STUB(rte_vhost_get_vhost_vring, int,
            (int vid, uint16_t vring_idx, struct rte_vhost_vring *vring), 0);
DEFINE_STUB(rte_vhost_enable_guest_notification, int,
            (int vid, uint16_t queue_id, int enable), 0);
DEFINE_STUB(rte_vhost_get_ifname, int, (int vid, char *buf, size_t len), 0);
DEFINE_STUB(rte_vhost_driver_start, int, (const char *name), 0);
DEFINE_STUB(rte_vhost_driver_callback_register, int,
            (const char *path, struct vhost_device_ops const *const ops), 0);
DEFINE_STUB(rte_vhost_driver_disable_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_set_features, int, (const char *path, uint64_t features), 0);
DEFINE_STUB(rte_vhost_driver_register, int, (const char *path, uint64_t flags), 0);
DEFINE_STUB(vhost_nvme_admin_passthrough, int, (int vid, void *cmd, void *cqe, void *buf), 0);
DEFINE_STUB(vhost_nvme_set_cq_call, int, (int vid, uint16_t qid, int fd), 0);
DEFINE_STUB(vhost_nvme_set_bar_mr, int, (int vid, void *bar, uint64_t bar_size), 0);
DEFINE_STUB(vhost_nvme_get_cap, int, (int vid, uint64_t *cap), 0);

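/*
 * Stub out spdk_call_unaffinitized() so the callback simply runs inline
 * in the test process.
 */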
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
        return cb(arg);
}

static struct spdk_vhost_dev_backend g_vdev_backend;

static int
test_setup(void)
{
        return 0;
}

static int
alloc_vdev(struct spdk_vhost_dev **vdev_p, const char *name, const char *cpumask)
{
        struct spdk_vhost_dev *vdev = NULL;
        int rc;

        /* spdk_vhost_dev must be allocated on a cache line boundary. */
        rc = posix_memalign((void **)&vdev, 64, sizeof(*vdev));
        CU_ASSERT(rc == 0);
        SPDK_CU_ASSERT_FATAL(vdev != NULL);
        memset(vdev, 0, sizeof(*vdev));
        rc = vhost_dev_register(vdev, name, cpumask, &g_vdev_backend);
        if (rc == 0) {
                *vdev_p = vdev;
        } else {
                free(vdev);
                *vdev_p = NULL;
        }

        return rc;
}

static void
start_vdev(struct spdk_vhost_dev *vdev)
{
        struct rte_vhost_memory *mem;
        struct spdk_vhost_session *vsession = NULL;
        int rc;

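        /*
         * Fake guest memory map: two back-to-back 4 MB regions.
         * Guest-physical [0x0, 0x400000) maps to host address 0x1000000
         * and guest-physical [0x400000, 0x800000) maps to 0x2000000, so
         * an address translates by +0x1000000 in region 0 and by
         * +0x1C00000 in region 1.  The desc_to_iov tests depend on
         * these exact offsets.
         */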
        mem = calloc(1, sizeof(*mem) + 2 * sizeof(struct rte_vhost_mem_region));
        SPDK_CU_ASSERT_FATAL(mem != NULL);
        mem->nregions = 2;
        mem->regions[0].guest_phys_addr = 0;
        mem->regions[0].size = 0x400000; /* 4 MB */
        mem->regions[0].host_user_addr = 0x1000000;
        mem->regions[1].guest_phys_addr = 0x400000;
        mem->regions[1].size = 0x400000; /* 4 MB */
        mem->regions[1].host_user_addr = 0x2000000;

        assert(TAILQ_EMPTY(&vdev->vsessions));
        /* spdk_vhost_session must be allocated on a cache line boundary. */
        rc = posix_memalign((void **)&vsession, 64, sizeof(*vsession));
        CU_ASSERT(rc == 0);
        SPDK_CU_ASSERT_FATAL(vsession != NULL);
        vsession->started = true;
        vsession->vid = 0;
        vsession->mem = mem;
        TAILQ_INSERT_TAIL(&vdev->vsessions, vsession, tailq);
}

static void
stop_vdev(struct spdk_vhost_dev *vdev)
{
        struct spdk_vhost_session *vsession = TAILQ_FIRST(&vdev->vsessions);

        TAILQ_REMOVE(&vdev->vsessions, vsession, tailq);
        free(vsession->mem);
        free(vsession);
}

static void
cleanup_vdev(struct spdk_vhost_dev *vdev)
{
        if (!TAILQ_EMPTY(&vdev->vsessions)) {
                stop_vdev(vdev);
        }
        vhost_dev_unregister(vdev);
        free(vdev);
}

static void
desc_to_iov_test(void)
{
        struct spdk_vhost_dev *vdev;
        struct spdk_vhost_session *vsession;
        struct iovec iov[SPDK_VHOST_IOVS_MAX];
        uint16_t iov_index;
        struct vring_desc desc;
        int rc;

        spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

        rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
        SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
        start_vdev(vdev);

        vsession = TAILQ_FIRST(&vdev->vsessions);

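        /*
         * Reminder of the start_vdev() layout: a guest address in
         * region 0 translates to host address (addr + 0x1000000), so
         * the descriptor at 0x110000 below should produce an iov base
         * of 0x1110000.
         */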
        /* Test simple case where iov falls fully within a 2MB page. */
        desc.addr = 0x110000;
        desc.len = 0x1000;
        iov_index = 0;
        rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
        CU_ASSERT(rc == 0);
        CU_ASSERT(iov_index == 1);
        CU_ASSERT(iov[0].iov_base == (void *)0x1110000);
        CU_ASSERT(iov[0].iov_len == 0x1000);
        /*
         * Always memset the iov to ensure each test validates data written by its call
         * to the function under test.
         */
        memset(iov, 0, sizeof(iov));

        /* Same test, but ensure it respects the non-zero starting iov_index. */
        iov_index = SPDK_VHOST_IOVS_MAX - 1;
        rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
        CU_ASSERT(rc == 0);
        CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
        CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x1110000);
        CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x1000);
        memset(iov, 0, sizeof(iov));

        /* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
        iov_index = SPDK_VHOST_IOVS_MAX;
        rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
        CU_ASSERT(rc != 0);
        memset(iov, 0, sizeof(iov));

        /* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
        desc.addr = 0x1F0000;
        desc.len = 0x20000;
        iov_index = 0;
        rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
        CU_ASSERT(rc == 0);
        CU_ASSERT(iov_index == 1);
        CU_ASSERT(iov[0].iov_base == (void *)0x11F0000);
        CU_ASSERT(iov[0].iov_len == 0x20000);
        memset(iov, 0, sizeof(iov));

        /* Same test, but ensure it respects the non-zero starting iov_index. */
        iov_index = SPDK_VHOST_IOVS_MAX - 1;
        rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
        CU_ASSERT(rc == 0);
        CU_ASSERT(iov_index == SPDK_VHOST_IOVS_MAX);
        CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_base == (void *)0x11F0000);
        CU_ASSERT(iov[SPDK_VHOST_IOVS_MAX - 1].iov_len == 0x20000);
        memset(iov, 0, sizeof(iov));

        /* Test case where iov spans a vhost memory region. */
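        /*
         * 0x3F0000 + 0x20000 crosses the region boundary at guest
         * address 0x400000, so the descriptor should be split in two:
         * 0x10000 bytes at the end of region 0 (host 0x13F0000) and
         * 0x10000 bytes at the start of region 1 (host 0x2000000).
         */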
        desc.addr = 0x3F0000;
        desc.len = 0x20000;
        iov_index = 0;
        rc = vhost_vring_desc_to_iov(vsession, iov, &iov_index, &desc);
        CU_ASSERT(rc == 0);
        CU_ASSERT(iov_index == 2);
        CU_ASSERT(iov[0].iov_base == (void *)0x13F0000);
        CU_ASSERT(iov[0].iov_len == 0x10000);
        CU_ASSERT(iov[1].iov_base == (void *)0x2000000);
        CU_ASSERT(iov[1].iov_len == 0x10000);
        memset(iov, 0, sizeof(iov));

        cleanup_vdev(vdev);

        CU_ASSERT(true);
}

static void
create_controller_test(void)
{
        struct spdk_vhost_dev *vdev, *vdev2;
        int ret;
        char long_name[PATH_MAX];

        spdk_cpuset_set_cpu(&g_vhost_core_mask, 0, true);

        /* Create device with no name */
        ret = alloc_vdev(&vdev, NULL, "0x1");
        CU_ASSERT(ret != 0);

        /* Create device with a cpumask (0x2) outside g_vhost_core_mask (only core 0 is set) */
        ret = alloc_vdev(&vdev, "vdev_name_0", "0x2");
        CU_ASSERT(ret != 0);

        /* Create device with too long name and path */
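        /*
         * dev_dirname is the socket-directory prefix from the included
         * vhost.c.  Setting it here should make prefix + name overflow
         * the PATH_MAX-sized path buffer used during registration, so
         * registration is expected to fail.
         */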
        memset(long_name, 'x', sizeof(long_name));
        long_name[PATH_MAX - 1] = 0;
        snprintf(dev_dirname, sizeof(dev_dirname), "some_path/");
        ret = alloc_vdev(&vdev, long_name, "0x1");
        CU_ASSERT(ret != 0);
        dev_dirname[0] = 0;

        /* Create device when device name is already taken */
        ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
        SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);
        ret = alloc_vdev(&vdev2, "vdev_name_0", "0x1");
        CU_ASSERT(ret != 0);
        cleanup_vdev(vdev);
}

static void
session_find_by_vid_test(void)
{
        struct spdk_vhost_dev *vdev;
        struct spdk_vhost_session *vsession;
        struct spdk_vhost_session *tmp;
        int rc;

        rc = alloc_vdev(&vdev, "vdev_name_0", "0x1");
        SPDK_CU_ASSERT_FATAL(rc == 0 && vdev);
        start_vdev(vdev);

        vsession = TAILQ_FIRST(&vdev->vsessions);

        tmp = vhost_session_find_by_vid(vsession->vid);
        CU_ASSERT(tmp == vsession);

        /* Search for a session with a vid that does not exist */
        tmp = vhost_session_find_by_vid(vsession->vid + 0xFF);
        CU_ASSERT(tmp == NULL);

        cleanup_vdev(vdev);
}

static void
remove_controller_test(void)
{
        struct spdk_vhost_dev *vdev;
        int ret;

        ret = alloc_vdev(&vdev, "vdev_name_0", "0x1");
        SPDK_CU_ASSERT_FATAL(ret == 0 && vdev);

        /* Remove device when controller is in use */
        start_vdev(vdev);
        SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev->vsessions));
        ret = vhost_dev_unregister(vdev);
        CU_ASSERT(ret != 0);

        cleanup_vdev(vdev);
}

static void
vq_avail_ring_get_test(void)
{
        struct spdk_vhost_virtqueue vq;
        uint16_t avail_mem[34];
        uint16_t reqs[32];
        uint16_t reqs_len, ret, i;

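        /*
         * avail_mem backs a struct vring_avail: two uint16_t header
         * fields (flags and idx) followed by the ring itself, so 34
         * entries give a 32-slot available ring.
         */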
        /* Basic example: reap all outstanding requests. */
        vq.vring.avail = (struct vring_avail *)avail_mem;
        vq.vring.size = 32;
        vq.last_avail_idx = 24;
        vq.vring.avail->idx = 29;
        reqs_len = 6;

        for (i = 0; i < 32; i++) {
                vq.vring.avail->ring[i] = i;
        }

        ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
        CU_ASSERT(ret == 5);
        CU_ASSERT(vq.last_avail_idx == 29);
        for (i = 0; i < ret; i++) {
                CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 24]);
        }

        /* Basic example: reap only reqs_len of the available requests. */
        vq.last_avail_idx = 20;
        vq.vring.avail->idx = 29;
        reqs_len = 6;

        ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
        CU_ASSERT(ret == reqs_len);
        CU_ASSERT(vq.last_avail_idx == 26);
        for (i = 0; i < ret; i++) {
                CU_ASSERT(reqs[i] == vq.vring.avail->ring[i + 20]);
        }

        /* Invalid example: avail->idx is more than vring.size entries ahead. */
        vq.last_avail_idx = 20;
        vq.vring.avail->idx = 156;
        reqs_len = 6;

        ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
        CU_ASSERT(ret == 0);

        /* Test overflow in the avail->idx variable. */
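        /*
         * Both indices are free-running uint16_t counters, so
         * (uint16_t)(4 - 65535) == 5 entries are pending; their ring
         * slots are taken modulo the ring size: 65535 % 32 == 31,
         * then 0, 1, 2, 3.
         */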
        vq.last_avail_idx = 65535;
        vq.vring.avail->idx = 4;
        reqs_len = 6;
        ret = vhost_vq_avail_ring_get(&vq, reqs, reqs_len);
        CU_ASSERT(ret == 5);
        CU_ASSERT(vq.last_avail_idx == 4);
        CU_ASSERT(reqs[0] == vq.vring.avail->ring[31]);
        for (i = 1; i < ret; i++) {
                CU_ASSERT(reqs[i] == vq.vring.avail->ring[i - 1]);
        }
}

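/*
 * The helpers below emulate the guest-driver side of a virtio 1.1 packed
 * ring.  The driver marks a descriptor available by setting its AVAIL
 * flag equal to (and its USED flag opposite to) the driver's current
 * wrap phase; a descriptor is completed once the device has set USED to
 * match the driver's used phase.  Each index wraps modulo vring.size,
 * flipping the corresponding phase bit.
 */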
static bool
vq_desc_guest_is_used(struct spdk_vhost_virtqueue *vq, int16_t guest_last_used_idx,
                      int16_t guest_used_phase)
{
        return (!!(vq->vring.desc_packed[guest_last_used_idx].flags & VRING_DESC_F_USED) ==
                !!guest_used_phase);
}

static void
vq_desc_guest_set_avail(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_avail_idx,
                        int16_t *guest_avail_phase)
{
        if (*guest_avail_phase) {
                vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_AVAIL;
                vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_USED;
        } else {
                vq->vring.desc_packed[*guest_last_avail_idx].flags &= ~VRING_DESC_F_AVAIL;
                vq->vring.desc_packed[*guest_last_avail_idx].flags |= VRING_DESC_F_USED;
        }

        if (++(*guest_last_avail_idx) >= vq->vring.size) {
                *guest_last_avail_idx -= vq->vring.size;
                *guest_avail_phase = !(*guest_avail_phase);
        }
}

static int16_t
vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue *vq, int16_t *guest_last_used_idx,
                                    int16_t *guest_used_phase)
{
        int16_t buffer_id = -1;

        if (vq_desc_guest_is_used(vq, *guest_last_used_idx, *guest_used_phase)) {
                buffer_id = vq->vring.desc_packed[*guest_last_used_idx].id;
                if (++(*guest_last_used_idx) >= vq->vring.size) {
                        *guest_last_used_idx -= vq->vring.size;
                        *guest_used_phase = !(*guest_used_phase);
                }

                return buffer_id;
        }

        return -1;
}

static void
vq_packed_ring_test(void)
{
        struct spdk_vhost_session vs = {};
        struct spdk_vhost_virtqueue vq = {};
        struct vring_packed_desc descs[4];
        uint16_t guest_last_avail_idx = 0, guest_last_used_idx = 0;
        uint16_t guest_avail_phase = 1, guest_used_phase = 1;
        int i;
        int16_t chain_num;

        vq.vring.desc_packed = descs;
        vq.vring.size = 4;

        /* avail and used wrap counters are initialized to 1 */
        vq.packed.avail_phase = 1;
        vq.packed.used_phase = 1;
        vq.packed.packed_ring = true;
        memset(descs, 0, sizeof(descs));

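        /*
         * descs was zeroed while avail_phase is 1, so no descriptor's
         * AVAIL flag matches the host's phase yet and the ring must
         * report nothing available.
         */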
        CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == false);

        /* Guest sends requests. */
        for (i = 0; i < vq.vring.size; i++) {
                descs[guest_last_avail_idx].id = i;
                /* Set the desc available */
                vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
        }
        CU_ASSERT(guest_last_avail_idx == 0);
        CU_ASSERT(guest_avail_phase == 0);

        /* Host handles the available descs. */
        CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
        i = 0;
        while (vhost_vq_packed_ring_is_avail(&vq)) {
                CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i++);
                CU_ASSERT(chain_num == 1);
        }

        /* Host completes them out of order: 1, 0, 2. */
        vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
        vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
        vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);

        /* Host has fetched all four available requests but completed only three of them. */
        CU_ASSERT(vq.last_avail_idx == 0);
        CU_ASSERT(vq.packed.avail_phase == 0);
        CU_ASSERT(vq.last_used_idx == 3);
        CU_ASSERT(vq.packed.used_phase == 1);

        /* Guest handles the completed requests. */
        CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
        CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
        CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);
        CU_ASSERT(guest_last_used_idx == 3);
        CU_ASSERT(guest_used_phase == 1);

        /* Three descs are free again, so the guest can send three more requests. */
        for (i = 0; i < 3; i++) {
                descs[guest_last_avail_idx].id = 2 - i;
                /* Set the desc available */
                vq_desc_guest_set_avail(&vq, &guest_last_avail_idx, &guest_avail_phase);
        }

        /* Host handles the available descs. */
        CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq) == true);
        i = 2;
        while (vhost_vq_packed_ring_is_avail(&vq)) {
                CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq, vq.last_avail_idx, &chain_num) == i--);
                CU_ASSERT(chain_num == 1);
        }

        /* The host now holds four requests: the three new ones plus the one left from before. */
        CU_ASSERT(vq.last_avail_idx == 3);
        /* The available wrap counter flipped when last_avail_idx wrapped past the ring end. */
        CU_ASSERT(vq.packed.avail_phase == 0);

        /* Host completes all of the requests. */
        vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 1, 1);
        vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 0, 1);
        vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 3, 1);
        vhost_vq_packed_ring_enqueue(&vs, &vq, 1, 2, 1);

        CU_ASSERT(vq.last_used_idx == vq.last_avail_idx);
        CU_ASSERT(vq.packed.used_phase == vq.packed.avail_phase);

        /* Guest handles the completed requests. */
        CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 1);
        CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 0);
        CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 3);
        CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq, &guest_last_used_idx, &guest_used_phase) == 2);

        CU_ASSERT(guest_last_avail_idx == guest_last_used_idx);
        CU_ASSERT(guest_avail_phase == guest_used_phase);
}

int
main(int argc, char **argv)
{
        CU_pSuite suite = NULL;
        unsigned int num_failures;

        CU_set_error_action(CUEA_ABORT);
        CU_initialize_registry();

        suite = CU_add_suite("vhost_suite", test_setup, NULL);

        CU_ADD_TEST(suite, desc_to_iov_test);
        CU_ADD_TEST(suite, create_controller_test);
        CU_ADD_TEST(suite, session_find_by_vid_test);
        CU_ADD_TEST(suite, remove_controller_test);
        CU_ADD_TEST(suite, vq_avail_ring_get_test);
        CU_ADD_TEST(suite, vq_packed_ring_test);

        CU_basic_set_mode(CU_BRM_VERBOSE);
        CU_basic_run_tests();
        num_failures = CU_get_number_of_failures();
        CU_cleanup_registry();

        return num_failures;
}