/*-
 *   BSD LICENSE
 *
 *   Copyright (c) Intel Corporation.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "spdk/stdinc.h"

#include "CUnit/Basic.h"
#include "spdk_cunit.h"
#include "spdk/thread.h"
#include "spdk_internal/mock.h"

#include "common/lib/test_env.c"
#include "unit/lib/json_mock.c"

/* Compile the unit under test directly into this translation unit so its
 * static internals are reachable by the tests below.
 */
#include "vhost/vhost.c"
45 DEFINE_STUB(rte_vhost_set_vring_base
, int, (int vid
, uint16_t queue_id
,
46 uint16_t last_avail_idx
, uint16_t last_used_idx
), 0);
47 DEFINE_STUB(rte_vhost_get_vring_base
, int, (int vid
, uint16_t queue_id
,
48 uint16_t *last_avail_idx
, uint16_t *last_used_idx
), 0);
49 DEFINE_STUB_V(vhost_session_install_rte_compat_hooks
,
50 (struct spdk_vhost_session
*vsession
));
51 DEFINE_STUB(vhost_register_unix_socket
, int, (const char *path
, const char *name
,
52 uint64_t virtio_features
, uint64_t disabled_features
, uint64_t protocol_features
), 0);
53 DEFINE_STUB(vhost_driver_unregister
, int, (const char *path
), 0);
54 DEFINE_STUB(spdk_mem_register
, int, (void *vaddr
, size_t len
), 0);
55 DEFINE_STUB(spdk_mem_unregister
, int, (void *vaddr
, size_t len
), 0);
56 DEFINE_STUB(rte_vhost_vring_call
, int, (int vid
, uint16_t vring_idx
), 0);
57 DEFINE_STUB_V(rte_vhost_log_used_vring
, (int vid
, uint16_t vring_idx
,
58 uint64_t offset
, uint64_t len
));
60 DEFINE_STUB(rte_vhost_get_mem_table
, int, (int vid
, struct rte_vhost_memory
**mem
), 0);
61 DEFINE_STUB(rte_vhost_get_negotiated_features
, int, (int vid
, uint64_t *features
), 0);
62 DEFINE_STUB(rte_vhost_get_vhost_vring
, int,
63 (int vid
, uint16_t vring_idx
, struct rte_vhost_vring
*vring
), 0);
64 DEFINE_STUB(rte_vhost_enable_guest_notification
, int,
65 (int vid
, uint16_t queue_id
, int enable
), 0);
66 DEFINE_STUB(rte_vhost_get_ifname
, int, (int vid
, char *buf
, size_t len
), 0);
67 DEFINE_STUB(rte_vhost_driver_start
, int, (const char *name
), 0);
68 DEFINE_STUB(rte_vhost_driver_callback_register
, int,
69 (const char *path
, struct vhost_device_ops
const *const ops
), 0);
70 DEFINE_STUB(rte_vhost_driver_disable_features
, int, (const char *path
, uint64_t features
), 0);
71 DEFINE_STUB(rte_vhost_driver_set_features
, int, (const char *path
, uint64_t features
), 0);
72 DEFINE_STUB(rte_vhost_driver_register
, int, (const char *path
, uint64_t flags
), 0);
73 DEFINE_STUB(vhost_nvme_admin_passthrough
, int, (int vid
, void *cmd
, void *cqe
, void *buf
), 0);
74 DEFINE_STUB(vhost_nvme_set_cq_call
, int, (int vid
, uint16_t qid
, int fd
), 0);
75 DEFINE_STUB(vhost_nvme_set_bar_mr
, int, (int vid
, void *bar
, uint64_t bar_size
), 0);
76 DEFINE_STUB(vhost_nvme_get_cap
, int, (int vid
, uint64_t *cap
), 0);
/*
 * Test override of spdk_call_unaffinitized(): run the callback inline on the
 * current thread instead of spawning an unaffinitized pthread.
 *
 * NOTE(review): the return type and body were lost in extraction; restored as
 * the straight pass-through used by the upstream SPDK unit test — verify.
 */
void *
spdk_call_unaffinitized(void *cb(void *arg), void *arg)
{
	return cb(arg);
}
84 static struct spdk_vhost_dev_backend g_vdev_backend
;
93 alloc_vdev(struct spdk_vhost_dev
**vdev_p
, const char *name
, const char *cpumask
)
95 struct spdk_vhost_dev
*vdev
= NULL
;
98 /* spdk_vhost_dev must be allocated on a cache line boundary. */
99 rc
= posix_memalign((void **)&vdev
, 64, sizeof(*vdev
));
101 SPDK_CU_ASSERT_FATAL(vdev
!= NULL
);
102 memset(vdev
, 0, sizeof(*vdev
));
103 rc
= vhost_dev_register(vdev
, name
, cpumask
, &g_vdev_backend
);
115 start_vdev(struct spdk_vhost_dev
*vdev
)
117 struct rte_vhost_memory
*mem
;
118 struct spdk_vhost_session
*vsession
= NULL
;
121 mem
= calloc(1, sizeof(*mem
) + 2 * sizeof(struct rte_vhost_mem_region
));
122 SPDK_CU_ASSERT_FATAL(mem
!= NULL
);
124 mem
->regions
[0].guest_phys_addr
= 0;
125 mem
->regions
[0].size
= 0x400000; /* 4 MB */
126 mem
->regions
[0].host_user_addr
= 0x1000000;
127 mem
->regions
[1].guest_phys_addr
= 0x400000;
128 mem
->regions
[1].size
= 0x400000; /* 4 MB */
129 mem
->regions
[1].host_user_addr
= 0x2000000;
131 assert(TAILQ_EMPTY(&vdev
->vsessions
));
132 /* spdk_vhost_dev must be allocated on a cache line boundary. */
133 rc
= posix_memalign((void **)&vsession
, 64, sizeof(*vsession
));
135 SPDK_CU_ASSERT_FATAL(vsession
!= NULL
);
136 vsession
->started
= true;
139 TAILQ_INSERT_TAIL(&vdev
->vsessions
, vsession
, tailq
);
143 stop_vdev(struct spdk_vhost_dev
*vdev
)
145 struct spdk_vhost_session
*vsession
= TAILQ_FIRST(&vdev
->vsessions
);
147 TAILQ_REMOVE(&vdev
->vsessions
, vsession
, tailq
);
153 cleanup_vdev(struct spdk_vhost_dev
*vdev
)
155 if (!TAILQ_EMPTY(&vdev
->vsessions
)) {
158 vhost_dev_unregister(vdev
);
163 desc_to_iov_test(void)
165 struct spdk_vhost_dev
*vdev
;
166 struct spdk_vhost_session
*vsession
;
167 struct iovec iov
[SPDK_VHOST_IOVS_MAX
];
169 struct vring_desc desc
;
172 spdk_cpuset_set_cpu(&g_vhost_core_mask
, 0, true);
174 rc
= alloc_vdev(&vdev
, "vdev_name_0", "0x1");
175 SPDK_CU_ASSERT_FATAL(rc
== 0 && vdev
);
178 vsession
= TAILQ_FIRST(&vdev
->vsessions
);
180 /* Test simple case where iov falls fully within a 2MB page. */
181 desc
.addr
= 0x110000;
184 rc
= vhost_vring_desc_to_iov(vsession
, iov
, &iov_index
, &desc
);
186 CU_ASSERT(iov_index
== 1);
187 CU_ASSERT(iov
[0].iov_base
== (void *)0x1110000);
188 CU_ASSERT(iov
[0].iov_len
== 0x1000);
190 * Always memset the iov to ensure each test validates data written by its call
191 * to the function under test.
193 memset(iov
, 0, sizeof(iov
));
195 /* Same test, but ensure it respects the non-zero starting iov_index. */
196 iov_index
= SPDK_VHOST_IOVS_MAX
- 1;
197 rc
= vhost_vring_desc_to_iov(vsession
, iov
, &iov_index
, &desc
);
199 CU_ASSERT(iov_index
== SPDK_VHOST_IOVS_MAX
);
200 CU_ASSERT(iov
[SPDK_VHOST_IOVS_MAX
- 1].iov_base
== (void *)0x1110000);
201 CU_ASSERT(iov
[SPDK_VHOST_IOVS_MAX
- 1].iov_len
== 0x1000);
202 memset(iov
, 0, sizeof(iov
));
204 /* Test for failure if iov_index already equals SPDK_VHOST_IOVS_MAX. */
205 iov_index
= SPDK_VHOST_IOVS_MAX
;
206 rc
= vhost_vring_desc_to_iov(vsession
, iov
, &iov_index
, &desc
);
208 memset(iov
, 0, sizeof(iov
));
210 /* Test case where iov spans a 2MB boundary, but does not span a vhost memory region. */
211 desc
.addr
= 0x1F0000;
214 rc
= vhost_vring_desc_to_iov(vsession
, iov
, &iov_index
, &desc
);
216 CU_ASSERT(iov_index
== 1);
217 CU_ASSERT(iov
[0].iov_base
== (void *)0x11F0000);
218 CU_ASSERT(iov
[0].iov_len
== 0x20000);
219 memset(iov
, 0, sizeof(iov
));
221 /* Same test, but ensure it respects the non-zero starting iov_index. */
222 iov_index
= SPDK_VHOST_IOVS_MAX
- 1;
223 rc
= vhost_vring_desc_to_iov(vsession
, iov
, &iov_index
, &desc
);
225 CU_ASSERT(iov_index
== SPDK_VHOST_IOVS_MAX
);
226 CU_ASSERT(iov
[SPDK_VHOST_IOVS_MAX
- 1].iov_base
== (void *)0x11F0000);
227 CU_ASSERT(iov
[SPDK_VHOST_IOVS_MAX
- 1].iov_len
== 0x20000);
228 memset(iov
, 0, sizeof(iov
));
230 /* Test case where iov spans a vhost memory region. */
231 desc
.addr
= 0x3F0000;
234 rc
= vhost_vring_desc_to_iov(vsession
, iov
, &iov_index
, &desc
);
236 CU_ASSERT(iov_index
== 2);
237 CU_ASSERT(iov
[0].iov_base
== (void *)0x13F0000);
238 CU_ASSERT(iov
[0].iov_len
== 0x10000);
239 CU_ASSERT(iov
[1].iov_base
== (void *)0x2000000);
240 CU_ASSERT(iov
[1].iov_len
== 0x10000);
241 memset(iov
, 0, sizeof(iov
));
249 create_controller_test(void)
251 struct spdk_vhost_dev
*vdev
, *vdev2
;
253 char long_name
[PATH_MAX
];
255 spdk_cpuset_set_cpu(&g_vhost_core_mask
, 0, true);
257 /* Create device with no name */
258 ret
= alloc_vdev(&vdev
, NULL
, "0x1");
261 /* Create device with incorrect cpumask */
262 ret
= alloc_vdev(&vdev
, "vdev_name_0", "0x2");
265 /* Create device with too long name and path */
266 memset(long_name
, 'x', sizeof(long_name
));
267 long_name
[PATH_MAX
- 1] = 0;
268 snprintf(dev_dirname
, sizeof(dev_dirname
), "some_path/");
269 ret
= alloc_vdev(&vdev
, long_name
, "0x1");
273 /* Create device when device name is already taken */
274 ret
= alloc_vdev(&vdev
, "vdev_name_0", "0x1");
275 SPDK_CU_ASSERT_FATAL(ret
== 0 && vdev
);
276 ret
= alloc_vdev(&vdev2
, "vdev_name_0", "0x1");
282 session_find_by_vid_test(void)
284 struct spdk_vhost_dev
*vdev
;
285 struct spdk_vhost_session
*vsession
;
286 struct spdk_vhost_session
*tmp
;
289 rc
= alloc_vdev(&vdev
, "vdev_name_0", "0x1");
290 SPDK_CU_ASSERT_FATAL(rc
== 0 && vdev
);
293 vsession
= TAILQ_FIRST(&vdev
->vsessions
);
295 tmp
= vhost_session_find_by_vid(vsession
->vid
);
296 CU_ASSERT(tmp
== vsession
);
298 /* Search for a device with incorrect vid */
299 tmp
= vhost_session_find_by_vid(vsession
->vid
+ 0xFF);
300 CU_ASSERT(tmp
== NULL
);
306 remove_controller_test(void)
308 struct spdk_vhost_dev
*vdev
;
311 ret
= alloc_vdev(&vdev
, "vdev_name_0", "0x1");
312 SPDK_CU_ASSERT_FATAL(ret
== 0 && vdev
);
314 /* Remove device when controller is in use */
316 SPDK_CU_ASSERT_FATAL(!TAILQ_EMPTY(&vdev
->vsessions
));
317 ret
= vhost_dev_unregister(vdev
);
324 vq_avail_ring_get_test(void)
326 struct spdk_vhost_virtqueue vq
;
327 uint16_t avail_mem
[34];
329 uint16_t reqs_len
, ret
, i
;
331 /* Basic example reap all requests */
332 vq
.vring
.avail
= (struct vring_avail
*)avail_mem
;
334 vq
.last_avail_idx
= 24;
335 vq
.vring
.avail
->idx
= 29;
338 for (i
= 0; i
< 32; i
++) {
339 vq
.vring
.avail
->ring
[i
] = i
;
342 ret
= vhost_vq_avail_ring_get(&vq
, reqs
, reqs_len
);
344 CU_ASSERT(vq
.last_avail_idx
== 29);
345 for (i
= 0; i
< ret
; i
++) {
346 CU_ASSERT(reqs
[i
] == vq
.vring
.avail
->ring
[i
+ 24]);
349 /* Basic example reap only some requests */
350 vq
.last_avail_idx
= 20;
351 vq
.vring
.avail
->idx
= 29;
354 ret
= vhost_vq_avail_ring_get(&vq
, reqs
, reqs_len
);
355 CU_ASSERT(ret
== reqs_len
);
356 CU_ASSERT(vq
.last_avail_idx
== 26);
357 for (i
= 0; i
< ret
; i
++) {
358 CU_ASSERT(reqs
[i
] == vq
.vring
.avail
->ring
[i
+ 20]);
361 /* Test invalid example */
362 vq
.last_avail_idx
= 20;
363 vq
.vring
.avail
->idx
= 156;
366 ret
= vhost_vq_avail_ring_get(&vq
, reqs
, reqs_len
);
369 /* Test overflow in the avail->idx variable. */
370 vq
.last_avail_idx
= 65535;
371 vq
.vring
.avail
->idx
= 4;
373 ret
= vhost_vq_avail_ring_get(&vq
, reqs
, reqs_len
);
375 CU_ASSERT(vq
.last_avail_idx
== 4);
376 CU_ASSERT(reqs
[0] == vq
.vring
.avail
->ring
[31]);
377 for (i
= 1; i
< ret
; i
++) {
378 CU_ASSERT(reqs
[i
] == vq
.vring
.avail
->ring
[i
- 1]);
383 vq_desc_guest_is_used(struct spdk_vhost_virtqueue
*vq
, int16_t guest_last_used_idx
,
384 int16_t guest_used_phase
)
386 return (!!(vq
->vring
.desc_packed
[guest_last_used_idx
].flags
& VRING_DESC_F_USED
) ==
391 vq_desc_guest_set_avail(struct spdk_vhost_virtqueue
*vq
, int16_t *guest_last_avail_idx
,
392 int16_t *guest_avail_phase
)
394 if (*guest_avail_phase
) {
395 vq
->vring
.desc_packed
[*guest_last_avail_idx
].flags
|= VRING_DESC_F_AVAIL
;
396 vq
->vring
.desc_packed
[*guest_last_avail_idx
].flags
&= ~VRING_DESC_F_USED
;
398 vq
->vring
.desc_packed
[*guest_last_avail_idx
].flags
&= ~VRING_DESC_F_AVAIL
;
399 vq
->vring
.desc_packed
[*guest_last_avail_idx
].flags
|= VRING_DESC_F_USED
;
402 if (++(*guest_last_avail_idx
) >= vq
->vring
.size
) {
403 *guest_last_avail_idx
-= vq
->vring
.size
;
404 *guest_avail_phase
= !(*guest_avail_phase
);
409 vq_desc_guest_handle_completed_desc(struct spdk_vhost_virtqueue
*vq
, int16_t *guest_last_used_idx
,
410 int16_t *guest_used_phase
)
412 int16_t buffer_id
= -1;
414 if (vq_desc_guest_is_used(vq
, *guest_last_used_idx
, *guest_used_phase
)) {
415 buffer_id
= vq
->vring
.desc_packed
[*guest_last_used_idx
].id
;
416 if (++(*guest_last_used_idx
) >= vq
->vring
.size
) {
417 *guest_last_used_idx
-= vq
->vring
.size
;
418 *guest_used_phase
= !(*guest_used_phase
);
428 vq_packed_ring_test(void)
430 struct spdk_vhost_session vs
= {};
431 struct spdk_vhost_virtqueue vq
= {};
432 struct vring_packed_desc descs
[4];
433 uint16_t guest_last_avail_idx
= 0, guest_last_used_idx
= 0;
434 uint16_t guest_avail_phase
= 1, guest_used_phase
= 1;
438 vq
.vring
.desc_packed
= descs
;
441 /* avail and used wrap counter are initialized to 1 */
442 vq
.packed
.avail_phase
= 1;
443 vq
.packed
.used_phase
= 1;
444 vq
.packed
.packed_ring
= true;
445 memset(descs
, 0, sizeof(descs
));
447 CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq
) == false);
449 /* Guest send requests */
450 for (i
= 0; i
< vq
.vring
.size
; i
++) {
451 descs
[guest_last_avail_idx
].id
= i
;
452 /* Set the desc available */
453 vq_desc_guest_set_avail(&vq
, &guest_last_avail_idx
, &guest_avail_phase
);
455 CU_ASSERT(guest_last_avail_idx
== 0);
456 CU_ASSERT(guest_avail_phase
== 0);
458 /* Host handle available descs */
459 CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq
) == true);
461 while (vhost_vq_packed_ring_is_avail(&vq
)) {
462 CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq
, vq
.last_avail_idx
, &chain_num
) == i
++);
463 CU_ASSERT(chain_num
== 1);
466 /* Host complete them out of order: 1, 0, 2. */
467 vhost_vq_packed_ring_enqueue(&vs
, &vq
, 1, 1, 1);
468 vhost_vq_packed_ring_enqueue(&vs
, &vq
, 1, 0, 1);
469 vhost_vq_packed_ring_enqueue(&vs
, &vq
, 1, 2, 1);
471 /* Host has got all the available request but only complete three requests */
472 CU_ASSERT(vq
.last_avail_idx
== 0);
473 CU_ASSERT(vq
.packed
.avail_phase
== 0);
474 CU_ASSERT(vq
.last_used_idx
== 3);
475 CU_ASSERT(vq
.packed
.used_phase
== 1);
477 /* Guest handle completed requests */
478 CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq
, &guest_last_used_idx
, &guest_used_phase
) == 1);
479 CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq
, &guest_last_used_idx
, &guest_used_phase
) == 0);
480 CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq
, &guest_last_used_idx
, &guest_used_phase
) == 2);
481 CU_ASSERT(guest_last_used_idx
== 3);
482 CU_ASSERT(guest_used_phase
== 1);
484 /* There are three descs available the guest can send three request again */
485 for (i
= 0; i
< 3; i
++) {
486 descs
[guest_last_avail_idx
].id
= 2 - i
;
487 /* Set the desc available */
488 vq_desc_guest_set_avail(&vq
, &guest_last_avail_idx
, &guest_avail_phase
);
491 /* Host handle available descs */
492 CU_ASSERT(vhost_vq_packed_ring_is_avail(&vq
) == true);
494 while (vhost_vq_packed_ring_is_avail(&vq
)) {
495 CU_ASSERT(vhost_vring_packed_desc_get_buffer_id(&vq
, vq
.last_avail_idx
, &chain_num
) == i
--);
496 CU_ASSERT(chain_num
== 1);
499 /* There are four requests in Host, the new three ones and left one */
500 CU_ASSERT(vq
.last_avail_idx
== 3);
501 /* Available wrap conter should overturn */
502 CU_ASSERT(vq
.packed
.avail_phase
== 0);
504 /* Host complete all the requests */
505 vhost_vq_packed_ring_enqueue(&vs
, &vq
, 1, 1, 1);
506 vhost_vq_packed_ring_enqueue(&vs
, &vq
, 1, 0, 1);
507 vhost_vq_packed_ring_enqueue(&vs
, &vq
, 1, 3, 1);
508 vhost_vq_packed_ring_enqueue(&vs
, &vq
, 1, 2, 1);
510 CU_ASSERT(vq
.last_used_idx
== vq
.last_avail_idx
);
511 CU_ASSERT(vq
.packed
.used_phase
== vq
.packed
.avail_phase
);
513 /* Guest handle completed requests */
514 CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq
, &guest_last_used_idx
, &guest_used_phase
) == 1);
515 CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq
, &guest_last_used_idx
, &guest_used_phase
) == 0);
516 CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq
, &guest_last_used_idx
, &guest_used_phase
) == 3);
517 CU_ASSERT(vq_desc_guest_handle_completed_desc(&vq
, &guest_last_used_idx
, &guest_used_phase
) == 2);
519 CU_ASSERT(guest_last_avail_idx
== guest_last_used_idx
);
520 CU_ASSERT(guest_avail_phase
== guest_used_phase
);
524 main(int argc
, char **argv
)
526 CU_pSuite suite
= NULL
;
527 unsigned int num_failures
;
529 CU_set_error_action(CUEA_ABORT
);
530 CU_initialize_registry();
532 suite
= CU_add_suite("vhost_suite", test_setup
, NULL
);
534 CU_ADD_TEST(suite
, desc_to_iov_test
);
535 CU_ADD_TEST(suite
, create_controller_test
);
536 CU_ADD_TEST(suite
, session_find_by_vid_test
);
537 CU_ADD_TEST(suite
, remove_controller_test
);
538 CU_ADD_TEST(suite
, vq_avail_ring_get_test
);
539 CU_ADD_TEST(suite
, vq_packed_ring_test
);
541 CU_basic_set_mode(CU_BRM_VERBOSE
);
542 CU_basic_run_tests();
543 num_failures
= CU_get_number_of_failures();
544 CU_cleanup_registry();