/*
 * QTest testcase for the vhost-user
 *
 * Copyright (c) 2014 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
11 #include "qemu/osdep.h"
13 #include "libqtest-single.h"
14 #include "qapi/error.h"
15 #include "qapi/qmp/qdict.h"
16 #include "qemu/config-file.h"
17 #include "qemu/option.h"
18 #include "qemu/range.h"
19 #include "qemu/sockets.h"
20 #include "chardev/char-fe.h"
21 #include "qemu/memfd.h"
22 #include "qemu/module.h"
23 #include "sysemu/sysemu.h"
24 #include "libqos/libqos.h"
25 #include "libqos/pci-pc.h"
26 #include "libqos/virtio-pci.h"
28 #include "libqos/malloc-pc.h"
29 #include "libqos/qgraph_internal.h"
30 #include "hw/virtio/virtio-net.h"
32 #include "standard-headers/linux/vhost_types.h"
33 #include "standard-headers/linux/virtio_ids.h"
34 #include "standard-headers/linux/virtio_net.h"
35 #include "standard-headers/linux/virtio_gpio.h"
/* Command-line fragments used to assemble the QEMU instance under test. */
#define QEMU_CMD_MEM    " -m %d -object memory-backend-file,id=mem,size=%dM," \
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_MEMFD  " -m %d -object memory-backend-memfd,id=mem,size=%dM," \
                        " -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=%s,path=%s%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=hs0,chardev=%s,vhostforce=on"

/* f_type reported by fstatfs/statfs for a hugetlbfs mount */
#define HUGETLBFS_MAGIC       0x958458f6

/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8
#define VHOST_MAX_VIRTQUEUES    0x100

#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VIRTIO_F_VERSION_1 32

#define VHOST_USER_PROTOCOL_F_MQ 0
#define VHOST_USER_PROTOCOL_F_LOG_SHMFD 1
#define VHOST_USER_PROTOCOL_F_CROSS_ENDIAN   6
#define VHOST_USER_PROTOCOL_F_CONFIG 9

#define VHOST_LOG_PAGE 0x1000
/* Request codes of the vhost-user protocol (mirrors hw/virtio/vhost-user.c). */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_MAX
} VhostUserRequest;
/* One guest memory region as announced by VHOST_USER_SET_MEM_TABLE. */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* GPA of the region start */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* HVA in the frontend process */
    uint64_t mmap_offset;       /* offset into the shared fd */
} VhostUserMemoryRegion;
98 typedef struct VhostUserMemory
{
101 VhostUserMemoryRegion regions
[VHOST_MEMORY_MAX_NREGIONS
];
/* Payload of VHOST_USER_SET_LOG_BASE: dirty-log shared-memory layout. */
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
109 typedef struct VhostUserMsg
{
110 VhostUserRequest request
;
112 #define VHOST_USER_VERSION_MASK (0x3)
113 #define VHOST_USER_REPLY_MASK (0x1<<2)
115 uint32_t size
; /* the following payload size */
117 #define VHOST_USER_VRING_IDX_MASK (0xff)
118 #define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
120 struct vhost_vring_state state
;
121 struct vhost_vring_addr addr
;
122 VhostUserMemory memory
;
125 } QEMU_PACKED VhostUserMsg
;
127 static VhostUserMsg m
__attribute__ ((unused
));
128 #define VHOST_USER_HDR_SIZE (sizeof(m.request) \
132 #define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)
134 /* The version of the protocol we support */
135 #define VHOST_USER_VERSION (0x1)
136 /*****************************************************************************/
140 TEST_FLAGS_DISCONNECT
,
150 typedef struct TestServer
{
157 int fds
[VHOST_MEMORY_MAX_NREGIONS
];
158 VhostUserMemory memory
;
159 GMainContext
*context
;
169 struct vhost_user_ops
*vu_ops
;
172 struct vhost_user_ops
{
175 void (*append_opts
)(TestServer
*s
, GString
*cmd_line
,
176 const char *chr_opts
);
178 /* VHOST-USER commands. */
179 uint64_t (*get_features
)(TestServer
*s
);
180 void (*set_features
)(TestServer
*s
, CharBackend
*chr
,
182 void (*get_protocol_features
)(TestServer
*s
,
183 CharBackend
*chr
, VhostUserMsg
*msg
);
186 static const char *init_hugepagefs(void);
187 static TestServer
*test_server_new(const gchar
*name
,
188 struct vhost_user_ops
*ops
);
189 static void test_server_free(TestServer
*server
);
190 static void test_server_listen(TestServer
*server
);
198 static void append_vhost_net_opts(TestServer
*s
, GString
*cmd_line
,
199 const char *chr_opts
)
201 g_string_append_printf(cmd_line
, QEMU_CMD_CHR QEMU_CMD_NETDEV
,
202 s
->chr_name
, s
->socket_path
,
203 chr_opts
, s
->chr_name
);
/*
 * For GPIO there are no other magic devices we need to add (like
 * block or netdev) so all we need to worry about is the vhost-user
 * chardev socket.
 */
211 static void append_vhost_gpio_opts(TestServer
*s
, GString
*cmd_line
,
212 const char *chr_opts
)
214 g_string_append_printf(cmd_line
, QEMU_CMD_CHR
,
215 s
->chr_name
, s
->socket_path
,
219 static void append_mem_opts(TestServer
*server
, GString
*cmd_line
,
220 int size
, enum test_memfd memfd
)
222 if (memfd
== TEST_MEMFD_AUTO
) {
223 memfd
= qemu_memfd_check(MFD_ALLOW_SEALING
) ? TEST_MEMFD_YES
227 if (memfd
== TEST_MEMFD_YES
) {
228 g_string_append_printf(cmd_line
, QEMU_CMD_MEMFD
, size
, size
);
230 const char *root
= init_hugepagefs() ? : server
->tmpfs
;
232 g_string_append_printf(cmd_line
, QEMU_CMD_MEM
, size
, size
, root
);
236 static bool wait_for_fds(TestServer
*s
)
242 g_mutex_lock(&s
->data_mutex
);
244 end_time
= g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND
;
245 while (!s
->fds_num
) {
246 if (!g_cond_wait_until(&s
->data_cond
, &s
->data_mutex
, end_time
)) {
247 /* timeout has passed */
248 g_assert(s
->fds_num
);
253 /* check for sanity */
254 g_assert_cmpint(s
->fds_num
, >, 0);
255 g_assert_cmpint(s
->fds_num
, ==, s
->memory
.nregions
);
257 g_mutex_unlock(&s
->data_mutex
);
260 for (i
= 0; i
< s
->memory
.nregions
; ++i
) {
261 VhostUserMemoryRegion
*reg
= &s
->memory
.regions
[i
];
262 if (reg
->guest_phys_addr
== 0) {
268 g_test_skip("No memory at address 0x0");
273 static void read_guest_mem_server(QTestState
*qts
, TestServer
*s
)
279 g_mutex_lock(&s
->data_mutex
);
281 /* iterate all regions */
282 for (i
= 0; i
< s
->fds_num
; i
++) {
284 /* We'll check only the region starting at 0x0 */
285 if (s
->memory
.regions
[i
].guest_phys_addr
!= 0x0) {
289 g_assert_cmpint(s
->memory
.regions
[i
].memory_size
, >, 1024);
291 size
= s
->memory
.regions
[i
].memory_size
+
292 s
->memory
.regions
[i
].mmap_offset
;
294 guest_mem
= mmap(0, size
, PROT_READ
| PROT_WRITE
,
295 MAP_SHARED
, s
->fds
[i
], 0);
297 g_assert(guest_mem
!= MAP_FAILED
);
298 guest_mem
+= (s
->memory
.regions
[i
].mmap_offset
/ sizeof(*guest_mem
));
300 for (j
= 0; j
< 1024; j
++) {
301 uint32_t a
= qtest_readb(qts
, s
->memory
.regions
[i
].guest_phys_addr
+ j
);
302 uint32_t b
= guest_mem
[j
];
304 g_assert_cmpint(a
, ==, b
);
307 munmap(guest_mem
, s
->memory
.regions
[i
].memory_size
);
310 g_mutex_unlock(&s
->data_mutex
);
313 static void *thread_function(void *data
)
315 GMainLoop
*loop
= data
;
316 g_main_loop_run(loop
);
320 static int chr_can_read(void *opaque
)
322 return VHOST_USER_HDR_SIZE
;
325 static void chr_read(void *opaque
, const uint8_t *buf
, int size
)
327 g_autoptr(GError
) err
= NULL
;
328 TestServer
*s
= opaque
;
329 CharBackend
*chr
= &s
->chr
;
331 uint8_t *p
= (uint8_t *) &msg
;
335 qemu_chr_fe_disconnect(chr
);
336 /* now switch to non-failure */
337 s
->test_fail
= false;
340 if (size
!= VHOST_USER_HDR_SIZE
) {
341 qos_printf("%s: Wrong message size received %d\n", __func__
, size
);
345 g_mutex_lock(&s
->data_mutex
);
346 memcpy(p
, buf
, VHOST_USER_HDR_SIZE
);
349 p
+= VHOST_USER_HDR_SIZE
;
350 size
= qemu_chr_fe_read_all(chr
, p
, msg
.size
);
351 if (size
!= msg
.size
) {
352 qos_printf("%s: Wrong message size received %d != %d\n",
353 __func__
, size
, msg
.size
);
358 switch (msg
.request
) {
359 case VHOST_USER_GET_FEATURES
:
360 /* Mandatory for tests to define get_features */
361 g_assert(s
->vu_ops
->get_features
);
363 /* send back features to qemu */
364 msg
.flags
|= VHOST_USER_REPLY_MASK
;
365 msg
.size
= sizeof(m
.payload
.u64
);
367 if (s
->test_flags
>= TEST_FLAGS_BAD
) {
369 s
->test_flags
= TEST_FLAGS_END
;
371 msg
.payload
.u64
= s
->vu_ops
->get_features(s
);
374 qemu_chr_fe_write_all(chr
, (uint8_t *) &msg
,
375 VHOST_USER_HDR_SIZE
+ msg
.size
);
378 case VHOST_USER_SET_FEATURES
:
379 if (s
->vu_ops
->set_features
) {
380 s
->vu_ops
->set_features(s
, chr
, &msg
);
384 case VHOST_USER_SET_OWNER
:
386 * We don't need to do anything here, the remote is just
387 * letting us know it is in charge. Just log it.
389 qos_printf("set_owner: start of session\n");
392 case VHOST_USER_GET_PROTOCOL_FEATURES
:
393 if (s
->vu_ops
->get_protocol_features
) {
394 s
->vu_ops
->get_protocol_features(s
, chr
, &msg
);
398 case VHOST_USER_GET_CONFIG
:
400 * Treat GET_CONFIG as a NOP and just reply and let the guest
401 * consider we have updated its memory. Tests currently don't
402 * require working configs.
404 msg
.flags
|= VHOST_USER_REPLY_MASK
;
405 p
= (uint8_t *) &msg
;
406 qemu_chr_fe_write_all(chr
, p
, VHOST_USER_HDR_SIZE
+ msg
.size
);
409 case VHOST_USER_SET_PROTOCOL_FEATURES
:
411 * We did set VHOST_USER_F_PROTOCOL_FEATURES so its valid for
412 * the remote end to send this. There is no handshake reply so
413 * just log the details for debugging.
415 qos_printf("set_protocol_features: 0x%"PRIx64
"\n", msg
.payload
.u64
);
419 * A real vhost-user backend would actually set the size and
420 * address of the vrings but we can simply report them.
422 case VHOST_USER_SET_VRING_NUM
:
423 qos_printf("set_vring_num: %d/%d\n",
424 msg
.payload
.state
.index
, msg
.payload
.state
.num
);
426 case VHOST_USER_SET_VRING_ADDR
:
427 qos_printf("set_vring_addr: 0x%"PRIx64
"/0x%"PRIx64
"/0x%"PRIx64
"\n",
428 msg
.payload
.addr
.avail_user_addr
,
429 msg
.payload
.addr
.desc_user_addr
,
430 msg
.payload
.addr
.used_user_addr
);
433 case VHOST_USER_GET_VRING_BASE
:
434 /* send back vring base to qemu */
435 msg
.flags
|= VHOST_USER_REPLY_MASK
;
436 msg
.size
= sizeof(m
.payload
.state
);
437 msg
.payload
.state
.num
= 0;
438 p
= (uint8_t *) &msg
;
439 qemu_chr_fe_write_all(chr
, p
, VHOST_USER_HDR_SIZE
+ msg
.size
);
441 assert(msg
.payload
.state
.index
< s
->queues
* 2);
442 s
->rings
&= ~(0x1ULL
<< msg
.payload
.state
.index
);
443 g_cond_broadcast(&s
->data_cond
);
446 case VHOST_USER_SET_MEM_TABLE
:
447 /* received the mem table */
448 memcpy(&s
->memory
, &msg
.payload
.memory
, sizeof(msg
.payload
.memory
));
449 s
->fds_num
= qemu_chr_fe_get_msgfds(chr
, s
->fds
,
450 G_N_ELEMENTS(s
->fds
));
452 /* signal the test that it can continue */
453 g_cond_broadcast(&s
->data_cond
);
456 case VHOST_USER_SET_VRING_KICK
:
457 case VHOST_USER_SET_VRING_CALL
:
459 qemu_chr_fe_get_msgfds(chr
, &fd
, 1);
461 * This is a non-blocking eventfd.
462 * The receive function forces it to be blocking,
463 * so revert it back to non-blocking.
465 g_unix_set_fd_nonblocking(fd
, true, &err
);
466 g_assert_no_error(err
);
469 case VHOST_USER_SET_LOG_BASE
:
470 if (s
->log_fd
!= -1) {
474 qemu_chr_fe_get_msgfds(chr
, &s
->log_fd
, 1);
475 msg
.flags
|= VHOST_USER_REPLY_MASK
;
477 p
= (uint8_t *) &msg
;
478 qemu_chr_fe_write_all(chr
, p
, VHOST_USER_HDR_SIZE
);
480 g_cond_broadcast(&s
->data_cond
);
483 case VHOST_USER_SET_VRING_BASE
:
484 assert(msg
.payload
.state
.index
< s
->queues
* 2);
485 s
->rings
|= 0x1ULL
<< msg
.payload
.state
.index
;
486 g_cond_broadcast(&s
->data_cond
);
489 case VHOST_USER_GET_QUEUE_NUM
:
490 msg
.flags
|= VHOST_USER_REPLY_MASK
;
491 msg
.size
= sizeof(m
.payload
.u64
);
492 msg
.payload
.u64
= s
->queues
;
493 p
= (uint8_t *) &msg
;
494 qemu_chr_fe_write_all(chr
, p
, VHOST_USER_HDR_SIZE
+ msg
.size
);
497 case VHOST_USER_SET_VRING_ENABLE
:
499 * Another case we ignore as we don't need to respond. With a
500 * fully functioning vhost-user we would enable/disable the
503 qos_printf("set_vring(%d)=%s\n", msg
.payload
.state
.index
,
504 msg
.payload
.state
.num
? "enabled" : "disabled");
508 qos_printf("vhost-user: un-handled message: %d\n", msg
.request
);
513 g_mutex_unlock(&s
->data_mutex
);
/*
 * Resolve and validate $QTEST_HUGETLBFS_PATH once; returns the cached
 * hugetlbfs mount point, or NULL when unset or unusable.
 */
static const char *init_hugepagefs(void)
{
#ifdef CONFIG_LINUX
    static const char *hugepagefs;
    const char *path = getenv("QTEST_HUGETLBFS_PATH");
    struct statfs fs;
    int ret;

    if (hugepagefs) {
        return hugepagefs;
    }
    if (!path) {
        return NULL;
    }

    if (access(path, R_OK | W_OK | X_OK)) {
        qos_printf("access on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);   /* retry if interrupted */

    if (ret != 0) {
        qos_printf("statfs on path (%s): %s", path, strerror(errno));
        g_test_fail();
        return NULL;
    }

    if (fs.f_type != HUGETLBFS_MAGIC) {
        qos_printf("Warning: path not on HugeTLBFS: %s", path);
        g_test_fail();
        return NULL;
    }

    hugepagefs = path;
    return hugepagefs;
#else
    return NULL;
#endif
}
560 static TestServer
*test_server_new(const gchar
*name
,
561 struct vhost_user_ops
*ops
)
563 TestServer
*server
= g_new0(TestServer
, 1);
564 g_autofree
const char *tmpfs
= NULL
;
567 server
->context
= g_main_context_new();
568 server
->loop
= g_main_loop_new(server
->context
, FALSE
);
570 /* run the main loop thread so the chardev may operate */
571 server
->thread
= g_thread_new(NULL
, thread_function
, server
->loop
);
573 tmpfs
= g_dir_make_tmp("vhost-test-XXXXXX", &err
);
575 g_test_message("Can't create temporary directory in %s: %s",
576 g_get_tmp_dir(), err
->message
);
581 server
->tmpfs
= g_strdup(tmpfs
);
582 server
->socket_path
= g_strdup_printf("%s/%s.sock", tmpfs
, name
);
583 server
->mig_path
= g_strdup_printf("%s/%s.mig", tmpfs
, name
);
584 server
->chr_name
= g_strdup_printf("chr-%s", name
);
586 g_mutex_init(&server
->data_mutex
);
587 g_cond_init(&server
->data_cond
);
591 server
->vu_ops
= ops
;
596 static void chr_event(void *opaque
, QEMUChrEvent event
)
598 TestServer
*s
= opaque
;
600 if (s
->test_flags
== TEST_FLAGS_END
&&
601 event
== CHR_EVENT_CLOSED
) {
602 s
->test_flags
= TEST_FLAGS_OK
;
606 static void test_server_create_chr(TestServer
*server
, const gchar
*opt
)
608 g_autofree gchar
*chr_path
= g_strdup_printf("unix:%s%s",
609 server
->socket_path
, opt
);
612 chr
= qemu_chr_new(server
->chr_name
, chr_path
, server
->context
);
615 qemu_chr_fe_init(&server
->chr
, chr
, &error_abort
);
616 qemu_chr_fe_set_handlers(&server
->chr
, chr_can_read
, chr_read
,
617 chr_event
, NULL
, server
, server
->context
, true);
620 static void test_server_listen(TestServer
*server
)
622 test_server_create_chr(server
, ",server=on,wait=off");
625 static void test_server_free(TestServer
*server
)
629 /* finish the helper thread and dispatch pending sources */
630 g_main_loop_quit(server
->loop
);
631 g_thread_join(server
->thread
);
632 while (g_main_context_pending(NULL
)) {
633 g_main_context_iteration(NULL
, TRUE
);
636 unlink(server
->socket_path
);
637 g_free(server
->socket_path
);
639 unlink(server
->mig_path
);
640 g_free(server
->mig_path
);
642 ret
= rmdir(server
->tmpfs
);
644 g_test_message("unable to rmdir: path (%s): %s",
645 server
->tmpfs
, strerror(errno
));
647 g_free(server
->tmpfs
);
649 qemu_chr_fe_deinit(&server
->chr
, true);
651 for (i
= 0; i
< server
->fds_num
; i
++) {
652 close(server
->fds
[i
]);
655 if (server
->log_fd
!= -1) {
656 close(server
->log_fd
);
659 g_free(server
->chr_name
);
661 g_main_loop_unref(server
->loop
);
662 g_main_context_unref(server
->context
);
663 g_cond_clear(&server
->data_cond
);
664 g_mutex_clear(&server
->data_mutex
);
668 static void wait_for_log_fd(TestServer
*s
)
672 g_mutex_lock(&s
->data_mutex
);
673 end_time
= g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND
;
674 while (s
->log_fd
== -1) {
675 if (!g_cond_wait_until(&s
->data_cond
, &s
->data_mutex
, end_time
)) {
676 /* timeout has passed */
677 g_assert(s
->log_fd
!= -1);
682 g_mutex_unlock(&s
->data_mutex
);
685 static void write_guest_mem(TestServer
*s
, uint32_t seed
)
691 /* iterate all regions */
692 for (i
= 0; i
< s
->fds_num
; i
++) {
694 /* We'll write only the region statring at 0x0 */
695 if (s
->memory
.regions
[i
].guest_phys_addr
!= 0x0) {
699 g_assert_cmpint(s
->memory
.regions
[i
].memory_size
, >, 1024);
701 size
= s
->memory
.regions
[i
].memory_size
+
702 s
->memory
.regions
[i
].mmap_offset
;
704 guest_mem
= mmap(0, size
, PROT_READ
| PROT_WRITE
,
705 MAP_SHARED
, s
->fds
[i
], 0);
707 g_assert(guest_mem
!= MAP_FAILED
);
708 guest_mem
+= (s
->memory
.regions
[i
].mmap_offset
/ sizeof(*guest_mem
));
710 for (j
= 0; j
< 256; j
++) {
711 guest_mem
[j
] = seed
+ j
;
714 munmap(guest_mem
, s
->memory
.regions
[i
].memory_size
);
719 static guint64
get_log_size(TestServer
*s
)
721 guint64 log_size
= 0;
724 for (i
= 0; i
< s
->memory
.nregions
; ++i
) {
725 VhostUserMemoryRegion
*reg
= &s
->memory
.regions
[i
];
726 guint64 last
= range_get_last(reg
->guest_phys_addr
,
728 log_size
= MAX(log_size
, last
/ (8 * VHOST_LOG_PAGE
) + 1);
734 typedef struct TestMigrateSource
{
741 test_migrate_source_check(GSource
*source
)
743 TestMigrateSource
*t
= (TestMigrateSource
*)source
;
744 gboolean overlap
= t
->src
->rings
&& t
->dest
->rings
;
751 GSourceFuncs test_migrate_source_funcs
= {
752 .check
= test_migrate_source_check
,
755 static void vhost_user_test_cleanup(void *s
)
757 TestServer
*server
= s
;
759 qos_invalidate_command_line();
760 test_server_free(server
);
763 static void *vhost_user_test_setup(GString
*cmd_line
, void *arg
)
765 TestServer
*server
= test_server_new("vhost-user-test", arg
);
766 test_server_listen(server
);
768 append_mem_opts(server
, cmd_line
, 256, TEST_MEMFD_AUTO
);
769 server
->vu_ops
->append_opts(server
, cmd_line
, "");
771 g_test_queue_destroy(vhost_user_test_cleanup
, server
);
776 static void *vhost_user_test_setup_memfd(GString
*cmd_line
, void *arg
)
778 TestServer
*server
= test_server_new("vhost-user-test", arg
);
779 test_server_listen(server
);
781 append_mem_opts(server
, cmd_line
, 256, TEST_MEMFD_YES
);
782 server
->vu_ops
->append_opts(server
, cmd_line
, "");
784 g_test_queue_destroy(vhost_user_test_cleanup
, server
);
789 static void test_read_guest_mem(void *obj
, void *arg
, QGuestAllocator
*alloc
)
791 TestServer
*server
= arg
;
793 if (!wait_for_fds(server
)) {
797 read_guest_mem_server(global_qtest
, server
);
800 static void test_migrate(void *obj
, void *arg
, QGuestAllocator
*alloc
)
804 GString
*dest_cmdline
;
812 if (!wait_for_fds(s
)) {
816 dest
= test_server_new("dest", s
->vu_ops
);
817 dest_cmdline
= g_string_new(qos_get_current_command_line());
818 uri
= g_strdup_printf("%s%s", "unix:", dest
->mig_path
);
820 size
= get_log_size(s
);
821 g_assert_cmpint(size
, ==, (256 * 1024 * 1024) / (VHOST_LOG_PAGE
* 8));
823 test_server_listen(dest
);
824 g_string_append_printf(dest_cmdline
, " -incoming %s", uri
);
825 append_mem_opts(dest
, dest_cmdline
, 256, TEST_MEMFD_AUTO
);
826 dest
->vu_ops
->append_opts(dest
, dest_cmdline
, "");
827 to
= qtest_init(dest_cmdline
->str
);
829 /* This would be where you call qos_allocate_objects(to, NULL), if you want
830 * to talk to the QVirtioNet object on the destination.
833 source
= g_source_new(&test_migrate_source_funcs
,
834 sizeof(TestMigrateSource
));
835 ((TestMigrateSource
*)source
)->src
= s
;
836 ((TestMigrateSource
*)source
)->dest
= dest
;
837 g_source_attach(source
, s
->context
);
839 /* slow down migration to have time to fiddle with log */
840 /* TODO: qtest could learn to break on some places */
841 rsp
= qmp("{ 'execute': 'migrate-set-parameters',"
842 "'arguments': { 'max-bandwidth': 10 } }");
843 g_assert(qdict_haskey(rsp
, "return"));
846 rsp
= qmp("{ 'execute': 'migrate', 'arguments': { 'uri': %s } }", uri
);
847 g_assert(qdict_haskey(rsp
, "return"));
852 log
= mmap(0, size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, s
->log_fd
, 0);
853 g_assert(log
!= MAP_FAILED
);
855 /* modify first page */
856 write_guest_mem(s
, 0x42);
860 /* speed things up */
861 rsp
= qmp("{ 'execute': 'migrate-set-parameters',"
862 "'arguments': { 'max-bandwidth': 0 } }");
863 g_assert(qdict_haskey(rsp
, "return"));
866 qmp_eventwait("STOP");
867 qtest_qmp_eventwait(to
, "RESUME");
869 g_assert(wait_for_fds(dest
));
870 read_guest_mem_server(to
, dest
);
872 g_source_destroy(source
);
873 g_source_unref(source
);
876 test_server_free(dest
);
878 g_string_free(dest_cmdline
, true);
881 static void wait_for_rings_started(TestServer
*s
, size_t count
)
885 g_mutex_lock(&s
->data_mutex
);
886 end_time
= g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND
;
887 while (ctpop64(s
->rings
) != count
) {
888 if (!g_cond_wait_until(&s
->data_cond
, &s
->data_mutex
, end_time
)) {
889 /* timeout has passed */
890 g_assert_cmpint(ctpop64(s
->rings
), ==, count
);
895 g_mutex_unlock(&s
->data_mutex
);
898 static inline void test_server_connect(TestServer
*server
)
900 test_server_create_chr(server
, ",reconnect=1");
904 reconnect_cb(gpointer user_data
)
906 TestServer
*s
= user_data
;
908 qemu_chr_fe_disconnect(&s
->chr
);
914 connect_thread(gpointer data
)
916 TestServer
*s
= data
;
918 /* wait for qemu to start before first try, to avoid extra warnings */
919 g_usleep(G_USEC_PER_SEC
);
920 test_server_connect(s
);
925 static void *vhost_user_test_setup_reconnect(GString
*cmd_line
, void *arg
)
927 TestServer
*s
= test_server_new("reconnect", arg
);
929 g_thread_new("connect", connect_thread
, s
);
930 append_mem_opts(s
, cmd_line
, 256, TEST_MEMFD_AUTO
);
931 s
->vu_ops
->append_opts(s
, cmd_line
, ",server=on");
933 g_test_queue_destroy(vhost_user_test_cleanup
, s
);
938 static void test_reconnect(void *obj
, void *arg
, QGuestAllocator
*alloc
)
943 if (!wait_for_fds(s
)) {
947 wait_for_rings_started(s
, 2);
952 src
= g_idle_source_new();
953 g_source_set_callback(src
, reconnect_cb
, s
, NULL
);
954 g_source_attach(src
, s
->context
);
956 g_assert(wait_for_fds(s
));
957 wait_for_rings_started(s
, 2);
960 static void *vhost_user_test_setup_connect_fail(GString
*cmd_line
, void *arg
)
962 TestServer
*s
= test_server_new("connect-fail", arg
);
966 g_thread_new("connect", connect_thread
, s
);
967 append_mem_opts(s
, cmd_line
, 256, TEST_MEMFD_AUTO
);
968 s
->vu_ops
->append_opts(s
, cmd_line
, ",server=on");
970 g_test_queue_destroy(vhost_user_test_cleanup
, s
);
975 static void *vhost_user_test_setup_flags_mismatch(GString
*cmd_line
, void *arg
)
977 TestServer
*s
= test_server_new("flags-mismatch", arg
);
979 s
->test_flags
= TEST_FLAGS_DISCONNECT
;
981 g_thread_new("connect", connect_thread
, s
);
982 append_mem_opts(s
, cmd_line
, 256, TEST_MEMFD_AUTO
);
983 s
->vu_ops
->append_opts(s
, cmd_line
, ",server=on");
985 g_test_queue_destroy(vhost_user_test_cleanup
, s
);
990 static void test_vhost_user_started(void *obj
, void *arg
, QGuestAllocator
*alloc
)
994 if (!wait_for_fds(s
)) {
997 wait_for_rings_started(s
, 2);
1000 static void *vhost_user_test_setup_multiqueue(GString
*cmd_line
, void *arg
)
1002 TestServer
*s
= vhost_user_test_setup(cmd_line
, arg
);
1005 g_string_append_printf(cmd_line
,
1006 " -set netdev.hs0.queues=%d"
1007 " -global virtio-net-pci.vectors=%d",
1008 s
->queues
, s
->queues
* 2 + 2);
1013 static void test_multiqueue(void *obj
, void *arg
, QGuestAllocator
*alloc
)
1015 TestServer
*s
= arg
;
1017 wait_for_rings_started(s
, s
->queues
* 2);
1021 static uint64_t vu_net_get_features(TestServer
*s
)
1023 uint64_t features
= 0x1ULL
<< VHOST_F_LOG_ALL
|
1024 0x1ULL
<< VHOST_USER_F_PROTOCOL_FEATURES
;
1026 if (s
->queues
> 1) {
1027 features
|= 0x1ULL
<< VIRTIO_NET_F_MQ
;
1033 static void vu_net_set_features(TestServer
*s
, CharBackend
*chr
,
1036 g_assert(msg
->payload
.u64
& (0x1ULL
<< VHOST_USER_F_PROTOCOL_FEATURES
));
1037 if (s
->test_flags
== TEST_FLAGS_DISCONNECT
) {
1038 qemu_chr_fe_disconnect(chr
);
1039 s
->test_flags
= TEST_FLAGS_BAD
;
1043 static void vu_net_get_protocol_features(TestServer
*s
, CharBackend
*chr
,
1046 /* send back features to qemu */
1047 msg
->flags
|= VHOST_USER_REPLY_MASK
;
1048 msg
->size
= sizeof(m
.payload
.u64
);
1049 msg
->payload
.u64
= 1 << VHOST_USER_PROTOCOL_F_LOG_SHMFD
;
1050 msg
->payload
.u64
|= 1 << VHOST_USER_PROTOCOL_F_CROSS_ENDIAN
;
1051 if (s
->queues
> 1) {
1052 msg
->payload
.u64
|= 1 << VHOST_USER_PROTOCOL_F_MQ
;
1054 qemu_chr_fe_write_all(chr
, (uint8_t *)msg
, VHOST_USER_HDR_SIZE
+ msg
->size
);
1057 /* Each VHOST-USER device should have its ops structure defined. */
1058 static struct vhost_user_ops g_vu_net_ops
= {
1059 .type
= VHOST_USER_NET
,
1061 .append_opts
= append_vhost_net_opts
,
1063 .get_features
= vu_net_get_features
,
1064 .set_features
= vu_net_set_features
,
1065 .get_protocol_features
= vu_net_get_protocol_features
,
1068 static void register_vhost_user_test(void)
1070 QOSGraphTestOptions opts
= {
1071 .before
= vhost_user_test_setup
,
1073 .arg
= &g_vu_net_ops
,
1076 qemu_add_opts(&qemu_chardev_opts
);
1078 qos_add_test("vhost-user/read-guest-mem/memfile",
1080 test_read_guest_mem
, &opts
);
1082 if (qemu_memfd_check(MFD_ALLOW_SEALING
)) {
1083 opts
.before
= vhost_user_test_setup_memfd
;
1084 qos_add_test("vhost-user/read-guest-mem/memfd",
1086 test_read_guest_mem
, &opts
);
1089 qos_add_test("vhost-user/migrate",
1091 test_migrate
, &opts
);
1093 opts
.before
= vhost_user_test_setup_reconnect
;
1094 qos_add_test("vhost-user/reconnect", "virtio-net",
1095 test_reconnect
, &opts
);
1097 opts
.before
= vhost_user_test_setup_connect_fail
;
1098 qos_add_test("vhost-user/connect-fail", "virtio-net",
1099 test_vhost_user_started
, &opts
);
1101 opts
.before
= vhost_user_test_setup_flags_mismatch
;
1102 qos_add_test("vhost-user/flags-mismatch", "virtio-net",
1103 test_vhost_user_started
, &opts
);
1105 opts
.before
= vhost_user_test_setup_multiqueue
;
1106 opts
.edge
.extra_device_opts
= "mq=on";
1107 qos_add_test("vhost-user/multiqueue",
1109 test_multiqueue
, &opts
);
1111 libqos_init(register_vhost_user_test
);
1113 static uint64_t vu_gpio_get_features(TestServer
*s
)
1115 return 0x1ULL
<< VIRTIO_F_VERSION_1
|
1116 0x1ULL
<< VIRTIO_GPIO_F_IRQ
|
1117 0x1ULL
<< VHOST_USER_F_PROTOCOL_FEATURES
;
/*
 * This stub can't handle all the message types but we should reply
 * that we support VHOST_USER_PROTOCOL_F_CONFIG as gpio would use it
 * talking to a real vhost-user daemon.
 */
1125 static void vu_gpio_get_protocol_features(TestServer
*s
, CharBackend
*chr
,
1128 /* send back features to qemu */
1129 msg
->flags
|= VHOST_USER_REPLY_MASK
;
1130 msg
->size
= sizeof(m
.payload
.u64
);
1131 msg
->payload
.u64
= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG
;
1133 qemu_chr_fe_write_all(chr
, (uint8_t *)msg
, VHOST_USER_HDR_SIZE
+ msg
->size
);
1136 static struct vhost_user_ops g_vu_gpio_ops
= {
1137 .type
= VHOST_USER_GPIO
,
1139 .append_opts
= append_vhost_gpio_opts
,
1141 .get_features
= vu_gpio_get_features
,
1142 .set_features
= vu_net_set_features
,
1143 .get_protocol_features
= vu_gpio_get_protocol_features
,
1146 static void register_vhost_gpio_test(void)
1148 QOSGraphTestOptions opts
= {
1149 .before
= vhost_user_test_setup
,
1151 .arg
= &g_vu_gpio_ops
,
1154 qemu_add_opts(&qemu_chardev_opts
);
1156 qos_add_test("read-guest-mem/memfile",
1157 "vhost-user-gpio", test_read_guest_mem
, &opts
);
1159 libqos_init(register_vhost_gpio_test
);