/* Source: tests/vhost-user-test.c (QEMU) — commit "Add qtest for vhost-user" */
1 /*
2 * QTest testcase for the vhost-user
3 *
4 * Copyright (c) 2014 Virtual Open Systems Sarl.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10
11 #include "libqtest.h"
12 #include "qemu/option.h"
13 #include "sysemu/char.h"
14 #include "sysemu/sysemu.h"
15
16 #include <glib.h>
17 #include <linux/vhost.h>
18 #include <sys/mman.h>
19 #include <sys/vfs.h>
20 #include <qemu/sockets.h>
21
/* Pieces of the QEMU command line; assembled into QEMU_CMD below.
 * The memory backend must be a shared file mapping (hugetlbfs) so the
 * vhost-user slave can mmap the guest RAM fds it receives. */
#define QEMU_CMD_ACCEL  " -machine accel=tcg"
#define QEMU_CMD_MEM    " -m 512 -object memory-backend-file,id=mem,size=512M,"\
                        "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_CHR    " -chardev socket,id=chr0,path=%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=net0,chardev=chr0,vhostforce"
#define QEMU_CMD_NET    " -device virtio-net-pci,netdev=net0 "
#define QEMU_CMD_ROM    " -option-rom ../pc-bios/pxe-virtio.rom"

/* Format string with two %s slots: hugetlbfs path, then socket path */
#define QEMU_CMD        QEMU_CMD_ACCEL QEMU_CMD_MEM QEMU_CMD_CHR \
                        QEMU_CMD_NETDEV QEMU_CMD_NET QEMU_CMD_ROM

/* statfs f_type value identifying a hugetlbfs mount (see linux/magic.h) */
#define HUGETLBFS_MAGIC       0x958458f6
34
/*********** FROM hw/virtio/vhost-user.c *************************************/

/* Maximum number of regions a SET_MEM_TABLE message may carry */
#define VHOST_MEMORY_MAX_NREGIONS    8

/* vhost-user request codes; the numeric values are the wire protocol and
 * must stay in sync with hw/virtio/vhost-user.c. */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_MAX
} VhostUserRequest;
57
/* One guest RAM region as described in a SET_MEM_TABLE message */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* GPA where the region starts */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* QEMU-process virtual address */
} VhostUserMemoryRegion;

/* SET_MEM_TABLE payload: nregions valid entries in regions[] */
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;
69
/* On-the-wire vhost-user message: fixed header (request, flags, size)
 * followed by a request-specific payload union. Packed so sizeof()
 * matches the wire layout exactly. */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK (0x3)
#define VHOST_USER_REPLY_MASK (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
    };
} QEMU_PACKED VhostUserMsg;

/* Dummy instance: exists only so the sizeof() expressions below work */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

/* Maximum payload bytes that can follow the header */
#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/
95
96 int fds_num = 0, fds[VHOST_MEMORY_MAX_NREGIONS];
97 static VhostUserMemory memory;
98 static GMutex data_mutex;
99 static GCond data_cond;
100
101 static void read_guest_mem(void)
102 {
103 uint32_t *guest_mem;
104 gint64 end_time;
105 int i, j;
106
107 g_mutex_lock(&data_mutex);
108
109 end_time = g_get_monotonic_time() + 5 * G_TIME_SPAN_SECOND;
110 while (!fds_num) {
111 if (!g_cond_wait_until(&data_cond, &data_mutex, end_time)) {
112 /* timeout has passed */
113 g_assert(fds_num);
114 break;
115 }
116 }
117
118 /* check for sanity */
119 g_assert_cmpint(fds_num, >, 0);
120 g_assert_cmpint(fds_num, ==, memory.nregions);
121
122 /* iterate all regions */
123 for (i = 0; i < fds_num; i++) {
124
125 /* We'll check only the region statring at 0x0*/
126 if (memory.regions[i].guest_phys_addr != 0x0) {
127 continue;
128 }
129
130 g_assert_cmpint(memory.regions[i].memory_size, >, 1024);
131
132 guest_mem = mmap(0, memory.regions[i].memory_size,
133 PROT_READ | PROT_WRITE, MAP_SHARED, fds[i], 0);
134
135 for (j = 0; j < 256; j++) {
136 uint32_t a = readl(memory.regions[i].guest_phys_addr + j*4);
137 uint32_t b = guest_mem[j];
138
139 g_assert_cmpint(a, ==, b);
140 }
141
142 munmap(guest_mem, memory.regions[i].memory_size);
143 }
144
145 g_assert_cmpint(1, ==, 1);
146 g_mutex_unlock(&data_mutex);
147 }
148
149 static void *thread_function(void *data)
150 {
151 GMainLoop *loop;
152 loop = g_main_loop_new(NULL, FALSE);
153 g_main_loop_run(loop);
154 return NULL;
155 }
156
157 static int chr_can_read(void *opaque)
158 {
159 return VHOST_USER_HDR_SIZE;
160 }
161
162 static void chr_read(void *opaque, const uint8_t *buf, int size)
163 {
164 CharDriverState *chr = opaque;
165 VhostUserMsg msg;
166 uint8_t *p = (uint8_t *) &msg;
167 int fd;
168
169 if (size != VHOST_USER_HDR_SIZE) {
170 g_test_message("Wrong message size received %d\n", size);
171 return;
172 }
173
174 memcpy(p, buf, VHOST_USER_HDR_SIZE);
175
176 if (msg.size) {
177 p += VHOST_USER_HDR_SIZE;
178 qemu_chr_fe_read_all(chr, p, msg.size);
179 }
180
181 switch (msg.request) {
182 case VHOST_USER_GET_FEATURES:
183 /* send back features to qemu */
184 msg.flags |= VHOST_USER_REPLY_MASK;
185 msg.size = sizeof(m.u64);
186 msg.u64 = 0;
187 p = (uint8_t *) &msg;
188 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
189 break;
190
191 case VHOST_USER_GET_VRING_BASE:
192 /* send back vring base to qemu */
193 msg.flags |= VHOST_USER_REPLY_MASK;
194 msg.size = sizeof(m.state);
195 msg.state.num = 0;
196 p = (uint8_t *) &msg;
197 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
198 break;
199
200 case VHOST_USER_SET_MEM_TABLE:
201 /* received the mem table */
202 memcpy(&memory, &msg.memory, sizeof(msg.memory));
203 fds_num = qemu_chr_fe_get_msgfds(chr, fds, sizeof(fds) / sizeof(int));
204
205 /* signal the test that it can continue */
206 g_cond_signal(&data_cond);
207 g_mutex_unlock(&data_mutex);
208 break;
209
210 case VHOST_USER_SET_VRING_KICK:
211 case VHOST_USER_SET_VRING_CALL:
212 /* consume the fd */
213 qemu_chr_fe_get_msgfds(chr, &fd, 1);
214 /*
215 * This is a non-blocking eventfd.
216 * The receive function forces it to be blocking,
217 * so revert it back to non-blocking.
218 */
219 qemu_set_nonblock(fd);
220 break;
221 default:
222 break;
223 }
224 }
225
226 static const char *init_hugepagefs(void)
227 {
228 const char *path;
229 struct statfs fs;
230 int ret;
231
232 path = getenv("QTEST_HUGETLBFS_PATH");
233 if (!path) {
234 path = "/hugetlbfs";
235 }
236
237 if (access(path, R_OK | W_OK | X_OK)) {
238 g_test_message("access on path (%s): %s\n", path, strerror(errno));
239 return NULL;
240 }
241
242 do {
243 ret = statfs(path, &fs);
244 } while (ret != 0 && errno == EINTR);
245
246 if (ret != 0) {
247 g_test_message("statfs on path (%s): %s\n", path, strerror(errno));
248 return NULL;
249 }
250
251 if (fs.f_type != HUGETLBFS_MAGIC) {
252 g_test_message("Warning: path not on HugeTLBFS: %s\n", path);
253 return NULL;
254 }
255
256 return path;
257 }
258
/*
 * Test driver: create a listening vhost-user slave socket with chardev
 * handlers, spin the chardev main loop in a helper thread, boot QEMU
 * against the socket, and run the read-guest-mem test.
 *
 * Returns 0 (skip) when no usable hugetlbfs mount is found, otherwise
 * the g_test_run() status.
 */
int main(int argc, char **argv)
{
    QTestState *s = NULL;
    CharDriverState *chr = NULL;
    const char *hugefs = 0;
    char *socket_path = 0;
    char *qemu_cmd = 0;
    char *chr_path = 0;
    int ret;

    g_test_init(&argc, &argv, NULL);

    /* needed before any chardev/QOM object can be created */
    module_call_init(MODULE_INIT_QOM);

    hugefs = init_hugepagefs();
    if (!hugefs) {
        /* no hugetlbfs available: skip rather than fail */
        return 0;
    }

    socket_path = g_strdup_printf("/tmp/vhost-%d.sock", getpid());

    /* create char dev and add read handlers */
    qemu_add_opts(&qemu_chardev_opts);
    chr_path = g_strdup_printf("unix:%s,server,nowait", socket_path);
    chr = qemu_chr_new("chr0", chr_path, NULL);
    g_free(chr_path);
    qemu_chr_add_handlers(chr, chr_can_read, chr_read, NULL, chr);

    /* run the main loop thread so the chardev may operate */
    g_mutex_init(&data_mutex);
    g_cond_init(&data_cond);
    /* held until read_guest_mem() releases it inside g_cond_wait_until() */
    g_mutex_lock(&data_mutex);
    g_thread_new(NULL, thread_function, NULL);

    /* QEMU_CMD slots: hugetlbfs path (mem-path), then socket path */
    qemu_cmd = g_strdup_printf(QEMU_CMD, hugefs, socket_path);
    s = qtest_start(qemu_cmd);
    g_free(qemu_cmd);

    qtest_add_func("/vhost-user/read-guest-mem", read_guest_mem);

    ret = g_test_run();

    if (s) {
        qtest_quit(s);
    }

    /* cleanup */
    unlink(socket_path);
    g_free(socket_path);
    g_cond_clear(&data_cond);
    g_mutex_clear(&data_mutex);

    return ret;
}