]> git.proxmox.com Git - mirror_qemu.git/blame - tests/vhost-user-test.c
Merge remote-tracking branch 'remotes/kraxel/tags/pull-usb-20140711-1' into staging
[mirror_qemu.git] / tests / vhost-user-test.c
CommitLineData
a77e6b14
NN
1/*
2 * QTest testcase for the vhost-user
3 *
4 * Copyright (c) 2014 Virtual Open Systems Sarl.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10
bd95939f
NN
11#define QEMU_GLIB_COMPAT_H
12#include <glib.h>
13
a77e6b14
NN
14#include "libqtest.h"
15#include "qemu/option.h"
16#include "sysemu/char.h"
17#include "sysemu/sysemu.h"
18
a77e6b14
NN
19#include <linux/vhost.h>
20#include <sys/mman.h>
21#include <sys/vfs.h>
22#include <qemu/sockets.h>
23
/* GLIB version compatibility flags */

/* G_TIME_SPAN_SECOND appeared in GLib 2.26 */
#if !GLIB_CHECK_VERSION(2, 26, 0)
#define G_TIME_SPAN_SECOND (G_GINT64_CONSTANT(1000000))
#endif

/* g_get_monotonic_time() appeared in GLib 2.28 */
#if GLIB_CHECK_VERSION(2, 28, 0)
#define HAVE_MONOTONIC_TIME
#endif

/* GLib 2.32 replaced g_mutex_new()/g_cond_new()/g_thread_create() with
 * g_mutex_init()/g_cond_init()/g_thread_try_new() */
#if GLIB_CHECK_VERSION(2, 32, 0)
#define HAVE_MUTEX_INIT
#define HAVE_COND_INIT
#define HAVE_THREAD_NEW
#endif
38
a77e6b14
NN
/* Command line for the QEMU under test: TCG acceleration, guest RAM backed
 * by a shared hugepage file (so the vhost-user slave can mmap it), and a
 * vhost-user netdev connected through the unix-socket chardev we serve.
 * The two %s holes are the hugetlbfs path and the socket path. */
#define QEMU_CMD_ACCEL " -machine accel=tcg"
#define QEMU_CMD_MEM " -m 512 -object memory-backend-file,id=mem,size=512M,"\
                     "mem-path=%s,share=on -numa node,memdev=mem"
#define QEMU_CMD_CHR " -chardev socket,id=chr0,path=%s"
#define QEMU_CMD_NETDEV " -netdev vhost-user,id=net0,chardev=chr0,vhostforce"
#define QEMU_CMD_NET " -device virtio-net-pci,netdev=net0 "
#define QEMU_CMD_ROM " -option-rom ../pc-bios/pxe-virtio.rom"

#define QEMU_CMD QEMU_CMD_ACCEL QEMU_CMD_MEM QEMU_CMD_CHR \
                 QEMU_CMD_NETDEV QEMU_CMD_NET QEMU_CMD_ROM

/* f_type reported by statfs() for a hugetlbfs mount */
#define HUGETLBFS_MAGIC       0x958458f6
/*********** FROM hw/virtio/vhost-user.c *************************************/

#define VHOST_MEMORY_MAX_NREGIONS    8

/* Request codes of the vhost-user protocol.  The numeric values are part
 * of the wire format and must stay in sync with hw/virtio/vhost-user.c. */
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_MAX
} VhostUserRequest;

/* One guest RAM region as described in a SET_MEM_TABLE message */
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;   /* region start in guest physical space */
    uint64_t memory_size;       /* region length in bytes */
    uint64_t userspace_addr;    /* region start in the QEMU process */
} VhostUserMemoryRegion;

/* Payload of SET_MEM_TABLE: the guest memory map */
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_MAX_NREGIONS];
} VhostUserMemory;

/* On-the-wire message: fixed header (request/flags/size) followed by a
 * request-specific payload of `size` bytes */
typedef struct VhostUserMsg {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
    uint32_t flags;
    uint32_t size; /* the following payload size */
    union {
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
    };
} QEMU_PACKED VhostUserMsg;

/* dummy instance used only for sizeof of its fields below */
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(m.request) \
                            + sizeof(m.flags) \
                            + sizeof(m.size))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(m) - VHOST_USER_HDR_SIZE)

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
/*****************************************************************************/
112
113int fds_num = 0, fds[VHOST_MEMORY_MAX_NREGIONS];
114static VhostUserMemory memory;
bd95939f
NN
115static GMutex *data_mutex;
116static GCond *data_cond;
117
118static gint64 _get_time(void)
119{
120#ifdef HAVE_MONOTONIC_TIME
121 return g_get_monotonic_time();
122#else
123 GTimeVal time;
124 g_get_current_time(&time);
125
126 return time.tv_sec * G_TIME_SPAN_SECOND + time.tv_usec;
127#endif
128}
129
130static GMutex *_mutex_new(void)
131{
132 GMutex *mutex;
133
134#ifdef HAVE_MUTEX_INIT
135 mutex = g_new(GMutex, 1);
136 g_mutex_init(mutex);
137#else
138 mutex = g_mutex_new();
139#endif
140
141 return mutex;
142}
143
144static void _mutex_free(GMutex *mutex)
145{
146#ifdef HAVE_MUTEX_INIT
147 g_mutex_clear(mutex);
148 g_free(mutex);
149#else
150 g_mutex_free(mutex);
151#endif
152}
153
154static GCond *_cond_new(void)
155{
156 GCond *cond;
157
158#ifdef HAVE_COND_INIT
159 cond = g_new(GCond, 1);
160 g_cond_init(cond);
161#else
162 cond = g_cond_new();
163#endif
164
165 return cond;
166}
167
168static gboolean _cond_wait_until(GCond *cond, GMutex *mutex, gint64 end_time)
169{
170 gboolean ret = FALSE;
171#ifdef HAVE_COND_INIT
172 ret = g_cond_wait_until(cond, mutex, end_time);
173#else
174 GTimeVal time = { end_time / G_TIME_SPAN_SECOND,
175 end_time % G_TIME_SPAN_SECOND };
176 ret = g_cond_timed_wait(cond, mutex, &time);
177#endif
178 return ret;
179}
180
181static void _cond_free(GCond *cond)
182{
183#ifdef HAVE_COND_INIT
184 g_cond_clear(cond);
185 g_free(cond);
186#else
187 g_cond_free(cond);
188#endif
189}
190
191static GThread *_thread_new(const gchar *name, GThreadFunc func, gpointer data)
192{
193 GThread *thread = NULL;
194 GError *error = NULL;
195#ifdef HAVE_THREAD_NEW
196 thread = g_thread_try_new(name, func, data, &error);
197#else
198 thread = g_thread_create(func, data, TRUE, &error);
199#endif
200 return thread;
201}
a77e6b14
NN
202
203static void read_guest_mem(void)
204{
205 uint32_t *guest_mem;
206 gint64 end_time;
207 int i, j;
208
bd95939f 209 g_mutex_lock(data_mutex);
a77e6b14 210
bd95939f 211 end_time = _get_time() + 5 * G_TIME_SPAN_SECOND;
a77e6b14 212 while (!fds_num) {
bd95939f 213 if (!_cond_wait_until(data_cond, data_mutex, end_time)) {
a77e6b14
NN
214 /* timeout has passed */
215 g_assert(fds_num);
216 break;
217 }
218 }
219
220 /* check for sanity */
221 g_assert_cmpint(fds_num, >, 0);
222 g_assert_cmpint(fds_num, ==, memory.nregions);
223
224 /* iterate all regions */
225 for (i = 0; i < fds_num; i++) {
226
227 /* We'll check only the region statring at 0x0*/
228 if (memory.regions[i].guest_phys_addr != 0x0) {
229 continue;
230 }
231
232 g_assert_cmpint(memory.regions[i].memory_size, >, 1024);
233
234 guest_mem = mmap(0, memory.regions[i].memory_size,
235 PROT_READ | PROT_WRITE, MAP_SHARED, fds[i], 0);
236
237 for (j = 0; j < 256; j++) {
238 uint32_t a = readl(memory.regions[i].guest_phys_addr + j*4);
239 uint32_t b = guest_mem[j];
240
241 g_assert_cmpint(a, ==, b);
242 }
243
244 munmap(guest_mem, memory.regions[i].memory_size);
245 }
246
247 g_assert_cmpint(1, ==, 1);
bd95939f 248 g_mutex_unlock(data_mutex);
a77e6b14
NN
249}
250
251static void *thread_function(void *data)
252{
253 GMainLoop *loop;
254 loop = g_main_loop_new(NULL, FALSE);
255 g_main_loop_run(loop);
256 return NULL;
257}
258
259static int chr_can_read(void *opaque)
260{
261 return VHOST_USER_HDR_SIZE;
262}
263
264static void chr_read(void *opaque, const uint8_t *buf, int size)
265{
266 CharDriverState *chr = opaque;
267 VhostUserMsg msg;
268 uint8_t *p = (uint8_t *) &msg;
269 int fd;
270
271 if (size != VHOST_USER_HDR_SIZE) {
272 g_test_message("Wrong message size received %d\n", size);
273 return;
274 }
275
f61badf3 276 g_mutex_lock(data_mutex);
a77e6b14
NN
277 memcpy(p, buf, VHOST_USER_HDR_SIZE);
278
279 if (msg.size) {
280 p += VHOST_USER_HDR_SIZE;
281 qemu_chr_fe_read_all(chr, p, msg.size);
282 }
283
284 switch (msg.request) {
285 case VHOST_USER_GET_FEATURES:
286 /* send back features to qemu */
287 msg.flags |= VHOST_USER_REPLY_MASK;
288 msg.size = sizeof(m.u64);
289 msg.u64 = 0;
290 p = (uint8_t *) &msg;
291 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
292 break;
293
294 case VHOST_USER_GET_VRING_BASE:
295 /* send back vring base to qemu */
296 msg.flags |= VHOST_USER_REPLY_MASK;
297 msg.size = sizeof(m.state);
298 msg.state.num = 0;
299 p = (uint8_t *) &msg;
300 qemu_chr_fe_write_all(chr, p, VHOST_USER_HDR_SIZE + msg.size);
301 break;
302
303 case VHOST_USER_SET_MEM_TABLE:
304 /* received the mem table */
305 memcpy(&memory, &msg.memory, sizeof(msg.memory));
306 fds_num = qemu_chr_fe_get_msgfds(chr, fds, sizeof(fds) / sizeof(int));
307
308 /* signal the test that it can continue */
bd95939f 309 g_cond_signal(data_cond);
a77e6b14
NN
310 break;
311
312 case VHOST_USER_SET_VRING_KICK:
313 case VHOST_USER_SET_VRING_CALL:
314 /* consume the fd */
315 qemu_chr_fe_get_msgfds(chr, &fd, 1);
316 /*
317 * This is a non-blocking eventfd.
318 * The receive function forces it to be blocking,
319 * so revert it back to non-blocking.
320 */
321 qemu_set_nonblock(fd);
322 break;
323 default:
324 break;
325 }
f61badf3 326 g_mutex_unlock(data_mutex);
a77e6b14
NN
327}
328
329static const char *init_hugepagefs(void)
330{
331 const char *path;
332 struct statfs fs;
333 int ret;
334
335 path = getenv("QTEST_HUGETLBFS_PATH");
336 if (!path) {
337 path = "/hugetlbfs";
338 }
339
340 if (access(path, R_OK | W_OK | X_OK)) {
341 g_test_message("access on path (%s): %s\n", path, strerror(errno));
342 return NULL;
343 }
344
345 do {
346 ret = statfs(path, &fs);
347 } while (ret != 0 && errno == EINTR);
348
349 if (ret != 0) {
350 g_test_message("statfs on path (%s): %s\n", path, strerror(errno));
351 return NULL;
352 }
353
354 if (fs.f_type != HUGETLBFS_MAGIC) {
355 g_test_message("Warning: path not on HugeTLBFS: %s\n", path);
356 return NULL;
357 }
358
359 return path;
360}
361
362int main(int argc, char **argv)
363{
364 QTestState *s = NULL;
365 CharDriverState *chr = NULL;
366 const char *hugefs = 0;
367 char *socket_path = 0;
368 char *qemu_cmd = 0;
369 char *chr_path = 0;
370 int ret;
371
372 g_test_init(&argc, &argv, NULL);
373
374 module_call_init(MODULE_INIT_QOM);
375
376 hugefs = init_hugepagefs();
377 if (!hugefs) {
378 return 0;
379 }
380
381 socket_path = g_strdup_printf("/tmp/vhost-%d.sock", getpid());
382
383 /* create char dev and add read handlers */
384 qemu_add_opts(&qemu_chardev_opts);
385 chr_path = g_strdup_printf("unix:%s,server,nowait", socket_path);
386 chr = qemu_chr_new("chr0", chr_path, NULL);
387 g_free(chr_path);
388 qemu_chr_add_handlers(chr, chr_can_read, chr_read, NULL, chr);
389
390 /* run the main loop thread so the chardev may operate */
bd95939f
NN
391 data_mutex = _mutex_new();
392 data_cond = _cond_new();
bd95939f 393 _thread_new(NULL, thread_function, NULL);
a77e6b14
NN
394
395 qemu_cmd = g_strdup_printf(QEMU_CMD, hugefs, socket_path);
396 s = qtest_start(qemu_cmd);
397 g_free(qemu_cmd);
398
399 qtest_add_func("/vhost-user/read-guest-mem", read_guest_mem);
400
401 ret = g_test_run();
402
403 if (s) {
404 qtest_quit(s);
405 }
406
407 /* cleanup */
408 unlink(socket_path);
409 g_free(socket_path);
bd95939f
NN
410 _cond_free(data_cond);
411 _mutex_free(data_mutex);
a77e6b14
NN
412
413 return ret;
414}