]> git.proxmox.com Git - mirror_qemu.git/blame - hw/virtio/vhost-user.c
vhost: Distinguish errors in vhost_backend_init()
[mirror_qemu.git] / hw / virtio / vhost-user.c
CommitLineData
5f6f6664
NN
1/*
2 * vhost-user
3 *
4 * Copyright (c) 2013 Virtual Open Systems Sarl.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10
9b8bfe21 11#include "qemu/osdep.h"
da34e65c 12#include "qapi/error.h"
5f6f6664 13#include "hw/virtio/vhost.h"
4d0cf552 14#include "hw/virtio/vhost-user.h"
5f6f6664 15#include "hw/virtio/vhost-backend.h"
44866521 16#include "hw/virtio/virtio.h"
3e866365 17#include "hw/virtio/virtio-net.h"
4d43a603 18#include "chardev/char-fe.h"
57dc0217 19#include "io/channel-socket.h"
5f6f6664
NN
20#include "sysemu/kvm.h"
21#include "qemu/error-report.h"
db725815 22#include "qemu/main-loop.h"
5f6f6664 23#include "qemu/sockets.h"
efbfeb81 24#include "sysemu/cryptodev.h"
9ccbfe14
DDAG
25#include "migration/migration.h"
26#include "migration/postcopy-ram.h"
6864a7b5 27#include "trace.h"
5f6f6664 28
5f6f6664
NN
29#include <sys/ioctl.h>
30#include <sys/socket.h>
31#include <sys/un.h>
18658a3c
PB
32
33#include "standard-headers/linux/vhost_types.h"
34
35#ifdef CONFIG_LINUX
375318d0 36#include <linux/userfaultfd.h>
18658a3c 37#endif
5f6f6664 38
27598393 39#define VHOST_MEMORY_BASELINE_NREGIONS 8
dcb10c00 40#define VHOST_USER_F_PROTOCOL_FEATURES 30
5f57fbea 41#define VHOST_USER_SLAVE_MAX_FDS 8
e2051e9e 42
27598393
RN
43/*
44 * Set maximum number of RAM slots supported to
45 * the maximum number supported by the target
46 * hardware platform.
47 */
48#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
49 defined(TARGET_ARM) || defined(TARGET_ARM_64)
50#include "hw/acpi/acpi.h"
51#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS
52
53#elif defined(TARGET_PPC) || defined(TARGET_PPC_64)
54#include "hw/ppc/spapr.h"
55#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS
56
57#else
58#define VHOST_USER_MAX_RAM_SLOTS 512
59#endif
60
4c3e257b
CL
61/*
62 * Maximum size of virtio device config space
63 */
64#define VHOST_USER_MAX_CONFIG_SIZE 256
65
de1372d4
TC
66enum VhostUserProtocolFeature {
67 VHOST_USER_PROTOCOL_F_MQ = 0,
68 VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
69 VHOST_USER_PROTOCOL_F_RARP = 2,
ca525ce5 70 VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
c5f048d8 71 VHOST_USER_PROTOCOL_F_NET_MTU = 4,
4bbeeba0 72 VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
5df04f17 73 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
efbfeb81 74 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
9ccbfe14 75 VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
1c3e5a26 76 VHOST_USER_PROTOCOL_F_CONFIG = 9,
5f57fbea 77 VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
44866521 78 VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
5ad204bf 79 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
d91d57e6 80 VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
6b0eff1a
RN
81 /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
82 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
de1372d4
TC
83 VHOST_USER_PROTOCOL_F_MAX
84};
85
86#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
5f6f6664
NN
87
88typedef enum VhostUserRequest {
89 VHOST_USER_NONE = 0,
90 VHOST_USER_GET_FEATURES = 1,
91 VHOST_USER_SET_FEATURES = 2,
92 VHOST_USER_SET_OWNER = 3,
60915dc4 93 VHOST_USER_RESET_OWNER = 4,
5f6f6664
NN
94 VHOST_USER_SET_MEM_TABLE = 5,
95 VHOST_USER_SET_LOG_BASE = 6,
96 VHOST_USER_SET_LOG_FD = 7,
97 VHOST_USER_SET_VRING_NUM = 8,
98 VHOST_USER_SET_VRING_ADDR = 9,
99 VHOST_USER_SET_VRING_BASE = 10,
100 VHOST_USER_GET_VRING_BASE = 11,
101 VHOST_USER_SET_VRING_KICK = 12,
102 VHOST_USER_SET_VRING_CALL = 13,
103 VHOST_USER_SET_VRING_ERR = 14,
dcb10c00
MT
104 VHOST_USER_GET_PROTOCOL_FEATURES = 15,
105 VHOST_USER_SET_PROTOCOL_FEATURES = 16,
e2051e9e 106 VHOST_USER_GET_QUEUE_NUM = 17,
7263a0ad 107 VHOST_USER_SET_VRING_ENABLE = 18,
3e866365 108 VHOST_USER_SEND_RARP = 19,
c5f048d8 109 VHOST_USER_NET_SET_MTU = 20,
4bbeeba0 110 VHOST_USER_SET_SLAVE_REQ_FD = 21,
6dcdd06e 111 VHOST_USER_IOTLB_MSG = 22,
5df04f17 112 VHOST_USER_SET_VRING_ENDIAN = 23,
4c3e257b
CL
113 VHOST_USER_GET_CONFIG = 24,
114 VHOST_USER_SET_CONFIG = 25,
efbfeb81
GA
115 VHOST_USER_CREATE_CRYPTO_SESSION = 26,
116 VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
d3dff7a5 117 VHOST_USER_POSTCOPY_ADVISE = 28,
6864a7b5 118 VHOST_USER_POSTCOPY_LISTEN = 29,
c639187e 119 VHOST_USER_POSTCOPY_END = 30,
5ad204bf
XY
120 VHOST_USER_GET_INFLIGHT_FD = 31,
121 VHOST_USER_SET_INFLIGHT_FD = 32,
bd2e44fe 122 VHOST_USER_GPU_SET_SOCKET = 33,
d91d57e6 123 VHOST_USER_RESET_DEVICE = 34,
6b0eff1a
RN
124 /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
125 VHOST_USER_GET_MAX_MEM_SLOTS = 36,
f1aeb14b
RN
126 VHOST_USER_ADD_MEM_REG = 37,
127 VHOST_USER_REM_MEM_REG = 38,
5f6f6664
NN
128 VHOST_USER_MAX
129} VhostUserRequest;
130
4bbeeba0
MAL
131typedef enum VhostUserSlaveRequest {
132 VHOST_USER_SLAVE_NONE = 0,
6dcdd06e 133 VHOST_USER_SLAVE_IOTLB_MSG = 1,
4c3e257b 134 VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
44866521 135 VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
4bbeeba0
MAL
136 VHOST_USER_SLAVE_MAX
137} VhostUserSlaveRequest;
138
5f6f6664
NN
139typedef struct VhostUserMemoryRegion {
140 uint64_t guest_phys_addr;
141 uint64_t memory_size;
142 uint64_t userspace_addr;
3fd74b84 143 uint64_t mmap_offset;
5f6f6664
NN
144} VhostUserMemoryRegion;
145
146typedef struct VhostUserMemory {
147 uint32_t nregions;
148 uint32_t padding;
27598393 149 VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
5f6f6664
NN
150} VhostUserMemory;
151
f1aeb14b 152typedef struct VhostUserMemRegMsg {
3009edff 153 uint64_t padding;
f1aeb14b
RN
154 VhostUserMemoryRegion region;
155} VhostUserMemRegMsg;
156
2b8819c6
VK
157typedef struct VhostUserLog {
158 uint64_t mmap_size;
159 uint64_t mmap_offset;
160} VhostUserLog;
161
4c3e257b
CL
162typedef struct VhostUserConfig {
163 uint32_t offset;
164 uint32_t size;
165 uint32_t flags;
166 uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
167} VhostUserConfig;
168
efbfeb81
GA
169#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512
170#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64
171
172typedef struct VhostUserCryptoSession {
173 /* session id for success, -1 on errors */
174 int64_t session_id;
175 CryptoDevBackendSymSessionInfo session_setup_data;
176 uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
177 uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
178} VhostUserCryptoSession;
179
4c3e257b
CL
180static VhostUserConfig c __attribute__ ((unused));
181#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
182 + sizeof(c.size) \
183 + sizeof(c.flags))
184
44866521
TB
185typedef struct VhostUserVringArea {
186 uint64_t u64;
187 uint64_t size;
188 uint64_t offset;
189} VhostUserVringArea;
190
5ad204bf
XY
191typedef struct VhostUserInflight {
192 uint64_t mmap_size;
193 uint64_t mmap_offset;
194 uint16_t num_queues;
195 uint16_t queue_size;
196} VhostUserInflight;
197
24e34754 198typedef struct {
5f6f6664
NN
199 VhostUserRequest request;
200
201#define VHOST_USER_VERSION_MASK (0x3)
202#define VHOST_USER_REPLY_MASK (0x1<<2)
ca525ce5 203#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
5f6f6664
NN
204 uint32_t flags;
205 uint32_t size; /* the following payload size */
24e34754
MT
206} QEMU_PACKED VhostUserHeader;
207
208typedef union {
5f6f6664
NN
209#define VHOST_USER_VRING_IDX_MASK (0xff)
210#define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
211 uint64_t u64;
212 struct vhost_vring_state state;
213 struct vhost_vring_addr addr;
214 VhostUserMemory memory;
f1aeb14b 215 VhostUserMemRegMsg mem_reg;
2b8819c6 216 VhostUserLog log;
6dcdd06e 217 struct vhost_iotlb_msg iotlb;
4c3e257b 218 VhostUserConfig config;
efbfeb81 219 VhostUserCryptoSession session;
44866521 220 VhostUserVringArea area;
5ad204bf 221 VhostUserInflight inflight;
24e34754
MT
222} VhostUserPayload;
223
224typedef struct VhostUserMsg {
225 VhostUserHeader hdr;
226 VhostUserPayload payload;
5f6f6664
NN
227} QEMU_PACKED VhostUserMsg;
228
229static VhostUserMsg m __attribute__ ((unused));
24e34754 230#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))
5f6f6664 231
24e34754 232#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))
5f6f6664
NN
233
234/* The version of the protocol we support */
235#define VHOST_USER_VERSION (0x1)
236
/* Per-vhost_dev private state for the vhost-user backend. */
struct vhost_user {
    /* Back-pointer to the owning vhost device */
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    /* Channel on which the backend (slave) sends requests back to QEMU */
    QIOChannel *slave_ioc;
    /* Watch source on slave_ioc; managed by slave_update_read_handler() */
    GSource *slave_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    /* Backend-side mapping addresses collected from postcopy replies */
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /* The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions (mirror of what the backend has mapped) */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};
262
/*
 * Pairs a memory region with the indexes scrub_shadow_regions() needs:
 * reg_idx is the region's index in dev->mem (for added regions) or in
 * the shadow table (for removed regions); fd_idx is the index of the
 * region's file descriptor (meaningful for added regions only).
 */
struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};
268
5f6f6664
NN
269static bool ioeventfd_enabled(void)
270{
b0aa77d3 271 return !kvm_enabled() || kvm_eventfds_enabled();
5f6f6664
NN
272}
273
/*
 * Read a VhostUserMsg header from the backend chardev into @msg and
 * validate it: the read must return exactly VHOST_USER_HDR_SIZE bytes
 * and the flags must be exactly REPLY|VERSION.
 *
 * Returns 0 on success, -1 on short read or unexpected flags.
 */
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return -1;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -1;
    }

    return 0;
}
298
a7f523c7
GK
/* Context passed to vhost_user_read_cb() through the watch's opaque. */
struct vhost_user_read_cb_data {
    struct vhost_dev *dev;
    VhostUserMsg *msg;     /* destination for the message being read */
    GMainLoop *loop;       /* nested loop quit once the read completes */
    int ret;               /* 0 on success, -1 on any read error */
};
305
/*
 * Watch callback run from the nested event loop set up by
 * vhost_user_read(): reads and validates a full message (header plus
 * optional payload) from the backend chardev, records the result in
 * data->ret, and quits the nested loop.
 */
static gboolean vhost_user_read_cb(GIOChannel *source, GIOCondition condition,
                                   gpointer opaque)
{
    struct vhost_user_read_cb_data *data = opaque;
    struct vhost_dev *dev = data->dev;
    VhostUserMsg *msg = data->msg;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    if (vhost_user_read_header(dev, msg) < 0) {
        data->ret = -1;
        goto end;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        data->ret = -1;
        goto end;
    }

    if (msg->hdr.size) {
        /* Read the payload directly after the header in the buffer. */
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            data->ret = -1;
            goto end;
        }
    }

end:
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}
347
db8a3772
GK
348static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
349 gpointer opaque);
350
351/*
352 * This updates the read handler to use a new event loop context.
353 * Event sources are removed from the previous context : this ensures
354 * that events detected in the previous context are purged. They will
355 * be re-detected and processed in the new context.
356 */
static void slave_update_read_handler(struct vhost_dev *dev,
                                      GMainContext *ctxt)
{
    struct vhost_user *u = dev->opaque;

    /* No slave channel set up yet: nothing to (re)attach. */
    if (!u->slave_ioc) {
        return;
    }

    /* Drop the watch from the previous context before re-adding. */
    if (u->slave_src) {
        g_source_destroy(u->slave_src);
        g_source_unref(u->slave_src);
    }

    /* Attach a fresh watch in @ctxt (NULL means the default context). */
    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
                                                G_IO_IN | G_IO_HUP,
                                                slave_read, dev, NULL,
                                                ctxt);
}
376
a7f523c7
GK
377static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
378{
379 struct vhost_user *u = dev->opaque;
380 CharBackend *chr = u->user->chr;
381 GMainContext *prev_ctxt = chr->chr->gcontext;
382 GMainContext *ctxt = g_main_context_new();
383 GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
384 struct vhost_user_read_cb_data data = {
385 .dev = dev,
386 .loop = loop,
387 .msg = msg,
388 .ret = 0
389 };
390
391 /*
392 * We want to be able to monitor the slave channel fd while waiting
393 * for chr I/O. This requires an event loop, but we can't nest the
394 * one to which chr is currently attached : its fd handlers might not
395 * be prepared for re-entrancy. So we create a new one and switch chr
396 * to use it.
397 */
db8a3772 398 slave_update_read_handler(dev, ctxt);
a7f523c7
GK
399 qemu_chr_be_update_read_handlers(chr->chr, ctxt);
400 qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
401
402 g_main_loop_run(loop);
403
404 /*
405 * Restore the previous event loop context. This also destroys/recreates
406 * event sources : this guarantees that all pending events in the original
407 * context that have been processed by the nested loop are purged.
408 */
409 qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
db8a3772 410 slave_update_read_handler(dev, NULL);
a7f523c7
GK
411
412 g_main_loop_unref(loop);
413 g_main_context_unref(ctxt);
414
415 return data.ret;
5f6f6664
NN
416}
417
ca525ce5 418static int process_message_reply(struct vhost_dev *dev,
3cf7daf8 419 const VhostUserMsg *msg)
ca525ce5 420{
60cd1102 421 VhostUserMsg msg_reply;
ca525ce5 422
24e34754 423 if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
60cd1102
ZY
424 return 0;
425 }
426
427 if (vhost_user_read(dev, &msg_reply) < 0) {
ca525ce5
PS
428 return -1;
429 }
430
24e34754 431 if (msg_reply.hdr.request != msg->hdr.request) {
ca525ce5
PS
432 error_report("Received unexpected msg type."
433 "Expected %d received %d",
24e34754 434 msg->hdr.request, msg_reply.hdr.request);
ca525ce5
PS
435 return -1;
436 }
437
60cd1102 438 return msg_reply.payload.u64 ? -1 : 0;
ca525ce5
PS
439}
440
21e70425
MAL
441static bool vhost_user_one_time_request(VhostUserRequest request)
442{
443 switch (request) {
444 case VHOST_USER_SET_OWNER:
60915dc4 445 case VHOST_USER_RESET_OWNER:
21e70425
MAL
446 case VHOST_USER_SET_MEM_TABLE:
447 case VHOST_USER_GET_QUEUE_NUM:
c5f048d8 448 case VHOST_USER_NET_SET_MTU:
21e70425
MAL
449 return true;
450 default:
451 return false;
452 }
453}
454
455/* most non-init callers ignore the error */
5f6f6664
NN
/*
 * Send @msg (header plus hdr.size bytes of payload) to the backend,
 * attaching @fd_num ancillary file descriptors from @fds. Device-wide
 * ("one time") requests are silently dropped for all but vq_index 0.
 *
 * Returns 0 on success (or skip), -1 on fd-attach or write failure.
 */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we just need send it once in the first time. For later such
     * request, we just ignore it.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        /* Clear NEED_REPLY so the caller doesn't wait for an ack. */
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -1;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return -1;
    }

    return 0;
}
487
bd2e44fe
MAL
488int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
489{
490 VhostUserMsg msg = {
491 .hdr.request = VHOST_USER_GPU_SET_SOCKET,
492 .hdr.flags = VHOST_USER_VERSION,
493 };
494
495 return vhost_user_write(dev, &msg, &fd, 1);
496}
497
21e70425
MAL
498static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
499 struct vhost_log *log)
b931bfbf 500{
27598393 501 int fds[VHOST_USER_MAX_RAM_SLOTS];
21e70425
MAL
502 size_t fd_num = 0;
503 bool shmfd = virtio_has_feature(dev->protocol_features,
504 VHOST_USER_PROTOCOL_F_LOG_SHMFD);
505 VhostUserMsg msg = {
24e34754
MT
506 .hdr.request = VHOST_USER_SET_LOG_BASE,
507 .hdr.flags = VHOST_USER_VERSION,
48854f57 508 .payload.log.mmap_size = log->size * sizeof(*(log->log)),
2b8819c6 509 .payload.log.mmap_offset = 0,
24e34754 510 .hdr.size = sizeof(msg.payload.log),
21e70425
MAL
511 };
512
513 if (shmfd && log->fd != -1) {
514 fds[fd_num++] = log->fd;
515 }
516
c4843a45
MAL
517 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
518 return -1;
519 }
21e70425
MAL
520
521 if (shmfd) {
24e34754 522 msg.hdr.size = 0;
21e70425 523 if (vhost_user_read(dev, &msg) < 0) {
c4843a45 524 return -1;
21e70425
MAL
525 }
526
24e34754 527 if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
21e70425
MAL
528 error_report("Received unexpected msg type. "
529 "Expected %d received %d",
24e34754 530 VHOST_USER_SET_LOG_BASE, msg.hdr.request);
21e70425
MAL
531 return -1;
532 }
b931bfbf 533 }
21e70425
MAL
534
535 return 0;
b931bfbf
CO
536}
537
23374a84
RN
538static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
539 int *fd)
540{
541 MemoryRegion *mr;
542
543 assert((uintptr_t)addr == addr);
544 mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
545 *fd = memory_region_get_fd(mr);
546
547 return mr;
548}
549
ece99091 550static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
8d193715
RN
551 struct vhost_memory_region *src,
552 uint64_t mmap_offset)
ece99091
RN
553{
554 assert(src != NULL && dst != NULL);
555 dst->userspace_addr = src->userspace_addr;
556 dst->memory_size = src->memory_size;
557 dst->guest_phys_addr = src->guest_phys_addr;
8d193715 558 dst->mmap_offset = mmap_offset;
ece99091
RN
559}
560
2d9da9df
RN
561static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
562 struct vhost_dev *dev,
563 VhostUserMsg *msg,
564 int *fds, size_t *fd_num,
565 bool track_ramblocks)
566{
567 int i, fd;
568 ram_addr_t offset;
569 MemoryRegion *mr;
570 struct vhost_memory_region *reg;
ece99091 571 VhostUserMemoryRegion region_buffer;
2d9da9df
RN
572
573 msg->hdr.request = VHOST_USER_SET_MEM_TABLE;
574
575 for (i = 0; i < dev->mem->nregions; ++i) {
576 reg = dev->mem->regions + i;
577
23374a84 578 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
2d9da9df
RN
579 if (fd > 0) {
580 if (track_ramblocks) {
27598393 581 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
2d9da9df
RN
582 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
583 reg->memory_size,
584 reg->guest_phys_addr,
585 reg->userspace_addr,
586 offset);
587 u->region_rb_offset[i] = offset;
588 u->region_rb[i] = mr->ram_block;
27598393 589 } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
2d9da9df
RN
590 error_report("Failed preparing vhost-user memory table msg");
591 return -1;
592 }
8d193715 593 vhost_user_fill_msg_region(&region_buffer, reg, offset);
ece99091 594 msg->payload.memory.regions[*fd_num] = region_buffer;
2d9da9df
RN
595 fds[(*fd_num)++] = fd;
596 } else if (track_ramblocks) {
597 u->region_rb_offset[i] = 0;
598 u->region_rb[i] = NULL;
599 }
600 }
601
602 msg->payload.memory.nregions = *fd_num;
603
604 if (!*fd_num) {
605 error_report("Failed initializing vhost-user memory map, "
606 "consider using -object memory-backend-file share=on");
607 return -1;
608 }
609
610 msg->hdr.size = sizeof(msg->payload.memory.nregions);
611 msg->hdr.size += sizeof(msg->payload.memory.padding);
612 msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);
613
614 return 1;
615}
616
f1aeb14b
RN
617static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
618 struct vhost_memory_region *vdev_reg)
619{
620 return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
621 shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
622 shadow_reg->memory_size == vdev_reg->memory_size;
623}
624
/*
 * Diff the shadow table (what the backend currently has mapped)
 * against dev->mem (what it should have): fill @rem_reg with regions
 * to remove and @add_reg with regions to add, reporting counts in
 * *nr_rem_reg / *nr_add_reg. With @track_ramblocks, also refresh the
 * postcopy bookkeeping (shadow_pcb, region_rb, region_rb_offset) for
 * regions that persist.
 */
static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            /* fd_num counts every fd-backed region, found or not. */
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;

    return;
}
716
/*
 * Send a VHOST_USER_REM_MEM_REG message for every fd-backed region in
 * @remove_reg, then drop it from the shadow table. Regions without a
 * valid fd were never mapped by the backend, so only the shadow-table
 * update applies to them.
 *
 * Returns 0 on success, non-zero on write failure or a negative ack.
 */
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}
769
/*
 * Send a VHOST_USER_ADD_MEM_REG message for every fd-backed region in
 * @add_reg, then append the region to the shadow table. During
 * postcopy (@track_ramblocks) the backend replies with the same
 * message carrying its client-side mapping address, which is stashed
 * in @shadow_pcb; otherwise a REPLY_ACK is processed if supported.
 *
 * Returns 0 on success, non-zero on write/read failure, unexpected or
 * malformed reply, or a negative ack.
 */
static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            if (vhost_user_write(dev, msg, &fd, 1) < 0) {
                return -1;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                if (vhost_user_read(dev, &msg_reply) < 0) {
                    return -1;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type."
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -1;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -1;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -1;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            /* No fd: nothing was sent; clear postcopy tracking. */
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}
879
/*
 * Incrementally synchronize the backend's memory map with dev->mem
 * using ADD/REM_MEM_REG messages (CONFIGURE_MEM_SLOTS protocol):
 * compute the shadow-vs-device diff, send removals then additions, and
 * during postcopy (@track_ramblocks) commit the collected client bases
 * and ack the backend.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                reply_supported) < 0)
    {
        goto err;
    }

    if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
                shadow_pcb, reply_supported, track_ramblocks) < 0)
    {
        goto err;
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        if (vhost_user_write(dev, msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;

err:
    /* Keep whatever bases were collected before the failure. */
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return -1;
}
936
/*
 * Postcopy variant of SET_MEM_TABLE. Grows the RAMBlock tracking
 * arrays, then either drives the incremental ADD/REM path
 * (@config_mem_slots) or sends one SET_MEM_TABLE message and parses
 * the backend's echoed table to learn the client-side mapping address
 * of every fd-backed region, finally acking with u64 == 0.
 *
 * Returns 0 on success, -1 on any failure.
 */
static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    /* Grow (never shrink) the RAMBlock tracking arrays, zeroing the tail. */
    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          true) < 0) {
            return -1;
        }
    } else {
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              true) < 0) {
            return -1;
        }

        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (vhost_user_read(dev, &msg_reply) < 0) {
            return -1;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type."
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -1;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -1;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent
         * but some of the regions were skipped (above) if they
         * didn't have fd's
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -1;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
            return -1;
        }
    }

    return 0;
}
1045
94c9cb31
MT
/*
 * Push the current guest memory map (dev->mem) to the vhost-user backend.
 *
 * Three paths:
 *  - active postcopy (listen phase installed a fault handler): diverted to
 *    vhost_user_set_mem_table_postcopy(), which needs extra handshaking;
 *  - CONFIGURE_MEM_SLOTS negotiated: incremental ADD/REM_MEM_REG updates;
 *  - otherwise: one legacy SET_MEM_TABLE message carrying all region fds.
 *
 * Returns 0 on success, -1 on any transport or protocol failure.
 */
static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    /* Postcopy is live once 'listen' has registered the userfault handler */
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * version
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        /* Per-region add/remove messages; replies handled internally */
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          false) < 0) {
            return -1;
        }
    } else {
        /* Build the single SET_MEM_TABLE message plus its fd array */
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              false) < 0) {
            return -1;
        }
        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}
1097
21e70425
MAL
1098static int vhost_user_set_vring_addr(struct vhost_dev *dev,
1099 struct vhost_vring_addr *addr)
1100{
1101 VhostUserMsg msg = {
24e34754
MT
1102 .hdr.request = VHOST_USER_SET_VRING_ADDR,
1103 .hdr.flags = VHOST_USER_VERSION,
7f4a930e 1104 .payload.addr = *addr,
24e34754 1105 .hdr.size = sizeof(msg.payload.addr),
21e70425 1106 };
5f6f6664 1107
c4843a45
MAL
1108 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1109 return -1;
1110 }
5f6f6664 1111
21e70425
MAL
1112 return 0;
1113}
5f6f6664 1114
21e70425
MAL
1115static int vhost_user_set_vring_endian(struct vhost_dev *dev,
1116 struct vhost_vring_state *ring)
1117{
5df04f17
FF
1118 bool cross_endian = virtio_has_feature(dev->protocol_features,
1119 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
1120 VhostUserMsg msg = {
24e34754
MT
1121 .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
1122 .hdr.flags = VHOST_USER_VERSION,
5df04f17 1123 .payload.state = *ring,
24e34754 1124 .hdr.size = sizeof(msg.payload.state),
5df04f17
FF
1125 };
1126
1127 if (!cross_endian) {
1128 error_report("vhost-user trying to send unhandled ioctl");
1129 return -1;
1130 }
1131
1132 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1133 return -1;
1134 }
1135
1136 return 0;
21e70425 1137}
5f6f6664 1138
21e70425
MAL
/*
 * Common helper: send any request whose payload is a vhost_vring_state
 * (SET_VRING_NUM/BASE/ENABLE, ...).  Returns 0 on success, -1 if the
 * message could not be written.
 */
static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}
1156
/* Set the size (number of descriptors) of one virtqueue. */
static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
1162
44866521
TB
1163static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
1164 int queue_idx)
1165{
1166 struct vhost_user *u = dev->opaque;
1167 VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
1168 VirtIODevice *vdev = dev->vdev;
1169
1170 if (n->addr && !n->set) {
1171 virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
1172 n->set = true;
1173 }
1174}
1175
1176static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
1177 int queue_idx)
1178{
1179 struct vhost_user *u = dev->opaque;
1180 VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
1181 VirtIODevice *vdev = dev->vdev;
1182
1183 if (n->addr && n->set) {
1184 virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
1185 n->set = false;
1186 }
1187}
1188
21e70425
MAL
/*
 * Set the next descriptor index the backend should process for this ring.
 * Also re-attaches any host notifier that was detached while the ring was
 * stopped (see vhost_user_get_vring_base()).
 */
static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    vhost_user_host_notifier_restore(dev, ring->index);

    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}
1196
1197static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
1198{
dc3db6ad 1199 int i;
21e70425 1200
923e2d98 1201 if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
5f6f6664 1202 return -1;
5f6f6664
NN
1203 }
1204
dc3db6ad
MT
1205 for (i = 0; i < dev->nvqs; ++i) {
1206 struct vhost_vring_state state = {
1207 .index = dev->vq_index + i,
1208 .num = enable,
1209 };
1210
1211 vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
1212 }
21e70425 1213
dc3db6ad
MT
1214 return 0;
1215}
21e70425
MAL
1216
/*
 * Stop a ring and fetch its last avail index from the backend
 * (request/reply round trip).  The host notifier is detached first so the
 * guest cannot kick a ring the backend is tearing down.
 *
 * On success *ring is overwritten with the backend's state and 0 is
 * returned; -1 on transport error or malformed reply.
 */
static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    vhost_user_host_notifier_remove(dev, ring->index);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* Reply must echo the request type ... */
    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -1;
    }

    /* ... and carry exactly one vhost_vring_state payload */
    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}
1252
21e70425
MAL
1253static int vhost_set_vring_file(struct vhost_dev *dev,
1254 VhostUserRequest request,
1255 struct vhost_vring_file *file)
c2bea314 1256{
27598393 1257 int fds[VHOST_USER_MAX_RAM_SLOTS];
9a78a5dd 1258 size_t fd_num = 0;
c2bea314 1259 VhostUserMsg msg = {
24e34754
MT
1260 .hdr.request = request,
1261 .hdr.flags = VHOST_USER_VERSION,
7f4a930e 1262 .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
24e34754 1263 .hdr.size = sizeof(msg.payload.u64),
c2bea314
MAL
1264 };
1265
21e70425
MAL
1266 if (ioeventfd_enabled() && file->fd > 0) {
1267 fds[fd_num++] = file->fd;
1268 } else {
7f4a930e 1269 msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
9a78a5dd
MAL
1270 }
1271
c4843a45
MAL
1272 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
1273 return -1;
1274 }
9a78a5dd 1275
21e70425
MAL
1276 return 0;
1277}
9a78a5dd 1278
21e70425
MAL
/* Hand the guest->backend kick eventfd for one ring to the backend. */
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}
1284
/* Hand the backend->guest interrupt (call) eventfd for one ring to the backend. */
static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}
1290
/*
 * Send a request whose payload is a single u64 (e.g. SET_FEATURES).
 * No reply is expected.  Returns 0 on success, -1 on write failure.
 */
static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}
1306
/* Program the negotiated virtio feature bits into the backend. */
static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}
1312
/* Program the negotiated vhost-user protocol feature bits into the backend. */
static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}
1318
/*
 * Request/reply helper for messages whose reply payload is a single u64
 * (GET_FEATURES, GET_QUEUE_NUM, ...).  One-time requests are silently
 * skipped (returning 0, *u64 untouched) on all but the first queue pair.
 *
 * Returns 0 on success, -1 on transport error or malformed reply.
 */
static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    /* Device-global queries go out once, from the first queue's dev only */
    if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* The reply must echo the request type */
    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *u64 = msg.payload.u64;

    return 0;
}
1353
/* Query the backend's supported virtio feature bits. */
static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    return vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features);
}
1358
1359static int vhost_user_set_owner(struct vhost_dev *dev)
1360{
1361 VhostUserMsg msg = {
24e34754
MT
1362 .hdr.request = VHOST_USER_SET_OWNER,
1363 .hdr.flags = VHOST_USER_VERSION,
21e70425
MAL
1364 };
1365
c4843a45
MAL
1366 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1367 return -1;
1368 }
21e70425
MAL
1369
1370 return 0;
1371}
1372
6b0eff1a
RN
1373static int vhost_user_get_max_memslots(struct vhost_dev *dev,
1374 uint64_t *max_memslots)
1375{
1376 uint64_t backend_max_memslots;
1377 int err;
1378
1379 err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
1380 &backend_max_memslots);
1381 if (err < 0) {
1382 return err;
1383 }
1384
1385 *max_memslots = backend_max_memslots;
1386
1387 return 0;
1388}
1389
21e70425
MAL
/*
 * Reset the backend.  Prefers the well-defined VHOST_USER_RESET_DEVICE
 * when the protocol feature was negotiated, otherwise falls back to the
 * legacy (and loosely specified) RESET_OWNER request.
 */
static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    return 0;
}
1407
4c3e257b
CL
1408static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
1409{
1410 int ret = -1;
1411
1412 if (!dev->config_ops) {
1413 return -1;
1414 }
1415
1416 if (dev->config_ops->vhost_dev_config_notifier) {
1417 ret = dev->config_ops->vhost_dev_config_notifier(dev);
1418 }
1419
1420 return ret;
1421}
1422
44866521
TB
/*
 * Slave-channel handler for VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
 * map one page offered by the backend (via fd) and expose it to the guest
 * as the queue's host notifier memory region, so guest kicks land directly
 * in the backend without a vmexit round trip through QEMU.
 *
 * An existing mapping for the queue is torn down first.  NOFD in the
 * payload means "remove only".  Returns 0 on success, -1 on any failure
 * (feature not negotiated, bad queue index, bad size, mmap or MR setup).
 */
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size;
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -1;
    }

    n = &user->notifier[queue_idx];

    /* Drop any previous mapping before installing (or just removing) one */
    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -1;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        /* MR attach failed: unwind the mapping */
        munmap(addr, page_size);
        return -1;
    }

    n->addr = addr;
    n->set = true;

    return 0;
}
1482
de62e494
GK
/*
 * Tear down the slave (backend->QEMU) channel: remove its GSource from the
 * main loop and release the underlying QIOChannel.  Safe to call only when
 * u->slave_ioc is set; both fields are NULLed so callers can re-check.
 */
static void close_slave_channel(struct vhost_user *u)
{
    g_source_destroy(u->slave_src);
    g_source_unref(u->slave_src);
    u->slave_src = NULL;
    object_unref(OBJECT(u->slave_ioc));
    u->slave_ioc = NULL;
}
1491
57dc0217
GK
/*
 * Main-loop callback for the slave channel: read one backend->QEMU
 * request (header, optional fds, payload), dispatch it, and send a
 * REPLY_ACK if the sender asked for one.
 *
 * Returns G_SOURCE_CONTINUE to keep watching the channel, or
 * G_SOURCE_REMOVE after an unrecoverable error (the channel is closed
 * via close_slave_channel() in that case).  Any fds received with the
 * message are closed before returning.
 */
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    Error *local_err = NULL;
    gboolean rc = G_SOURCE_CONTINUE;
    int ret = 0;
    struct iovec iov;
    g_autofree int *fd = NULL;
    size_t fdsize = 0;
    int i;

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    /* readv_full_all also collects any SCM_RIGHTS fds into fd/fdsize */
    if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    /* Bound-check before reading hdr.size bytes into the payload union */
    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                " Size %d exceeds the maximum %zu.", hdr.size,
                VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG :
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd ? fd[0] : -1);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types have to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];


        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        /* Protocol: non-zero u64 in the ack signals handler failure */
        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
            error_report_err(local_err);
            goto err;
        }
    }

    goto fdcleanup;

err:
    /* Channel is unusable; stop the watch and drop the channel */
    close_slave_channel(u);
    rc = G_SOURCE_REMOVE;

fdcleanup:
    /* We own any received fds regardless of outcome */
    if (fd) {
        for (i = 0; i < fdsize; i++) {
            close(fd[i]);
        }
    }
    return rc;
}
1584
/*
 * Create the backend->QEMU "slave" channel: make a socketpair, keep one
 * end wrapped in a QIOChannel watched from the main loop (slave_read),
 * and pass the other end to the backend via VHOST_USER_SET_SLAVE_REQ_FD.
 *
 * A no-op (returning 0) when the SLAVE_REQ protocol feature was not
 * negotiated.  Returns 0 on success, non-zero on failure (the channel is
 * torn down again in that case).
 */
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    Error *local_err = NULL;
    QIOChannel *ioc;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
    if (!ioc) {
        error_report_err(local_err);
        /*
         * NOTE(review): sv[0]/sv[1] appear to stay open on this path —
         * confirm whether qio_channel_socket_new_fd() takes ownership of
         * the fd on failure before adding close() calls.
         */
        return -1;
    }
    u->slave_ioc = ioc;
    slave_update_read_handler(dev, NULL);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    /* Ship our peer end of the socketpair to the backend */
    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    /* The backend holds its own copy of sv[1] now (or we failed anyway) */
    close(sv[1]);
    if (ret) {
        close_slave_channel(u);
    }

    return ret;
}
1637
18658a3c 1638#ifdef CONFIG_LINUX
f82c1116
DDAG
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 *
 * Translates the faulting client address into (RAMBlock, offset) using the
 * bases recorded during SET_MEM_TABLE and asks the postcopy code to fetch
 * that shared page.  Returns the request result, or -1 if no region
 * contains the fault address.
 *
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    /* Linear scan; region count is small (bounded by memslot limit) */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}
1677
c07e3615
DDAG
/*
 * Postcopy "waker": a page at (rb, offset) has arrived; wake any client
 * (backend) thread blocked on the matching address in its address space.
 * Returns 0 when nothing matches (not an error — the page may simply not
 * be mapped by this device).
 */
static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
                                     uint64_t offset)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    int i;

    trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);

    if (!u) {
        return 0;
    }
    /* Translate the offset into an address in the clients address space */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        if (u->region_rb[i] == rb &&
            offset >= u->region_rb_offset[i] &&
            offset < (u->region_rb_offset[i] +
                      dev->mem->regions[i].memory_size)) {
            uint64_t client_addr = (offset - u->region_rb_offset[i]) +
                                   u->postcopy_client_bases[i];
            trace_vhost_user_postcopy_waker_found(client_addr);
            return postcopy_wake_shared(pcfd, client_addr, rb);
        }
    }

    trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
    return 0;
}
18658a3c 1706#endif
c07e3615 1707
d3dff7a5
DDAG
/*
 * Called at the start of an inbound postcopy on reception of the
 * 'advise' command.
 *
 * Asks the backend for its userfaultfd (passed back as an SCM_RIGHTS fd
 * on the chardev) and registers it with the postcopy fault thread.
 * Returns 0 on success; on failure sets *errp and returns -1.
 */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                     VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -1;
    }

    /* The reply carries no payload, only the fd as ancillary data */
    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -1;
    }
    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -1;
    }
    qemu_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -1;
#endif
}
1763
6864a7b5
DDAG
/*
 * Called at the switch to postcopy on reception of the 'listen' command.
 *
 * Tells the backend to start routing page faults through the registered
 * userfaultfd and records locally (u->postcopy_listen) that subsequent
 * SET_MEM_TABLE calls must use the postcopy variant.  Requires and waits
 * for a REPLY_ACK from the backend.
 */
static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
{
    struct vhost_user *u = dev->opaque;
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    u->postcopy_listen = true;
    trace_vhost_user_postcopy_listen();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_listen to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_listen");
        return ret;
    }

    return 0;
}
1790
46343570
DDAG
/*
 * Called at the end of postcopy
 *
 * Notifies the backend (and waits for its REPLY_ACK), then unregisters
 * and closes the shared userfaultfd.  u->postcopy_fd.handler is cleared
 * so SET_MEM_TABLE reverts to the non-postcopy path.
 */
static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_END,
        .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
    };
    int ret;
    struct vhost_user *u = dev->opaque;

    trace_vhost_user_postcopy_end_entry();
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_end to vhost");
        return -1;
    }

    ret = process_message_reply(dev, &msg);
    if (ret) {
        error_setg(errp, "Failed to receive reply to postcopy_end");
        return ret;
    }
    postcopy_unregister_shared_ufd(&u->postcopy_fd);
    close(u->postcopy_fd.fd);
    u->postcopy_fd.handler = NULL;

    trace_vhost_user_postcopy_end_exit();

    return 0;
}
1822
9ccbfe14
DDAG
/*
 * Migration-notifier entry point for postcopy events.  Dispatches the
 * probe/advise/listen/end phases to the handlers above; PROBE fails early
 * (-ENOENT) when the backend never negotiated PAGEFAULT support, so
 * postcopy is refused before it starts.  Unknown events are ignored.
 */
static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
                                        void *opaque)
{
    struct PostcopyNotifyData *pnd = opaque;
    struct vhost_user *u = container_of(notifier, struct vhost_user,
                                         postcopy_notifier);
    struct vhost_dev *dev = u->dev;

    switch (pnd->reason) {
    case POSTCOPY_NOTIFY_PROBE:
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
            /* TODO: Get the device name into this error somehow */
            error_setg(pnd->errp,
                       "vhost-user backend not capable of postcopy");
            return -ENOENT;
        }
        break;

    case POSTCOPY_NOTIFY_INBOUND_ADVISE:
        return vhost_user_postcopy_advise(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_LISTEN:
        return vhost_user_postcopy_listen(dev, pnd->errp);

    case POSTCOPY_NOTIFY_INBOUND_END:
        return vhost_user_postcopy_end(dev, pnd->errp);

    default:
        /* We ignore notifications we don't know */
        break;
    }

    return 0;
}
1858
28770ff9
KW
/*
 * Backend-init hook for vhost-user: allocate per-device state, then run
 * the initial handshake — query features, negotiate protocol features,
 * query queue and memslot limits, validate feature dependencies, and set
 * up the slave channel (first queue only) and postcopy notifier.
 *
 * Returns 0 on success; -EPROTO for handshake/transport failures and
 * -EINVAL for configuration mismatches.
 *
 * NOTE(review): the -EPROTO early returns do not set *errp — confirm
 * callers tolerate a missing Error object on those paths.
 */
static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
                                   Error **errp)
{
    uint64_t features, protocol_features, ram_slots;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = opaque;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return -EPROTO;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return -EPROTO;
        }

        /* Only keep bits QEMU itself knows how to handle */
        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
            /* Don't acknowledge CONFIG feature if device doesn't support it */
            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
        } else if (!(protocol_features &
                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
            error_setg(errp, "Device expects VHOST_USER_PROTOCOL_F_CONFIG "
                       "but backend does not support it.");
            return -EINVAL;
        }

        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return -EPROTO;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return -EPROTO;
            }
        }
        if (dev->num_queues && dev->max_queues < dev->num_queues) {
            error_setg(errp, "The maximum number of queues supported by the "
                       "backend is %" PRIu64, dev->max_queues);
            return -EINVAL;
        }

        /* IOTLB updates need both the slave channel and acked replies */
        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                 virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_setg(errp, "IOMMU support requires reply-ack and "
                       "slave-req protocol features.");
            return -EINVAL;
        }

        /* get max memory regions if backend supports configurable RAM slots */
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
            u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
        } else {
            err = vhost_user_get_max_memslots(dev, &ram_slots);
            if (err < 0) {
                return -EPROTO;
            }

            /* The limit is shared across devices; it must not shrink */
            if (ram_slots < u->user->memory_slots) {
                error_setg(errp, "The backend specified a max ram slots limit "
                           "of %" PRIu64", when the prior validated limit was "
                           "%d. This limit should never decrease.", ram_slots,
                           u->user->memory_slots);
                return -EINVAL;
            }

            u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    /* The slave channel is per backend connection, not per queue pair */
    if (dev->vq_index == 0) {
        err = vhost_setup_slave_channel(dev);
        if (err < 0) {
            return -EPROTO;
        }
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}
1971
4d0cf552 1972static int vhost_user_backend_cleanup(struct vhost_dev *dev)
5f6f6664 1973{
2152f3fe
MAL
1974 struct vhost_user *u;
1975
5f6f6664
NN
1976 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
1977
2152f3fe 1978 u = dev->opaque;
9ccbfe14
DDAG
1979 if (u->postcopy_notifier.notify) {
1980 postcopy_remove_notifier(&u->postcopy_notifier);
1981 u->postcopy_notifier.notify = NULL;
1982 }
c4f75385
IM
1983 u->postcopy_listen = false;
1984 if (u->postcopy_fd.handler) {
1985 postcopy_unregister_shared_ufd(&u->postcopy_fd);
1986 close(u->postcopy_fd.fd);
1987 u->postcopy_fd.handler = NULL;
1988 }
57dc0217 1989 if (u->slave_ioc) {
de62e494 1990 close_slave_channel(u);
4bbeeba0 1991 }
905125d0
DDAG
1992 g_free(u->region_rb);
1993 u->region_rb = NULL;
1994 g_free(u->region_rb_offset);
1995 u->region_rb_offset = NULL;
1996 u->region_rb_len = 0;
2152f3fe 1997 g_free(u);
5f6f6664
NN
1998 dev->opaque = 0;
1999
2000 return 0;
2001}
2002
fc57fd99
YL
/*
 * vhost-user queue indices are absolute (protocol-level), so this is an
 * identity mapping — unlike kernel vhost, which rebases per device.
 */
static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}
2009
2ce68e4c
IM
2010static int vhost_user_memslots_limit(struct vhost_dev *dev)
2011{
6b0eff1a
RN
2012 struct vhost_user *u = dev->opaque;
2013
2014 return u->user->memory_slots;
2ce68e4c
IM
2015}
2016
1be0ac21
MAL
/*
 * Dirty-log memory must be shared with the backend (shm fd) when it
 * negotiated LOG_SHMFD — it reads/writes the log directly.
 */
static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}
2024
3e866365
TC
/*
 * Post-migration hook: make the network peer re-learn the MAC location.
 * If the guest announces itself (GUEST_ANNOUNCE) nothing is needed;
 * otherwise ask a RARP-capable backend to broadcast a RARP for mac_addr.
 * Returns -1 when neither mechanism is available.
 */
static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
{
    VhostUserMsg msg = { };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* If guest supports GUEST_ANNOUNCE do nothing */
    if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
        return 0;
    }

    /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_RARP)) {
        msg.hdr.request = VHOST_USER_SEND_RARP;
        msg.hdr.flags = VHOST_USER_VERSION;
        /* Payload is the 6-byte MAC packed into the u64 field */
        memcpy((char *)&msg.payload.u64, mac_addr, 6);
        msg.hdr.size = sizeof(msg.payload.u64);

        return vhost_user_write(dev, &msg, NULL, 0);
    }
    return -1;
}
2048
ffe42cc1
MT
2049static bool vhost_user_can_merge(struct vhost_dev *dev,
2050 uint64_t start1, uint64_t size1,
2051 uint64_t start2, uint64_t size2)
2052{
07bdaa41 2053 ram_addr_t offset;
ffe42cc1 2054 int mfd, rfd;
ffe42cc1 2055
23374a84
RN
2056 (void)vhost_user_get_mr_data(start1, &offset, &mfd);
2057 (void)vhost_user_get_mr_data(start2, &offset, &rfd);
ffe42cc1
MT
2058
2059 return mfd == rfd;
2060}
2061
c5f048d8
MC
2062static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
2063{
2064 VhostUserMsg msg;
2065 bool reply_supported = virtio_has_feature(dev->protocol_features,
2066 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2067
2068 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
2069 return 0;
2070 }
2071
24e34754 2072 msg.hdr.request = VHOST_USER_NET_SET_MTU;
c5f048d8 2073 msg.payload.u64 = mtu;
24e34754
MT
2074 msg.hdr.size = sizeof(msg.payload.u64);
2075 msg.hdr.flags = VHOST_USER_VERSION;
c5f048d8 2076 if (reply_supported) {
24e34754 2077 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
c5f048d8
MC
2078 }
2079
2080 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2081 return -1;
2082 }
2083
2084 /* If reply_ack supported, slave has to ack specified MTU is valid */
2085 if (reply_supported) {
3cf7daf8 2086 return process_message_reply(dev, &msg);
c5f048d8
MC
2087 }
2088
2089 return 0;
2090}
2091
6dcdd06e
MC
2092static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
2093 struct vhost_iotlb_msg *imsg)
2094{
2095 VhostUserMsg msg = {
24e34754
MT
2096 .hdr.request = VHOST_USER_IOTLB_MSG,
2097 .hdr.size = sizeof(msg.payload.iotlb),
2098 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
6dcdd06e
MC
2099 .payload.iotlb = *imsg,
2100 };
2101
2102 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2103 return -EFAULT;
2104 }
2105
2106 return process_message_reply(dev, &msg);
2107}
2108
2109
/*
 * Intentionally empty: vhost-user has no dedicated IOTLB channel to
 * enable or disable — IOTLB miss messages arrive on the shared
 * backend channel regardless of @enabled.
 */
static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}
2114
4c3e257b
CL
/*
 * Read the device config space from the backend
 * (VHOST_USER_GET_CONFIG) into @config.
 *
 * @config:     destination buffer, at least @config_len bytes
 * @config_len: number of bytes to read; must not exceed
 *              VHOST_USER_MAX_CONFIG_SIZE
 *
 * Returns 0 on success, -1 on any failure.
 */
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    /* The backend must have negotiated the CONFIG protocol feature. */
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* Sanity-check the reply before trusting its payload. */
    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_report("Received bad msg size.");
        return -1;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}
2158
2159static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
2160 uint32_t offset, uint32_t size, uint32_t flags)
2161{
2162 uint8_t *p;
2163 bool reply_supported = virtio_has_feature(dev->protocol_features,
2164 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2165
2166 VhostUserMsg msg = {
24e34754
MT
2167 .hdr.request = VHOST_USER_SET_CONFIG,
2168 .hdr.flags = VHOST_USER_VERSION,
2169 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
4c3e257b
CL
2170 };
2171
1c3e5a26
MC
2172 if (!virtio_has_feature(dev->protocol_features,
2173 VHOST_USER_PROTOCOL_F_CONFIG)) {
2174 return -1;
2175 }
2176
4c3e257b 2177 if (reply_supported) {
24e34754 2178 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
4c3e257b
CL
2179 }
2180
2181 if (size > VHOST_USER_MAX_CONFIG_SIZE) {
2182 return -1;
2183 }
2184
2185 msg.payload.config.offset = offset,
2186 msg.payload.config.size = size,
2187 msg.payload.config.flags = flags,
2188 p = msg.payload.config.region;
2189 memcpy(p, data, size);
2190
2191 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2192 return -1;
2193 }
2194
2195 if (reply_supported) {
2196 return process_message_reply(dev, &msg);
2197 }
2198
2199 return 0;
2200}
2201
efbfeb81
GA
/*
 * Create a symmetric crypto session in the backend
 * (VHOST_USER_CREATE_CRYPTO_SESSION).
 *
 * @session_info: a CryptoDevBackendSymSessionInfo describing the session
 * @session_id:   on success, receives the backend-assigned session id
 *
 * Returns 0 on success, -1 on any failure.
 */
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    /* Requires the CRYPTO_SESSION protocol feature. */
    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    /* Copy the session setup data, then append any cipher/auth keys. */
    memcpy(&msg.payload.session.session_setup_data, sess_info,
              sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() return -1, create session failed");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_report("vhost_user_read() return -1, create session failed");
        return -1;
    }

    /* Validate the reply before accepting the session id. */
    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -1;
    }

    /* Negative ids are reserved as error indications by the backend. */
    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                     msg.payload.session.session_id);
        return -1;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}
2262
2263static int
2264vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2265{
2266 bool crypto_session = virtio_has_feature(dev->protocol_features,
2267 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2268 VhostUserMsg msg = {
2269 .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2270 .hdr.flags = VHOST_USER_VERSION,
2271 .hdr.size = sizeof(msg.payload.u64),
2272 };
2273 msg.payload.u64 = session_id;
2274
2275 if (!crypto_session) {
2276 error_report("vhost-user trying to send unhandled ioctl");
2277 return -1;
2278 }
2279
2280 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2281 error_report("vhost_user_write() return -1, close session failed");
2282 return -1;
2283 }
2284
2285 return 0;
2286}
2287
988a2775
TB
2288static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
2289 MemoryRegionSection *section)
2290{
2291 bool result;
2292
2293 result = memory_region_get_fd(section->mr) >= 0;
2294
2295 return result;
2296}
2297
5ad204bf
XY
/*
 * Fetch the backend's in-flight request tracking region
 * (VHOST_USER_GET_INFLIGHT_FD) and mmap it into @inflight.
 *
 * @queue_size: size of each virtqueue, echoed back into @inflight
 * @inflight:   filled with the mapping's address, fd, size and offset
 *
 * Returns 0 on success (including "feature not negotiated" and
 * "backend reports nothing to map"), -1 on any failure.
 */
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    /* Without INFLIGHT_SHMFD support there is nothing to fetch. */
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* Sanity-check the reply before using its payload. */
    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -1;
    }

    /* A zero-sized region means the backend has nothing to share. */
    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    /* The region's fd travels out-of-band as SCM_RIGHTS ancillary data. */
    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -1;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -1;
    }

    /* Hand the mapping (and fd ownership) over to the caller. */
    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}
2366
2367static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
2368 struct vhost_inflight *inflight)
2369{
2370 VhostUserMsg msg = {
2371 .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
2372 .hdr.flags = VHOST_USER_VERSION,
2373 .payload.inflight.mmap_size = inflight->size,
2374 .payload.inflight.mmap_offset = inflight->offset,
2375 .payload.inflight.num_queues = dev->nvqs,
2376 .payload.inflight.queue_size = inflight->queue_size,
2377 .hdr.size = sizeof(msg.payload.inflight),
2378 };
2379
2380 if (!virtio_has_feature(dev->protocol_features,
2381 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2382 return 0;
2383 }
2384
2385 if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
2386 return -1;
2387 }
2388
2389 return 0;
2390}
2391
0b99f224 2392bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
4d0cf552 2393{
0b99f224
MAL
2394 if (user->chr) {
2395 error_setg(errp, "Cannot initialize vhost-user state");
2396 return false;
2397 }
2398 user->chr = chr;
6b0eff1a 2399 user->memory_slots = 0;
0b99f224 2400 return true;
4d0cf552
TB
2401}
2402
2403void vhost_user_cleanup(VhostUserState *user)
2404{
44866521
TB
2405 int i;
2406
0b99f224
MAL
2407 if (!user->chr) {
2408 return;
2409 }
2410
44866521
TB
2411 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2412 if (user->notifier[i].addr) {
2413 object_unparent(OBJECT(&user->notifier[i].mr));
2414 munmap(user->notifier[i].addr, qemu_real_host_page_size);
2415 user->notifier[i].addr = NULL;
2416 }
2417 }
0b99f224 2418 user->chr = NULL;
4d0cf552
TB
2419}
2420
5f6f6664
NN
/*
 * Dispatch table wiring the generic vhost core to the vhost-user
 * protocol implementation in this file.  Entries not listed here are
 * left NULL and skipped by the core.
 */
const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        /* Backend lifecycle */
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        /* Memory and dirty logging */
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        /* Virtqueue setup and control */
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        /* Feature negotiation and device control */
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        /* Migration support */
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        /* IOTLB */
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        /* Config space and crypto sessions */
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        /* In-flight request tracking */
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};