]> git.proxmox.com Git - mirror_qemu.git/blame - hw/virtio/vhost-user.c
vhost-user-blk: Add Error parameter to vhost_user_blk_start()
[mirror_qemu.git] / hw / virtio / vhost-user.c
CommitLineData
5f6f6664
NN
1/*
2 * vhost-user
3 *
4 * Copyright (c) 2013 Virtual Open Systems Sarl.
5 *
6 * This work is licensed under the terms of the GNU GPL, version 2 or later.
7 * See the COPYING file in the top-level directory.
8 *
9 */
10
9b8bfe21 11#include "qemu/osdep.h"
da34e65c 12#include "qapi/error.h"
5f6f6664 13#include "hw/virtio/vhost.h"
4d0cf552 14#include "hw/virtio/vhost-user.h"
5f6f6664 15#include "hw/virtio/vhost-backend.h"
44866521 16#include "hw/virtio/virtio.h"
3e866365 17#include "hw/virtio/virtio-net.h"
4d43a603 18#include "chardev/char-fe.h"
57dc0217 19#include "io/channel-socket.h"
5f6f6664
NN
20#include "sysemu/kvm.h"
21#include "qemu/error-report.h"
db725815 22#include "qemu/main-loop.h"
5f6f6664 23#include "qemu/sockets.h"
efbfeb81 24#include "sysemu/cryptodev.h"
9ccbfe14
DDAG
25#include "migration/migration.h"
26#include "migration/postcopy-ram.h"
6864a7b5 27#include "trace.h"
5f6f6664 28
5f6f6664
NN
29#include <sys/ioctl.h>
30#include <sys/socket.h>
31#include <sys/un.h>
18658a3c
PB
32
33#include "standard-headers/linux/vhost_types.h"
34
35#ifdef CONFIG_LINUX
375318d0 36#include <linux/userfaultfd.h>
18658a3c 37#endif
5f6f6664 38
27598393 39#define VHOST_MEMORY_BASELINE_NREGIONS 8
dcb10c00 40#define VHOST_USER_F_PROTOCOL_FEATURES 30
5f57fbea 41#define VHOST_USER_SLAVE_MAX_FDS 8
e2051e9e 42
27598393
RN
43/*
44 * Set maximum number of RAM slots supported to
45 * the maximum number supported by the target
 46 * hardware platform.
47 */
48#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
49 defined(TARGET_ARM) || defined(TARGET_ARM_64)
50#include "hw/acpi/acpi.h"
51#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS
52
53#elif defined(TARGET_PPC) || defined(TARGET_PPC_64)
54#include "hw/ppc/spapr.h"
55#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS
56
57#else
58#define VHOST_USER_MAX_RAM_SLOTS 512
59#endif
60
4c3e257b
CL
61/*
62 * Maximum size of virtio device config space
63 */
64#define VHOST_USER_MAX_CONFIG_SIZE 256
65
de1372d4
TC
66enum VhostUserProtocolFeature {
67 VHOST_USER_PROTOCOL_F_MQ = 0,
68 VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
69 VHOST_USER_PROTOCOL_F_RARP = 2,
ca525ce5 70 VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
c5f048d8 71 VHOST_USER_PROTOCOL_F_NET_MTU = 4,
4bbeeba0 72 VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
5df04f17 73 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
efbfeb81 74 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
9ccbfe14 75 VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
1c3e5a26 76 VHOST_USER_PROTOCOL_F_CONFIG = 9,
5f57fbea 77 VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
44866521 78 VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
5ad204bf 79 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
d91d57e6 80 VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
6b0eff1a
RN
81 /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
82 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
de1372d4
TC
83 VHOST_USER_PROTOCOL_F_MAX
84};
85
86#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
5f6f6664
NN
87
88typedef enum VhostUserRequest {
89 VHOST_USER_NONE = 0,
90 VHOST_USER_GET_FEATURES = 1,
91 VHOST_USER_SET_FEATURES = 2,
92 VHOST_USER_SET_OWNER = 3,
60915dc4 93 VHOST_USER_RESET_OWNER = 4,
5f6f6664
NN
94 VHOST_USER_SET_MEM_TABLE = 5,
95 VHOST_USER_SET_LOG_BASE = 6,
96 VHOST_USER_SET_LOG_FD = 7,
97 VHOST_USER_SET_VRING_NUM = 8,
98 VHOST_USER_SET_VRING_ADDR = 9,
99 VHOST_USER_SET_VRING_BASE = 10,
100 VHOST_USER_GET_VRING_BASE = 11,
101 VHOST_USER_SET_VRING_KICK = 12,
102 VHOST_USER_SET_VRING_CALL = 13,
103 VHOST_USER_SET_VRING_ERR = 14,
dcb10c00
MT
104 VHOST_USER_GET_PROTOCOL_FEATURES = 15,
105 VHOST_USER_SET_PROTOCOL_FEATURES = 16,
e2051e9e 106 VHOST_USER_GET_QUEUE_NUM = 17,
7263a0ad 107 VHOST_USER_SET_VRING_ENABLE = 18,
3e866365 108 VHOST_USER_SEND_RARP = 19,
c5f048d8 109 VHOST_USER_NET_SET_MTU = 20,
4bbeeba0 110 VHOST_USER_SET_SLAVE_REQ_FD = 21,
6dcdd06e 111 VHOST_USER_IOTLB_MSG = 22,
5df04f17 112 VHOST_USER_SET_VRING_ENDIAN = 23,
4c3e257b
CL
113 VHOST_USER_GET_CONFIG = 24,
114 VHOST_USER_SET_CONFIG = 25,
efbfeb81
GA
115 VHOST_USER_CREATE_CRYPTO_SESSION = 26,
116 VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
d3dff7a5 117 VHOST_USER_POSTCOPY_ADVISE = 28,
6864a7b5 118 VHOST_USER_POSTCOPY_LISTEN = 29,
c639187e 119 VHOST_USER_POSTCOPY_END = 30,
5ad204bf
XY
120 VHOST_USER_GET_INFLIGHT_FD = 31,
121 VHOST_USER_SET_INFLIGHT_FD = 32,
bd2e44fe 122 VHOST_USER_GPU_SET_SOCKET = 33,
d91d57e6 123 VHOST_USER_RESET_DEVICE = 34,
6b0eff1a
RN
124 /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
125 VHOST_USER_GET_MAX_MEM_SLOTS = 36,
f1aeb14b
RN
126 VHOST_USER_ADD_MEM_REG = 37,
127 VHOST_USER_REM_MEM_REG = 38,
5f6f6664
NN
128 VHOST_USER_MAX
129} VhostUserRequest;
130
4bbeeba0
MAL
131typedef enum VhostUserSlaveRequest {
132 VHOST_USER_SLAVE_NONE = 0,
6dcdd06e 133 VHOST_USER_SLAVE_IOTLB_MSG = 1,
4c3e257b 134 VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
44866521 135 VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
4bbeeba0
MAL
136 VHOST_USER_SLAVE_MAX
137} VhostUserSlaveRequest;
138
5f6f6664
NN
139typedef struct VhostUserMemoryRegion {
140 uint64_t guest_phys_addr;
141 uint64_t memory_size;
142 uint64_t userspace_addr;
3fd74b84 143 uint64_t mmap_offset;
5f6f6664
NN
144} VhostUserMemoryRegion;
145
146typedef struct VhostUserMemory {
147 uint32_t nregions;
148 uint32_t padding;
27598393 149 VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
5f6f6664
NN
150} VhostUserMemory;
151
f1aeb14b 152typedef struct VhostUserMemRegMsg {
3009edff 153 uint64_t padding;
f1aeb14b
RN
154 VhostUserMemoryRegion region;
155} VhostUserMemRegMsg;
156
2b8819c6
VK
157typedef struct VhostUserLog {
158 uint64_t mmap_size;
159 uint64_t mmap_offset;
160} VhostUserLog;
161
4c3e257b
CL
162typedef struct VhostUserConfig {
163 uint32_t offset;
164 uint32_t size;
165 uint32_t flags;
166 uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
167} VhostUserConfig;
168
efbfeb81
GA
169#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN 512
170#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN 64
171
172typedef struct VhostUserCryptoSession {
173 /* session id for success, -1 on errors */
174 int64_t session_id;
175 CryptoDevBackendSymSessionInfo session_setup_data;
176 uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
177 uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
178} VhostUserCryptoSession;
179
4c3e257b
CL
180static VhostUserConfig c __attribute__ ((unused));
181#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
182 + sizeof(c.size) \
183 + sizeof(c.flags))
184
44866521
TB
185typedef struct VhostUserVringArea {
186 uint64_t u64;
187 uint64_t size;
188 uint64_t offset;
189} VhostUserVringArea;
190
5ad204bf
XY
191typedef struct VhostUserInflight {
192 uint64_t mmap_size;
193 uint64_t mmap_offset;
194 uint16_t num_queues;
195 uint16_t queue_size;
196} VhostUserInflight;
197
24e34754 198typedef struct {
5f6f6664
NN
199 VhostUserRequest request;
200
201#define VHOST_USER_VERSION_MASK (0x3)
202#define VHOST_USER_REPLY_MASK (0x1<<2)
ca525ce5 203#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
5f6f6664
NN
204 uint32_t flags;
205 uint32_t size; /* the following payload size */
24e34754
MT
206} QEMU_PACKED VhostUserHeader;
207
208typedef union {
5f6f6664
NN
209#define VHOST_USER_VRING_IDX_MASK (0xff)
210#define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
211 uint64_t u64;
212 struct vhost_vring_state state;
213 struct vhost_vring_addr addr;
214 VhostUserMemory memory;
f1aeb14b 215 VhostUserMemRegMsg mem_reg;
2b8819c6 216 VhostUserLog log;
6dcdd06e 217 struct vhost_iotlb_msg iotlb;
4c3e257b 218 VhostUserConfig config;
efbfeb81 219 VhostUserCryptoSession session;
44866521 220 VhostUserVringArea area;
5ad204bf 221 VhostUserInflight inflight;
24e34754
MT
222} VhostUserPayload;
223
224typedef struct VhostUserMsg {
225 VhostUserHeader hdr;
226 VhostUserPayload payload;
5f6f6664
NN
227} QEMU_PACKED VhostUserMsg;
228
229static VhostUserMsg m __attribute__ ((unused));
24e34754 230#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))
5f6f6664 231
24e34754 232#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))
5f6f6664
NN
233
234/* The version of the protocol we support */
235#define VHOST_USER_VERSION (0x1)
236
2152f3fe 237struct vhost_user {
9ccbfe14 238 struct vhost_dev *dev;
4d0cf552
TB
239 /* Shared between vhost devs of the same virtio device */
240 VhostUserState *user;
57dc0217
GK
241 QIOChannel *slave_ioc;
242 GSource *slave_src;
9ccbfe14 243 NotifierWithReturn postcopy_notifier;
f82c1116 244 struct PostCopyFD postcopy_fd;
27598393 245 uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
905125d0
DDAG
246 /* Length of the region_rb and region_rb_offset arrays */
247 size_t region_rb_len;
248 /* RAMBlock associated with a given region */
249 RAMBlock **region_rb;
250 /* The offset from the start of the RAMBlock to the start of the
251 * vhost region.
252 */
253 ram_addr_t *region_rb_offset;
254
6864a7b5
DDAG
255 /* True once we've entered postcopy_listen */
256 bool postcopy_listen;
f1aeb14b
RN
257
258 /* Our current regions */
259 int num_shadow_regions;
27598393 260 struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
f1aeb14b
RN
261};
262
263struct scrub_regions {
264 struct vhost_memory_region *region;
265 int reg_idx;
266 int fd_idx;
2152f3fe
MAL
267};
268
5f6f6664
NN
269static bool ioeventfd_enabled(void)
270{
b0aa77d3 271 return !kvm_enabled() || kvm_eventfds_enabled();
5f6f6664
NN
272}
273
9af84c02 274static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
5f6f6664 275{
2152f3fe 276 struct vhost_user *u = dev->opaque;
4d0cf552 277 CharBackend *chr = u->user->chr;
5f6f6664
NN
278 uint8_t *p = (uint8_t *) msg;
279 int r, size = VHOST_USER_HDR_SIZE;
280
281 r = qemu_chr_fe_read_all(chr, p, size);
282 if (r != size) {
5421f318 283 error_report("Failed to read msg header. Read %d instead of %d."
24e34754 284 " Original request %d.", r, size, msg->hdr.request);
9af84c02 285 return -1;
5f6f6664
NN
286 }
287
288 /* validate received flags */
24e34754 289 if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
5f6f6664 290 error_report("Failed to read msg header."
24e34754 291 " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
5f6f6664 292 VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
9af84c02
MAL
293 return -1;
294 }
295
296 return 0;
297}
298
a7f523c7
GK
299struct vhost_user_read_cb_data {
300 struct vhost_dev *dev;
301 VhostUserMsg *msg;
302 GMainLoop *loop;
303 int ret;
304};
305
306static gboolean vhost_user_read_cb(GIOChannel *source, GIOCondition condition,
307 gpointer opaque)
9af84c02 308{
a7f523c7
GK
309 struct vhost_user_read_cb_data *data = opaque;
310 struct vhost_dev *dev = data->dev;
311 VhostUserMsg *msg = data->msg;
9af84c02
MAL
312 struct vhost_user *u = dev->opaque;
313 CharBackend *chr = u->user->chr;
314 uint8_t *p = (uint8_t *) msg;
315 int r, size;
316
317 if (vhost_user_read_header(dev, msg) < 0) {
a7f523c7
GK
318 data->ret = -1;
319 goto end;
5f6f6664
NN
320 }
321
322 /* validate message size is sane */
24e34754 323 if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
5f6f6664 324 error_report("Failed to read msg header."
24e34754 325 " Size %d exceeds the maximum %zu.", msg->hdr.size,
5f6f6664 326 VHOST_USER_PAYLOAD_SIZE);
a7f523c7
GK
327 data->ret = -1;
328 goto end;
5f6f6664
NN
329 }
330
24e34754 331 if (msg->hdr.size) {
5f6f6664 332 p += VHOST_USER_HDR_SIZE;
24e34754 333 size = msg->hdr.size;
5f6f6664
NN
334 r = qemu_chr_fe_read_all(chr, p, size);
335 if (r != size) {
336 error_report("Failed to read msg payload."
24e34754 337 " Read %d instead of %d.", r, msg->hdr.size);
a7f523c7
GK
338 data->ret = -1;
339 goto end;
5f6f6664
NN
340 }
341 }
342
a7f523c7
GK
343end:
344 g_main_loop_quit(data->loop);
345 return G_SOURCE_REMOVE;
346}
347
db8a3772
GK
348static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
349 gpointer opaque);
350
351/*
352 * This updates the read handler to use a new event loop context.
353 * Event sources are removed from the previous context : this ensures
354 * that events detected in the previous context are purged. They will
355 * be re-detected and processed in the new context.
356 */
357static void slave_update_read_handler(struct vhost_dev *dev,
358 GMainContext *ctxt)
359{
360 struct vhost_user *u = dev->opaque;
361
362 if (!u->slave_ioc) {
363 return;
364 }
365
366 if (u->slave_src) {
367 g_source_destroy(u->slave_src);
368 g_source_unref(u->slave_src);
369 }
370
371 u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
372 G_IO_IN | G_IO_HUP,
373 slave_read, dev, NULL,
374 ctxt);
375}
376
a7f523c7
GK
377static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
378{
379 struct vhost_user *u = dev->opaque;
380 CharBackend *chr = u->user->chr;
381 GMainContext *prev_ctxt = chr->chr->gcontext;
382 GMainContext *ctxt = g_main_context_new();
383 GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
384 struct vhost_user_read_cb_data data = {
385 .dev = dev,
386 .loop = loop,
387 .msg = msg,
388 .ret = 0
389 };
390
391 /*
392 * We want to be able to monitor the slave channel fd while waiting
393 * for chr I/O. This requires an event loop, but we can't nest the
394 * one to which chr is currently attached : its fd handlers might not
395 * be prepared for re-entrancy. So we create a new one and switch chr
396 * to use it.
397 */
db8a3772 398 slave_update_read_handler(dev, ctxt);
a7f523c7
GK
399 qemu_chr_be_update_read_handlers(chr->chr, ctxt);
400 qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);
401
402 g_main_loop_run(loop);
403
404 /*
405 * Restore the previous event loop context. This also destroys/recreates
406 * event sources : this guarantees that all pending events in the original
407 * context that have been processed by the nested loop are purged.
408 */
409 qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
db8a3772 410 slave_update_read_handler(dev, NULL);
a7f523c7
GK
411
412 g_main_loop_unref(loop);
413 g_main_context_unref(ctxt);
414
415 return data.ret;
5f6f6664
NN
416}
417
ca525ce5 418static int process_message_reply(struct vhost_dev *dev,
3cf7daf8 419 const VhostUserMsg *msg)
ca525ce5 420{
60cd1102 421 VhostUserMsg msg_reply;
ca525ce5 422
24e34754 423 if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
60cd1102
ZY
424 return 0;
425 }
426
427 if (vhost_user_read(dev, &msg_reply) < 0) {
ca525ce5
PS
428 return -1;
429 }
430
24e34754 431 if (msg_reply.hdr.request != msg->hdr.request) {
ca525ce5
PS
432 error_report("Received unexpected msg type."
433 "Expected %d received %d",
24e34754 434 msg->hdr.request, msg_reply.hdr.request);
ca525ce5
PS
435 return -1;
436 }
437
60cd1102 438 return msg_reply.payload.u64 ? -1 : 0;
ca525ce5
PS
439}
440
21e70425
MAL
441static bool vhost_user_one_time_request(VhostUserRequest request)
442{
443 switch (request) {
444 case VHOST_USER_SET_OWNER:
60915dc4 445 case VHOST_USER_RESET_OWNER:
21e70425
MAL
446 case VHOST_USER_SET_MEM_TABLE:
447 case VHOST_USER_GET_QUEUE_NUM:
c5f048d8 448 case VHOST_USER_NET_SET_MTU:
21e70425
MAL
449 return true;
450 default:
451 return false;
452 }
453}
454
455/* most non-init callers ignore the error */
5f6f6664
NN
456static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
457 int *fds, int fd_num)
458{
2152f3fe 459 struct vhost_user *u = dev->opaque;
4d0cf552 460 CharBackend *chr = u->user->chr;
24e34754 461 int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;
5f6f6664 462
21e70425
MAL
463 /*
464 * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
465 * we just need send it once in the first time. For later such
466 * request, we just ignore it.
467 */
24e34754
MT
468 if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
469 msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
21e70425
MAL
470 return 0;
471 }
472
6fab2f3f 473 if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
f6b85710 474 error_report("Failed to set msg fds.");
6fab2f3f
MAL
475 return -1;
476 }
5f6f6664 477
f6b85710
MAL
478 ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
479 if (ret != size) {
480 error_report("Failed to write msg."
481 " Wrote %d instead of %d.", ret, size);
482 return -1;
483 }
484
485 return 0;
5f6f6664
NN
486}
487
bd2e44fe
MAL
488int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
489{
490 VhostUserMsg msg = {
491 .hdr.request = VHOST_USER_GPU_SET_SOCKET,
492 .hdr.flags = VHOST_USER_VERSION,
493 };
494
495 return vhost_user_write(dev, &msg, &fd, 1);
496}
497
21e70425
MAL
498static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
499 struct vhost_log *log)
b931bfbf 500{
27598393 501 int fds[VHOST_USER_MAX_RAM_SLOTS];
21e70425
MAL
502 size_t fd_num = 0;
503 bool shmfd = virtio_has_feature(dev->protocol_features,
504 VHOST_USER_PROTOCOL_F_LOG_SHMFD);
505 VhostUserMsg msg = {
24e34754
MT
506 .hdr.request = VHOST_USER_SET_LOG_BASE,
507 .hdr.flags = VHOST_USER_VERSION,
48854f57 508 .payload.log.mmap_size = log->size * sizeof(*(log->log)),
2b8819c6 509 .payload.log.mmap_offset = 0,
24e34754 510 .hdr.size = sizeof(msg.payload.log),
21e70425
MAL
511 };
512
513 if (shmfd && log->fd != -1) {
514 fds[fd_num++] = log->fd;
515 }
516
c4843a45
MAL
517 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
518 return -1;
519 }
21e70425
MAL
520
521 if (shmfd) {
24e34754 522 msg.hdr.size = 0;
21e70425 523 if (vhost_user_read(dev, &msg) < 0) {
c4843a45 524 return -1;
21e70425
MAL
525 }
526
24e34754 527 if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
21e70425
MAL
528 error_report("Received unexpected msg type. "
529 "Expected %d received %d",
24e34754 530 VHOST_USER_SET_LOG_BASE, msg.hdr.request);
21e70425
MAL
531 return -1;
532 }
b931bfbf 533 }
21e70425
MAL
534
535 return 0;
b931bfbf
CO
536}
537
23374a84
RN
538static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
539 int *fd)
540{
541 MemoryRegion *mr;
542
543 assert((uintptr_t)addr == addr);
544 mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
545 *fd = memory_region_get_fd(mr);
546
547 return mr;
548}
549
ece99091 550static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
8d193715
RN
551 struct vhost_memory_region *src,
552 uint64_t mmap_offset)
ece99091
RN
553{
554 assert(src != NULL && dst != NULL);
555 dst->userspace_addr = src->userspace_addr;
556 dst->memory_size = src->memory_size;
557 dst->guest_phys_addr = src->guest_phys_addr;
8d193715 558 dst->mmap_offset = mmap_offset;
ece99091
RN
559}
560
2d9da9df
RN
561static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
562 struct vhost_dev *dev,
563 VhostUserMsg *msg,
564 int *fds, size_t *fd_num,
565 bool track_ramblocks)
566{
567 int i, fd;
568 ram_addr_t offset;
569 MemoryRegion *mr;
570 struct vhost_memory_region *reg;
ece99091 571 VhostUserMemoryRegion region_buffer;
2d9da9df
RN
572
573 msg->hdr.request = VHOST_USER_SET_MEM_TABLE;
574
575 for (i = 0; i < dev->mem->nregions; ++i) {
576 reg = dev->mem->regions + i;
577
23374a84 578 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
2d9da9df
RN
579 if (fd > 0) {
580 if (track_ramblocks) {
27598393 581 assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
2d9da9df
RN
582 trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
583 reg->memory_size,
584 reg->guest_phys_addr,
585 reg->userspace_addr,
586 offset);
587 u->region_rb_offset[i] = offset;
588 u->region_rb[i] = mr->ram_block;
27598393 589 } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
2d9da9df
RN
590 error_report("Failed preparing vhost-user memory table msg");
591 return -1;
592 }
8d193715 593 vhost_user_fill_msg_region(&region_buffer, reg, offset);
ece99091 594 msg->payload.memory.regions[*fd_num] = region_buffer;
2d9da9df
RN
595 fds[(*fd_num)++] = fd;
596 } else if (track_ramblocks) {
597 u->region_rb_offset[i] = 0;
598 u->region_rb[i] = NULL;
599 }
600 }
601
602 msg->payload.memory.nregions = *fd_num;
603
604 if (!*fd_num) {
605 error_report("Failed initializing vhost-user memory map, "
606 "consider using -object memory-backend-file share=on");
607 return -1;
608 }
609
610 msg->hdr.size = sizeof(msg->payload.memory.nregions);
611 msg->hdr.size += sizeof(msg->payload.memory.padding);
612 msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);
613
614 return 1;
615}
616
f1aeb14b
RN
617static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
618 struct vhost_memory_region *vdev_reg)
619{
620 return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
621 shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
622 shadow_reg->memory_size == vdev_reg->memory_size;
623}
624
625static void scrub_shadow_regions(struct vhost_dev *dev,
626 struct scrub_regions *add_reg,
627 int *nr_add_reg,
628 struct scrub_regions *rem_reg,
629 int *nr_rem_reg, uint64_t *shadow_pcb,
630 bool track_ramblocks)
631{
632 struct vhost_user *u = dev->opaque;
27598393 633 bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
f1aeb14b
RN
634 struct vhost_memory_region *reg, *shadow_reg;
635 int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
636 ram_addr_t offset;
637 MemoryRegion *mr;
638 bool matching;
639
640 /*
641 * Find memory regions present in our shadow state which are not in
642 * the device's current memory state.
643 *
644 * Mark regions in both the shadow and device state as "found".
645 */
646 for (i = 0; i < u->num_shadow_regions; i++) {
647 shadow_reg = &u->shadow_regions[i];
648 matching = false;
649
650 for (j = 0; j < dev->mem->nregions; j++) {
651 reg = &dev->mem->regions[j];
652
653 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
654
655 if (reg_equal(shadow_reg, reg)) {
656 matching = true;
657 found[j] = true;
658 if (track_ramblocks) {
659 /*
660 * Reset postcopy client bases, region_rb, and
661 * region_rb_offset in case regions are removed.
662 */
663 if (fd > 0) {
664 u->region_rb_offset[j] = offset;
665 u->region_rb[j] = mr->ram_block;
666 shadow_pcb[j] = u->postcopy_client_bases[i];
667 } else {
668 u->region_rb_offset[j] = 0;
669 u->region_rb[j] = NULL;
670 }
671 }
672 break;
673 }
674 }
675
676 /*
677 * If the region was not found in the current device memory state
678 * create an entry for it in the removed list.
679 */
680 if (!matching) {
681 rem_reg[rm_idx].region = shadow_reg;
682 rem_reg[rm_idx++].reg_idx = i;
683 }
684 }
685
686 /*
687 * For regions not marked "found", create entries in the added list.
688 *
689 * Note their indexes in the device memory state and the indexes of their
690 * file descriptors.
691 */
692 for (i = 0; i < dev->mem->nregions; i++) {
693 reg = &dev->mem->regions[i];
8b616bee 694 vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
f1aeb14b
RN
695 if (fd > 0) {
696 ++fd_num;
697 }
698
699 /*
700 * If the region was in both the shadow and device state we don't
701 * need to send a VHOST_USER_ADD_MEM_REG message for it.
702 */
703 if (found[i]) {
704 continue;
705 }
706
707 add_reg[add_idx].region = reg;
708 add_reg[add_idx].reg_idx = i;
709 add_reg[add_idx++].fd_idx = fd_num;
710 }
711 *nr_rem_reg = rm_idx;
712 *nr_add_reg = add_idx;
713
714 return;
715}
716
717static int send_remove_regions(struct vhost_dev *dev,
718 struct scrub_regions *remove_reg,
719 int nr_rem_reg, VhostUserMsg *msg,
720 bool reply_supported)
721{
722 struct vhost_user *u = dev->opaque;
723 struct vhost_memory_region *shadow_reg;
724 int i, fd, shadow_reg_idx, ret;
725 ram_addr_t offset;
726 VhostUserMemoryRegion region_buffer;
727
728 /*
729 * The regions in remove_reg appear in the same order they do in the
730 * shadow table. Therefore we can minimize memory copies by iterating
731 * through remove_reg backwards.
732 */
733 for (i = nr_rem_reg - 1; i >= 0; i--) {
734 shadow_reg = remove_reg[i].region;
735 shadow_reg_idx = remove_reg[i].reg_idx;
736
737 vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);
738
739 if (fd > 0) {
740 msg->hdr.request = VHOST_USER_REM_MEM_REG;
8d193715 741 vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
f1aeb14b
RN
742 msg->payload.mem_reg.region = region_buffer;
743
744 if (vhost_user_write(dev, msg, &fd, 1) < 0) {
745 return -1;
746 }
747
748 if (reply_supported) {
749 ret = process_message_reply(dev, msg);
750 if (ret) {
751 return ret;
752 }
753 }
754 }
755
756 /*
757 * At this point we know the backend has unmapped the region. It is now
758 * safe to remove it from the shadow table.
759 */
760 memmove(&u->shadow_regions[shadow_reg_idx],
761 &u->shadow_regions[shadow_reg_idx + 1],
762 sizeof(struct vhost_memory_region) *
4fdecf05 763 (u->num_shadow_regions - shadow_reg_idx - 1));
f1aeb14b
RN
764 u->num_shadow_regions--;
765 }
766
767 return 0;
768}
769
770static int send_add_regions(struct vhost_dev *dev,
771 struct scrub_regions *add_reg, int nr_add_reg,
772 VhostUserMsg *msg, uint64_t *shadow_pcb,
773 bool reply_supported, bool track_ramblocks)
774{
775 struct vhost_user *u = dev->opaque;
776 int i, fd, ret, reg_idx, reg_fd_idx;
777 struct vhost_memory_region *reg;
778 MemoryRegion *mr;
779 ram_addr_t offset;
780 VhostUserMsg msg_reply;
781 VhostUserMemoryRegion region_buffer;
782
783 for (i = 0; i < nr_add_reg; i++) {
784 reg = add_reg[i].region;
785 reg_idx = add_reg[i].reg_idx;
786 reg_fd_idx = add_reg[i].fd_idx;
787
788 mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
789
790 if (fd > 0) {
791 if (track_ramblocks) {
792 trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
793 reg->memory_size,
794 reg->guest_phys_addr,
795 reg->userspace_addr,
796 offset);
797 u->region_rb_offset[reg_idx] = offset;
798 u->region_rb[reg_idx] = mr->ram_block;
799 }
800 msg->hdr.request = VHOST_USER_ADD_MEM_REG;
8d193715 801 vhost_user_fill_msg_region(&region_buffer, reg, offset);
f1aeb14b 802 msg->payload.mem_reg.region = region_buffer;
f1aeb14b
RN
803
804 if (vhost_user_write(dev, msg, &fd, 1) < 0) {
805 return -1;
806 }
807
808 if (track_ramblocks) {
809 uint64_t reply_gpa;
810
811 if (vhost_user_read(dev, &msg_reply) < 0) {
812 return -1;
813 }
814
815 reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;
816
817 if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
818 error_report("%s: Received unexpected msg type."
819 "Expected %d received %d", __func__,
820 VHOST_USER_ADD_MEM_REG,
821 msg_reply.hdr.request);
822 return -1;
823 }
824
825 /*
826 * We're using the same structure, just reusing one of the
827 * fields, so it should be the same size.
828 */
829 if (msg_reply.hdr.size != msg->hdr.size) {
830 error_report("%s: Unexpected size for postcopy reply "
831 "%d vs %d", __func__, msg_reply.hdr.size,
832 msg->hdr.size);
833 return -1;
834 }
835
836 /* Get the postcopy client base from the backend's reply. */
837 if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
838 shadow_pcb[reg_idx] =
839 msg_reply.payload.mem_reg.region.userspace_addr;
840 trace_vhost_user_set_mem_table_postcopy(
841 msg_reply.payload.mem_reg.region.userspace_addr,
842 msg->payload.mem_reg.region.userspace_addr,
843 reg_fd_idx, reg_idx);
844 } else {
845 error_report("%s: invalid postcopy reply for region. "
846 "Got guest physical address %" PRIX64 ", expected "
847 "%" PRIX64, __func__, reply_gpa,
848 dev->mem->regions[reg_idx].guest_phys_addr);
849 return -1;
850 }
851 } else if (reply_supported) {
852 ret = process_message_reply(dev, msg);
853 if (ret) {
854 return ret;
855 }
856 }
857 } else if (track_ramblocks) {
858 u->region_rb_offset[reg_idx] = 0;
859 u->region_rb[reg_idx] = NULL;
860 }
861
862 /*
863 * At this point, we know the backend has mapped in the new
864 * region, if the region has a valid file descriptor.
865 *
866 * The region should now be added to the shadow table.
867 */
868 u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
869 reg->guest_phys_addr;
870 u->shadow_regions[u->num_shadow_regions].userspace_addr =
871 reg->userspace_addr;
872 u->shadow_regions[u->num_shadow_regions].memory_size =
873 reg->memory_size;
874 u->num_shadow_regions++;
875 }
876
877 return 0;
878}
879
880static int vhost_user_add_remove_regions(struct vhost_dev *dev,
881 VhostUserMsg *msg,
882 bool reply_supported,
883 bool track_ramblocks)
884{
885 struct vhost_user *u = dev->opaque;
27598393
RN
886 struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
887 struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
888 uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
f1aeb14b
RN
889 int nr_add_reg, nr_rem_reg;
890
3009edff 891 msg->hdr.size = sizeof(msg->payload.mem_reg);
f1aeb14b
RN
892
893 /* Find the regions which need to be removed or added. */
894 scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
895 shadow_pcb, track_ramblocks);
896
897 if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
898 reply_supported) < 0)
899 {
900 goto err;
901 }
902
903 if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg,
904 shadow_pcb, reply_supported, track_ramblocks) < 0)
905 {
906 goto err;
907 }
908
909 if (track_ramblocks) {
910 memcpy(u->postcopy_client_bases, shadow_pcb,
27598393 911 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
f1aeb14b
RN
912 /*
913 * Now we've registered this with the postcopy code, we ack to the
914 * client, because now we're in the position to be able to deal with
915 * any faults it generates.
916 */
917 /* TODO: Use this for failure cases as well with a bad value. */
918 msg->hdr.size = sizeof(msg->payload.u64);
919 msg->payload.u64 = 0; /* OK */
920
921 if (vhost_user_write(dev, msg, NULL, 0) < 0) {
922 return -1;
923 }
924 }
925
926 return 0;
927
928err:
929 if (track_ramblocks) {
930 memcpy(u->postcopy_client_bases, shadow_pcb,
27598393 931 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
f1aeb14b
RN
932 }
933
934 return -1;
935}
936
55d754b3 937static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
f1aeb14b
RN
938 struct vhost_memory *mem,
939 bool reply_supported,
940 bool config_mem_slots)
55d754b3 941{
9bb38019 942 struct vhost_user *u = dev->opaque;
27598393 943 int fds[VHOST_MEMORY_BASELINE_NREGIONS];
55d754b3 944 size_t fd_num = 0;
9bb38019
DDAG
945 VhostUserMsg msg_reply;
946 int region_i, msg_i;
947
55d754b3 948 VhostUserMsg msg = {
55d754b3
DDAG
949 .hdr.flags = VHOST_USER_VERSION,
950 };
951
905125d0
DDAG
952 if (u->region_rb_len < dev->mem->nregions) {
953 u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
954 u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
955 dev->mem->nregions);
956 memset(&(u->region_rb[u->region_rb_len]), '\0',
957 sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
958 memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
959 sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
960 u->region_rb_len = dev->mem->nregions;
961 }
962
f1aeb14b
RN
963 if (config_mem_slots) {
964 if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
2d9da9df 965 true) < 0) {
f1aeb14b
RN
966 return -1;
967 }
968 } else {
969 if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
970 true) < 0) {
971 return -1;
972 }
55d754b3 973
f1aeb14b
RN
974 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
975 return -1;
976 }
55d754b3 977
f1aeb14b
RN
978 if (vhost_user_read(dev, &msg_reply) < 0) {
979 return -1;
980 }
9bb38019 981
f1aeb14b
RN
982 if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
983 error_report("%s: Received unexpected msg type."
984 "Expected %d received %d", __func__,
985 VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
986 return -1;
987 }
9bb38019 988
f1aeb14b
RN
989 /*
990 * We're using the same structure, just reusing one of the
991 * fields, so it should be the same size.
992 */
993 if (msg_reply.hdr.size != msg.hdr.size) {
994 error_report("%s: Unexpected size for postcopy reply "
995 "%d vs %d", __func__, msg_reply.hdr.size,
996 msg.hdr.size);
997 return -1;
998 }
999
1000 memset(u->postcopy_client_bases, 0,
27598393 1001 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
f1aeb14b
RN
1002
1003 /*
1004 * They're in the same order as the regions that were sent
1005 * but some of the regions were skipped (above) if they
1006 * didn't have fd's
1007 */
1008 for (msg_i = 0, region_i = 0;
1009 region_i < dev->mem->nregions;
1010 region_i++) {
1011 if (msg_i < fd_num &&
1012 msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
1013 dev->mem->regions[region_i].guest_phys_addr) {
1014 u->postcopy_client_bases[region_i] =
1015 msg_reply.payload.memory.regions[msg_i].userspace_addr;
1016 trace_vhost_user_set_mem_table_postcopy(
1017 msg_reply.payload.memory.regions[msg_i].userspace_addr,
1018 msg.payload.memory.regions[msg_i].userspace_addr,
1019 msg_i, region_i);
1020 msg_i++;
1021 }
1022 }
1023 if (msg_i != fd_num) {
1024 error_report("%s: postcopy reply not fully consumed "
1025 "%d vs %zd",
1026 __func__, msg_i, fd_num);
1027 return -1;
1028 }
1029
1030 /*
1031 * Now we've registered this with the postcopy code, we ack to the
1032 * client, because now we're in the position to be able to deal
1033 * with any faults it generates.
1034 */
1035 /* TODO: Use this for failure cases as well with a bad value. */
1036 msg.hdr.size = sizeof(msg.payload.u64);
1037 msg.payload.u64 = 0; /* OK */
1038 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1039 return -1;
9bb38019 1040 }
9bb38019
DDAG
1041 }
1042
55d754b3
DDAG
1043 return 0;
1044}
1045
/*
 * Send the current memory table to the backend (VHOST_USER_SET_MEM_TABLE).
 *
 * Three variants are dispatched here:
 *  - postcopy migration active: delegated to the postcopy-specific version;
 *  - CONFIGURE_MEM_SLOTS negotiated: incremental add/remove of regions;
 *  - otherwise: a single full-table message carrying the region fds.
 *
 * Returns 0 on success, -1 on failure.
 */
static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    /* Postcopy is in effect once the listen phase started and a ufd handler
     * was registered by the advise phase. */
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in it's own
         * version
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        /* Incremental region updates; replies handled internally. */
        if (vhost_user_add_remove_regions(dev, &msg, reply_supported,
                                          false) < 0) {
            return -1;
        }
    } else {
        /* Whole-table update: fds for each region ride along with the msg. */
        if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                              false) < 0) {
            return -1;
        }
        if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
            return -1;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}
1097
21e70425
MAL
1098static int vhost_user_set_vring_addr(struct vhost_dev *dev,
1099 struct vhost_vring_addr *addr)
1100{
1101 VhostUserMsg msg = {
24e34754
MT
1102 .hdr.request = VHOST_USER_SET_VRING_ADDR,
1103 .hdr.flags = VHOST_USER_VERSION,
7f4a930e 1104 .payload.addr = *addr,
24e34754 1105 .hdr.size = sizeof(msg.payload.addr),
21e70425 1106 };
5f6f6664 1107
c4843a45
MAL
1108 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1109 return -1;
1110 }
5f6f6664 1111
21e70425
MAL
1112 return 0;
1113}
5f6f6664 1114
21e70425
MAL
1115static int vhost_user_set_vring_endian(struct vhost_dev *dev,
1116 struct vhost_vring_state *ring)
1117{
5df04f17
FF
1118 bool cross_endian = virtio_has_feature(dev->protocol_features,
1119 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
1120 VhostUserMsg msg = {
24e34754
MT
1121 .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
1122 .hdr.flags = VHOST_USER_VERSION,
5df04f17 1123 .payload.state = *ring,
24e34754 1124 .hdr.size = sizeof(msg.payload.state),
5df04f17
FF
1125 };
1126
1127 if (!cross_endian) {
1128 error_report("vhost-user trying to send unhandled ioctl");
1129 return -1;
1130 }
1131
1132 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1133 return -1;
1134 }
1135
1136 return 0;
21e70425 1137}
5f6f6664 1138
21e70425
MAL
1139static int vhost_set_vring(struct vhost_dev *dev,
1140 unsigned long int request,
1141 struct vhost_vring_state *ring)
1142{
1143 VhostUserMsg msg = {
24e34754
MT
1144 .hdr.request = request,
1145 .hdr.flags = VHOST_USER_VERSION,
7f4a930e 1146 .payload.state = *ring,
24e34754 1147 .hdr.size = sizeof(msg.payload.state),
21e70425
MAL
1148 };
1149
c4843a45
MAL
1150 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1151 return -1;
1152 }
21e70425
MAL
1153
1154 return 0;
1155}
1156
/* Set the size (number of descriptors) of a vring on the backend. */
static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}
1162
44866521
TB
1163static void vhost_user_host_notifier_restore(struct vhost_dev *dev,
1164 int queue_idx)
1165{
1166 struct vhost_user *u = dev->opaque;
1167 VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
1168 VirtIODevice *vdev = dev->vdev;
1169
1170 if (n->addr && !n->set) {
1171 virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true);
1172 n->set = true;
1173 }
1174}
1175
1176static void vhost_user_host_notifier_remove(struct vhost_dev *dev,
1177 int queue_idx)
1178{
1179 struct vhost_user *u = dev->opaque;
1180 VhostUserHostNotifier *n = &u->user->notifier[queue_idx];
1181 VirtIODevice *vdev = dev->vdev;
1182
1183 if (n->addr && n->set) {
1184 virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
1185 n->set = false;
1186 }
1187}
1188
/*
 * Set the vring's starting index on the backend.  Any host notifier that was
 * removed while the ring was stopped is re-attached first, since this call
 * marks the ring becoming usable again.
 */
static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    vhost_user_host_notifier_restore(dev, ring->index);

    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}
1196
1197static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
1198{
dc3db6ad 1199 int i;
21e70425 1200
923e2d98 1201 if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
5f6f6664 1202 return -1;
5f6f6664
NN
1203 }
1204
dc3db6ad
MT
1205 for (i = 0; i < dev->nvqs; ++i) {
1206 struct vhost_vring_state state = {
1207 .index = dev->vq_index + i,
1208 .num = enable,
1209 };
1210
1211 vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
1212 }
21e70425 1213
dc3db6ad
MT
1214 return 0;
1215}
21e70425
MAL
1216
/*
 * Fetch the vring's current avail index from the backend
 * (VHOST_USER_GET_VRING_BASE).  This is also the "stop ring" operation in
 * the vhost-user protocol, so the host notifier is detached first.
 *
 * On success the reply state is copied into *ring and 0 is returned;
 * -1 on any transport or validation failure.
 */
static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    /* Ring is stopping: take down the MMIO-backed notifier, if any. */
    vhost_user_host_notifier_remove(dev, ring->index);

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* Validate that the reply matches our request before trusting it. */
    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -1;
    }

    *ring = msg.payload.state;

    return 0;
}
1252
21e70425
MAL
1253static int vhost_set_vring_file(struct vhost_dev *dev,
1254 VhostUserRequest request,
1255 struct vhost_vring_file *file)
c2bea314 1256{
27598393 1257 int fds[VHOST_USER_MAX_RAM_SLOTS];
9a78a5dd 1258 size_t fd_num = 0;
c2bea314 1259 VhostUserMsg msg = {
24e34754
MT
1260 .hdr.request = request,
1261 .hdr.flags = VHOST_USER_VERSION,
7f4a930e 1262 .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
24e34754 1263 .hdr.size = sizeof(msg.payload.u64),
c2bea314
MAL
1264 };
1265
21e70425
MAL
1266 if (ioeventfd_enabled() && file->fd > 0) {
1267 fds[fd_num++] = file->fd;
1268 } else {
7f4a930e 1269 msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
9a78a5dd
MAL
1270 }
1271
c4843a45
MAL
1272 if (vhost_user_write(dev, &msg, fds, fd_num) < 0) {
1273 return -1;
1274 }
9a78a5dd 1275
21e70425
MAL
1276 return 0;
1277}
9a78a5dd 1278
/* Pass the guest->backend kick eventfd for a vring to the backend. */
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}
1284
/* Pass the backend->guest call (interrupt) eventfd for a vring. */
static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}
1290
1291static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64)
1292{
1293 VhostUserMsg msg = {
24e34754
MT
1294 .hdr.request = request,
1295 .hdr.flags = VHOST_USER_VERSION,
7f4a930e 1296 .payload.u64 = u64,
24e34754 1297 .hdr.size = sizeof(msg.payload.u64),
21e70425
MAL
1298 };
1299
c4843a45
MAL
1300 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1301 return -1;
1302 }
21e70425
MAL
1303
1304 return 0;
1305}
1306
/* Acknowledge the negotiated virtio feature bits to the backend. */
static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features);
}
1312
/* Acknowledge the negotiated vhost-user protocol feature bits. */
static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features);
}
1318
1319static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
1320{
1321 VhostUserMsg msg = {
24e34754
MT
1322 .hdr.request = request,
1323 .hdr.flags = VHOST_USER_VERSION,
21e70425
MAL
1324 };
1325
1326 if (vhost_user_one_time_request(request) && dev->vq_index != 0) {
1327 return 0;
9a78a5dd 1328 }
c2bea314 1329
c4843a45
MAL
1330 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1331 return -1;
1332 }
21e70425
MAL
1333
1334 if (vhost_user_read(dev, &msg) < 0) {
c4843a45 1335 return -1;
21e70425
MAL
1336 }
1337
24e34754 1338 if (msg.hdr.request != request) {
21e70425 1339 error_report("Received unexpected msg type. Expected %d received %d",
24e34754 1340 request, msg.hdr.request);
21e70425
MAL
1341 return -1;
1342 }
1343
24e34754 1344 if (msg.hdr.size != sizeof(msg.payload.u64)) {
21e70425
MAL
1345 error_report("Received bad msg size.");
1346 return -1;
1347 }
1348
7f4a930e 1349 *u64 = msg.payload.u64;
21e70425
MAL
1350
1351 return 0;
1352}
1353
1354static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
1355{
f2a6e6c4
KW
1356 if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
1357 return -EPROTO;
1358 }
1359
1360 return 0;
21e70425
MAL
1361}
1362
1363static int vhost_user_set_owner(struct vhost_dev *dev)
1364{
1365 VhostUserMsg msg = {
24e34754
MT
1366 .hdr.request = VHOST_USER_SET_OWNER,
1367 .hdr.flags = VHOST_USER_VERSION,
21e70425
MAL
1368 };
1369
c4843a45 1370 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
f2a6e6c4 1371 return -EPROTO;
c4843a45 1372 }
21e70425
MAL
1373
1374 return 0;
1375}
1376
6b0eff1a
RN
1377static int vhost_user_get_max_memslots(struct vhost_dev *dev,
1378 uint64_t *max_memslots)
1379{
1380 uint64_t backend_max_memslots;
1381 int err;
1382
1383 err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
1384 &backend_max_memslots);
1385 if (err < 0) {
1386 return err;
1387 }
1388
1389 *max_memslots = backend_max_memslots;
1390
1391 return 0;
1392}
1393
21e70425
MAL
1394static int vhost_user_reset_device(struct vhost_dev *dev)
1395{
1396 VhostUserMsg msg = {
24e34754 1397 .hdr.flags = VHOST_USER_VERSION,
21e70425
MAL
1398 };
1399
d91d57e6
RN
1400 msg.hdr.request = virtio_has_feature(dev->protocol_features,
1401 VHOST_USER_PROTOCOL_F_RESET_DEVICE)
1402 ? VHOST_USER_RESET_DEVICE
1403 : VHOST_USER_RESET_OWNER;
1404
c4843a45
MAL
1405 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1406 return -1;
1407 }
21e70425 1408
c2bea314
MAL
1409 return 0;
1410}
1411
4c3e257b
CL
1412static int vhost_user_slave_handle_config_change(struct vhost_dev *dev)
1413{
1414 int ret = -1;
1415
1416 if (!dev->config_ops) {
1417 return -1;
1418 }
1419
1420 if (dev->config_ops->vhost_dev_config_notifier) {
1421 ret = dev->config_ops->vhost_dev_config_notifier(dev);
1422 }
1423
1424 return ret;
1425}
1426
/*
 * Handle VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG: map one page supplied by
 * the backend via @fd and expose it to the guest as the queue's host
 * notifier memory region, so doorbell writes bypass QEMU.
 *
 * Returns 0 on success, -1 on validation or mapping failure.
 */
static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev,
                                                       VhostUserVringArea *area,
                                                       int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size;
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    /* Feature must be negotiated and the queue index must be valid. */
    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -1;
    }

    n = &user->notifier[queue_idx];

    /* Tear down any previous mapping before installing a new one. */
    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }

    /* NOFD means "remove the notifier"; the teardown above is all we do. */
    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -1;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -1;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        /* Could not wire up the MR; undo the mapping. */
        munmap(addr, page_size);
        return -1;
    }

    n->addr = addr;
    n->set = true;

    return 0;
}
1486
/*
 * Tear down the slave request channel: destroy the GSource watching the
 * channel first (so no callback can fire on a dead ioc), then drop our
 * reference to the QIOChannel itself.
 */
static void close_slave_channel(struct vhost_user *u)
{
    g_source_destroy(u->slave_src);
    g_source_unref(u->slave_src);
    u->slave_src = NULL;
    object_unref(OBJECT(u->slave_ioc));
    u->slave_ioc = NULL;
}
1495
/*
 * GSource callback: process one request arriving on the slave (backend ->
 * QEMU) channel.  Reads header + payload (plus any ancillary fds),
 * dispatches on the request type, and sends a REPLY_ACK if the backend
 * asked for one.  On any transport error the channel is closed and the
 * source removed; received fds are always closed before returning.
 */
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque)
{
    struct vhost_dev *dev = opaque;
    struct vhost_user *u = dev->opaque;
    VhostUserHeader hdr = { 0, };
    VhostUserPayload payload = { 0, };
    Error *local_err = NULL;
    gboolean rc = G_SOURCE_CONTINUE;
    int ret = 0;
    struct iovec iov;
    g_autofree int *fd = NULL;
    size_t fdsize = 0;
    int i;

    /* Read header */
    iov.iov_base = &hdr;
    iov.iov_len = VHOST_USER_HDR_SIZE;

    /* Also collects any fds passed as SCM_RIGHTS ancillary data. */
    if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    /* Reject payloads larger than our union before reading them. */
    if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                " Size %d exceeds the maximum %zu.", hdr.size,
                VHOST_USER_PAYLOAD_SIZE);
        goto err;
    }

    /* Read payload */
    if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
        error_report_err(local_err);
        goto err;
    }

    switch (hdr.request) {
    case VHOST_USER_SLAVE_IOTLB_MSG:
        ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
        break;
    case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG :
        ret = vhost_user_slave_handle_config_change(dev);
        break;
    case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG:
        ret = vhost_user_slave_handle_vring_host_notifier(dev, &payload.area,
                                                          fd ? fd[0] : -1);
        break;
    default:
        error_report("Received unexpected msg type: %d.", hdr.request);
        ret = -EINVAL;
    }

    /*
     * REPLY_ACK feature handling. Other reply types has to be managed
     * directly in their request handlers.
     */
    if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        struct iovec iovec[2];


        hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        hdr.flags |= VHOST_USER_REPLY_MASK;

        /* Per spec: 0 = success, non-zero = failure. */
        payload.u64 = !!ret;
        hdr.size = sizeof(payload.u64);

        iovec[0].iov_base = &hdr;
        iovec[0].iov_len = VHOST_USER_HDR_SIZE;
        iovec[1].iov_base = &payload;
        iovec[1].iov_len = hdr.size;

        if (qio_channel_writev_all(ioc, iovec, ARRAY_SIZE(iovec), &local_err)) {
            error_report_err(local_err);
            goto err;
        }
    }

    goto fdcleanup;

err:
    close_slave_channel(u);
    rc = G_SOURCE_REMOVE;

fdcleanup:
    /* We own any received fds that the handlers did not consume. */
    if (fd) {
        for (i = 0; i < fdsize; i++) {
            close(fd[i]);
        }
    }
    return rc;
}
1588
/*
 * Establish the slave request channel (backend -> QEMU): create a
 * socketpair, keep one end wrapped in a QIOChannel that slave_read()
 * watches, and hand the other end to the backend via
 * VHOST_USER_SET_SLAVE_REQ_FD.  A no-op when the SLAVE_REQ protocol
 * feature was not negotiated.
 *
 * Returns 0 on success (or when unsupported), non-zero on failure.
 */
static int vhost_setup_slave_channel(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_SLAVE_REQ_FD,
        .hdr.flags = VHOST_USER_VERSION,
    };
    struct vhost_user *u = dev->opaque;
    int sv[2], ret = 0;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    Error *local_err = NULL;
    QIOChannel *ioc;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_SLAVE_REQ)) {
        return 0;
    }

    if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        error_report("socketpair() failed");
        return -1;
    }

    /* The QIOChannel takes ownership of sv[0]. */
    ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
    if (!ioc) {
        error_report_err(local_err);
        return -1;
    }
    u->slave_ioc = ioc;
    slave_update_read_handler(dev, NULL);

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    /* sv[1] travels to the backend as ancillary data. */
    ret = vhost_user_write(dev, &msg, &sv[1], 1);
    if (ret) {
        goto out;
    }

    if (reply_supported) {
        ret = process_message_reply(dev, &msg);
    }

out:
    /* Our copy of the backend's end is no longer needed. */
    close(sv[1]);
    if (ret) {
        close_slave_channel(u);
    }

    return ret;
}
1641
#ifdef CONFIG_LINUX
/*
 * Called back from the postcopy fault thread when a fault is received on our
 * ufd.
 * TODO: This is Linux specific
 */
static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
                                             void *ufd)
{
    struct vhost_dev *dev = pcfd->data;
    struct vhost_user *u = dev->opaque;
    struct uffd_msg *msg = ufd;
    uint64_t faultaddr = msg->arg.pagefault.address;
    RAMBlock *rb = NULL;
    uint64_t rb_offset;
    int i;

    trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
                                            dev->mem->nregions);
    /* Map the backend's fault address back to a RAMBlock + offset. */
    for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
        trace_vhost_user_postcopy_fault_handler_loop(i,
                u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
        if (faultaddr >= u->postcopy_client_bases[i]) {
            /* Offset of the fault address in the vhost region */
            uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
            if (region_offset < dev->mem->regions[i].memory_size) {
                rb_offset = region_offset + u->region_rb_offset[i];
                trace_vhost_user_postcopy_fault_handler_found(i,
                        region_offset, rb_offset);
                rb = u->region_rb[i];
                /* Ask the migration code to fetch this page. */
                return postcopy_request_shared_page(pcfd, rb, faultaddr,
                                                    rb_offset);
            }
        }
    }
    error_report("%s: Failed to find region for fault %" PRIx64,
                 __func__, faultaddr);
    return -1;
}
1681
c07e3615
DDAG
1682static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1683 uint64_t offset)
1684{
1685 struct vhost_dev *dev = pcfd->data;
1686 struct vhost_user *u = dev->opaque;
1687 int i;
1688
1689 trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1690
1691 if (!u) {
1692 return 0;
1693 }
1694 /* Translate the offset into an address in the clients address space */
1695 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1696 if (u->region_rb[i] == rb &&
1697 offset >= u->region_rb_offset[i] &&
1698 offset < (u->region_rb_offset[i] +
1699 dev->mem->regions[i].memory_size)) {
1700 uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1701 u->postcopy_client_bases[i];
1702 trace_vhost_user_postcopy_waker_found(client_addr);
1703 return postcopy_wake_shared(pcfd, client_addr, rb);
1704 }
1705 }
1706
1707 trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1708 return 0;
1709}
18658a3c 1710#endif
c07e3615 1711
/*
 * Called at the start of an inbound postcopy on reception of the
 * 'advise' command.
 *
 * Asks the backend for its userfaultfd (VHOST_USER_POSTCOPY_ADVISE) and
 * registers it with the postcopy fault thread so backend page faults can be
 * serviced by migration.  Returns 0 on success, -1 with *errp set otherwise.
 */
static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
{
#ifdef CONFIG_LINUX
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ufd;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_setg(errp, "Failed to send postcopy_advise to vhost");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_setg(errp, "Failed to get postcopy_advise reply from vhost");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
        error_setg(errp, "Unexpected msg type. Expected %d received %d",
                   VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size) {
        error_setg(errp, "Received bad msg size.");
        return -1;
    }
    /* The ufd arrives as ancillary data on the chardev, not in the msg. */
    ufd = qemu_chr_fe_get_msgfd(chr);
    if (ufd < 0) {
        error_setg(errp, "%s: Failed to get ufd", __func__);
        return -1;
    }
    qemu_set_nonblock(ufd);

    /* register ufd with userfault thread */
    u->postcopy_fd.fd = ufd;
    u->postcopy_fd.data = dev;
    u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
    u->postcopy_fd.waker = vhost_user_postcopy_waker;
    u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
    postcopy_register_shared_ufd(&u->postcopy_fd);
    return 0;
#else
    error_setg(errp, "Postcopy not supported on non-Linux systems");
    return -1;
#endif
}
1767
6864a7b5
DDAG
1768/*
1769 * Called at the switch to postcopy on reception of the 'listen' command.
1770 */
1771static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
1772{
1773 struct vhost_user *u = dev->opaque;
1774 int ret;
1775 VhostUserMsg msg = {
1776 .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
1777 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1778 };
1779 u->postcopy_listen = true;
1780 trace_vhost_user_postcopy_listen();
1781 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1782 error_setg(errp, "Failed to send postcopy_listen to vhost");
1783 return -1;
1784 }
1785
1786 ret = process_message_reply(dev, &msg);
1787 if (ret) {
1788 error_setg(errp, "Failed to receive reply to postcopy_listen");
1789 return ret;
1790 }
1791
1792 return 0;
1793}
1794
46343570
DDAG
1795/*
1796 * Called at the end of postcopy
1797 */
1798static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
1799{
1800 VhostUserMsg msg = {
1801 .hdr.request = VHOST_USER_POSTCOPY_END,
1802 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
1803 };
1804 int ret;
1805 struct vhost_user *u = dev->opaque;
1806
1807 trace_vhost_user_postcopy_end_entry();
1808 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
1809 error_setg(errp, "Failed to send postcopy_end to vhost");
1810 return -1;
1811 }
1812
1813 ret = process_message_reply(dev, &msg);
1814 if (ret) {
1815 error_setg(errp, "Failed to receive reply to postcopy_end");
1816 return ret;
1817 }
1818 postcopy_unregister_shared_ufd(&u->postcopy_fd);
c4f75385 1819 close(u->postcopy_fd.fd);
46343570
DDAG
1820 u->postcopy_fd.handler = NULL;
1821
1822 trace_vhost_user_postcopy_end_exit();
1823
1824 return 0;
1825}
1826
9ccbfe14
DDAG
1827static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
1828 void *opaque)
1829{
1830 struct PostcopyNotifyData *pnd = opaque;
1831 struct vhost_user *u = container_of(notifier, struct vhost_user,
1832 postcopy_notifier);
1833 struct vhost_dev *dev = u->dev;
1834
1835 switch (pnd->reason) {
1836 case POSTCOPY_NOTIFY_PROBE:
1837 if (!virtio_has_feature(dev->protocol_features,
1838 VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
1839 /* TODO: Get the device name into this error somehow */
1840 error_setg(pnd->errp,
1841 "vhost-user backend not capable of postcopy");
1842 return -ENOENT;
1843 }
1844 break;
1845
d3dff7a5
DDAG
1846 case POSTCOPY_NOTIFY_INBOUND_ADVISE:
1847 return vhost_user_postcopy_advise(dev, pnd->errp);
1848
6864a7b5
DDAG
1849 case POSTCOPY_NOTIFY_INBOUND_LISTEN:
1850 return vhost_user_postcopy_listen(dev, pnd->errp);
1851
46343570
DDAG
1852 case POSTCOPY_NOTIFY_INBOUND_END:
1853 return vhost_user_postcopy_end(dev, pnd->errp);
1854
9ccbfe14
DDAG
1855 default:
1856 /* We ignore notifications we don't know */
1857 break;
1858 }
1859
1860 return 0;
1861}
1862
/*
 * Initialize the vhost-user backend for @dev.
 *
 * Allocates the per-device vhost_user state, negotiates virtio and
 * vhost-user protocol features, validates queue and memory-slot limits,
 * installs a migration blocker when the backend cannot log dirty pages,
 * sets up the slave request channel (first queue only) and registers the
 * postcopy notifier.
 *
 * @opaque is the VhostUserState shared by all devices on this connection.
 * Returns 0 on success, negative errno with *errp set on failure.
 */
static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
                                   Error **errp)
{
    uint64_t features, protocol_features, ram_slots;
    struct vhost_user *u;
    int err;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = g_new0(struct vhost_user, 1);
    u->user = opaque;
    u->dev = dev;
    dev->opaque = u;

    err = vhost_user_get_features(dev, &features);
    if (err < 0) {
        return err;
    }

    if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

        err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
                                 &protocol_features);
        if (err < 0) {
            return -EPROTO;
        }

        /* Only keep the protocol features QEMU itself understands. */
        dev->protocol_features =
            protocol_features & VHOST_USER_PROTOCOL_FEATURE_MASK;

        if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
            /* Don't acknowledge CONFIG feature if device doesn't support it */
            dev->protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
        } else if (!(protocol_features &
                    (1ULL << VHOST_USER_PROTOCOL_F_CONFIG))) {
            error_setg(errp, "Device expects VHOST_USER_PROTOCOL_F_CONFIG "
                       "but backend does not support it.");
            return -EINVAL;
        }

        err = vhost_user_set_protocol_features(dev, dev->protocol_features);
        if (err < 0) {
            return -EPROTO;
        }

        /* query the max queues we support if backend supports Multiple Queue */
        if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
            err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
                                     &dev->max_queues);
            if (err < 0) {
                return -EPROTO;
            }
        }
        if (dev->num_queues && dev->max_queues < dev->num_queues) {
            error_setg(errp, "The maximum number of queues supported by the "
                       "backend is %" PRIu64, dev->max_queues);
            return -EINVAL;
        }

        /* IOTLB updates for vIOMMU travel over the slave channel and rely on
         * REPLY_ACK for synchronization, so both features are mandatory. */
        if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
                !(virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_SLAVE_REQ) &&
                 virtio_has_feature(dev->protocol_features,
                    VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
            error_setg(errp, "IOMMU support requires reply-ack and "
                       "slave-req protocol features.");
            return -EINVAL;
        }

        /* get max memory regions if backend supports configurable RAM slots */
        if (!virtio_has_feature(dev->protocol_features,
                                VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
            u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
        } else {
            err = vhost_user_get_max_memslots(dev, &ram_slots);
            if (err < 0) {
                return -EPROTO;
            }

            /* The limit is validated once per connection and may only grow. */
            if (ram_slots < u->user->memory_slots) {
                error_setg(errp, "The backend specified a max ram slots limit "
                           "of %" PRIu64", when the prior validated limit was "
                           "%d. This limit should never decrease.", ram_slots,
                           u->user->memory_slots);
                return -EINVAL;
            }

            u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
        }
    }

    if (dev->migration_blocker == NULL &&
        !virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
        error_setg(&dev->migration_blocker,
                   "Migration disabled: vhost-user backend lacks "
                   "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
    }

    /* The slave channel is shared by all queue pairs; set it up once. */
    if (dev->vq_index == 0) {
        err = vhost_setup_slave_channel(dev);
        if (err < 0) {
            return -EPROTO;
        }
    }

    u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
    postcopy_add_notifier(&u->postcopy_notifier);

    return 0;
}
1975
/*
 * Release everything vhost_user_backend_init() set up: the postcopy
 * notifier and ufd, the slave channel, the region tracking arrays, and
 * finally the vhost_user state itself.  Always returns 0.
 */
static int vhost_user_backend_cleanup(struct vhost_dev *dev)
{
    struct vhost_user *u;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    u = dev->opaque;
    if (u->postcopy_notifier.notify) {
        postcopy_remove_notifier(&u->postcopy_notifier);
        u->postcopy_notifier.notify = NULL;
    }
    u->postcopy_listen = false;
    if (u->postcopy_fd.handler) {
        /* Unregister before closing so the fault thread stops using the fd. */
        postcopy_unregister_shared_ufd(&u->postcopy_fd);
        close(u->postcopy_fd.fd);
        u->postcopy_fd.handler = NULL;
    }
    if (u->slave_ioc) {
        close_slave_channel(u);
    }
    g_free(u->region_rb);
    u->region_rb = NULL;
    g_free(u->region_rb_offset);
    u->region_rb_offset = NULL;
    u->region_rb_len = 0;
    g_free(u);
    dev->opaque = 0;

    return 0;
}
2006
/*
 * vhost-user queue indices are absolute, so the mapping is the identity
 * (unlike vhost-kernel, which renumbers per device).
 */
static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    return idx;
}
2013
/* Report the memory-slot limit negotiated in vhost_user_backend_init(). */
static int vhost_user_memslots_limit(struct vhost_dev *dev)
{
    struct vhost_user *u = dev->opaque;

    return u->user->memory_slots;
}
2020
/*
 * Dirty-page logging needs a shared-memory log region iff the backend
 * negotiated the LOG_SHMFD protocol feature.
 */
static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
{
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    return virtio_has_feature(dev->protocol_features,
                              VHOST_USER_PROTOCOL_F_LOG_SHMFD);
}
2028
3e866365
TC
2029static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
2030{
ebf2a499 2031 VhostUserMsg msg = { };
3e866365
TC
2032
2033 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2034
2035 /* If guest supports GUEST_ANNOUNCE do nothing */
2036 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
2037 return 0;
2038 }
2039
2040 /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
2041 if (virtio_has_feature(dev->protocol_features,
2042 VHOST_USER_PROTOCOL_F_RARP)) {
24e34754
MT
2043 msg.hdr.request = VHOST_USER_SEND_RARP;
2044 msg.hdr.flags = VHOST_USER_VERSION;
7f4a930e 2045 memcpy((char *)&msg.payload.u64, mac_addr, 6);
24e34754 2046 msg.hdr.size = sizeof(msg.payload.u64);
3e866365 2047
c4843a45 2048 return vhost_user_write(dev, &msg, NULL, 0);
3e866365
TC
2049 }
2050 return -1;
2051}
2052
ffe42cc1
MT
2053static bool vhost_user_can_merge(struct vhost_dev *dev,
2054 uint64_t start1, uint64_t size1,
2055 uint64_t start2, uint64_t size2)
2056{
07bdaa41 2057 ram_addr_t offset;
ffe42cc1 2058 int mfd, rfd;
ffe42cc1 2059
23374a84
RN
2060 (void)vhost_user_get_mr_data(start1, &offset, &mfd);
2061 (void)vhost_user_get_mr_data(start2, &offset, &rfd);
ffe42cc1
MT
2062
2063 return mfd == rfd;
2064}
2065
c5f048d8
MC
2066static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
2067{
2068 VhostUserMsg msg;
2069 bool reply_supported = virtio_has_feature(dev->protocol_features,
2070 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2071
2072 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
2073 return 0;
2074 }
2075
24e34754 2076 msg.hdr.request = VHOST_USER_NET_SET_MTU;
c5f048d8 2077 msg.payload.u64 = mtu;
24e34754
MT
2078 msg.hdr.size = sizeof(msg.payload.u64);
2079 msg.hdr.flags = VHOST_USER_VERSION;
c5f048d8 2080 if (reply_supported) {
24e34754 2081 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
c5f048d8
MC
2082 }
2083
2084 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2085 return -1;
2086 }
2087
2088 /* If reply_ack supported, slave has to ack specified MTU is valid */
2089 if (reply_supported) {
3cf7daf8 2090 return process_message_reply(dev, &msg);
c5f048d8
MC
2091 }
2092
2093 return 0;
2094}
2095
6dcdd06e
MC
2096static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
2097 struct vhost_iotlb_msg *imsg)
2098{
2099 VhostUserMsg msg = {
24e34754
MT
2100 .hdr.request = VHOST_USER_IOTLB_MSG,
2101 .hdr.size = sizeof(msg.payload.iotlb),
2102 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
6dcdd06e
MC
2103 .payload.iotlb = *imsg,
2104 };
2105
2106 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2107 return -EFAULT;
2108 }
2109
2110 return process_message_reply(dev, &msg);
2111}
2112
2113
/*
 * VhostOps hook to toggle IOTLB callback delivery.  Intentionally a
 * no-op for vhost-user: the receive channel is not dedicated to IOTLB
 * messages, so there is no separate callback to enable or disable.
 */
static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
{
    /* No-op as the receive channel is not dedicated to IOTLB messages. */
}
2118
4c3e257b
CL
/*
 * Read the device config space from the backend.
 *
 * @config:     destination buffer, at least @config_len bytes.
 * @config_len: number of bytes to read; must not exceed
 *              VHOST_USER_MAX_CONFIG_SIZE.
 *
 * Requires VHOST_USER_PROTOCOL_F_CONFIG.  Sends GET_CONFIG, then
 * validates the reply's type and size before copying the payload out.
 * Returns 0 on success, -1 on any failure.
 */
static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
                                 uint32_t config_len)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_CONFIG,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
    };

    if (!virtio_has_feature(dev->protocol_features,
                VHOST_USER_PROTOCOL_F_CONFIG)) {
        return -1;
    }

    if (config_len > VHOST_USER_MAX_CONFIG_SIZE) {
        return -1;
    }

    msg.payload.config.offset = 0;
    msg.payload.config.size = config_len;
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    /* The reply must echo the request type ... */
    if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_CONFIG, msg.hdr.request);
        return -1;
    }

    /* ... and carry exactly the size we asked for. */
    if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
        error_report("Received bad msg size.");
        return -1;
    }

    memcpy(config, msg.payload.config.region, config_len);

    return 0;
}
2162
2163static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
2164 uint32_t offset, uint32_t size, uint32_t flags)
2165{
2166 uint8_t *p;
2167 bool reply_supported = virtio_has_feature(dev->protocol_features,
2168 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2169
2170 VhostUserMsg msg = {
24e34754
MT
2171 .hdr.request = VHOST_USER_SET_CONFIG,
2172 .hdr.flags = VHOST_USER_VERSION,
2173 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
4c3e257b
CL
2174 };
2175
1c3e5a26
MC
2176 if (!virtio_has_feature(dev->protocol_features,
2177 VHOST_USER_PROTOCOL_F_CONFIG)) {
2178 return -1;
2179 }
2180
4c3e257b 2181 if (reply_supported) {
24e34754 2182 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
4c3e257b
CL
2183 }
2184
2185 if (size > VHOST_USER_MAX_CONFIG_SIZE) {
2186 return -1;
2187 }
2188
2189 msg.payload.config.offset = offset,
2190 msg.payload.config.size = size,
2191 msg.payload.config.flags = flags,
2192 p = msg.payload.config.region;
2193 memcpy(p, data, size);
2194
2195 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2196 return -1;
2197 }
2198
2199 if (reply_supported) {
2200 return process_message_reply(dev, &msg);
2201 }
2202
2203 return 0;
2204}
2205
efbfeb81
GA
/*
 * Ask the backend to create a crypto session.
 *
 * @session_info: a CryptoDevBackendSymSessionInfo; the fixed setup data
 *                plus the cipher and auth keys are copied into the
 *                CREATE_CRYPTO_SESSION payload.
 * @session_id:   on success, set to the backend-assigned session id.
 *
 * Requires VHOST_USER_PROTOCOL_F_CRYPTO_SESSION.  Returns 0 on success,
 * -1 on any failure.
 */
static int vhost_user_crypto_create_session(struct vhost_dev *dev,
                                            void *session_info,
                                            uint64_t *session_id)
{
    bool crypto_session = virtio_has_feature(dev->protocol_features,
                                       VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
    CryptoDevBackendSymSessionInfo *sess_info = session_info;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
        .hdr.flags = VHOST_USER_VERSION,
        .hdr.size = sizeof(msg.payload.session),
    };

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);

    if (!crypto_session) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -1;
    }

    /* Copy the fixed session setup data, then the variable-length keys. */
    memcpy(&msg.payload.session.session_setup_data, sess_info,
           sizeof(CryptoDevBackendSymSessionInfo));
    if (sess_info->key_len) {
        memcpy(&msg.payload.session.key, sess_info->cipher_key,
               sess_info->key_len);
    }
    if (sess_info->auth_key_len > 0) {
        memcpy(&msg.payload.session.auth_key, sess_info->auth_key,
               sess_info->auth_key_len);
    }
    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        error_report("vhost_user_write() return -1, create session failed");
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        error_report("vhost_user_read() return -1, create session failed");
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.session)) {
        error_report("Received bad msg size.");
        return -1;
    }

    /* A negative id signals an error from the backend (the payload's
     * session_id field is evidently signed, given the PRId64 format). */
    if (msg.payload.session.session_id < 0) {
        error_report("Bad session id: %" PRId64 "",
                     msg.payload.session.session_id);
        return -1;
    }
    *session_id = msg.payload.session.session_id;

    return 0;
}
2266
2267static int
2268vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2269{
2270 bool crypto_session = virtio_has_feature(dev->protocol_features,
2271 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2272 VhostUserMsg msg = {
2273 .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2274 .hdr.flags = VHOST_USER_VERSION,
2275 .hdr.size = sizeof(msg.payload.u64),
2276 };
2277 msg.payload.u64 = session_id;
2278
2279 if (!crypto_session) {
2280 error_report("vhost-user trying to send unhandled ioctl");
2281 return -1;
2282 }
2283
2284 if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
2285 error_report("vhost_user_write() return -1, close session failed");
2286 return -1;
2287 }
2288
2289 return 0;
2290}
2291
988a2775
TB
2292static bool vhost_user_mem_section_filter(struct vhost_dev *dev,
2293 MemoryRegionSection *section)
2294{
2295 bool result;
2296
2297 result = memory_region_get_fd(section->mr) >= 0;
2298
2299 return result;
2300}
2301
5ad204bf
XY
/*
 * Fetch the backend's inflight-I/O tracking region.
 *
 * Sends GET_INFLIGHT_FD; on success the backend returns an fd over the
 * chardev plus mmap size/offset in the payload.  The region is mapped
 * shared and recorded in @inflight (addr, fd, size, offset, queue_size).
 *
 * Returns 0 (also when F_INFLIGHT_SHMFD is not negotiated or the
 * backend reports a zero-sized region), -1 on any failure.
 */
static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
                                      uint16_t queue_size,
                                      struct vhost_inflight *inflight)
{
    void *addr;
    int fd;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.inflight.num_queues = dev->nvqs,
        .payload.inflight.queue_size = queue_size,
        .hdr.size = sizeof(msg.payload.inflight),
    };

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (vhost_user_write(dev, &msg, NULL, 0) < 0) {
        return -1;
    }

    if (vhost_user_read(dev, &msg) < 0) {
        return -1;
    }

    if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
        return -1;
    }

    if (msg.hdr.size != sizeof(msg.payload.inflight)) {
        error_report("Received bad msg size.");
        return -1;
    }

    /* A zero mmap_size means the backend has nothing to share. */
    if (!msg.payload.inflight.mmap_size) {
        return 0;
    }

    /* The fd travels out-of-band as ancillary data on the chardev. */
    fd = qemu_chr_fe_get_msgfd(chr);
    if (fd < 0) {
        error_report("Failed to get mem fd");
        return -1;
    }

    addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
                MAP_SHARED, fd, msg.payload.inflight.mmap_offset);

    if (addr == MAP_FAILED) {
        error_report("Failed to mmap mem fd");
        close(fd);
        return -1;
    }

    inflight->addr = addr;
    inflight->fd = fd;
    inflight->size = msg.payload.inflight.mmap_size;
    inflight->offset = msg.payload.inflight.mmap_offset;
    inflight->queue_size = queue_size;

    return 0;
}
2370
2371static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
2372 struct vhost_inflight *inflight)
2373{
2374 VhostUserMsg msg = {
2375 .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
2376 .hdr.flags = VHOST_USER_VERSION,
2377 .payload.inflight.mmap_size = inflight->size,
2378 .payload.inflight.mmap_offset = inflight->offset,
2379 .payload.inflight.num_queues = dev->nvqs,
2380 .payload.inflight.queue_size = inflight->queue_size,
2381 .hdr.size = sizeof(msg.payload.inflight),
2382 };
2383
2384 if (!virtio_has_feature(dev->protocol_features,
2385 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2386 return 0;
2387 }
2388
2389 if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) {
2390 return -1;
2391 }
2392
2393 return 0;
2394}
2395
0b99f224 2396bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
4d0cf552 2397{
0b99f224
MAL
2398 if (user->chr) {
2399 error_setg(errp, "Cannot initialize vhost-user state");
2400 return false;
2401 }
2402 user->chr = chr;
6b0eff1a 2403 user->memory_slots = 0;
0b99f224 2404 return true;
4d0cf552
TB
2405}
2406
2407void vhost_user_cleanup(VhostUserState *user)
2408{
44866521
TB
2409 int i;
2410
0b99f224
MAL
2411 if (!user->chr) {
2412 return;
2413 }
2414
44866521
TB
2415 for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
2416 if (user->notifier[i].addr) {
2417 object_unparent(OBJECT(&user->notifier[i].mr));
2418 munmap(user->notifier[i].addr, qemu_real_host_page_size);
2419 user->notifier[i].addr = NULL;
2420 }
2421 }
0b99f224 2422 user->chr = NULL;
4d0cf552
TB
2423}
2424
5f6f6664
NN
/*
 * vhost-user implementation of the VhostOps backend interface.
 * Several entries (vhost_set_log_base, vhost_set_mem_table, ...) are
 * defined earlier in this file.
 */
const VhostOps user_ops = {
        .backend_type = VHOST_BACKEND_TYPE_USER,
        .vhost_backend_init = vhost_user_backend_init,
        .vhost_backend_cleanup = vhost_user_backend_cleanup,
        .vhost_backend_memslots_limit = vhost_user_memslots_limit,
        .vhost_set_log_base = vhost_user_set_log_base,
        .vhost_set_mem_table = vhost_user_set_mem_table,
        .vhost_set_vring_addr = vhost_user_set_vring_addr,
        .vhost_set_vring_endian = vhost_user_set_vring_endian,
        .vhost_set_vring_num = vhost_user_set_vring_num,
        .vhost_set_vring_base = vhost_user_set_vring_base,
        .vhost_get_vring_base = vhost_user_get_vring_base,
        .vhost_set_vring_kick = vhost_user_set_vring_kick,
        .vhost_set_vring_call = vhost_user_set_vring_call,
        .vhost_set_features = vhost_user_set_features,
        .vhost_get_features = vhost_user_get_features,
        .vhost_set_owner = vhost_user_set_owner,
        .vhost_reset_device = vhost_user_reset_device,
        .vhost_get_vq_index = vhost_user_get_vq_index,
        .vhost_set_vring_enable = vhost_user_set_vring_enable,
        .vhost_requires_shm_log = vhost_user_requires_shm_log,
        .vhost_migration_done = vhost_user_migration_done,
        .vhost_backend_can_merge = vhost_user_can_merge,
        .vhost_net_set_mtu = vhost_user_net_set_mtu,
        .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
        .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
        .vhost_get_config = vhost_user_get_config,
        .vhost_set_config = vhost_user_set_config,
        .vhost_crypto_create_session = vhost_user_crypto_create_session,
        .vhost_crypto_close_session = vhost_user_crypto_close_session,
        .vhost_backend_mem_section_filter = vhost_user_mem_section_filter,
        .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
        .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
};