/*
 * vhost-user
 *
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/virtio-dmabuf.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/virtio-crypto.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/uuid.h"
#include "qemu/sockets.h"
#include "sysemu/runstate.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "trace.h"
#include "exec/ramblock.h"

#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/un.h>

#include "standard-headers/linux/vhost_types.h"

#ifdef CONFIG_LINUX
#include <linux/userfaultfd.h>
#endif

#define VHOST_MEMORY_BASELINE_NREGIONS    8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_BACKEND_MAX_FDS     8

#if defined(TARGET_PPC) || defined(TARGET_PPC64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_BACKEND_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_SET_STATUS = 39,
    VHOST_USER_GET_STATUS = 40,
    VHOST_USER_GET_SHARED_OBJECT = 41,
    VHOST_USER_MAX
} VhostUserRequest;

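/*
 * Illustrative note (not part of the protocol headers above): a front-end
 * typically opens a session with a fixed negotiation sequence, e.g.
 *
 *     VHOST_USER_GET_FEATURES
 *     VHOST_USER_GET_PROTOCOL_FEATURES  (if VHOST_USER_F_PROTOCOL_FEATURES)
 *     VHOST_USER_SET_PROTOCOL_FEATURES
 *     VHOST_USER_SET_OWNER
 *
 * before any memory-table or ring setup. The exact ordering here is a
 * sketch of common practice, not a normative list.
 */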
typedef enum VhostUserBackendRequest {
    VHOST_USER_BACKEND_NONE = 0,
    VHOST_USER_BACKEND_IOTLB_MSG = 1,
    VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
    VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
    VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
    VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64
#define VHOST_CRYPTO_ASYM_MAX_KEY_LEN        1024

typedef struct VhostUserCryptoSession {
    uint64_t op_code;
    union {
        struct {
            CryptoDevBackendSymSessionInfo session_setup_data;
            uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
            uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
        } sym;
        struct {
            CryptoDevBackendAsymSessionInfo session_setup_data;
            uint8_t key[VHOST_CRYPTO_ASYM_MAX_KEY_LEN];
        } asym;
    } u;

    /* session id for success, -1 on errors */
    int64_t session_id;
} VhostUserCryptoSession;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

typedef struct VhostUserShared {
    unsigned char uuid[16];
} VhostUserShared;

typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;

typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserMemRegMsg mem_reg;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
    VhostUserShared object;
} VhostUserPayload;

typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;

static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)

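/*
 * Wire-format illustration (a sketch mirroring the helpers below, not an
 * additional API): every message is a VhostUserHeader followed by hdr.size
 * payload bytes. A GET_FEATURES request carries no payload at all:
 *
 *     VhostUserMsg msg = {
 *         .hdr.request = VHOST_USER_GET_FEATURES,
 *         .hdr.flags = VHOST_USER_VERSION,
 *         .hdr.size = 0,
 *     };
 *     // exactly VHOST_USER_HDR_SIZE bytes go out on the socket
 *
 * while the reply comes back with hdr.size == sizeof(msg.payload.u64).
 */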
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *backend_ioc;
    GSource *backend_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};

struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};

static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}

static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        int saved_errno = errno;
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return r < 0 ? -saved_errno : -EIO;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -EPROTO;
    }

    trace_vhost_user_read(msg->hdr.request, msg->hdr.flags);

    return 0;
}

static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    r = vhost_user_read_header(dev, msg);
    if (r < 0) {
        return r;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                " Size %d exceeds the maximum %zu.", msg->hdr.size,
                VHOST_USER_PAYLOAD_SIZE);
        return -EPROTO;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            int saved_errno = errno;
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            return r < 0 ? -saved_errno : -EIO;
        }
    }

    return 0;
}

static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    int ret;
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    ret = vhost_user_read(dev, &msg_reply);
    if (ret < 0) {
        return ret;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -EPROTO;
    }

    return msg_reply.payload.u64 ? -EIO : 0;
}

static bool vhost_user_per_device_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
    case VHOST_USER_RESET_DEVICE:
    case VHOST_USER_ADD_MEM_REG:
    case VHOST_USER_REM_MEM_REG:
        return true;
    default:
        return false;
    }
}

/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * Some devices, like virtio-scsi, are implemented as a single vhost_dev,
     * while others, like virtio-net, contain multiple vhost_devs. For
     * operations such as configuring device memory mappings or issuing device
     * resets, which affect the whole device instead of individual VQs,
     * vhost-user messages should only be sent once.
     *
     * Devices with multiple vhost_devs are given an associated dev->vq_index
     * so per_device requests are only sent if vq_index is 0.
     */
    if (vhost_user_per_device_request(msg->hdr.request)
        && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -EINVAL;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        int saved_errno = errno;
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return ret < 0 ? -saved_errno : -EIO;
    }

    trace_vhost_user_write(msg->hdr.request, msg->hdr.flags);

    return 0;
}

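/*
 * Typical caller pattern for the helpers above (an illustrative sketch;
 * real callers such as vhost_user_set_vring_addr() follow it): request an
 * ack by setting NEED_REPLY, write the message, then let
 * process_message_reply() consume the backend's payload.u64 ack, where a
 * non-zero value means failure.
 *
 *     msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
 *     ret = vhost_user_write(dev, &msg, NULL, 0);
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     return process_message_reply(dev, &msg);
 */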
int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}

static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    /* Send only once with first queue pair */
    if (dev->vq_index != 0) {
        return 0;
    }

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    ret = vhost_user_write(dev, &msg, fds, fd_num);
    if (ret < 0) {
        return ret;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        ret = vhost_user_read(dev, &msg);
        if (ret < 0) {
            return ret;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -EPROTO;
        }
    }

    return 0;
}

static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);
    *offset += mr->ram_block->fd_offset;

    return mr;
}

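/*
 * Example of the translation above (the numbers are hypothetical): for a
 * file-backed RAMBlock mapped at host address 0x7f0000000000 with an
 * fd_offset of 0x200000, an addr of 0x7f0000010000 yields
 * *offset == 0x10000 + 0x200000 == 0x210000 and *fd the block's backing
 * file descriptor, i.e. the (fd, offset) pair the backend needs for its
 * own mmap() of the same memory.
 */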
static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}

static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -ENOBUFS;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -EINVAL;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}

static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}

static void scrub_shadow_regions(struct vhost_dev *dev,
                                 struct scrub_regions *add_reg,
                                 int *nr_add_reg,
                                 struct scrub_regions *rem_reg,
                                 int *nr_rem_reg, uint64_t *shadow_pcb,
                                 bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    bool found[VHOST_USER_MAX_RAM_SLOTS] = {};
    struct vhost_memory_region *reg, *shadow_reg;
    int i, j, fd, add_idx = 0, rm_idx = 0, fd_num = 0;
    ram_addr_t offset;
    MemoryRegion *mr;
    bool matching;

    /*
     * Find memory regions present in our shadow state which are not in
     * the device's current memory state.
     *
     * Mark regions in both the shadow and device state as "found".
     */
    for (i = 0; i < u->num_shadow_regions; i++) {
        shadow_reg = &u->shadow_regions[i];
        matching = false;

        for (j = 0; j < dev->mem->nregions; j++) {
            reg = &dev->mem->regions[j];

            mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

            if (reg_equal(shadow_reg, reg)) {
                matching = true;
                found[j] = true;
                if (track_ramblocks) {
                    /*
                     * Reset postcopy client bases, region_rb, and
                     * region_rb_offset in case regions are removed.
                     */
                    if (fd > 0) {
                        u->region_rb_offset[j] = offset;
                        u->region_rb[j] = mr->ram_block;
                        shadow_pcb[j] = u->postcopy_client_bases[i];
                    } else {
                        u->region_rb_offset[j] = 0;
                        u->region_rb[j] = NULL;
                    }
                }
                break;
            }
        }

        /*
         * If the region was not found in the current device memory state
         * create an entry for it in the removed list.
         */
        if (!matching) {
            rem_reg[rm_idx].region = shadow_reg;
            rem_reg[rm_idx++].reg_idx = i;
        }
    }

    /*
     * For regions not marked "found", create entries in the added list.
     *
     * Note their indexes in the device memory state and the indexes of their
     * file descriptors.
     */
    for (i = 0; i < dev->mem->nregions; i++) {
        reg = &dev->mem->regions[i];
        vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            ++fd_num;
        }

        /*
         * If the region was in both the shadow and device state we don't
         * need to send a VHOST_USER_ADD_MEM_REG message for it.
         */
        if (found[i]) {
            continue;
        }

        add_reg[add_idx].region = reg;
        add_reg[add_idx].reg_idx = i;
        add_reg[add_idx++].fd_idx = fd_num;
    }
    *nr_rem_reg = rm_idx;
    *nr_add_reg = add_idx;

    return;
}

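/*
 * Worked example of the scrub above (the regions are hypothetical): with
 * shadow state {A, B} and new device state {B, C}, B is marked "found" in
 * both tables, A lands in rem_reg (to go out as VHOST_USER_REM_MEM_REG)
 * and C lands in add_reg (to go out as VHOST_USER_ADD_MEM_REG), so only
 * the delta crosses the socket instead of a full SET_MEM_TABLE.
 */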
static int send_remove_regions(struct vhost_dev *dev,
                               struct scrub_regions *remove_reg,
                               int nr_rem_reg, VhostUserMsg *msg,
                               bool reply_supported)
{
    struct vhost_user *u = dev->opaque;
    struct vhost_memory_region *shadow_reg;
    int i, fd, shadow_reg_idx, ret;
    ram_addr_t offset;
    VhostUserMemoryRegion region_buffer;

    /*
     * The regions in remove_reg appear in the same order they do in the
     * shadow table. Therefore we can minimize memory copies by iterating
     * through remove_reg backwards.
     */
    for (i = nr_rem_reg - 1; i >= 0; i--) {
        shadow_reg = remove_reg[i].region;
        shadow_reg_idx = remove_reg[i].reg_idx;

        vhost_user_get_mr_data(shadow_reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            msg->hdr.request = VHOST_USER_REM_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, shadow_reg, 0);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, NULL, 0);
            if (ret < 0) {
                return ret;
            }

            if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        }

        /*
         * At this point we know the backend has unmapped the region. It is now
         * safe to remove it from the shadow table.
         */
        memmove(&u->shadow_regions[shadow_reg_idx],
                &u->shadow_regions[shadow_reg_idx + 1],
                sizeof(struct vhost_memory_region) *
                (u->num_shadow_regions - shadow_reg_idx - 1));
        u->num_shadow_regions--;
    }

    return 0;
}

static int send_add_regions(struct vhost_dev *dev,
                            struct scrub_regions *add_reg, int nr_add_reg,
                            VhostUserMsg *msg, uint64_t *shadow_pcb,
                            bool reply_supported, bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    int i, fd, ret, reg_idx, reg_fd_idx;
    struct vhost_memory_region *reg;
    MemoryRegion *mr;
    ram_addr_t offset;
    VhostUserMsg msg_reply;
    VhostUserMemoryRegion region_buffer;

    for (i = 0; i < nr_add_reg; i++) {
        reg = add_reg[i].region;
        reg_idx = add_reg[i].reg_idx;
        reg_fd_idx = add_reg[i].fd_idx;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);

        if (fd > 0) {
            if (track_ramblocks) {
                trace_vhost_user_set_mem_table_withfd(reg_fd_idx, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[reg_idx] = offset;
                u->region_rb[reg_idx] = mr->ram_block;
            }
            msg->hdr.request = VHOST_USER_ADD_MEM_REG;
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.mem_reg.region = region_buffer;

            ret = vhost_user_write(dev, msg, &fd, 1);
            if (ret < 0) {
                return ret;
            }

            if (track_ramblocks) {
                uint64_t reply_gpa;

                ret = vhost_user_read(dev, &msg_reply);
                if (ret < 0) {
                    return ret;
                }

                reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr;

                if (msg_reply.hdr.request != VHOST_USER_ADD_MEM_REG) {
                    error_report("%s: Received unexpected msg type."
                                 "Expected %d received %d", __func__,
                                 VHOST_USER_ADD_MEM_REG,
                                 msg_reply.hdr.request);
                    return -EPROTO;
                }

                /*
                 * We're using the same structure, just reusing one of the
                 * fields, so it should be the same size.
                 */
                if (msg_reply.hdr.size != msg->hdr.size) {
                    error_report("%s: Unexpected size for postcopy reply "
                                 "%d vs %d", __func__, msg_reply.hdr.size,
                                 msg->hdr.size);
                    return -EPROTO;
                }

                /* Get the postcopy client base from the backend's reply. */
                if (reply_gpa == dev->mem->regions[reg_idx].guest_phys_addr) {
                    shadow_pcb[reg_idx] =
                        msg_reply.payload.mem_reg.region.userspace_addr;
                    trace_vhost_user_set_mem_table_postcopy(
                        msg_reply.payload.mem_reg.region.userspace_addr,
                        msg->payload.mem_reg.region.userspace_addr,
                        reg_fd_idx, reg_idx);
                } else {
                    error_report("%s: invalid postcopy reply for region. "
                                 "Got guest physical address %" PRIX64 ", expected "
                                 "%" PRIX64, __func__, reply_gpa,
                                 dev->mem->regions[reg_idx].guest_phys_addr);
                    return -EPROTO;
                }
            } else if (reply_supported) {
                ret = process_message_reply(dev, msg);
                if (ret) {
                    return ret;
                }
            }
        } else if (track_ramblocks) {
            u->region_rb_offset[reg_idx] = 0;
            u->region_rb[reg_idx] = NULL;
        }

        /*
         * At this point, we know the backend has mapped in the new
         * region, if the region has a valid file descriptor.
         *
         * The region should now be added to the shadow table.
         */
        u->shadow_regions[u->num_shadow_regions].guest_phys_addr =
            reg->guest_phys_addr;
        u->shadow_regions[u->num_shadow_regions].userspace_addr =
            reg->userspace_addr;
        u->shadow_regions[u->num_shadow_regions].memory_size =
            reg->memory_size;
        u->num_shadow_regions++;
    }

    return 0;
}

static int vhost_user_add_remove_regions(struct vhost_dev *dev,
                                         VhostUserMsg *msg,
                                         bool reply_supported,
                                         bool track_ramblocks)
{
    struct vhost_user *u = dev->opaque;
    struct scrub_regions add_reg[VHOST_USER_MAX_RAM_SLOTS];
    struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS];
    uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {};
    int nr_add_reg, nr_rem_reg;
    int ret;

    msg->hdr.size = sizeof(msg->payload.mem_reg);

    /* Find the regions which need to be removed or added. */
    scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg,
                         shadow_pcb, track_ramblocks);

    if (nr_rem_reg) {
        ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg,
                                  reply_supported);
        if (ret < 0) {
            goto err;
        }
    }

    if (nr_add_reg) {
        ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb,
                               reply_supported, track_ramblocks);
        if (ret < 0) {
            goto err;
        }
    }

    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal with
         * any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg->hdr.size = sizeof(msg->payload.u64);
        msg->payload.u64 = 0; /* OK */

        ret = vhost_user_write(dev, msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;

err:
    if (track_ramblocks) {
        memcpy(u->postcopy_client_bases, shadow_pcb,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);
    }

    return ret;
}

static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev,
                                             struct vhost_memory *mem,
                                             bool reply_supported,
                                             bool config_mem_slots)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    VhostUserMsg msg_reply;
    int region_i, msg_i;
    int ret;

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (u->region_rb_len < dev->mem->nregions) {
        u->region_rb = g_renew(RAMBlock*, u->region_rb, dev->mem->nregions);
        u->region_rb_offset = g_renew(ram_addr_t, u->region_rb_offset,
                                      dev->mem->nregions);
        memset(&(u->region_rb[u->region_rb_len]), '\0',
               sizeof(RAMBlock *) * (dev->mem->nregions - u->region_rb_len));
        memset(&(u->region_rb_offset[u->region_rb_len]), '\0',
               sizeof(ram_addr_t) * (dev->mem->nregions - u->region_rb_len));
        u->region_rb_len = dev->mem->nregions;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                true);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_read(dev, &msg_reply);
        if (ret < 0) {
            return ret;
        }

        if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) {
            error_report("%s: Received unexpected msg type."
                         "Expected %d received %d", __func__,
                         VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request);
            return -EPROTO;
        }

        /*
         * We're using the same structure, just reusing one of the
         * fields, so it should be the same size.
         */
        if (msg_reply.hdr.size != msg.hdr.size) {
            error_report("%s: Unexpected size for postcopy reply "
                         "%d vs %d", __func__, msg_reply.hdr.size,
                         msg.hdr.size);
            return -EPROTO;
        }

        memset(u->postcopy_client_bases, 0,
               sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS);

        /*
         * They're in the same order as the regions that were sent,
         * but some of the regions were skipped (above) if they
         * didn't have fds.
         */
        for (msg_i = 0, region_i = 0;
             region_i < dev->mem->nregions;
             region_i++) {
            if (msg_i < fd_num &&
                msg_reply.payload.memory.regions[msg_i].guest_phys_addr ==
                dev->mem->regions[region_i].guest_phys_addr) {
                u->postcopy_client_bases[region_i] =
                    msg_reply.payload.memory.regions[msg_i].userspace_addr;
                trace_vhost_user_set_mem_table_postcopy(
                    msg_reply.payload.memory.regions[msg_i].userspace_addr,
                    msg.payload.memory.regions[msg_i].userspace_addr,
                    msg_i, region_i);
                msg_i++;
            }
        }
        if (msg_i != fd_num) {
            error_report("%s: postcopy reply not fully consumed "
                         "%d vs %zd",
                         __func__, msg_i, fd_num);
            return -EIO;
        }

        /*
         * Now we've registered this with the postcopy code, we ack to the
         * client, because now we're in the position to be able to deal
         * with any faults it generates.
         */
        /* TODO: Use this for failure cases as well with a bad value. */
        msg.hdr.size = sizeof(msg.payload.u64);
        msg.payload.u64 = 0; /* OK */
        ret = vhost_user_write(dev, &msg, NULL, 0);
        if (ret < 0) {
            return ret;
        }
    }

    return 0;
}

static int vhost_user_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    struct vhost_user *u = dev->opaque;
    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    size_t fd_num = 0;
    bool do_postcopy = u->postcopy_listen && u->postcopy_fd.handler;
    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);
    bool config_mem_slots =
        virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS);
    int ret;

    if (do_postcopy) {
        /*
         * Postcopy has enough differences that it's best done in its own
         * version
         */
        return vhost_user_set_mem_table_postcopy(dev, mem, reply_supported,
                                                 config_mem_slots);
    }

    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (reply_supported) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    if (config_mem_slots) {
        ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false);
        if (ret < 0) {
            return ret;
        }
    } else {
        ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num,
                                                false);
        if (ret < 0) {
            return ret;
        }

        ret = vhost_user_write(dev, &msg, fds, fd_num);
        if (ret < 0) {
            return ret;
        }

        if (reply_supported) {
            return process_message_reply(dev, &msg);
        }
    }

    return 0;
}

static int vhost_user_set_vring_endian(struct vhost_dev *dev,
                                       struct vhost_vring_state *ring)
{
    bool cross_endian = virtio_has_feature(dev->protocol_features,
                                           VHOST_USER_PROTOCOL_F_CROSS_ENDIAN);
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ENDIAN,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    if (!cross_endian) {
        error_report("vhost-user trying to send unhandled ioctl");
        return -ENOTSUP;
    }

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_set_vring(struct vhost_dev *dev,
                           unsigned long int request,
                           struct vhost_vring_state *ring)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_set_vring_num(struct vhost_dev *dev,
                                    struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring);
}

static void vhost_user_host_notifier_free(VhostUserHostNotifier *n)
{
    assert(n && n->unmap_addr);
    munmap(n->unmap_addr, qemu_real_host_page_size());
    n->unmap_addr = NULL;
}

/*
 * clean-up function for notifier, will finally free the structure
 * under rcu.
 */
static void vhost_user_host_notifier_remove(VhostUserHostNotifier *n,
                                            VirtIODevice *vdev)
{
    if (n->addr) {
        if (vdev) {
            virtio_queue_set_host_notifier_mr(vdev, n->idx, &n->mr, false);
        }
        assert(!n->unmap_addr);
        n->unmap_addr = n->addr;
        n->addr = NULL;
        call_rcu(n, vhost_user_host_notifier_free, rcu);
    }
}

static int vhost_user_set_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring);
}

static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable)
{
    int i;

    if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) {
        return -EINVAL;
    }

    for (i = 0; i < dev->nvqs; ++i) {
        int ret;
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num   = enable,
        };

        ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state);
        if (ret < 0) {
            /*
             * Restoring the previous state is likely infeasible, as well as
             * proceeding regardless the error, so just bail out and hope for
             * the device-level recovery.
             */
            return ret;
        }
    }

    return 0;
}

static VhostUserHostNotifier *fetch_notifier(VhostUserState *u,
                                             int idx)
{
    if (idx >= u->notifiers->len) {
        return NULL;
    }
    return g_ptr_array_index(u->notifiers, idx);
}

static int vhost_user_get_vring_base(struct vhost_dev *dev,
                                     struct vhost_vring_state *ring)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GET_VRING_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.state = *ring,
        .hdr.size = sizeof(msg.payload.state),
    };
    struct vhost_user *u = dev->opaque;

    VhostUserHostNotifier *n = fetch_notifier(u->user, ring->index);
    if (n) {
        vhost_user_host_notifier_remove(n, dev->vdev);
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     VHOST_USER_GET_VRING_BASE, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.state)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *ring = msg.payload.state;

    return 0;
}

static int vhost_set_vring_file(struct vhost_dev *dev,
                                VhostUserRequest request,
                                struct vhost_vring_file *file)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK,
        .hdr.size = sizeof(msg.payload.u64),
    };

    if (ioeventfd_enabled() && file->fd > 0) {
        fds[fd_num++] = file->fd;
    } else {
        msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK;
    }

    return vhost_user_write(dev, &msg, fds, fd_num);
}

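/*
 * Note on the NOFD mask used above: when no eventfd can be passed (either
 * ioeventfd is unavailable or file->fd is invalid), setting
 * VHOST_USER_VRING_NOFD_MASK in payload.u64 tells the backend that no
 * descriptor accompanies the message. A kick with no fd amounts to
 *
 *     msg.payload.u64 = (file->index & VHOST_USER_VRING_IDX_MASK)
 *                       | VHOST_USER_VRING_NOFD_MASK;
 *
 * and the backend is expected to poll the ring instead of waiting on an
 * eventfd.
 */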
static int vhost_user_set_vring_kick(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_KICK, file);
}

static int vhost_user_set_vring_call(struct vhost_dev *dev,
                                     struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_CALL, file);
}

static int vhost_user_set_vring_err(struct vhost_dev *dev,
                                    struct vhost_vring_file *file)
{
    return vhost_set_vring_file(dev, VHOST_USER_SET_VRING_ERR, file);
}

static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
    };

    if (vhost_user_per_device_request(request) && dev->vq_index != 0) {
        return 0;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_user_read(dev, &msg);
    if (ret < 0) {
        return ret;
    }

    if (msg.hdr.request != request) {
        error_report("Received unexpected msg type. Expected %d received %d",
                     request, msg.hdr.request);
        return -EPROTO;
    }

    if (msg.hdr.size != sizeof(msg.payload.u64)) {
        error_report("Received bad msg size.");
        return -EPROTO;
    }

    *u64 = msg.payload.u64;

    return 0;
}

static int vhost_user_get_features(struct vhost_dev *dev, uint64_t *features)
{
    if (vhost_user_get_u64(dev, VHOST_USER_GET_FEATURES, features) < 0) {
        return -EPROTO;
    }

    return 0;
}

static int enforce_reply(struct vhost_dev *dev,
                         const VhostUserMsg *msg)
{
    uint64_t dummy;

    if (msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
        return process_message_reply(dev, msg);
    }

    /*
     * We need to wait for a reply but the backend does not
     * support replies for the command we just sent.
     * Send VHOST_USER_GET_FEATURES which makes all backends
     * send a reply.
     */
    return vhost_user_get_features(dev, &dummy);
}

static int vhost_user_set_vring_addr(struct vhost_dev *dev,
                                     struct vhost_vring_addr *addr)
{
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_VRING_ADDR,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.addr = *addr,
        .hdr.size = sizeof(msg.payload.addr),
    };

    bool reply_supported = virtio_has_feature(dev->protocol_features,
                                              VHOST_USER_PROTOCOL_F_REPLY_ACK);

    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool wait_for_reply = addr->flags & (1 << VHOST_VRING_F_LOG);

    if (reply_supported && wait_for_reply) {
        msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64,
                              bool wait_for_reply)
{
    VhostUserMsg msg = {
        .hdr.request = request,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.u64 = u64,
        .hdr.size = sizeof(msg.payload.u64),
    };
    int ret;

    if (wait_for_reply) {
        bool reply_supported = virtio_has_feature(dev->protocol_features,
                                          VHOST_USER_PROTOCOL_F_REPLY_ACK);
        if (reply_supported) {
            msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
        }
    }

    ret = vhost_user_write(dev, &msg, NULL, 0);
    if (ret < 0) {
        return ret;
    }

    if (wait_for_reply) {
        return enforce_reply(dev, &msg);
    }

    return 0;
}

static int vhost_user_set_status(struct vhost_dev *dev, uint8_t status)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_STATUS, status, false);
}

static int vhost_user_get_status(struct vhost_dev *dev, uint8_t *status)
{
    uint64_t value;
    int ret;

    ret = vhost_user_get_u64(dev, VHOST_USER_GET_STATUS, &value);
    if (ret < 0) {
        return ret;
    }
    *status = value;

    return 0;
}

static int vhost_user_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    ret = vhost_user_get_status(dev, &s);
    if (ret < 0) {
        return ret;
    }

    if ((s & status) == status) {
        return 0;
    }
    s |= status;

    return vhost_user_set_status(dev, s);
}

static int vhost_user_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    /*
     * wait for a reply if logging is enabled to make sure
     * backend is actually logging changes
     */
    bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
    int ret;

    /*
     * We need to include any extra backend only feature bits that
     * might be needed by our device. Currently this includes the
     * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
     * features.
     */
    ret = vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
                              features | dev->backend_features,
                              log_enabled);

    if (virtio_has_feature(dev->protocol_features,
                           VHOST_USER_PROTOCOL_F_STATUS)) {
        if (!ret) {
            return vhost_user_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
        }
    }

    return ret;
}

static int vhost_user_set_protocol_features(struct vhost_dev *dev,
                                            uint64_t features)
{
    return vhost_user_set_u64(dev, VHOST_USER_SET_PROTOCOL_FEATURES, features,
                              false);
}

static int vhost_user_set_owner(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_OWNER,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_get_max_memslots(struct vhost_dev *dev,
                                       uint64_t *max_memslots)
{
    uint64_t backend_max_memslots;
    int err;

    err = vhost_user_get_u64(dev, VHOST_USER_GET_MAX_MEM_SLOTS,
                             &backend_max_memslots);
    if (err < 0) {
        return err;
    }

    *max_memslots = backend_max_memslots;

    return 0;
}

static int vhost_user_reset_device(struct vhost_dev *dev)
{
    VhostUserMsg msg = {
        .hdr.flags = VHOST_USER_VERSION,
    };

    msg.hdr.request = virtio_has_feature(dev->protocol_features,
                                         VHOST_USER_PROTOCOL_F_RESET_DEVICE)
        ? VHOST_USER_RESET_DEVICE
        : VHOST_USER_RESET_OWNER;

    return vhost_user_write(dev, &msg, NULL, 0);
}

static int vhost_user_backend_handle_config_change(struct vhost_dev *dev)
{
    if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) {
        return -ENOSYS;
    }

    return dev->config_ops->vhost_dev_config_notifier(dev);
}

/*
 * Fetch or create the notifier for a given idx. Newly created
 * notifiers are added to the pointer array that tracks them.
 */
static VhostUserHostNotifier *fetch_or_create_notifier(VhostUserState *u,
                                                       int idx)
{
    VhostUserHostNotifier *n = NULL;
    if (idx >= u->notifiers->len) {
        g_ptr_array_set_size(u->notifiers, idx + 1);
    }

    n = g_ptr_array_index(u->notifiers, idx);
    if (!n) {
        /*
         * In case notifications arrive out-of-order,
         * make room for the current index.
         */
        g_ptr_array_remove_index(u->notifiers, idx);
        n = g_new0(VhostUserHostNotifier, 1);
        n->idx = idx;
        g_ptr_array_insert(u->notifiers, idx, n);
        trace_vhost_user_create_notifier(idx, n);
    }

    return n;
}

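/*
 * Growth example for the array handling above (the indexes are
 * hypothetical): if idx 2 arrives before 0 and 1, g_ptr_array_set_size()
 * grows the array to length 3 with NULL holes at 0 and 1; the
 * remove/insert pair then swaps the NULL hole at idx 2 for the new
 * notifier while keeping the array length unchanged.
 */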
static int vhost_user_backend_handle_vring_host_notifier(struct vhost_dev *dev,
                                                         VhostUserVringArea *area,
                                                         int fd)
{
    int queue_idx = area->u64 & VHOST_USER_VRING_IDX_MASK;
    size_t page_size = qemu_real_host_page_size();
    struct vhost_user *u = dev->opaque;
    VhostUserState *user = u->user;
    VirtIODevice *vdev = dev->vdev;
    VhostUserHostNotifier *n;
    void *addr;
    char *name;

    if (!virtio_has_feature(dev->protocol_features,
                            VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) ||
        vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) {
        return -EINVAL;
    }

    /*
     * Fetch notifier and invalidate any old data before setting up
     * new mapped address.
     */
    n = fetch_or_create_notifier(user, queue_idx);
    vhost_user_host_notifier_remove(n, vdev);

    if (area->u64 & VHOST_USER_VRING_NOFD_MASK) {
        return 0;
    }

    /* Sanity check. */
    if (area->size != page_size) {
        return -EINVAL;
    }

    addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                fd, area->offset);
    if (addr == MAP_FAILED) {
        return -EFAULT;
    }

    name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
                           user, queue_idx);
    if (!n->mr.ram) { /* Don't init again after suspend. */
        memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                          page_size, addr);
    } else {
        n->mr.ram_block->host = addr;
    }
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        return -ENXIO;
    }

    n->addr = addr;

    return 0;
}

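/*
 * Lifecycle sketch for the mapping above: the page mmap()ed from the
 * backend's fd is wrapped in a ram-device MemoryRegion and wired to the
 * queue via virtio_queue_set_host_notifier_mr(), so guest accesses land
 * directly on the backend's notification page. Teardown is deferred:
 * vhost_user_host_notifier_remove() parks the address in unmap_addr, and
 * the munmap() happens in vhost_user_host_notifier_free() after an RCU
 * grace period, once no vCPU can still be touching the mapping.
 */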
16094766
AE
1594static int
1595vhost_user_backend_handle_shared_object_add(struct vhost_dev *dev,
1596 VhostUserShared *object)
1597{
1598 QemuUUID uuid;
1599
1600 memcpy(uuid.data, object->uuid, sizeof(object->uuid));
1601 return virtio_add_vhost_device(&uuid, dev);
1602}
1603
1604static int
1605vhost_user_backend_handle_shared_object_remove(VhostUserShared *object)
1606{
1607 QemuUUID uuid;
1608
1609 memcpy(uuid.data, object->uuid, sizeof(object->uuid));
1610 return virtio_remove_resource(&uuid);
1611}
1612
1613static bool vhost_user_send_resp(QIOChannel *ioc, VhostUserHeader *hdr,
1614 VhostUserPayload *payload, Error **errp)
1615{
1616 struct iovec iov[] = {
1617 { .iov_base = hdr, .iov_len = VHOST_USER_HDR_SIZE },
1618 { .iov_base = payload, .iov_len = hdr->size },
1619 };
1620
1621 hdr->flags &= ~VHOST_USER_NEED_REPLY_MASK;
1622 hdr->flags |= VHOST_USER_REPLY_MASK;
1623
1624 return !qio_channel_writev_all(ioc, iov, ARRAY_SIZE(iov), errp);
1625}
1626
1627static bool
1628vhost_user_backend_send_dmabuf_fd(QIOChannel *ioc, VhostUserHeader *hdr,
1629 VhostUserPayload *payload, Error **errp)
1630{
1631 hdr->size = sizeof(payload->u64);
1632 return vhost_user_send_resp(ioc, hdr, payload, errp);
1633}
1634
1635int vhost_user_get_shared_object(struct vhost_dev *dev, unsigned char *uuid,
1636 int *dmabuf_fd)
1637{
1638 struct vhost_user *u = dev->opaque;
1639 CharBackend *chr = u->user->chr;
1640 int ret;
1641 VhostUserMsg msg = {
1642 .hdr.request = VHOST_USER_GET_SHARED_OBJECT,
1643 .hdr.flags = VHOST_USER_VERSION,
1644 };
1645 memcpy(msg.payload.object.uuid, uuid, sizeof(msg.payload.object.uuid));
1646
1647 ret = vhost_user_write(dev, &msg, NULL, 0);
1648 if (ret < 0) {
1649 return ret;
1650 }
1651
1652 ret = vhost_user_read(dev, &msg);
1653 if (ret < 0) {
1654 return ret;
1655 }
1656
1657 if (msg.hdr.request != VHOST_USER_GET_SHARED_OBJECT) {
1658 error_report("Received unexpected msg type. "
1659 "Expected %d received %d",
1660 VHOST_USER_GET_SHARED_OBJECT, msg.hdr.request);
1661 return -EPROTO;
1662 }
1663
1664 *dmabuf_fd = qemu_chr_fe_get_msgfd(chr);
1665 if (*dmabuf_fd < 0) {
1666 error_report("Failed to get dmabuf fd");
1667 return -EIO;
1668 }
1669
1670 return 0;
1671}
1672
1673static int
1674vhost_user_backend_handle_shared_object_lookup(struct vhost_user *u,
1675 QIOChannel *ioc,
1676 VhostUserHeader *hdr,
1677 VhostUserPayload *payload)
1678{
1679 QemuUUID uuid;
1680 CharBackend *chr = u->user->chr;
1681 Error *local_err = NULL;
1682 int dmabuf_fd = -1;
1683 int fd_num = 0;
1684
1685 memcpy(uuid.data, payload->object.uuid, sizeof(payload->object.uuid));
1686
1687 payload->u64 = 0;
1688 switch (virtio_object_type(&uuid)) {
1689 case TYPE_DMABUF:
1690 dmabuf_fd = virtio_lookup_dmabuf(&uuid);
1691 break;
1692 case TYPE_VHOST_DEV:
1693 {
1694 struct vhost_dev *dev = virtio_lookup_vhost_device(&uuid);
1695 if (dev == NULL) {
1696 payload->u64 = -EINVAL;
1697 break;
1698 }
1699 int ret = vhost_user_get_shared_object(dev, uuid.data, &dmabuf_fd);
1700 if (ret < 0) {
1701 payload->u64 = ret;
1702 }
1703 break;
1704 }
1705 case TYPE_INVALID:
1706 payload->u64 = -EINVAL;
1707 break;
1708 }
1709
1710 if (dmabuf_fd != -1) {
1711 fd_num++;
1712 }
1713
1714 if (qemu_chr_fe_set_msgfds(chr, &dmabuf_fd, fd_num) < 0) {
1715 error_report("Failed to set msg fds.");
1716 payload->u64 = -EINVAL;
1717 }
1718
1719 if (!vhost_user_backend_send_dmabuf_fd(ioc, hdr, payload, &local_err)) {
1720 error_report_err(local_err);
1721 return -EINVAL;
1722 }
1723
1724 return 0;
1725}
1726
f8ed3648 1727static void close_backend_channel(struct vhost_user *u)
de62e494 1728{
f8ed3648
MP
1729 g_source_destroy(u->backend_src);
1730 g_source_unref(u->backend_src);
1731 u->backend_src = NULL;
1732 object_unref(OBJECT(u->backend_ioc));
1733 u->backend_ioc = NULL;
de62e494
GK
1734}
1735
f8ed3648 1736static gboolean backend_read(QIOChannel *ioc, GIOCondition condition,
57dc0217 1737 gpointer opaque)
4bbeeba0
MAL
1738{
1739 struct vhost_dev *dev = opaque;
1740 struct vhost_user *u = dev->opaque;
69aff030
MT
1741 VhostUserHeader hdr = { 0, };
1742 VhostUserPayload payload = { 0, };
57dc0217
GK
1743 Error *local_err = NULL;
1744 gboolean rc = G_SOURCE_CONTINUE;
1745 int ret = 0;
1f3a4519 1746 struct iovec iov;
57dc0217
GK
1747 g_autofree int *fd = NULL;
1748 size_t fdsize = 0;
1749 int i;
5f57fbea 1750
4bbeeba0 1751 /* Read header */
1f3a4519
TB
1752 iov.iov_base = &hdr;
1753 iov.iov_len = VHOST_USER_HDR_SIZE;
1754
57dc0217
GK
1755 if (qio_channel_readv_full_all(ioc, &iov, 1, &fd, &fdsize, &local_err)) {
1756 error_report_err(local_err);
4bbeeba0
MAL
1757 goto err;
1758 }
1759
69aff030 1760 if (hdr.size > VHOST_USER_PAYLOAD_SIZE) {
4bbeeba0 1761 error_report("Failed to read msg header."
69aff030 1762 " Size %d exceeds the maximum %zu.", hdr.size,
4bbeeba0
MAL
1763 VHOST_USER_PAYLOAD_SIZE);
1764 goto err;
1765 }
1766
1767 /* Read payload */
57dc0217
GK
1768 if (qio_channel_read_all(ioc, (char *) &payload, hdr.size, &local_err)) {
1769 error_report_err(local_err);
4bbeeba0
MAL
1770 goto err;
1771 }
1772
69aff030 1773 switch (hdr.request) {
a84ec993 1774 case VHOST_USER_BACKEND_IOTLB_MSG:
69aff030 1775 ret = vhost_backend_handle_iotlb_msg(dev, &payload.iotlb);
6dcdd06e 1776 break;
a84ec993 1777 case VHOST_USER_BACKEND_CONFIG_CHANGE_MSG:
f8ed3648 1778 ret = vhost_user_backend_handle_config_change(dev);
4c3e257b 1779 break;
a84ec993 1780 case VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG:
f8ed3648 1781 ret = vhost_user_backend_handle_vring_host_notifier(dev, &payload.area,
57dc0217 1782 fd ? fd[0] : -1);
44866521 1783 break;
16094766
AE
1784 case VHOST_USER_BACKEND_SHARED_OBJECT_ADD:
1785 ret = vhost_user_backend_handle_shared_object_add(dev, &payload.object);
1786 break;
1787 case VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE:
1788 ret = vhost_user_backend_handle_shared_object_remove(&payload.object);
1789 break;
1790 case VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP:
1791 ret = vhost_user_backend_handle_shared_object_lookup(dev->opaque, ioc,
1792 &hdr, &payload);
1793 break;
4bbeeba0 1794 default:
0fdc465d 1795 error_report("Received unexpected msg type: %d.", hdr.request);
4bbeeba0
MAL
1796 ret = -EINVAL;
1797 }
1798
1799 /*
1800 * REPLY_ACK feature handling. Other reply types has to be managed
1801 * directly in their request handlers.
1802 */
69aff030 1803 if (hdr.flags & VHOST_USER_NEED_REPLY_MASK) {
69aff030
MT
1804 payload.u64 = !!ret;
1805 hdr.size = sizeof(payload.u64);
1806
16094766 1807 if (!vhost_user_send_resp(ioc, &hdr, &payload, &local_err)) {
57dc0217 1808 error_report_err(local_err);
4bbeeba0
MAL
1809 goto err;
1810 }
1811 }
1812
9e06080b 1813 goto fdcleanup;
4bbeeba0
MAL
1814
1815err:
f8ed3648 1816 close_backend_channel(u);
57dc0217 1817 rc = G_SOURCE_REMOVE;
9e06080b
GK
1818
1819fdcleanup:
57dc0217
GK
1820 if (fd) {
1821 for (i = 0; i < fdsize; i++) {
5f57fbea
TB
1822 close(fd[i]);
1823 }
1f3a4519 1824 }
57dc0217 1825 return rc;
4bbeeba0
MAL
1826}
1827
f8ed3648 1828static int vhost_setup_backend_channel(struct vhost_dev *dev)
4bbeeba0
MAL
1829{
1830 VhostUserMsg msg = {
a84ec993 1831 .hdr.request = VHOST_USER_SET_BACKEND_REQ_FD,
24e34754 1832 .hdr.flags = VHOST_USER_VERSION,
4bbeeba0
MAL
1833 };
1834 struct vhost_user *u = dev->opaque;
1835 int sv[2], ret = 0;
1836 bool reply_supported = virtio_has_feature(dev->protocol_features,
1837 VHOST_USER_PROTOCOL_F_REPLY_ACK);
57dc0217
GK
1838 Error *local_err = NULL;
1839 QIOChannel *ioc;
4bbeeba0
MAL
1840
1841 if (!virtio_has_feature(dev->protocol_features,
a84ec993 1842 VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
4bbeeba0
MAL
1843 return 0;
1844 }
1845
9cbda7b3 1846 if (qemu_socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
025faa87 1847 int saved_errno = errno;
4bbeeba0 1848 error_report("socketpair() failed");
025faa87 1849 return -saved_errno;
4bbeeba0
MAL
1850 }
1851
57dc0217
GK
1852 ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err));
1853 if (!ioc) {
1854 error_report_err(local_err);
025faa87 1855 return -ECONNREFUSED;
57dc0217 1856 }
f8ed3648
MP
1857 u->backend_ioc = ioc;
1858 u->backend_src = qio_channel_add_watch_source(u->backend_ioc,
f340a59d 1859 G_IO_IN | G_IO_HUP,
f8ed3648 1860 backend_read, dev, NULL, NULL);
4bbeeba0
MAL
1861
1862 if (reply_supported) {
24e34754 1863 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
4bbeeba0
MAL
1864 }
1865
1866 ret = vhost_user_write(dev, &msg, &sv[1], 1);
1867 if (ret) {
1868 goto out;
1869 }
1870
1871 if (reply_supported) {
1872 ret = process_message_reply(dev, &msg);
1873 }
1874
1875out:
1876 close(sv[1]);
1877 if (ret) {
f8ed3648 1878 close_backend_channel(u);
4bbeeba0
MAL
1879 }
1880
1881 return ret;
1882}
1883
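/*
 * The channel set up above is a plain socketpair: QEMU keeps sv[0],
 * wraps it in a QIOChannel watched by backend_read(), and passes
 * sv[1] to the backend as the ancillary fd of the
 * VHOST_USER_SET_BACKEND_REQ_FD message. From then on the backend
 * sends its VHOST_USER_BACKEND_* requests over that descriptor.
 */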
18658a3c 1884#ifdef CONFIG_LINUX
f82c1116
DDAG
1885/*
1886 * Called back from the postcopy fault thread when a fault is received on our
1887 * ufd.
1888 * TODO: This is Linux specific
1889 */
1890static int vhost_user_postcopy_fault_handler(struct PostCopyFD *pcfd,
1891 void *ufd)
1892{
375318d0
DDAG
1893 struct vhost_dev *dev = pcfd->data;
1894 struct vhost_user *u = dev->opaque;
1895 struct uffd_msg *msg = ufd;
1896 uint64_t faultaddr = msg->arg.pagefault.address;
1897 RAMBlock *rb = NULL;
1898 uint64_t rb_offset;
1899 int i;
1900
1901 trace_vhost_user_postcopy_fault_handler(pcfd->idstr, faultaddr,
1902 dev->mem->nregions);
1903 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1904 trace_vhost_user_postcopy_fault_handler_loop(i,
1905 u->postcopy_client_bases[i], dev->mem->regions[i].memory_size);
1906 if (faultaddr >= u->postcopy_client_bases[i]) {
1907 /* Offset of the fault address in the vhost region */
1908 uint64_t region_offset = faultaddr - u->postcopy_client_bases[i];
1909 if (region_offset < dev->mem->regions[i].memory_size) {
1910 rb_offset = region_offset + u->region_rb_offset[i];
1911 trace_vhost_user_postcopy_fault_handler_found(i,
1912 region_offset, rb_offset);
1913 rb = u->region_rb[i];
1914 return postcopy_request_shared_page(pcfd, rb, faultaddr,
1915 rb_offset);
1916 }
1917 }
1918 }
1919 error_report("%s: Failed to find region for fault %" PRIx64,
1920 __func__, faultaddr);
1921 return -1;
f82c1116
DDAG
1922}
1923
c07e3615
DDAG
1924static int vhost_user_postcopy_waker(struct PostCopyFD *pcfd, RAMBlock *rb,
1925 uint64_t offset)
1926{
1927 struct vhost_dev *dev = pcfd->data;
1928 struct vhost_user *u = dev->opaque;
1929 int i;
1930
1931 trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb), offset);
1932
1933 if (!u) {
1934 return 0;
1935 }
1936 /* Translate the offset into an address in the client's address space */
1937 for (i = 0; i < MIN(dev->mem->nregions, u->region_rb_len); i++) {
1938 if (u->region_rb[i] == rb &&
1939 offset >= u->region_rb_offset[i] &&
1940 offset < (u->region_rb_offset[i] +
1941 dev->mem->regions[i].memory_size)) {
1942 uint64_t client_addr = (offset - u->region_rb_offset[i]) +
1943 u->postcopy_client_bases[i];
1944 trace_vhost_user_postcopy_waker_found(client_addr);
1945 return postcopy_wake_shared(pcfd, client_addr, rb);
1946 }
1947 }
1948
1949 trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb), offset);
1950 return 0;
1951}
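/*
 * The two handlers above perform inverse translations. On a fault,
 * a client (backend) virtual address is mapped back to a RAMBlock
 * offset:
 *     rb_offset = (faultaddr - postcopy_client_bases[i]) + region_rb_offset[i]
 * and on a wake, a RAMBlock offset is mapped into the client address
 * space:
 *     client_addr = (offset - region_rb_offset[i]) + postcopy_client_bases[i]
 */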
18658a3c 1952#endif
c07e3615 1953
d3dff7a5
DDAG
1954/*
1955 * Called at the start of an inbound postcopy on reception of the
1956 * 'advise' command.
1957 */
1958static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp)
1959{
18658a3c 1960#ifdef CONFIG_LINUX
d3dff7a5 1961 struct vhost_user *u = dev->opaque;
4d0cf552 1962 CharBackend *chr = u->user->chr;
d3dff7a5 1963 int ufd;
025faa87 1964 int ret;
d3dff7a5
DDAG
1965 VhostUserMsg msg = {
1966 .hdr.request = VHOST_USER_POSTCOPY_ADVISE,
1967 .hdr.flags = VHOST_USER_VERSION,
1968 };
1969
025faa87
RK
1970 ret = vhost_user_write(dev, &msg, NULL, 0);
1971 if (ret < 0) {
d3dff7a5 1972 error_setg(errp, "Failed to send postcopy_advise to vhost");
025faa87 1973 return ret;
d3dff7a5
DDAG
1974 }
1975
025faa87
RK
1976 ret = vhost_user_read(dev, &msg);
1977 if (ret < 0) {
d3dff7a5 1978 error_setg(errp, "Failed to get postcopy_advise reply from vhost");
025faa87 1979 return ret;
d3dff7a5
DDAG
1980 }
1981
1982 if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) {
1983 error_setg(errp, "Unexpected msg type. Expected %d received %d",
1984 VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request);
025faa87 1985 return -EPROTO;
d3dff7a5
DDAG
1986 }
1987
1988 if (msg.hdr.size) {
1989 error_setg(errp, "Received bad msg size.");
025faa87 1990 return -EPROTO;
d3dff7a5
DDAG
1991 }
1992 ufd = qemu_chr_fe_get_msgfd(chr);
1993 if (ufd < 0) {
1994 error_setg(errp, "%s: Failed to get ufd", __func__);
025faa87 1995 return -EIO;
d3dff7a5 1996 }
ff5927ba 1997 qemu_socket_set_nonblock(ufd);
d3dff7a5 1998
f82c1116
DDAG
1999 /* register ufd with userfault thread */
2000 u->postcopy_fd.fd = ufd;
2001 u->postcopy_fd.data = dev;
2002 u->postcopy_fd.handler = vhost_user_postcopy_fault_handler;
c07e3615 2003 u->postcopy_fd.waker = vhost_user_postcopy_waker;
f82c1116
DDAG
2004 u->postcopy_fd.idstr = "vhost-user"; /* Need to find unique name */
2005 postcopy_register_shared_ufd(&u->postcopy_fd);
d3dff7a5 2006 return 0;
18658a3c
PB
2007#else
2008 error_setg(errp, "Postcopy not supported on non-Linux systems");
025faa87 2009 return -ENOSYS;
18658a3c 2010#endif
d3dff7a5
DDAG
2011}
2012
6864a7b5
DDAG
2013/*
2014 * Called at the switch to postcopy on reception of the 'listen' command.
2015 */
2016static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp)
2017{
2018 struct vhost_user *u = dev->opaque;
2019 int ret;
2020 VhostUserMsg msg = {
2021 .hdr.request = VHOST_USER_POSTCOPY_LISTEN,
2022 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2023 };
2024 u->postcopy_listen = true;
025faa87 2025
6864a7b5 2026 trace_vhost_user_postcopy_listen();
025faa87
RK
2027
2028 ret = vhost_user_write(dev, &msg, NULL, 0);
2029 if (ret < 0) {
6864a7b5 2030 error_setg(errp, "Failed to send postcopy_listen to vhost");
025faa87 2031 return ret;
6864a7b5
DDAG
2032 }
2033
2034 ret = process_message_reply(dev, &msg);
2035 if (ret) {
2036 error_setg(errp, "Failed to receive reply to postcopy_listen");
2037 return ret;
2038 }
2039
2040 return 0;
2041}
2042
46343570
DDAG
2043/*
2044 * Called at the end of postcopy
2045 */
2046static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp)
2047{
2048 VhostUserMsg msg = {
2049 .hdr.request = VHOST_USER_POSTCOPY_END,
2050 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
2051 };
2052 int ret;
2053 struct vhost_user *u = dev->opaque;
2054
2055 trace_vhost_user_postcopy_end_entry();
025faa87
RK
2056
2057 ret = vhost_user_write(dev, &msg, NULL, 0);
2058 if (ret < 0) {
46343570 2059 error_setg(errp, "Failed to send postcopy_end to vhost");
025faa87 2060 return ret;
46343570
DDAG
2061 }
2062
2063 ret = process_message_reply(dev, &msg);
2064 if (ret) {
2065 error_setg(errp, "Failed to receive reply to postcopy_end");
2066 return ret;
2067 }
2068 postcopy_unregister_shared_ufd(&u->postcopy_fd);
c4f75385 2069 close(u->postcopy_fd.fd);
46343570
DDAG
2070 u->postcopy_fd.handler = NULL;
2071
2072 trace_vhost_user_postcopy_end_exit();
2073
2074 return 0;
2075}
2076
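/*
 * Postcopy therefore proceeds in three steps as seen by the backend:
 * ADVISE, where the backend hands its userfaultfd back to QEMU so
 * shared-page faults can be serviced; LISTEN, when the destination
 * switches to demand paging; and END, after which the ufd is
 * unregistered and closed.
 */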
9ccbfe14
DDAG
2077static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier,
2078 void *opaque)
2079{
2080 struct PostcopyNotifyData *pnd = opaque;
2081 struct vhost_user *u = container_of(notifier, struct vhost_user,
2082 postcopy_notifier);
2083 struct vhost_dev *dev = u->dev;
2084
2085 switch (pnd->reason) {
2086 case POSTCOPY_NOTIFY_PROBE:
2087 if (!virtio_has_feature(dev->protocol_features,
2088 VHOST_USER_PROTOCOL_F_PAGEFAULT)) {
2089 /* TODO: Get the device name into this error somehow */
2090 error_setg(pnd->errp,
2091 "vhost-user backend not capable of postcopy");
2092 return -ENOENT;
2093 }
2094 break;
2095
d3dff7a5
DDAG
2096 case POSTCOPY_NOTIFY_INBOUND_ADVISE:
2097 return vhost_user_postcopy_advise(dev, pnd->errp);
2098
6864a7b5
DDAG
2099 case POSTCOPY_NOTIFY_INBOUND_LISTEN:
2100 return vhost_user_postcopy_listen(dev, pnd->errp);
2101
46343570
DDAG
2102 case POSTCOPY_NOTIFY_INBOUND_END:
2103 return vhost_user_postcopy_end(dev, pnd->errp);
2104
9ccbfe14
DDAG
2105 default:
2106 /* Ignore notifications we don't know about */
2107 break;
2108 }
2109
2110 return 0;
2111}
2112
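/*
 * Device initialisation negotiates, in order: the backend's feature
 * bits, the protocol features (only if VHOST_USER_F_PROTOCOL_FEATURES
 * is offered), the maximum queue count (F_MQ), the RAM slot limit
 * (F_CONFIGURE_MEM_SLOTS) and, for the first queue pair only, the
 * backend request channel (F_BACKEND_REQ).
 */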
28770ff9
KW
2113static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque,
2114 Error **errp)
5f6f6664 2115{
56534930 2116 uint64_t features, ram_slots;
2152f3fe 2117 struct vhost_user *u;
56534930 2118 VhostUserState *vus = (VhostUserState *) opaque;
dcb10c00
MT
2119 int err;
2120
5f6f6664
NN
2121 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2122
2152f3fe 2123 u = g_new0(struct vhost_user, 1);
56534930 2124 u->user = vus;
9ccbfe14 2125 u->dev = dev;
2152f3fe 2126 dev->opaque = u;
5f6f6664 2127
21e70425 2128 err = vhost_user_get_features(dev, &features);
dcb10c00 2129 if (err < 0) {
998647dc 2130 error_setg_errno(errp, -err, "vhost_backend_init failed");
f2a6e6c4 2131 return err;
dcb10c00
MT
2132 }
2133
2134 if (virtio_has_feature(features, VHOST_USER_F_PROTOCOL_FEATURES)) {
56534930
AB
2135 bool supports_f_config = vus->supports_config ||
2136 (dev->config_ops && dev->config_ops->vhost_dev_config_notifier);
2137 uint64_t protocol_features;
2138
dcb10c00
MT
2139 dev->backend_features |= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES;
2140
21e70425 2141 err = vhost_user_get_u64(dev, VHOST_USER_GET_PROTOCOL_FEATURES,
6dcdd06e 2142 &protocol_features);
dcb10c00 2143 if (err < 0) {
998647dc 2144 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
28770ff9 2145 return -EPROTO;
dcb10c00
MT
2146 }
2147
56534930
AB
2148 /*
2149 * We will use all the protocol features we support, although
2150 * we suppress F_CONFIG if we know QEMU's internal code cannot
2151 * support it.
2152 */
2153 protocol_features &= VHOST_USER_PROTOCOL_FEATURE_MASK;
2154
2155 if (supports_f_config) {
2156 if (!virtio_has_feature(protocol_features,
2157 VHOST_USER_PROTOCOL_F_CONFIG)) {
fb38d0c9 2158 error_setg(errp, "vhost-user device expecting "
56534930 2159 "VHOST_USER_PROTOCOL_F_CONFIG but the vhost-user backend does "
fb38d0c9 2160 "not support it.");
56534930
AB
2161 return -EPROTO;
2162 }
2163 } else {
2164 if (virtio_has_feature(protocol_features,
2165 VHOST_USER_PROTOCOL_F_CONFIG)) {
90e31232
AE
2166 warn_report("vhost-user backend supports "
2167 "VHOST_USER_PROTOCOL_F_CONFIG but QEMU does not.");
56534930
AB
2168 protocol_features &= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
2169 }
1c3e5a26
MC
2170 }
2171
56534930
AB
2172 /* final set of protocol features */
2173 dev->protocol_features = protocol_features;
21e70425 2174 err = vhost_user_set_protocol_features(dev, dev->protocol_features);
dcb10c00 2175 if (err < 0) {
998647dc 2176 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
28770ff9 2177 return -EPROTO;
dcb10c00 2178 }
e2051e9e
YL
2179
2180 /* Query the max queues we support if the backend supports multiple queues */
2181 if (dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_MQ)) {
21e70425
MAL
2182 err = vhost_user_get_u64(dev, VHOST_USER_GET_QUEUE_NUM,
2183 &dev->max_queues);
e2051e9e 2184 if (err < 0) {
998647dc 2185 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
28770ff9 2186 return -EPROTO;
e2051e9e 2187 }
84affad1
KW
2188 } else {
2189 dev->max_queues = 1;
e2051e9e 2190 }
84affad1 2191
c90bd505 2192 if (dev->num_queues && dev->max_queues < dev->num_queues) {
28770ff9
KW
2193 error_setg(errp, "The maximum number of queues supported by the "
2194 "backend is %" PRIu64, dev->max_queues);
c90bd505
KW
2195 return -EINVAL;
2196 }
6dcdd06e
MC
2197
2198 if (virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM) &&
2199 !(virtio_has_feature(dev->protocol_features,
a84ec993 2200 VHOST_USER_PROTOCOL_F_BACKEND_REQ) &&
6dcdd06e
MC
2201 virtio_has_feature(dev->protocol_features,
2202 VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
28770ff9 2203 error_setg(errp, "IOMMU support requires reply-ack and "
f8ed3648 2204 "backend-req protocol features.");
28770ff9 2205 return -EINVAL;
6dcdd06e 2206 }
6b0eff1a
RN
2207
2208 /* Get the max memory regions if the backend supports configurable RAM slots */
2209 if (!virtio_has_feature(dev->protocol_features,
2210 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS)) {
27598393 2211 u->user->memory_slots = VHOST_MEMORY_BASELINE_NREGIONS;
6b0eff1a
RN
2212 } else {
2213 err = vhost_user_get_max_memslots(dev, &ram_slots);
2214 if (err < 0) {
998647dc 2215 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
28770ff9 2216 return -EPROTO;
6b0eff1a
RN
2217 }
2218
2219 if (ram_slots < u->user->memory_slots) {
28770ff9
KW
2220 error_setg(errp, "The backend specified a max ram slots limit "
2221 "of %" PRIu64", when the prior validated limit was "
2222 "%d. This limit should never decrease.", ram_slots,
2223 u->user->memory_slots);
2224 return -EINVAL;
6b0eff1a
RN
2225 }
2226
27598393 2227 u->user->memory_slots = MIN(ram_slots, VHOST_USER_MAX_RAM_SLOTS);
6b0eff1a 2228 }
dcb10c00
MT
2229 }
2230
d2fc4402
MAL
2231 if (dev->migration_blocker == NULL &&
2232 !virtio_has_feature(dev->protocol_features,
2233 VHOST_USER_PROTOCOL_F_LOG_SHMFD)) {
2234 error_setg(&dev->migration_blocker,
2235 "Migration disabled: vhost-user backend lacks "
2236 "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
2237 }
2238
67b3965e 2239 if (dev->vq_index == 0) {
f8ed3648 2240 err = vhost_setup_backend_channel(dev);
67b3965e 2241 if (err < 0) {
998647dc 2242 error_setg_errno(errp, EPROTO, "vhost_backend_init failed");
28770ff9 2243 return -EPROTO;
67b3965e 2244 }
4bbeeba0
MAL
2245 }
2246
9ccbfe14
DDAG
2247 u->postcopy_notifier.notify = vhost_user_postcopy_notifier;
2248 postcopy_add_notifier(&u->postcopy_notifier);
2249
5f6f6664
NN
2250 return 0;
2251}
2252
4d0cf552 2253static int vhost_user_backend_cleanup(struct vhost_dev *dev)
5f6f6664 2254{
2152f3fe
MAL
2255 struct vhost_user *u;
2256
5f6f6664
NN
2257 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2258
2152f3fe 2259 u = dev->opaque;
9ccbfe14
DDAG
2260 if (u->postcopy_notifier.notify) {
2261 postcopy_remove_notifier(&u->postcopy_notifier);
2262 u->postcopy_notifier.notify = NULL;
2263 }
c4f75385
IM
2264 u->postcopy_listen = false;
2265 if (u->postcopy_fd.handler) {
2266 postcopy_unregister_shared_ufd(&u->postcopy_fd);
2267 close(u->postcopy_fd.fd);
2268 u->postcopy_fd.handler = NULL;
2269 }
f8ed3648
MP
2270 if (u->backend_ioc) {
2271 close_backend_channel(u);
4bbeeba0 2272 }
905125d0
DDAG
2273 g_free(u->region_rb);
2274 u->region_rb = NULL;
2275 g_free(u->region_rb_offset);
2276 u->region_rb_offset = NULL;
2277 u->region_rb_len = 0;
2152f3fe 2278 g_free(u);
5f6f6664
NN
2279 dev->opaque = NULL;
2280
2281 return 0;
2282}
2283
fc57fd99
YL
2284static int vhost_user_get_vq_index(struct vhost_dev *dev, int idx)
2285{
2286 assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
2287
2288 return idx;
2289}
2290
2ce68e4c
IM
2291static int vhost_user_memslots_limit(struct vhost_dev *dev)
2292{
6b0eff1a
RN
2293 struct vhost_user *u = dev->opaque;
2294
2295 return u->user->memory_slots;
2ce68e4c
IM
2296}
2297
1be0ac21
MAL
2298static bool vhost_user_requires_shm_log(struct vhost_dev *dev)
2299{
2300 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2301
2302 return virtio_has_feature(dev->protocol_features,
2303 VHOST_USER_PROTOCOL_F_LOG_SHMFD);
2304}
2305
3e866365
TC
2306static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr)
2307{
ebf2a499 2308 VhostUserMsg msg = { };
3e866365
TC
2309
2310 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2311
2312 /* If the guest supports GUEST_ANNOUNCE, do nothing */
2313 if (virtio_has_feature(dev->acked_features, VIRTIO_NET_F_GUEST_ANNOUNCE)) {
2314 return 0;
2315 }
2316
2317 /* If the backend supports VHOST_USER_PROTOCOL_F_RARP, ask it to send the RARP */
2318 if (virtio_has_feature(dev->protocol_features,
2319 VHOST_USER_PROTOCOL_F_RARP)) {
24e34754
MT
2320 msg.hdr.request = VHOST_USER_SEND_RARP;
2321 msg.hdr.flags = VHOST_USER_VERSION;
7f4a930e 2322 memcpy((char *)&msg.payload.u64, mac_addr, 6);
24e34754 2323 msg.hdr.size = sizeof(msg.payload.u64);
3e866365 2324
c4843a45 2325 return vhost_user_write(dev, &msg, NULL, 0);
3e866365 2326 }
025faa87 2327 return -ENOTSUP;
3e866365
TC
2328}
2329
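/*
 * For vhost-user-net, migration_done above relies on the guest to
 * announce itself when VIRTIO_NET_F_GUEST_ANNOUNCE was negotiated;
 * otherwise, if the backend offers VHOST_USER_PROTOCOL_F_RARP, the
 * backend is asked to broadcast a RARP frame for the given MAC
 * address on the destination.
 */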
c5f048d8
MC
2330static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu)
2331{
2332 VhostUserMsg msg;
2333 bool reply_supported = virtio_has_feature(dev->protocol_features,
2334 VHOST_USER_PROTOCOL_F_REPLY_ACK);
025faa87 2335 int ret;
c5f048d8
MC
2336
2337 if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) {
2338 return 0;
2339 }
2340
24e34754 2341 msg.hdr.request = VHOST_USER_NET_SET_MTU;
c5f048d8 2342 msg.payload.u64 = mtu;
24e34754
MT
2343 msg.hdr.size = sizeof(msg.payload.u64);
2344 msg.hdr.flags = VHOST_USER_VERSION;
c5f048d8 2345 if (reply_supported) {
24e34754 2346 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
c5f048d8
MC
2347 }
2348
025faa87
RK
2349 ret = vhost_user_write(dev, &msg, NULL, 0);
2350 if (ret < 0) {
2351 return ret;
c5f048d8
MC
2352 }
2353
f8ed3648 2354 /* If reply_ack is supported, the backend has to ack that the specified MTU is valid */
c5f048d8 2355 if (reply_supported) {
3cf7daf8 2356 return process_message_reply(dev, &msg);
c5f048d8
MC
2357 }
2358
2359 return 0;
2360}
2361
6dcdd06e
MC
2362static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev,
2363 struct vhost_iotlb_msg *imsg)
2364{
025faa87 2365 int ret;
6dcdd06e 2366 VhostUserMsg msg = {
24e34754
MT
2367 .hdr.request = VHOST_USER_IOTLB_MSG,
2368 .hdr.size = sizeof(msg.payload.iotlb),
2369 .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
6dcdd06e
MC
2370 .payload.iotlb = *imsg,
2371 };
2372
025faa87
RK
2373 ret = vhost_user_write(dev, &msg, NULL, 0);
2374 if (ret < 0) {
2375 return ret;
6dcdd06e
MC
2376 }
2377
2378 return process_message_reply(dev, &msg);
2379}
2380
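/*
 * IOTLB updates and invalidations are sent with NEED_REPLY set and
 * waited on synchronously. Miss requests travel in the opposite
 * direction: the backend raises VHOST_USER_BACKEND_IOTLB_MSG on the
 * backend channel, which backend_read() forwards to
 * vhost_backend_handle_iotlb_msg().
 */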
2381
2382static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled)
2383{
2384 /* No-op as the receive channel is not dedicated to IOTLB messages. */
2385}
2386
4c3e257b 2387static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config,
50de5138 2388 uint32_t config_len, Error **errp)
4c3e257b 2389{
025faa87 2390 int ret;
4c3e257b 2391 VhostUserMsg msg = {
24e34754
MT
2392 .hdr.request = VHOST_USER_GET_CONFIG,
2393 .hdr.flags = VHOST_USER_VERSION,
2394 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + config_len,
4c3e257b
CL
2395 };
2396
1c3e5a26
MC
2397 if (!virtio_has_feature(dev->protocol_features,
2398 VHOST_USER_PROTOCOL_F_CONFIG)) {
50de5138
KW
2399 error_setg(errp, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
2400 return -EINVAL;
1c3e5a26
MC
2401 }
2402
50de5138 2403 assert(config_len <= VHOST_USER_MAX_CONFIG_SIZE);
4c3e257b
CL
2404
2405 msg.payload.config.offset = 0;
2406 msg.payload.config.size = config_len;
025faa87
RK
2407 ret = vhost_user_write(dev, &msg, NULL, 0);
2408 if (ret < 0) {
2409 error_setg_errno(errp, -ret, "vhost_get_config failed");
2410 return ret;
4c3e257b
CL
2411 }
2412
025faa87
RK
2413 ret = vhost_user_read(dev, &msg);
2414 if (ret < 0) {
2415 error_setg_errno(errp, -ret, "vhost_get_config failed");
2416 return ret;
4c3e257b
CL
2417 }
2418
24e34754 2419 if (msg.hdr.request != VHOST_USER_GET_CONFIG) {
50de5138
KW
2420 error_setg(errp,
2421 "Received unexpected msg type. Expected %d received %d",
2422 VHOST_USER_GET_CONFIG, msg.hdr.request);
025faa87 2423 return -EPROTO;
4c3e257b
CL
2424 }
2425
24e34754 2426 if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) {
50de5138 2427 error_setg(errp, "Received bad msg size.");
025faa87 2428 return -EPROTO;
4c3e257b
CL
2429 }
2430
2431 memcpy(config, msg.payload.config.region, config_len);
2432
2433 return 0;
2434}
2435
2436static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data,
2437 uint32_t offset, uint32_t size, uint32_t flags)
2438{
025faa87 2439 int ret;
4c3e257b
CL
2440 uint8_t *p;
2441 bool reply_supported = virtio_has_feature(dev->protocol_features,
2442 VHOST_USER_PROTOCOL_F_REPLY_ACK);
2443
2444 VhostUserMsg msg = {
24e34754
MT
2445 .hdr.request = VHOST_USER_SET_CONFIG,
2446 .hdr.flags = VHOST_USER_VERSION,
2447 .hdr.size = VHOST_USER_CONFIG_HDR_SIZE + size,
4c3e257b
CL
2448 };
2449
1c3e5a26
MC
2450 if (!virtio_has_feature(dev->protocol_features,
2451 VHOST_USER_PROTOCOL_F_CONFIG)) {
025faa87 2452 return -ENOTSUP;
1c3e5a26
MC
2453 }
2454
4c3e257b 2455 if (reply_supported) {
24e34754 2456 msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK;
4c3e257b
CL
2457 }
2458
2459 if (size > VHOST_USER_MAX_CONFIG_SIZE) {
025faa87 2460 return -EINVAL;
4c3e257b
CL
2461 }
2462
2463 msg.payload.config.offset = offset,
2464 msg.payload.config.size = size,
2465 msg.payload.config.flags = flags,
2466 p = msg.payload.config.region;
2467 memcpy(p, data, size);
2468
025faa87
RK
2469 ret = vhost_user_write(dev, &msg, NULL, 0);
2470 if (ret < 0) {
2471 return ret;
4c3e257b
CL
2472 }
2473
2474 if (reply_supported) {
2475 return process_message_reply(dev, &msg);
2476 }
2477
2478 return 0;
2479}
2480
efbfeb81
GA
2481static int vhost_user_crypto_create_session(struct vhost_dev *dev,
2482 void *session_info,
2483 uint64_t *session_id)
2484{
025faa87 2485 int ret;
efbfeb81
GA
2486 bool crypto_session = virtio_has_feature(dev->protocol_features,
2487 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
5c33f978 2488 CryptoDevBackendSessionInfo *backend_info = session_info;
efbfeb81
GA
2489 VhostUserMsg msg = {
2490 .hdr.request = VHOST_USER_CREATE_CRYPTO_SESSION,
2491 .hdr.flags = VHOST_USER_VERSION,
2492 .hdr.size = sizeof(msg.payload.session),
2493 };
2494
2495 assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER);
2496
2497 if (!crypto_session) {
2498 error_report("vhost-user trying to send unhandled ioctl");
025faa87 2499 return -ENOTSUP;
efbfeb81
GA
2500 }
2501
5c33f978
GM
2502 if (backend_info->op_code == VIRTIO_CRYPTO_AKCIPHER_CREATE_SESSION) {
2503 CryptoDevBackendAsymSessionInfo *sess = &backend_info->u.asym_sess_info;
2504 size_t keylen;
2505
2506 memcpy(&msg.payload.session.u.asym.session_setup_data, sess,
2507 sizeof(CryptoDevBackendAsymSessionInfo));
2508 if (sess->keylen) {
2509 keylen = sizeof(msg.payload.session.u.asym.key);
2510 if (sess->keylen > keylen) {
2511 error_report("Unsupported asymmetric key size");
2512 return -ENOTSUP;
2513 }
2514
2515 memcpy(&msg.payload.session.u.asym.key, sess->key,
2516 sess->keylen);
2517 }
2518 } else {
2519 CryptoDevBackendSymSessionInfo *sess = &backend_info->u.sym_sess_info;
2520 size_t keylen;
2521
2522 memcpy(&msg.payload.session.u.sym.session_setup_data, sess,
2523 sizeof(CryptoDevBackendSymSessionInfo));
2524 if (sess->key_len) {
2525 keylen = sizeof(msg.payload.session.u.sym.key);
2526 if (sess->key_len > keylen) {
2527 error_report("Unsupported cipher key size");
2528 return -ENOTSUP;
2529 }
2530
2531 memcpy(&msg.payload.session.u.sym.key, sess->cipher_key,
2532 sess->key_len);
2533 }
2534
2535 if (sess->auth_key_len > 0) {
2536 keylen = sizeof(msg.payload.session.u.sym.auth_key);
2537 if (sess->auth_key_len > keylen) {
2538 error_report("Unsupported auth key size");
2539 return -ENOTSUP;
2540 }
2541
2542 memcpy(&msg.payload.session.u.sym.auth_key, sess->auth_key,
2543 sess->auth_key_len);
2544 }
efbfeb81 2545 }
5c33f978
GM
2546
2547 msg.payload.session.op_code = backend_info->op_code;
2548 msg.payload.session.session_id = backend_info->session_id;
025faa87
RK
2549 ret = vhost_user_write(dev, &msg, NULL, 0);
2550 if (ret < 0) {
2551 error_report("vhost_user_write() returned %d, create session failed",
2552 ret);
2553 return ret;
efbfeb81
GA
2554 }
2555
025faa87
RK
2556 ret = vhost_user_read(dev, &msg);
2557 if (ret < 0) {
2558 error_report("vhost_user_read() returned %d, create session failed",
2559 ret);
2560 return ret;
efbfeb81
GA
2561 }
2562
2563 if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) {
2564 error_report("Received unexpected msg type. Expected %d received %d",
2565 VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request);
025faa87 2566 return -EPROTO;
efbfeb81
GA
2567 }
2568
2569 if (msg.hdr.size != sizeof(msg.payload.session)) {
2570 error_report("Received bad msg size.");
025faa87 2571 return -EPROTO;
efbfeb81
GA
2572 }
2573
2574 if (msg.payload.session.session_id < 0) {
2575 error_report("Bad session id: %" PRId64 "",
2576 msg.payload.session.session_id);
025faa87 2577 return -EINVAL;
efbfeb81
GA
2578 }
2579 *session_id = msg.payload.session.session_id;
2580
2581 return 0;
2582}
2583
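/*
 * The backend answers the create request with a message of the same
 * type whose payload carries the allocated session; a negative
 * session_id in the reply indicates that session creation failed.
 */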
2584static int
2585vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id)
2586{
025faa87 2587 int ret;
efbfeb81
GA
2588 bool crypto_session = virtio_has_feature(dev->protocol_features,
2589 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION);
2590 VhostUserMsg msg = {
2591 .hdr.request = VHOST_USER_CLOSE_CRYPTO_SESSION,
2592 .hdr.flags = VHOST_USER_VERSION,
2593 .hdr.size = sizeof(msg.payload.u64),
2594 };
2595 msg.payload.u64 = session_id;
2596
2597 if (!crypto_session) {
2598 error_report("vhost-user trying to send unhandled ioctl");
025faa87 2599 return -ENOTSUP;
efbfeb81
GA
2600 }
2601
025faa87
RK
2602 ret = vhost_user_write(dev, &msg, NULL, 0);
2603 if (ret < 0) {
2604 error_report("vhost_user_write() returned %d, close session failed",
2605 ret);
2606 return ret;
efbfeb81
GA
2607 }
2608
2609 return 0;
2610}
2611
552b2522 2612static bool vhost_user_no_private_memslots(struct vhost_dev *dev)
988a2775 2613{
552b2522 2614 return true;
988a2775
TB
2615}
2616
5ad204bf
XY
2617static int vhost_user_get_inflight_fd(struct vhost_dev *dev,
2618 uint16_t queue_size,
2619 struct vhost_inflight *inflight)
2620{
2621 void *addr;
2622 int fd;
025faa87 2623 int ret;
5ad204bf
XY
2624 struct vhost_user *u = dev->opaque;
2625 CharBackend *chr = u->user->chr;
2626 VhostUserMsg msg = {
2627 .hdr.request = VHOST_USER_GET_INFLIGHT_FD,
2628 .hdr.flags = VHOST_USER_VERSION,
2629 .payload.inflight.num_queues = dev->nvqs,
2630 .payload.inflight.queue_size = queue_size,
2631 .hdr.size = sizeof(msg.payload.inflight),
2632 };
2633
2634 if (!virtio_has_feature(dev->protocol_features,
2635 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2636 return 0;
2637 }
2638
025faa87
RK
2639 ret = vhost_user_write(dev, &msg, NULL, 0);
2640 if (ret < 0) {
2641 return ret;
5ad204bf
XY
2642 }
2643
025faa87
RK
2644 ret = vhost_user_read(dev, &msg);
2645 if (ret < 0) {
2646 return ret;
5ad204bf
XY
2647 }
2648
2649 if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) {
2650 error_report("Received unexpected msg type. "
2651 "Expected %d received %d",
2652 VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request);
025faa87 2653 return -EPROTO;
5ad204bf
XY
2654 }
2655
2656 if (msg.hdr.size != sizeof(msg.payload.inflight)) {
2657 error_report("Received bad msg size.");
025faa87 2658 return -EPROTO;
5ad204bf
XY
2659 }
2660
2661 if (!msg.payload.inflight.mmap_size) {
2662 return 0;
2663 }
2664
2665 fd = qemu_chr_fe_get_msgfd(chr);
2666 if (fd < 0) {
2667 error_report("Failed to get mem fd");
025faa87 2668 return -EIO;
5ad204bf
XY
2669 }
2670
2671 addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE,
2672 MAP_SHARED, fd, msg.payload.inflight.mmap_offset);
2673
2674 if (addr == MAP_FAILED) {
2675 error_report("Failed to mmap mem fd");
2676 close(fd);
025faa87 2677 return -EFAULT;
5ad204bf
XY
2678 }
2679
2680 inflight->addr = addr;
2681 inflight->fd = fd;
2682 inflight->size = msg.payload.inflight.mmap_size;
2683 inflight->offset = msg.payload.inflight.mmap_offset;
2684 inflight->queue_size = queue_size;
2685
2686 return 0;
2687}
2688
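/*
 * The inflight region is shared memory allocated by the backend and
 * handed over as an fd in the GET_INFLIGHT_FD reply; QEMU only mmaps
 * and retains it. Handing the same fd back with SET_INFLIGHT_FD
 * (below), typically after a backend reconnect, lets the backend
 * resume requests that were still in flight when it went away.
 */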
2689static int vhost_user_set_inflight_fd(struct vhost_dev *dev,
2690 struct vhost_inflight *inflight)
2691{
2692 VhostUserMsg msg = {
2693 .hdr.request = VHOST_USER_SET_INFLIGHT_FD,
2694 .hdr.flags = VHOST_USER_VERSION,
2695 .payload.inflight.mmap_size = inflight->size,
2696 .payload.inflight.mmap_offset = inflight->offset,
2697 .payload.inflight.num_queues = dev->nvqs,
2698 .payload.inflight.queue_size = inflight->queue_size,
2699 .hdr.size = sizeof(msg.payload.inflight),
2700 };
2701
2702 if (!virtio_has_feature(dev->protocol_features,
2703 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
2704 return 0;
2705 }
2706
025faa87 2707 return vhost_user_write(dev, &msg, &inflight->fd, 1);
5ad204bf
XY
2708}
2709
503e3554
AB
2710static void vhost_user_state_destroy(gpointer data)
2711{
2712 VhostUserHostNotifier *n = (VhostUserHostNotifier *) data;
2713 if (n) {
2714 vhost_user_host_notifier_remove(n, NULL);
2715 object_unparent(OBJECT(&n->mr));
2716 /*
2717 * We can't free until vhost_user_host_notifier_remove has
2718 * done its thing, so schedule the free with RCU.
2719 */
2720 g_free_rcu(n, rcu);
2721 }
2722}
2723
0b99f224 2724bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp)
4d0cf552 2725{
0b99f224
MAL
2726 if (user->chr) {
2727 error_setg(errp, "Cannot initialize vhost-user state");
2728 return false;
2729 }
2730 user->chr = chr;
6b0eff1a 2731 user->memory_slots = 0;
503e3554
AB
2732 user->notifiers = g_ptr_array_new_full(VIRTIO_QUEUE_MAX / 4,
2733 &vhost_user_state_destroy);
0b99f224 2734 return true;
4d0cf552
TB
2735}
2736
2737void vhost_user_cleanup(VhostUserState *user)
2738{
0b99f224
MAL
2739 if (!user->chr) {
2740 return;
2741 }
c6effa9c 2742 memory_region_transaction_begin();
503e3554 2743 user->notifiers = (GPtrArray *) g_ptr_array_free(user->notifiers, true);
c6effa9c 2744 memory_region_transaction_commit();
0b99f224 2745 user->chr = NULL;
4d0cf552
TB
2746}
2747
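/*
 * A minimal usage sketch for the pair above (hypothetical caller,
 * not part of this file):
 *
 *     VhostUserState user;
 *     if (!vhost_user_init(&user, &chardev_be, errp)) {
 *         return;
 *     }
 *     ...use the chardev-backed vhost-user device...
 *     vhost_user_cleanup(&user);
 */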
71e076a0
AB
2748
2749typedef struct {
2750 vu_async_close_fn cb;
2751 DeviceState *dev;
2752 CharBackend *cd;
2753 struct vhost_dev *vhost;
2754} VhostAsyncCallback;
2755
2756static void vhost_user_async_close_bh(void *opaque)
2757{
2758 VhostAsyncCallback *data = opaque;
2759 struct vhost_dev *vhost = data->vhost;
2760
2761 /*
2762 * If the vhost_dev has been cleared in the meantime there is
2763 * nothing left to do as some other path has completed the
2764 * cleanup.
2765 */
2766 if (vhost->vdev) {
2767 data->cb(data->dev);
2768 }
2769
2770 g_free(data);
2771}
2772
2773/*
2774 * We only schedule the work if the machine is running. If suspended
2775 * we want to keep all the in-flight data as is for migration
2776 * purposes.
2777 */
2778void vhost_user_async_close(DeviceState *d,
2779 CharBackend *chardev, struct vhost_dev *vhost,
2780 vu_async_close_fn cb)
2781{
2782 if (!runstate_check(RUN_STATE_SHUTDOWN)) {
2783 /*
2784 * A close event may happen during a read/write, but vhost
2785 * code assumes the vhost_dev remains setup, so delay the
2786 * stop & clear.
2787 */
2788 AioContext *ctx = qemu_get_current_aio_context();
2789 VhostAsyncCallback *data = g_new0(VhostAsyncCallback, 1);
2790
2791 /* Save data for the callback */
2792 data->cb = cb;
2793 data->dev = d;
2794 data->cd = chardev;
2795 data->vhost = vhost;
2796
2797 /* Disable any further notifications on the chardev */
2798 qemu_chr_fe_set_handlers(chardev,
2799 NULL, NULL, NULL, NULL, NULL, NULL,
2800 false);
2801
2802 aio_bh_schedule_oneshot(ctx, vhost_user_async_close_bh, data);
2803
2804 /*
2805 * Move the vhost device to the stopped state. The vhost-user device
2806 * will be cleaned up and disconnected in the BH. This can be useful
2807 * in the vhost migration code: if a disconnect was caught, the
2808 * general vhost code can get the device state without knowing its
2809 * type (in this case vhost-user).
2810 *
2811 * Note if the vhost device is fully cleared by the time we
2812 * execute the bottom half we won't continue with the cleanup.
2813 */
2814 vhost->started = false;
2815 }
2816}
2817
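/*
 * When VHOST_USER_PROTOCOL_F_STATUS has been negotiated, starting and
 * stopping the device mirrors the virtio status register to the
 * backend: dev_start sets ACKNOWLEDGE, DRIVER and DRIVER_OK once the
 * last queue pair is started, and reset_status clears the status
 * again.
 */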
923b8921
YW
2818static int vhost_user_dev_start(struct vhost_dev *dev, bool started)
2819{
2820 if (!virtio_has_feature(dev->protocol_features,
2821 VHOST_USER_PROTOCOL_F_STATUS)) {
2822 return 0;
2823 }
2824
2825 /* Set device status only for last queue pair */
2826 if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
2827 return 0;
2828 }
2829
2830 if (started) {
2831 return vhost_user_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
2832 VIRTIO_CONFIG_S_DRIVER |
2833 VIRTIO_CONFIG_S_DRIVER_OK);
2834 } else {
6f8be29e
SH
2835 return 0;
2836 }
2837}
2838
2839static void vhost_user_reset_status(struct vhost_dev *dev)
2840{
2841 /* Set device status only for last queue pair */
2842 if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
2843 return;
2844 }
2845
2846 if (virtio_has_feature(dev->protocol_features,
2847 VHOST_USER_PROTOCOL_F_STATUS)) {
2848 vhost_user_set_status(dev, 0);
923b8921
YW
2849 }
2850}
2851
5f6f6664
NN
2852const VhostOps user_ops = {
2853 .backend_type = VHOST_BACKEND_TYPE_USER,
4d0cf552
TB
2854 .vhost_backend_init = vhost_user_backend_init,
2855 .vhost_backend_cleanup = vhost_user_backend_cleanup,
2ce68e4c 2856 .vhost_backend_memslots_limit = vhost_user_memslots_limit,
552b2522 2857 .vhost_backend_no_private_memslots = vhost_user_no_private_memslots,
21e70425
MAL
2858 .vhost_set_log_base = vhost_user_set_log_base,
2859 .vhost_set_mem_table = vhost_user_set_mem_table,
2860 .vhost_set_vring_addr = vhost_user_set_vring_addr,
2861 .vhost_set_vring_endian = vhost_user_set_vring_endian,
2862 .vhost_set_vring_num = vhost_user_set_vring_num,
2863 .vhost_set_vring_base = vhost_user_set_vring_base,
2864 .vhost_get_vring_base = vhost_user_get_vring_base,
2865 .vhost_set_vring_kick = vhost_user_set_vring_kick,
2866 .vhost_set_vring_call = vhost_user_set_vring_call,
60dc3c5b 2867 .vhost_set_vring_err = vhost_user_set_vring_err,
21e70425
MAL
2868 .vhost_set_features = vhost_user_set_features,
2869 .vhost_get_features = vhost_user_get_features,
2870 .vhost_set_owner = vhost_user_set_owner,
2871 .vhost_reset_device = vhost_user_reset_device,
2872 .vhost_get_vq_index = vhost_user_get_vq_index,
2873 .vhost_set_vring_enable = vhost_user_set_vring_enable,
1be0ac21 2874 .vhost_requires_shm_log = vhost_user_requires_shm_log,
3e866365 2875 .vhost_migration_done = vhost_user_migration_done,
c5f048d8 2876 .vhost_net_set_mtu = vhost_user_net_set_mtu,
6dcdd06e
MC
2877 .vhost_set_iotlb_callback = vhost_user_set_iotlb_callback,
2878 .vhost_send_device_iotlb_msg = vhost_user_send_device_iotlb_msg,
4c3e257b
CL
2879 .vhost_get_config = vhost_user_get_config,
2880 .vhost_set_config = vhost_user_set_config,
efbfeb81
GA
2881 .vhost_crypto_create_session = vhost_user_crypto_create_session,
2882 .vhost_crypto_close_session = vhost_user_crypto_close_session,
5ad204bf
XY
2883 .vhost_get_inflight_fd = vhost_user_get_inflight_fd,
2884 .vhost_set_inflight_fd = vhost_user_set_inflight_fd,
923b8921 2885 .vhost_dev_start = vhost_user_dev_start,
6f8be29e 2886 .vhost_reset_status = vhost_user_reset_status,
fc57fd99 2887};