/*
 * Copyright (c) 2013 Virtual Open Systems Sarl.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-net.h"
#include "chardev/char-fe.h"
#include "io/channel-socket.h"
#include "sysemu/kvm.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/sockets.h"
#include "sysemu/cryptodev.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "exec/ramblock.h"

#include <sys/ioctl.h>
#include <sys/socket.h>

#include "standard-headers/linux/vhost_types.h"

#include <linux/userfaultfd.h>
#define VHOST_MEMORY_BASELINE_NREGIONS 8
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_USER_SLAVE_MAX_FDS 8
/*
 * Set maximum number of RAM slots supported to
 * the maximum number supported by the target.
 */
#if defined(TARGET_X86) || defined(TARGET_X86_64) || \
    defined(TARGET_ARM) || defined(TARGET_ARM_64)
#include "hw/acpi/acpi.h"
#define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS

#elif defined(TARGET_PPC) || defined(TARGET_PPC_64)
#include "hw/ppc/spapr.h"
#define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS

#else
#define VHOST_USER_MAX_RAM_SLOTS 512
#endif
/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256
enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
    /* Feature 14 reserved for VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. */
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)
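
/*
 * Protocol features are only negotiated once the backend has advertised
 * VHOST_USER_F_PROTOCOL_FEATURES in its device features: see
 * vhost_user_backend_init() below, which reads them with
 * VHOST_USER_GET_PROTOCOL_FEATURES, masks the result with
 * VHOST_USER_PROTOCOL_FEATURE_MASK and acknowledges the subset it will use
 * with VHOST_USER_SET_PROTOCOL_FEATURES.
 */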
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_RESET_DEVICE = 34,
    /* Message number 35 reserved for VHOST_USER_VRING_KICK. */
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;
typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;
typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;
typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;
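
/*
 * A single VHOST_USER_SET_MEM_TABLE message can describe at most
 * VHOST_MEMORY_BASELINE_NREGIONS regions, one shared file descriptor per
 * region. Backends that negotiate VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS
 * are instead updated incrementally with VHOST_USER_ADD_MEM_REG and
 * VHOST_USER_REM_MEM_REG, allowing up to VHOST_USER_MAX_RAM_SLOTS regions.
 */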
typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;
typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;
typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;
#define VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN    512
#define VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN  64
typedef struct VhostUserCryptoSession {
    /* session id for success, -1 on errors */
    int64_t session_id;
    CryptoDevBackendSymSessionInfo session_setup_data;
    uint8_t key[VHOST_CRYPTO_SYM_CIPHER_MAX_KEY_LEN];
    uint8_t auth_key[VHOST_CRYPTO_SYM_HMAC_MAX_KEY_LEN];
} VhostUserCryptoSession;
static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                    + sizeof(c.size) \
                                    + sizeof(c.flags))
typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;
typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;
typedef struct {
    VhostUserRequest request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1<<2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */
} QEMU_PACKED VhostUserHeader;
typedef union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1<<8)
    uint64_t u64;
    struct vhost_vring_state state;
    struct vhost_vring_addr addr;
    VhostUserMemory memory;
    VhostUserMemRegMsg mem_reg;
    VhostUserLog log;
    struct vhost_iotlb_msg iotlb;
    VhostUserConfig config;
    VhostUserCryptoSession session;
    VhostUserVringArea area;
    VhostUserInflight inflight;
} VhostUserPayload;
typedef struct VhostUserMsg {
    VhostUserHeader hdr;
    VhostUserPayload payload;
} QEMU_PACKED VhostUserMsg;
static VhostUserMsg m __attribute__ ((unused));
#define VHOST_USER_HDR_SIZE (sizeof(VhostUserHeader))

#define VHOST_USER_PAYLOAD_SIZE (sizeof(VhostUserPayload))

/* The version of the protocol we support */
#define VHOST_USER_VERSION    (0x1)
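
/*
 * Every message on the socket is a VhostUserHeader followed by hdr.size
 * bytes of VhostUserPayload; any file descriptors travel as ancillary
 * (SCM_RIGHTS) data on the same write. As an illustrative sketch of the
 * pattern the helpers below follow, a simple request looks like:
 *
 *     VhostUserMsg msg = {
 *         .hdr.request = VHOST_USER_GET_FEATURES,
 *         .hdr.flags = VHOST_USER_VERSION,
 *     };
 *     vhost_user_write(dev, &msg, NULL, 0);
 *     vhost_user_read(dev, &msg);    // reply is read into the same buffer
 */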
struct vhost_user {
    struct vhost_dev *dev;
    /* Shared between vhost devs of the same virtio device */
    VhostUserState *user;
    QIOChannel *slave_ioc;
    GSource *slave_src;
    NotifierWithReturn postcopy_notifier;
    struct PostCopyFD postcopy_fd;
    uint64_t postcopy_client_bases[VHOST_USER_MAX_RAM_SLOTS];
    /* Length of the region_rb and region_rb_offset arrays */
    size_t region_rb_len;
    /* RAMBlock associated with a given region */
    RAMBlock **region_rb;
    /*
     * The offset from the start of the RAMBlock to the start of the
     * vhost region.
     */
    ram_addr_t *region_rb_offset;

    /* True once we've entered postcopy_listen */
    bool postcopy_listen;

    /* Our current regions */
    int num_shadow_regions;
    struct vhost_memory_region shadow_regions[VHOST_USER_MAX_RAM_SLOTS];
};

struct scrub_regions {
    struct vhost_memory_region *region;
    int reg_idx;
    int fd_idx;
};
static bool ioeventfd_enabled(void)
{
    return !kvm_enabled() || kvm_eventfds_enabled();
}
static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size = VHOST_USER_HDR_SIZE;

    r = qemu_chr_fe_read_all(chr, p, size);
    if (r != size) {
        int saved_errno = errno;
        error_report("Failed to read msg header. Read %d instead of %d."
                     " Original request %d.", r, size, msg->hdr.request);
        return r < 0 ? -saved_errno : -EIO;
    }

    /* validate received flags */
    if (msg->hdr.flags != (VHOST_USER_REPLY_MASK | VHOST_USER_VERSION)) {
        error_report("Failed to read msg header."
                     " Flags 0x%x instead of 0x%x.", msg->hdr.flags,
                     VHOST_USER_REPLY_MASK | VHOST_USER_VERSION);
        return -EPROTO;
    }

    return 0;
}
struct vhost_user_read_cb_data {
    struct vhost_dev *dev;
    VhostUserMsg *msg;
    GMainLoop *loop;
    int ret;
};
static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition,
                                   gpointer opaque)
{
    struct vhost_user_read_cb_data *data = opaque;
    struct vhost_dev *dev = data->dev;
    VhostUserMsg *msg = data->msg;
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    uint8_t *p = (uint8_t *) msg;
    int r, size;

    r = vhost_user_read_header(dev, msg);
    if (r < 0) {
        data->ret = r;
        goto end;
    }

    /* validate message size is sane */
    if (msg->hdr.size > VHOST_USER_PAYLOAD_SIZE) {
        error_report("Failed to read msg header."
                     " Size %d exceeds the maximum %zu.", msg->hdr.size,
                     VHOST_USER_PAYLOAD_SIZE);
        data->ret = -EPROTO;
        goto end;
    }

    if (msg->hdr.size) {
        p += VHOST_USER_HDR_SIZE;
        size = msg->hdr.size;
        r = qemu_chr_fe_read_all(chr, p, size);
        if (r != size) {
            int saved_errno = errno;
            error_report("Failed to read msg payload."
                         " Read %d instead of %d.", r, msg->hdr.size);
            data->ret = r < 0 ? -saved_errno : -EIO;
            goto end;
        }
    }

end:
    g_main_loop_quit(data->loop);
    return G_SOURCE_REMOVE;
}
static gboolean slave_read(QIOChannel *ioc, GIOCondition condition,
                           gpointer opaque);
/*
 * This updates the read handler to use a new event loop context.
 * Event sources are removed from the previous context: this ensures
 * that events detected in the previous context are purged. They will
 * be re-detected and processed in the new context.
 */
static void slave_update_read_handler(struct vhost_dev *dev,
                                      GMainContext *ctxt)
{
    struct vhost_user *u = dev->opaque;

    if (!u->slave_ioc) {
        return;
    }

    if (u->slave_src) {
        g_source_destroy(u->slave_src);
        g_source_unref(u->slave_src);
    }

    u->slave_src = qio_channel_add_watch_source(u->slave_ioc,
                                                G_IO_IN | G_IO_HUP,
                                                slave_read, dev, NULL,
                                                ctxt);
}
static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    GMainContext *prev_ctxt = chr->chr->gcontext;
    GMainContext *ctxt = g_main_context_new();
    GMainLoop *loop = g_main_loop_new(ctxt, FALSE);
    struct vhost_user_read_cb_data data = {
        .dev = dev,
        .loop = loop,
        .msg = msg,
        .ret = 0
    };

    /*
     * We want to be able to monitor the slave channel fd while waiting
     * for chr I/O. This requires an event loop, but we can't nest the
     * one to which chr is currently attached: its fd handlers might not
     * be prepared for re-entrancy. So we create a new one and switch chr
     * to use it.
     */
    slave_update_read_handler(dev, ctxt);
    qemu_chr_be_update_read_handlers(chr->chr, ctxt);
    qemu_chr_fe_add_watch(chr, G_IO_IN | G_IO_HUP, vhost_user_read_cb, &data);

    g_main_loop_run(loop);

    /*
     * Restore the previous event loop context. This also destroys/recreates
     * event sources: this guarantees that all pending events in the original
     * context that have been processed by the nested loop are purged.
     */
    qemu_chr_be_update_read_handlers(chr->chr, prev_ctxt);
    slave_update_read_handler(dev, NULL);

    g_main_loop_unref(loop);
    g_main_context_unref(ctxt);

    return data.ret;
}
static int process_message_reply(struct vhost_dev *dev,
                                 const VhostUserMsg *msg)
{
    int ret;
    VhostUserMsg msg_reply;

    if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        return 0;
    }

    ret = vhost_user_read(dev, &msg_reply);
    if (ret < 0) {
        return ret;
    }

    if (msg_reply.hdr.request != msg->hdr.request) {
        error_report("Received unexpected msg type. "
                     "Expected %d received %d",
                     msg->hdr.request, msg_reply.hdr.request);
        return -EPROTO;
    }

    return msg_reply.payload.u64 ? -EIO : 0;
}
static bool vhost_user_one_time_request(VhostUserRequest request)
{
    switch (request) {
    case VHOST_USER_SET_OWNER:
    case VHOST_USER_RESET_OWNER:
    case VHOST_USER_SET_MEM_TABLE:
    case VHOST_USER_GET_QUEUE_NUM:
    case VHOST_USER_NET_SET_MTU:
        return true;
    default:
        return false;
    }
}
/* most non-init callers ignore the error */
static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
                            int *fds, int fd_num)
{
    struct vhost_user *u = dev->opaque;
    CharBackend *chr = u->user->chr;
    int ret, size = VHOST_USER_HDR_SIZE + msg->hdr.size;

    /*
     * For non-vring specific requests, like VHOST_USER_SET_MEM_TABLE,
     * we only need to send it once, the first time. Later such
     * requests are simply ignored.
     */
    if (vhost_user_one_time_request(msg->hdr.request) && dev->vq_index != 0) {
        msg->hdr.flags &= ~VHOST_USER_NEED_REPLY_MASK;
        return 0;
    }

    if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) {
        error_report("Failed to set msg fds.");
        return -EINVAL;
    }

    ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size);
    if (ret != size) {
        int saved_errno = errno;
        error_report("Failed to write msg."
                     " Wrote %d instead of %d.", ret, size);
        return ret < 0 ? -saved_errno : -EIO;
    }

    return 0;
}
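
/*
 * Callers that need confirmation set VHOST_USER_NEED_REPLY_MASK in
 * msg.hdr.flags before vhost_user_write() and then call
 * process_message_reply(), which reads the backend's reply and treats a
 * non-zero payload.u64 as failure.
 */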
int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
{
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_GPU_SET_SOCKET,
        .hdr.flags = VHOST_USER_VERSION,
    };

    return vhost_user_write(dev, &msg, &fd, 1);
}
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
                                   struct vhost_log *log)
{
    int fds[VHOST_USER_MAX_RAM_SLOTS];
    size_t fd_num = 0;
    bool shmfd = virtio_has_feature(dev->protocol_features,
                                    VHOST_USER_PROTOCOL_F_LOG_SHMFD);
    int ret;
    VhostUserMsg msg = {
        .hdr.request = VHOST_USER_SET_LOG_BASE,
        .hdr.flags = VHOST_USER_VERSION,
        .payload.log.mmap_size = log->size * sizeof(*(log->log)),
        .payload.log.mmap_offset = 0,
        .hdr.size = sizeof(msg.payload.log),
    };

    if (shmfd && log->fd != -1) {
        fds[fd_num++] = log->fd;
    }

    ret = vhost_user_write(dev, &msg, fds, fd_num);
    if (ret < 0) {
        return ret;
    }

    if (shmfd) {
        msg.hdr.size = 0;
        ret = vhost_user_read(dev, &msg);
        if (ret < 0) {
            return ret;
        }

        if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) {
            error_report("Received unexpected msg type. "
                         "Expected %d received %d",
                         VHOST_USER_SET_LOG_BASE, msg.hdr.request);
            return -EPROTO;
        }
    }

    return 0;
}
static MemoryRegion *vhost_user_get_mr_data(uint64_t addr, ram_addr_t *offset,
                                            int *fd)
{
    MemoryRegion *mr;

    assert((uintptr_t)addr == addr);
    mr = memory_region_from_host((void *)(uintptr_t)addr, offset);
    *fd = memory_region_get_fd(mr);

    return mr;
}
static void vhost_user_fill_msg_region(VhostUserMemoryRegion *dst,
                                       struct vhost_memory_region *src,
                                       uint64_t mmap_offset)
{
    assert(src != NULL && dst != NULL);
    dst->userspace_addr = src->userspace_addr;
    dst->memory_size = src->memory_size;
    dst->guest_phys_addr = src->guest_phys_addr;
    dst->mmap_offset = mmap_offset;
}
static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u,
                                             struct vhost_dev *dev,
                                             VhostUserMsg *msg,
                                             int *fds, size_t *fd_num,
                                             bool track_ramblocks)
{
    int i, fd;
    ram_addr_t offset;
    MemoryRegion *mr;
    struct vhost_memory_region *reg;
    VhostUserMemoryRegion region_buffer;

    msg->hdr.request = VHOST_USER_SET_MEM_TABLE;

    for (i = 0; i < dev->mem->nregions; ++i) {
        reg = dev->mem->regions + i;

        mr = vhost_user_get_mr_data(reg->userspace_addr, &offset, &fd);
        if (fd > 0) {
            if (track_ramblocks) {
                assert(*fd_num < VHOST_MEMORY_BASELINE_NREGIONS);
                trace_vhost_user_set_mem_table_withfd(*fd_num, mr->name,
                                                      reg->memory_size,
                                                      reg->guest_phys_addr,
                                                      reg->userspace_addr,
                                                      offset);
                u->region_rb_offset[i] = offset;
                u->region_rb[i] = mr->ram_block;
            } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) {
                error_report("Failed preparing vhost-user memory table msg");
                return -ENOBUFS;
            }
            vhost_user_fill_msg_region(&region_buffer, reg, offset);
            msg->payload.memory.regions[*fd_num] = region_buffer;
            fds[(*fd_num)++] = fd;
        } else if (track_ramblocks) {
            u->region_rb_offset[i] = 0;
            u->region_rb[i] = NULL;
        }
    }

    msg->payload.memory.nregions = *fd_num;

    if (!*fd_num) {
        error_report("Failed initializing vhost-user memory map, "
                     "consider using -object memory-backend-file share=on");
        return -EINVAL;
    }

    msg->hdr.size = sizeof(msg->payload.memory.nregions);
    msg->hdr.size += sizeof(msg->payload.memory.padding);
    msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion);

    return 0;
}
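
/*
 * The SET_MEM_TABLE payload built above is nregions + padding followed by
 * one VhostUserMemoryRegion per shared region; the matching file
 * descriptors are attached out of band via qemu_chr_fe_set_msgfds() in
 * vhost_user_write().
 */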
static inline bool reg_equal(struct vhost_memory_region *shadow_reg,
                             struct vhost_memory_region *vdev_reg)
{
    return shadow_reg->guest_phys_addr == vdev_reg->guest_phys_addr &&
        shadow_reg->userspace_addr == vdev_reg->userspace_addr &&
        shadow_reg->memory_size == vdev_reg->memory_size;
}
635 static void scrub_shadow_regions(struct vhost_dev
*dev
,
636 struct scrub_regions
*add_reg
,
638 struct scrub_regions
*rem_reg
,
639 int *nr_rem_reg
, uint64_t *shadow_pcb
,
640 bool track_ramblocks
)
642 struct vhost_user
*u
= dev
->opaque
;
643 bool found
[VHOST_USER_MAX_RAM_SLOTS
] = {};
644 struct vhost_memory_region
*reg
, *shadow_reg
;
645 int i
, j
, fd
, add_idx
= 0, rm_idx
= 0, fd_num
= 0;
651 * Find memory regions present in our shadow state which are not in
652 * the device's current memory state.
654 * Mark regions in both the shadow and device state as "found".
656 for (i
= 0; i
< u
->num_shadow_regions
; i
++) {
657 shadow_reg
= &u
->shadow_regions
[i
];
660 for (j
= 0; j
< dev
->mem
->nregions
; j
++) {
661 reg
= &dev
->mem
->regions
[j
];
663 mr
= vhost_user_get_mr_data(reg
->userspace_addr
, &offset
, &fd
);
665 if (reg_equal(shadow_reg
, reg
)) {
668 if (track_ramblocks
) {
670 * Reset postcopy client bases, region_rb, and
671 * region_rb_offset in case regions are removed.
674 u
->region_rb_offset
[j
] = offset
;
675 u
->region_rb
[j
] = mr
->ram_block
;
676 shadow_pcb
[j
] = u
->postcopy_client_bases
[i
];
678 u
->region_rb_offset
[j
] = 0;
679 u
->region_rb
[j
] = NULL
;
687 * If the region was not found in the current device memory state
688 * create an entry for it in the removed list.
691 rem_reg
[rm_idx
].region
= shadow_reg
;
692 rem_reg
[rm_idx
++].reg_idx
= i
;
697 * For regions not marked "found", create entries in the added list.
699 * Note their indexes in the device memory state and the indexes of their
702 for (i
= 0; i
< dev
->mem
->nregions
; i
++) {
703 reg
= &dev
->mem
->regions
[i
];
704 vhost_user_get_mr_data(reg
->userspace_addr
, &offset
, &fd
);
710 * If the region was in both the shadow and device state we don't
711 * need to send a VHOST_USER_ADD_MEM_REG message for it.
717 add_reg
[add_idx
].region
= reg
;
718 add_reg
[add_idx
].reg_idx
= i
;
719 add_reg
[add_idx
++].fd_idx
= fd_num
;
721 *nr_rem_reg
= rm_idx
;
722 *nr_add_reg
= add_idx
;
727 static int send_remove_regions(struct vhost_dev
*dev
,
728 struct scrub_regions
*remove_reg
,
729 int nr_rem_reg
, VhostUserMsg
*msg
,
730 bool reply_supported
)
732 struct vhost_user
*u
= dev
->opaque
;
733 struct vhost_memory_region
*shadow_reg
;
734 int i
, fd
, shadow_reg_idx
, ret
;
736 VhostUserMemoryRegion region_buffer
;
739 * The regions in remove_reg appear in the same order they do in the
740 * shadow table. Therefore we can minimize memory copies by iterating
741 * through remove_reg backwards.
743 for (i
= nr_rem_reg
- 1; i
>= 0; i
--) {
744 shadow_reg
= remove_reg
[i
].region
;
745 shadow_reg_idx
= remove_reg
[i
].reg_idx
;
747 vhost_user_get_mr_data(shadow_reg
->userspace_addr
, &offset
, &fd
);
750 msg
->hdr
.request
= VHOST_USER_REM_MEM_REG
;
751 vhost_user_fill_msg_region(&region_buffer
, shadow_reg
, 0);
752 msg
->payload
.mem_reg
.region
= region_buffer
;
754 ret
= vhost_user_write(dev
, msg
, &fd
, 1);
759 if (reply_supported
) {
760 ret
= process_message_reply(dev
, msg
);
768 * At this point we know the backend has unmapped the region. It is now
769 * safe to remove it from the shadow table.
771 memmove(&u
->shadow_regions
[shadow_reg_idx
],
772 &u
->shadow_regions
[shadow_reg_idx
+ 1],
773 sizeof(struct vhost_memory_region
) *
774 (u
->num_shadow_regions
- shadow_reg_idx
- 1));
775 u
->num_shadow_regions
--;
781 static int send_add_regions(struct vhost_dev
*dev
,
782 struct scrub_regions
*add_reg
, int nr_add_reg
,
783 VhostUserMsg
*msg
, uint64_t *shadow_pcb
,
784 bool reply_supported
, bool track_ramblocks
)
786 struct vhost_user
*u
= dev
->opaque
;
787 int i
, fd
, ret
, reg_idx
, reg_fd_idx
;
788 struct vhost_memory_region
*reg
;
791 VhostUserMsg msg_reply
;
792 VhostUserMemoryRegion region_buffer
;
794 for (i
= 0; i
< nr_add_reg
; i
++) {
795 reg
= add_reg
[i
].region
;
796 reg_idx
= add_reg
[i
].reg_idx
;
797 reg_fd_idx
= add_reg
[i
].fd_idx
;
799 mr
= vhost_user_get_mr_data(reg
->userspace_addr
, &offset
, &fd
);
802 if (track_ramblocks
) {
803 trace_vhost_user_set_mem_table_withfd(reg_fd_idx
, mr
->name
,
805 reg
->guest_phys_addr
,
808 u
->region_rb_offset
[reg_idx
] = offset
;
809 u
->region_rb
[reg_idx
] = mr
->ram_block
;
811 msg
->hdr
.request
= VHOST_USER_ADD_MEM_REG
;
812 vhost_user_fill_msg_region(&region_buffer
, reg
, offset
);
813 msg
->payload
.mem_reg
.region
= region_buffer
;
815 ret
= vhost_user_write(dev
, msg
, &fd
, 1);
820 if (track_ramblocks
) {
823 ret
= vhost_user_read(dev
, &msg_reply
);
828 reply_gpa
= msg_reply
.payload
.mem_reg
.region
.guest_phys_addr
;
830 if (msg_reply
.hdr
.request
!= VHOST_USER_ADD_MEM_REG
) {
831 error_report("%s: Received unexpected msg type."
832 "Expected %d received %d", __func__
,
833 VHOST_USER_ADD_MEM_REG
,
834 msg_reply
.hdr
.request
);
839 * We're using the same structure, just reusing one of the
840 * fields, so it should be the same size.
842 if (msg_reply
.hdr
.size
!= msg
->hdr
.size
) {
843 error_report("%s: Unexpected size for postcopy reply "
844 "%d vs %d", __func__
, msg_reply
.hdr
.size
,
849 /* Get the postcopy client base from the backend's reply. */
850 if (reply_gpa
== dev
->mem
->regions
[reg_idx
].guest_phys_addr
) {
851 shadow_pcb
[reg_idx
] =
852 msg_reply
.payload
.mem_reg
.region
.userspace_addr
;
853 trace_vhost_user_set_mem_table_postcopy(
854 msg_reply
.payload
.mem_reg
.region
.userspace_addr
,
855 msg
->payload
.mem_reg
.region
.userspace_addr
,
856 reg_fd_idx
, reg_idx
);
858 error_report("%s: invalid postcopy reply for region. "
859 "Got guest physical address %" PRIX64
", expected "
860 "%" PRIX64
, __func__
, reply_gpa
,
861 dev
->mem
->regions
[reg_idx
].guest_phys_addr
);
864 } else if (reply_supported
) {
865 ret
= process_message_reply(dev
, msg
);
870 } else if (track_ramblocks
) {
871 u
->region_rb_offset
[reg_idx
] = 0;
872 u
->region_rb
[reg_idx
] = NULL
;
876 * At this point, we know the backend has mapped in the new
877 * region, if the region has a valid file descriptor.
879 * The region should now be added to the shadow table.
881 u
->shadow_regions
[u
->num_shadow_regions
].guest_phys_addr
=
882 reg
->guest_phys_addr
;
883 u
->shadow_regions
[u
->num_shadow_regions
].userspace_addr
=
885 u
->shadow_regions
[u
->num_shadow_regions
].memory_size
=
887 u
->num_shadow_regions
++;
893 static int vhost_user_add_remove_regions(struct vhost_dev
*dev
,
895 bool reply_supported
,
896 bool track_ramblocks
)
898 struct vhost_user
*u
= dev
->opaque
;
899 struct scrub_regions add_reg
[VHOST_USER_MAX_RAM_SLOTS
];
900 struct scrub_regions rem_reg
[VHOST_USER_MAX_RAM_SLOTS
];
901 uint64_t shadow_pcb
[VHOST_USER_MAX_RAM_SLOTS
] = {};
902 int nr_add_reg
, nr_rem_reg
;
905 msg
->hdr
.size
= sizeof(msg
->payload
.mem_reg
);
907 /* Find the regions which need to be removed or added. */
908 scrub_shadow_regions(dev
, add_reg
, &nr_add_reg
, rem_reg
, &nr_rem_reg
,
909 shadow_pcb
, track_ramblocks
);
912 ret
= send_remove_regions(dev
, rem_reg
, nr_rem_reg
, msg
,
920 ret
= send_add_regions(dev
, add_reg
, nr_add_reg
, msg
, shadow_pcb
,
921 reply_supported
, track_ramblocks
);
927 if (track_ramblocks
) {
928 memcpy(u
->postcopy_client_bases
, shadow_pcb
,
929 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS
);
931 * Now we've registered this with the postcopy code, we ack to the
932 * client, because now we're in the position to be able to deal with
933 * any faults it generates.
935 /* TODO: Use this for failure cases as well with a bad value. */
936 msg
->hdr
.size
= sizeof(msg
->payload
.u64
);
937 msg
->payload
.u64
= 0; /* OK */
939 ret
= vhost_user_write(dev
, msg
, NULL
, 0);
948 if (track_ramblocks
) {
949 memcpy(u
->postcopy_client_bases
, shadow_pcb
,
950 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS
);
956 static int vhost_user_set_mem_table_postcopy(struct vhost_dev
*dev
,
957 struct vhost_memory
*mem
,
958 bool reply_supported
,
959 bool config_mem_slots
)
961 struct vhost_user
*u
= dev
->opaque
;
962 int fds
[VHOST_MEMORY_BASELINE_NREGIONS
];
964 VhostUserMsg msg_reply
;
969 .hdr
.flags
= VHOST_USER_VERSION
,
972 if (u
->region_rb_len
< dev
->mem
->nregions
) {
973 u
->region_rb
= g_renew(RAMBlock
*, u
->region_rb
, dev
->mem
->nregions
);
974 u
->region_rb_offset
= g_renew(ram_addr_t
, u
->region_rb_offset
,
976 memset(&(u
->region_rb
[u
->region_rb_len
]), '\0',
977 sizeof(RAMBlock
*) * (dev
->mem
->nregions
- u
->region_rb_len
));
978 memset(&(u
->region_rb_offset
[u
->region_rb_len
]), '\0',
979 sizeof(ram_addr_t
) * (dev
->mem
->nregions
- u
->region_rb_len
));
980 u
->region_rb_len
= dev
->mem
->nregions
;
983 if (config_mem_slots
) {
984 ret
= vhost_user_add_remove_regions(dev
, &msg
, reply_supported
, true);
989 ret
= vhost_user_fill_set_mem_table_msg(u
, dev
, &msg
, fds
, &fd_num
,
995 ret
= vhost_user_write(dev
, &msg
, fds
, fd_num
);
1000 ret
= vhost_user_read(dev
, &msg_reply
);
1005 if (msg_reply
.hdr
.request
!= VHOST_USER_SET_MEM_TABLE
) {
1006 error_report("%s: Received unexpected msg type."
1007 "Expected %d received %d", __func__
,
1008 VHOST_USER_SET_MEM_TABLE
, msg_reply
.hdr
.request
);
1013 * We're using the same structure, just reusing one of the
1014 * fields, so it should be the same size.
1016 if (msg_reply
.hdr
.size
!= msg
.hdr
.size
) {
1017 error_report("%s: Unexpected size for postcopy reply "
1018 "%d vs %d", __func__
, msg_reply
.hdr
.size
,
1023 memset(u
->postcopy_client_bases
, 0,
1024 sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS
);
1027 * They're in the same order as the regions that were sent
1028 * but some of the regions were skipped (above) if they
1031 for (msg_i
= 0, region_i
= 0;
1032 region_i
< dev
->mem
->nregions
;
1034 if (msg_i
< fd_num
&&
1035 msg_reply
.payload
.memory
.regions
[msg_i
].guest_phys_addr
==
1036 dev
->mem
->regions
[region_i
].guest_phys_addr
) {
1037 u
->postcopy_client_bases
[region_i
] =
1038 msg_reply
.payload
.memory
.regions
[msg_i
].userspace_addr
;
1039 trace_vhost_user_set_mem_table_postcopy(
1040 msg_reply
.payload
.memory
.regions
[msg_i
].userspace_addr
,
1041 msg
.payload
.memory
.regions
[msg_i
].userspace_addr
,
1046 if (msg_i
!= fd_num
) {
1047 error_report("%s: postcopy reply not fully consumed "
1049 __func__
, msg_i
, fd_num
);
1054 * Now we've registered this with the postcopy code, we ack to the
1055 * client, because now we're in the position to be able to deal
1056 * with any faults it generates.
1058 /* TODO: Use this for failure cases as well with a bad value. */
1059 msg
.hdr
.size
= sizeof(msg
.payload
.u64
);
1060 msg
.payload
.u64
= 0; /* OK */
1061 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1070 static int vhost_user_set_mem_table(struct vhost_dev
*dev
,
1071 struct vhost_memory
*mem
)
1073 struct vhost_user
*u
= dev
->opaque
;
1074 int fds
[VHOST_MEMORY_BASELINE_NREGIONS
];
1076 bool do_postcopy
= u
->postcopy_listen
&& u
->postcopy_fd
.handler
;
1077 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
1078 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
1079 bool config_mem_slots
=
1080 virtio_has_feature(dev
->protocol_features
,
1081 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS
);
1086 * Postcopy has enough differences that it's best done in its own
1089 return vhost_user_set_mem_table_postcopy(dev
, mem
, reply_supported
,
1093 VhostUserMsg msg
= {
1094 .hdr
.flags
= VHOST_USER_VERSION
,
1097 if (reply_supported
) {
1098 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
1101 if (config_mem_slots
) {
1102 ret
= vhost_user_add_remove_regions(dev
, &msg
, reply_supported
, false);
1107 ret
= vhost_user_fill_set_mem_table_msg(u
, dev
, &msg
, fds
, &fd_num
,
1113 ret
= vhost_user_write(dev
, &msg
, fds
, fd_num
);
1118 if (reply_supported
) {
1119 return process_message_reply(dev
, &msg
);
1126 static int vhost_user_set_vring_endian(struct vhost_dev
*dev
,
1127 struct vhost_vring_state
*ring
)
1129 bool cross_endian
= virtio_has_feature(dev
->protocol_features
,
1130 VHOST_USER_PROTOCOL_F_CROSS_ENDIAN
);
1131 VhostUserMsg msg
= {
1132 .hdr
.request
= VHOST_USER_SET_VRING_ENDIAN
,
1133 .hdr
.flags
= VHOST_USER_VERSION
,
1134 .payload
.state
= *ring
,
1135 .hdr
.size
= sizeof(msg
.payload
.state
),
1138 if (!cross_endian
) {
1139 error_report("vhost-user trying to send unhandled ioctl");
1143 return vhost_user_write(dev
, &msg
, NULL
, 0);
1146 static int vhost_set_vring(struct vhost_dev
*dev
,
1147 unsigned long int request
,
1148 struct vhost_vring_state
*ring
)
1150 VhostUserMsg msg
= {
1151 .hdr
.request
= request
,
1152 .hdr
.flags
= VHOST_USER_VERSION
,
1153 .payload
.state
= *ring
,
1154 .hdr
.size
= sizeof(msg
.payload
.state
),
1157 return vhost_user_write(dev
, &msg
, NULL
, 0);
1160 static int vhost_user_set_vring_num(struct vhost_dev
*dev
,
1161 struct vhost_vring_state
*ring
)
1163 return vhost_set_vring(dev
, VHOST_USER_SET_VRING_NUM
, ring
);
1166 static void vhost_user_host_notifier_free(VhostUserHostNotifier
*n
)
1168 assert(n
&& n
->unmap_addr
);
1169 munmap(n
->unmap_addr
, qemu_real_host_page_size());
1170 n
->unmap_addr
= NULL
;
1173 static void vhost_user_host_notifier_remove(VhostUserState
*user
,
1174 VirtIODevice
*vdev
, int queue_idx
)
1176 VhostUserHostNotifier
*n
= &user
->notifier
[queue_idx
];
1180 virtio_queue_set_host_notifier_mr(vdev
, queue_idx
, &n
->mr
, false);
1182 assert(!n
->unmap_addr
);
1183 n
->unmap_addr
= n
->addr
;
1185 call_rcu(n
, vhost_user_host_notifier_free
, rcu
);
1189 static int vhost_user_set_vring_base(struct vhost_dev
*dev
,
1190 struct vhost_vring_state
*ring
)
1192 return vhost_set_vring(dev
, VHOST_USER_SET_VRING_BASE
, ring
);
1195 static int vhost_user_set_vring_enable(struct vhost_dev
*dev
, int enable
)
1199 if (!virtio_has_feature(dev
->features
, VHOST_USER_F_PROTOCOL_FEATURES
)) {
1203 for (i
= 0; i
< dev
->nvqs
; ++i
) {
1205 struct vhost_vring_state state
= {
1206 .index
= dev
->vq_index
+ i
,
1210 ret
= vhost_set_vring(dev
, VHOST_USER_SET_VRING_ENABLE
, &state
);
1213 * Restoring the previous state is likely infeasible, as well as
1214 * proceeding regardless the error, so just bail out and hope for
1215 * the device-level recovery.
1224 static int vhost_user_get_vring_base(struct vhost_dev
*dev
,
1225 struct vhost_vring_state
*ring
)
1228 VhostUserMsg msg
= {
1229 .hdr
.request
= VHOST_USER_GET_VRING_BASE
,
1230 .hdr
.flags
= VHOST_USER_VERSION
,
1231 .payload
.state
= *ring
,
1232 .hdr
.size
= sizeof(msg
.payload
.state
),
1234 struct vhost_user
*u
= dev
->opaque
;
1236 vhost_user_host_notifier_remove(u
->user
, dev
->vdev
, ring
->index
);
1238 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1243 ret
= vhost_user_read(dev
, &msg
);
1248 if (msg
.hdr
.request
!= VHOST_USER_GET_VRING_BASE
) {
1249 error_report("Received unexpected msg type. Expected %d received %d",
1250 VHOST_USER_GET_VRING_BASE
, msg
.hdr
.request
);
1254 if (msg
.hdr
.size
!= sizeof(msg
.payload
.state
)) {
1255 error_report("Received bad msg size.");
1259 *ring
= msg
.payload
.state
;
1264 static int vhost_set_vring_file(struct vhost_dev
*dev
,
1265 VhostUserRequest request
,
1266 struct vhost_vring_file
*file
)
1268 int fds
[VHOST_USER_MAX_RAM_SLOTS
];
1270 VhostUserMsg msg
= {
1271 .hdr
.request
= request
,
1272 .hdr
.flags
= VHOST_USER_VERSION
,
1273 .payload
.u64
= file
->index
& VHOST_USER_VRING_IDX_MASK
,
1274 .hdr
.size
= sizeof(msg
.payload
.u64
),
1277 if (ioeventfd_enabled() && file
->fd
> 0) {
1278 fds
[fd_num
++] = file
->fd
;
1280 msg
.payload
.u64
|= VHOST_USER_VRING_NOFD_MASK
;
1283 return vhost_user_write(dev
, &msg
, fds
, fd_num
);
1286 static int vhost_user_set_vring_kick(struct vhost_dev
*dev
,
1287 struct vhost_vring_file
*file
)
1289 return vhost_set_vring_file(dev
, VHOST_USER_SET_VRING_KICK
, file
);
1292 static int vhost_user_set_vring_call(struct vhost_dev
*dev
,
1293 struct vhost_vring_file
*file
)
1295 return vhost_set_vring_file(dev
, VHOST_USER_SET_VRING_CALL
, file
);
1299 static int vhost_user_get_u64(struct vhost_dev
*dev
, int request
, uint64_t *u64
)
1302 VhostUserMsg msg
= {
1303 .hdr
.request
= request
,
1304 .hdr
.flags
= VHOST_USER_VERSION
,
1307 if (vhost_user_one_time_request(request
) && dev
->vq_index
!= 0) {
1311 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1316 ret
= vhost_user_read(dev
, &msg
);
1321 if (msg
.hdr
.request
!= request
) {
1322 error_report("Received unexpected msg type. Expected %d received %d",
1323 request
, msg
.hdr
.request
);
1327 if (msg
.hdr
.size
!= sizeof(msg
.payload
.u64
)) {
1328 error_report("Received bad msg size.");
1332 *u64
= msg
.payload
.u64
;
1337 static int vhost_user_get_features(struct vhost_dev
*dev
, uint64_t *features
)
1339 if (vhost_user_get_u64(dev
, VHOST_USER_GET_FEATURES
, features
) < 0) {
1346 static int enforce_reply(struct vhost_dev
*dev
,
1347 const VhostUserMsg
*msg
)
1351 if (msg
->hdr
.flags
& VHOST_USER_NEED_REPLY_MASK
) {
1352 return process_message_reply(dev
, msg
);
1356 * We need to wait for a reply but the backend does not
1357 * support replies for the command we just sent.
1358 * Send VHOST_USER_GET_FEATURES which makes all backends
1361 return vhost_user_get_features(dev
, &dummy
);
1364 static int vhost_user_set_vring_addr(struct vhost_dev
*dev
,
1365 struct vhost_vring_addr
*addr
)
1368 VhostUserMsg msg
= {
1369 .hdr
.request
= VHOST_USER_SET_VRING_ADDR
,
1370 .hdr
.flags
= VHOST_USER_VERSION
,
1371 .payload
.addr
= *addr
,
1372 .hdr
.size
= sizeof(msg
.payload
.addr
),
1375 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
1376 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
1379 * wait for a reply if logging is enabled to make sure
1380 * backend is actually logging changes
1382 bool wait_for_reply
= addr
->flags
& (1 << VHOST_VRING_F_LOG
);
1384 if (reply_supported
&& wait_for_reply
) {
1385 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
1388 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1393 if (wait_for_reply
) {
1394 return enforce_reply(dev
, &msg
);
1400 static int vhost_user_set_u64(struct vhost_dev
*dev
, int request
, uint64_t u64
,
1401 bool wait_for_reply
)
1403 VhostUserMsg msg
= {
1404 .hdr
.request
= request
,
1405 .hdr
.flags
= VHOST_USER_VERSION
,
1407 .hdr
.size
= sizeof(msg
.payload
.u64
),
1411 if (wait_for_reply
) {
1412 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
1413 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
1414 if (reply_supported
) {
1415 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
1419 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1424 if (wait_for_reply
) {
1425 return enforce_reply(dev
, &msg
);
1431 static int vhost_user_set_features(struct vhost_dev
*dev
,
1435 * wait for a reply if logging is enabled to make sure
1436 * backend is actually logging changes
1438 bool log_enabled
= features
& (0x1ULL
<< VHOST_F_LOG_ALL
);
1440 return vhost_user_set_u64(dev
, VHOST_USER_SET_FEATURES
, features
,
1444 static int vhost_user_set_protocol_features(struct vhost_dev
*dev
,
1447 return vhost_user_set_u64(dev
, VHOST_USER_SET_PROTOCOL_FEATURES
, features
,
1451 static int vhost_user_set_owner(struct vhost_dev
*dev
)
1453 VhostUserMsg msg
= {
1454 .hdr
.request
= VHOST_USER_SET_OWNER
,
1455 .hdr
.flags
= VHOST_USER_VERSION
,
1458 return vhost_user_write(dev
, &msg
, NULL
, 0);
1461 static int vhost_user_get_max_memslots(struct vhost_dev
*dev
,
1462 uint64_t *max_memslots
)
1464 uint64_t backend_max_memslots
;
1467 err
= vhost_user_get_u64(dev
, VHOST_USER_GET_MAX_MEM_SLOTS
,
1468 &backend_max_memslots
);
1473 *max_memslots
= backend_max_memslots
;
1478 static int vhost_user_reset_device(struct vhost_dev
*dev
)
1480 VhostUserMsg msg
= {
1481 .hdr
.flags
= VHOST_USER_VERSION
,
1484 msg
.hdr
.request
= virtio_has_feature(dev
->protocol_features
,
1485 VHOST_USER_PROTOCOL_F_RESET_DEVICE
)
1486 ? VHOST_USER_RESET_DEVICE
1487 : VHOST_USER_RESET_OWNER
;
1489 return vhost_user_write(dev
, &msg
, NULL
, 0);
1492 static int vhost_user_slave_handle_config_change(struct vhost_dev
*dev
)
1494 if (!dev
->config_ops
|| !dev
->config_ops
->vhost_dev_config_notifier
) {
1498 return dev
->config_ops
->vhost_dev_config_notifier(dev
);
1501 static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev
*dev
,
1502 VhostUserVringArea
*area
,
1505 int queue_idx
= area
->u64
& VHOST_USER_VRING_IDX_MASK
;
1506 size_t page_size
= qemu_real_host_page_size();
1507 struct vhost_user
*u
= dev
->opaque
;
1508 VhostUserState
*user
= u
->user
;
1509 VirtIODevice
*vdev
= dev
->vdev
;
1510 VhostUserHostNotifier
*n
;
1514 if (!virtio_has_feature(dev
->protocol_features
,
1515 VHOST_USER_PROTOCOL_F_HOST_NOTIFIER
) ||
1516 vdev
== NULL
|| queue_idx
>= virtio_get_num_queues(vdev
)) {
1520 n
= &user
->notifier
[queue_idx
];
1522 vhost_user_host_notifier_remove(user
, vdev
, queue_idx
);
1524 if (area
->u64
& VHOST_USER_VRING_NOFD_MASK
) {
1529 if (area
->size
!= page_size
) {
1533 addr
= mmap(NULL
, page_size
, PROT_READ
| PROT_WRITE
, MAP_SHARED
,
1535 if (addr
== MAP_FAILED
) {
1539 name
= g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]",
1541 if (!n
->mr
.ram
) { /* Don't init again after suspend. */
1542 memory_region_init_ram_device_ptr(&n
->mr
, OBJECT(vdev
), name
,
1545 n
->mr
.ram_block
->host
= addr
;
1549 if (virtio_queue_set_host_notifier_mr(vdev
, queue_idx
, &n
->mr
, true)) {
1550 object_unparent(OBJECT(&n
->mr
));
1551 munmap(addr
, page_size
);
1560 static void close_slave_channel(struct vhost_user
*u
)
1562 g_source_destroy(u
->slave_src
);
1563 g_source_unref(u
->slave_src
);
1564 u
->slave_src
= NULL
;
1565 object_unref(OBJECT(u
->slave_ioc
));
1566 u
->slave_ioc
= NULL
;
1569 static gboolean
slave_read(QIOChannel
*ioc
, GIOCondition condition
,
1572 struct vhost_dev
*dev
= opaque
;
1573 struct vhost_user
*u
= dev
->opaque
;
1574 VhostUserHeader hdr
= { 0, };
1575 VhostUserPayload payload
= { 0, };
1576 Error
*local_err
= NULL
;
1577 gboolean rc
= G_SOURCE_CONTINUE
;
1580 g_autofree
int *fd
= NULL
;
1585 iov
.iov_base
= &hdr
;
1586 iov
.iov_len
= VHOST_USER_HDR_SIZE
;
1588 if (qio_channel_readv_full_all(ioc
, &iov
, 1, &fd
, &fdsize
, &local_err
)) {
1589 error_report_err(local_err
);
1593 if (hdr
.size
> VHOST_USER_PAYLOAD_SIZE
) {
1594 error_report("Failed to read msg header."
1595 " Size %d exceeds the maximum %zu.", hdr
.size
,
1596 VHOST_USER_PAYLOAD_SIZE
);
1601 if (qio_channel_read_all(ioc
, (char *) &payload
, hdr
.size
, &local_err
)) {
1602 error_report_err(local_err
);
1606 switch (hdr
.request
) {
1607 case VHOST_USER_SLAVE_IOTLB_MSG
:
1608 ret
= vhost_backend_handle_iotlb_msg(dev
, &payload
.iotlb
);
1610 case VHOST_USER_SLAVE_CONFIG_CHANGE_MSG
:
1611 ret
= vhost_user_slave_handle_config_change(dev
);
1613 case VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG
:
1614 ret
= vhost_user_slave_handle_vring_host_notifier(dev
, &payload
.area
,
1618 error_report("Received unexpected msg type: %d.", hdr
.request
);
1623 * REPLY_ACK feature handling. Other reply types have to be managed
1624 * directly in their request handlers.
1626 if (hdr
.flags
& VHOST_USER_NEED_REPLY_MASK
) {
1627 struct iovec iovec
[2];
1630 hdr
.flags
&= ~VHOST_USER_NEED_REPLY_MASK
;
1631 hdr
.flags
|= VHOST_USER_REPLY_MASK
;
1633 payload
.u64
= !!ret
;
1634 hdr
.size
= sizeof(payload
.u64
);
1636 iovec
[0].iov_base
= &hdr
;
1637 iovec
[0].iov_len
= VHOST_USER_HDR_SIZE
;
1638 iovec
[1].iov_base
= &payload
;
1639 iovec
[1].iov_len
= hdr
.size
;
1641 if (qio_channel_writev_all(ioc
, iovec
, ARRAY_SIZE(iovec
), &local_err
)) {
1642 error_report_err(local_err
);
1650 close_slave_channel(u
);
1651 rc
= G_SOURCE_REMOVE
;
1655 for (i
= 0; i
< fdsize
; i
++) {
1662 static int vhost_setup_slave_channel(struct vhost_dev
*dev
)
1664 VhostUserMsg msg
= {
1665 .hdr
.request
= VHOST_USER_SET_SLAVE_REQ_FD
,
1666 .hdr
.flags
= VHOST_USER_VERSION
,
1668 struct vhost_user
*u
= dev
->opaque
;
1670 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
1671 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
1672 Error
*local_err
= NULL
;
1675 if (!virtio_has_feature(dev
->protocol_features
,
1676 VHOST_USER_PROTOCOL_F_SLAVE_REQ
)) {
1680 if (socketpair(PF_UNIX
, SOCK_STREAM
, 0, sv
) == -1) {
1681 int saved_errno
= errno
;
1682 error_report("socketpair() failed");
1683 return -saved_errno
;
1686 ioc
= QIO_CHANNEL(qio_channel_socket_new_fd(sv
[0], &local_err
));
1688 error_report_err(local_err
);
1689 return -ECONNREFUSED
;
1692 slave_update_read_handler(dev
, NULL
);
1694 if (reply_supported
) {
1695 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
1698 ret
= vhost_user_write(dev
, &msg
, &sv
[1], 1);
1703 if (reply_supported
) {
1704 ret
= process_message_reply(dev
, &msg
);
1710 close_slave_channel(u
);
1718 * Called back from the postcopy fault thread when a fault is received on our
1720 * TODO: This is Linux specific
1722 static int vhost_user_postcopy_fault_handler(struct PostCopyFD
*pcfd
,
1725 struct vhost_dev
*dev
= pcfd
->data
;
1726 struct vhost_user
*u
= dev
->opaque
;
1727 struct uffd_msg
*msg
= ufd
;
1728 uint64_t faultaddr
= msg
->arg
.pagefault
.address
;
1729 RAMBlock
*rb
= NULL
;
1733 trace_vhost_user_postcopy_fault_handler(pcfd
->idstr
, faultaddr
,
1734 dev
->mem
->nregions
);
1735 for (i
= 0; i
< MIN(dev
->mem
->nregions
, u
->region_rb_len
); i
++) {
1736 trace_vhost_user_postcopy_fault_handler_loop(i
,
1737 u
->postcopy_client_bases
[i
], dev
->mem
->regions
[i
].memory_size
);
1738 if (faultaddr
>= u
->postcopy_client_bases
[i
]) {
1739 /* Offset of the fault address in the vhost region */
1740 uint64_t region_offset
= faultaddr
- u
->postcopy_client_bases
[i
];
1741 if (region_offset
< dev
->mem
->regions
[i
].memory_size
) {
1742 rb_offset
= region_offset
+ u
->region_rb_offset
[i
];
1743 trace_vhost_user_postcopy_fault_handler_found(i
,
1744 region_offset
, rb_offset
);
1745 rb
= u
->region_rb
[i
];
1746 return postcopy_request_shared_page(pcfd
, rb
, faultaddr
,
1751 error_report("%s: Failed to find region for fault %" PRIx64
,
1752 __func__
, faultaddr
);
1756 static int vhost_user_postcopy_waker(struct PostCopyFD
*pcfd
, RAMBlock
*rb
,
1759 struct vhost_dev
*dev
= pcfd
->data
;
1760 struct vhost_user
*u
= dev
->opaque
;
1763 trace_vhost_user_postcopy_waker(qemu_ram_get_idstr(rb
), offset
);
1768 /* Translate the offset into an address in the clients address space */
1769 for (i
= 0; i
< MIN(dev
->mem
->nregions
, u
->region_rb_len
); i
++) {
1770 if (u
->region_rb
[i
] == rb
&&
1771 offset
>= u
->region_rb_offset
[i
] &&
1772 offset
< (u
->region_rb_offset
[i
] +
1773 dev
->mem
->regions
[i
].memory_size
)) {
1774 uint64_t client_addr
= (offset
- u
->region_rb_offset
[i
]) +
1775 u
->postcopy_client_bases
[i
];
1776 trace_vhost_user_postcopy_waker_found(client_addr
);
1777 return postcopy_wake_shared(pcfd
, client_addr
, rb
);
1781 trace_vhost_user_postcopy_waker_nomatch(qemu_ram_get_idstr(rb
), offset
);
1787 * Called at the start of an inbound postcopy on reception of the
1790 static int vhost_user_postcopy_advise(struct vhost_dev
*dev
, Error
**errp
)
1793 struct vhost_user
*u
= dev
->opaque
;
1794 CharBackend
*chr
= u
->user
->chr
;
1797 VhostUserMsg msg
= {
1798 .hdr
.request
= VHOST_USER_POSTCOPY_ADVISE
,
1799 .hdr
.flags
= VHOST_USER_VERSION
,
1802 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1804 error_setg(errp
, "Failed to send postcopy_advise to vhost");
1808 ret
= vhost_user_read(dev
, &msg
);
1810 error_setg(errp
, "Failed to get postcopy_advise reply from vhost");
1814 if (msg
.hdr
.request
!= VHOST_USER_POSTCOPY_ADVISE
) {
1815 error_setg(errp
, "Unexpected msg type. Expected %d received %d",
1816 VHOST_USER_POSTCOPY_ADVISE
, msg
.hdr
.request
);
1821 error_setg(errp
, "Received bad msg size.");
1824 ufd
= qemu_chr_fe_get_msgfd(chr
);
1826 error_setg(errp
, "%s: Failed to get ufd", __func__
);
1829 qemu_set_nonblock(ufd
);
1831 /* register ufd with userfault thread */
1832 u
->postcopy_fd
.fd
= ufd
;
1833 u
->postcopy_fd
.data
= dev
;
1834 u
->postcopy_fd
.handler
= vhost_user_postcopy_fault_handler
;
1835 u
->postcopy_fd
.waker
= vhost_user_postcopy_waker
;
1836 u
->postcopy_fd
.idstr
= "vhost-user"; /* Need to find unique name */
1837 postcopy_register_shared_ufd(&u
->postcopy_fd
);
1840 error_setg(errp
, "Postcopy not supported on non-Linux systems");
1846 * Called at the switch to postcopy on reception of the 'listen' command.
1848 static int vhost_user_postcopy_listen(struct vhost_dev
*dev
, Error
**errp
)
1850 struct vhost_user
*u
= dev
->opaque
;
1852 VhostUserMsg msg
= {
1853 .hdr
.request
= VHOST_USER_POSTCOPY_LISTEN
,
1854 .hdr
.flags
= VHOST_USER_VERSION
| VHOST_USER_NEED_REPLY_MASK
,
1856 u
->postcopy_listen
= true;
1858 trace_vhost_user_postcopy_listen();
1860 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1862 error_setg(errp
, "Failed to send postcopy_listen to vhost");
1866 ret
= process_message_reply(dev
, &msg
);
1868 error_setg(errp
, "Failed to receive reply to postcopy_listen");
1876 * Called at the end of postcopy
1878 static int vhost_user_postcopy_end(struct vhost_dev
*dev
, Error
**errp
)
1880 VhostUserMsg msg
= {
1881 .hdr
.request
= VHOST_USER_POSTCOPY_END
,
1882 .hdr
.flags
= VHOST_USER_VERSION
| VHOST_USER_NEED_REPLY_MASK
,
1885 struct vhost_user
*u
= dev
->opaque
;
1887 trace_vhost_user_postcopy_end_entry();
1889 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
1891 error_setg(errp
, "Failed to send postcopy_end to vhost");
1895 ret
= process_message_reply(dev
, &msg
);
1897 error_setg(errp
, "Failed to receive reply to postcopy_end");
1900 postcopy_unregister_shared_ufd(&u
->postcopy_fd
);
1901 close(u
->postcopy_fd
.fd
);
1902 u
->postcopy_fd
.handler
= NULL
;
1904 trace_vhost_user_postcopy_end_exit();
1909 static int vhost_user_postcopy_notifier(NotifierWithReturn
*notifier
,
1912 struct PostcopyNotifyData
*pnd
= opaque
;
1913 struct vhost_user
*u
= container_of(notifier
, struct vhost_user
,
1915 struct vhost_dev
*dev
= u
->dev
;
1917 switch (pnd
->reason
) {
1918 case POSTCOPY_NOTIFY_PROBE
:
1919 if (!virtio_has_feature(dev
->protocol_features
,
1920 VHOST_USER_PROTOCOL_F_PAGEFAULT
)) {
1921 /* TODO: Get the device name into this error somehow */
1922 error_setg(pnd
->errp
,
1923 "vhost-user backend not capable of postcopy");
1928 case POSTCOPY_NOTIFY_INBOUND_ADVISE
:
1929 return vhost_user_postcopy_advise(dev
, pnd
->errp
);
1931 case POSTCOPY_NOTIFY_INBOUND_LISTEN
:
1932 return vhost_user_postcopy_listen(dev
, pnd
->errp
);
1934 case POSTCOPY_NOTIFY_INBOUND_END
:
1935 return vhost_user_postcopy_end(dev
, pnd
->errp
);
1938 /* We ignore notifications we don't know */
1945 static int vhost_user_backend_init(struct vhost_dev
*dev
, void *opaque
,
1948 uint64_t features
, protocol_features
, ram_slots
;
1949 struct vhost_user
*u
;
1952 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_USER
);
1954 u
= g_new0(struct vhost_user
, 1);
1959 err
= vhost_user_get_features(dev
, &features
);
1961 error_setg_errno(errp
, -err
, "vhost_backend_init failed");
1965 if (virtio_has_feature(features
, VHOST_USER_F_PROTOCOL_FEATURES
)) {
1966 dev
->backend_features
|= 1ULL << VHOST_USER_F_PROTOCOL_FEATURES
;
1968 err
= vhost_user_get_u64(dev
, VHOST_USER_GET_PROTOCOL_FEATURES
,
1969 &protocol_features
);
1971 error_setg_errno(errp
, EPROTO
, "vhost_backend_init failed");
1975 dev
->protocol_features
=
1976 protocol_features
& VHOST_USER_PROTOCOL_FEATURE_MASK
;
1978 if (!dev
->config_ops
|| !dev
->config_ops
->vhost_dev_config_notifier
) {
1979 /* Don't acknowledge CONFIG feature if device doesn't support it */
1980 dev
->protocol_features
&= ~(1ULL << VHOST_USER_PROTOCOL_F_CONFIG
);
1981 } else if (!(protocol_features
&
1982 (1ULL << VHOST_USER_PROTOCOL_F_CONFIG
))) {
1983 error_setg(errp
, "Device expects VHOST_USER_PROTOCOL_F_CONFIG "
1984 "but backend does not support it.");
1988 err
= vhost_user_set_protocol_features(dev
, dev
->protocol_features
);
1990 error_setg_errno(errp
, EPROTO
, "vhost_backend_init failed");
1994 /* query the max queues we support if backend supports Multiple Queue */
1995 if (dev
->protocol_features
& (1ULL << VHOST_USER_PROTOCOL_F_MQ
)) {
1996 err
= vhost_user_get_u64(dev
, VHOST_USER_GET_QUEUE_NUM
,
1999 error_setg_errno(errp
, EPROTO
, "vhost_backend_init failed");
2003 dev
->max_queues
= 1;
2006 if (dev
->num_queues
&& dev
->max_queues
< dev
->num_queues
) {
2007 error_setg(errp
, "The maximum number of queues supported by the "
2008 "backend is %" PRIu64
, dev
->max_queues
);
2012 if (virtio_has_feature(features
, VIRTIO_F_IOMMU_PLATFORM
) &&
2013 !(virtio_has_feature(dev
->protocol_features
,
2014 VHOST_USER_PROTOCOL_F_SLAVE_REQ
) &&
2015 virtio_has_feature(dev
->protocol_features
,
2016 VHOST_USER_PROTOCOL_F_REPLY_ACK
))) {
2017 error_setg(errp
, "IOMMU support requires reply-ack and "
2018 "slave-req protocol features.");
2022 /* get max memory regions if backend supports configurable RAM slots */
2023 if (!virtio_has_feature(dev
->protocol_features
,
2024 VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS
)) {
2025 u
->user
->memory_slots
= VHOST_MEMORY_BASELINE_NREGIONS
;
2027 err
= vhost_user_get_max_memslots(dev
, &ram_slots
);
2029 error_setg_errno(errp
, EPROTO
, "vhost_backend_init failed");
2033 if (ram_slots
< u
->user
->memory_slots
) {
2034 error_setg(errp
, "The backend specified a max ram slots limit "
2035 "of %" PRIu64
", when the prior validated limit was "
2036 "%d. This limit should never decrease.", ram_slots
,
2037 u
->user
->memory_slots
);
2041 u
->user
->memory_slots
= MIN(ram_slots
, VHOST_USER_MAX_RAM_SLOTS
);
2045 if (dev
->migration_blocker
== NULL
&&
2046 !virtio_has_feature(dev
->protocol_features
,
2047 VHOST_USER_PROTOCOL_F_LOG_SHMFD
)) {
2048 error_setg(&dev
->migration_blocker
,
2049 "Migration disabled: vhost-user backend lacks "
2050 "VHOST_USER_PROTOCOL_F_LOG_SHMFD feature.");
2053 if (dev
->vq_index
== 0) {
2054 err
= vhost_setup_slave_channel(dev
);
2056 error_setg_errno(errp
, EPROTO
, "vhost_backend_init failed");
2061 u
->postcopy_notifier
.notify
= vhost_user_postcopy_notifier
;
2062 postcopy_add_notifier(&u
->postcopy_notifier
);
2067 static int vhost_user_backend_cleanup(struct vhost_dev
*dev
)
2069 struct vhost_user
*u
;
2071 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_USER
);
2074 if (u
->postcopy_notifier
.notify
) {
2075 postcopy_remove_notifier(&u
->postcopy_notifier
);
2076 u
->postcopy_notifier
.notify
= NULL
;
2078 u
->postcopy_listen
= false;
2079 if (u
->postcopy_fd
.handler
) {
2080 postcopy_unregister_shared_ufd(&u
->postcopy_fd
);
2081 close(u
->postcopy_fd
.fd
);
2082 u
->postcopy_fd
.handler
= NULL
;
2085 close_slave_channel(u
);
2087 g_free(u
->region_rb
);
2088 u
->region_rb
= NULL
;
2089 g_free(u
->region_rb_offset
);
2090 u
->region_rb_offset
= NULL
;
2091 u
->region_rb_len
= 0;
2098 static int vhost_user_get_vq_index(struct vhost_dev
*dev
, int idx
)
2100 assert(idx
>= dev
->vq_index
&& idx
< dev
->vq_index
+ dev
->nvqs
);
2105 static int vhost_user_memslots_limit(struct vhost_dev
*dev
)
2107 struct vhost_user
*u
= dev
->opaque
;
2109 return u
->user
->memory_slots
;
2112 static bool vhost_user_requires_shm_log(struct vhost_dev
*dev
)
2114 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_USER
);
2116 return virtio_has_feature(dev
->protocol_features
,
2117 VHOST_USER_PROTOCOL_F_LOG_SHMFD
);
2120 static int vhost_user_migration_done(struct vhost_dev
*dev
, char* mac_addr
)
2122 VhostUserMsg msg
= { };
2124 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_USER
);
2126 /* If guest supports GUEST_ANNOUNCE do nothing */
2127 if (virtio_has_feature(dev
->acked_features
, VIRTIO_NET_F_GUEST_ANNOUNCE
)) {
2131 /* if backend supports VHOST_USER_PROTOCOL_F_RARP ask it to send the RARP */
2132 if (virtio_has_feature(dev
->protocol_features
,
2133 VHOST_USER_PROTOCOL_F_RARP
)) {
2134 msg
.hdr
.request
= VHOST_USER_SEND_RARP
;
2135 msg
.hdr
.flags
= VHOST_USER_VERSION
;
2136 memcpy((char *)&msg
.payload
.u64
, mac_addr
, 6);
2137 msg
.hdr
.size
= sizeof(msg
.payload
.u64
);
2139 return vhost_user_write(dev
, &msg
, NULL
, 0);
2144 static bool vhost_user_can_merge(struct vhost_dev
*dev
,
2145 uint64_t start1
, uint64_t size1
,
2146 uint64_t start2
, uint64_t size2
)
2151 (void)vhost_user_get_mr_data(start1
, &offset
, &mfd
);
2152 (void)vhost_user_get_mr_data(start2
, &offset
, &rfd
);
2157 static int vhost_user_net_set_mtu(struct vhost_dev
*dev
, uint16_t mtu
)
2160 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
2161 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
2164 if (!(dev
->protocol_features
& (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU
))) {
2168 msg
.hdr
.request
= VHOST_USER_NET_SET_MTU
;
2169 msg
.payload
.u64
= mtu
;
2170 msg
.hdr
.size
= sizeof(msg
.payload
.u64
);
2171 msg
.hdr
.flags
= VHOST_USER_VERSION
;
2172 if (reply_supported
) {
2173 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
2176 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2181 /* If reply_ack supported, slave has to ack specified MTU is valid */
2182 if (reply_supported
) {
2183 return process_message_reply(dev
, &msg
);
2189 static int vhost_user_send_device_iotlb_msg(struct vhost_dev
*dev
,
2190 struct vhost_iotlb_msg
*imsg
)
2193 VhostUserMsg msg
= {
2194 .hdr
.request
= VHOST_USER_IOTLB_MSG
,
2195 .hdr
.size
= sizeof(msg
.payload
.iotlb
),
2196 .hdr
.flags
= VHOST_USER_VERSION
| VHOST_USER_NEED_REPLY_MASK
,
2197 .payload
.iotlb
= *imsg
,
2200 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2205 return process_message_reply(dev
, &msg
);
2209 static void vhost_user_set_iotlb_callback(struct vhost_dev
*dev
, int enabled
)
2211 /* No-op as the receive channel is not dedicated to IOTLB messages. */
2214 static int vhost_user_get_config(struct vhost_dev
*dev
, uint8_t *config
,
2215 uint32_t config_len
, Error
**errp
)
2218 VhostUserMsg msg
= {
2219 .hdr
.request
= VHOST_USER_GET_CONFIG
,
2220 .hdr
.flags
= VHOST_USER_VERSION
,
2221 .hdr
.size
= VHOST_USER_CONFIG_HDR_SIZE
+ config_len
,
2224 if (!virtio_has_feature(dev
->protocol_features
,
2225 VHOST_USER_PROTOCOL_F_CONFIG
)) {
2226 error_setg(errp
, "VHOST_USER_PROTOCOL_F_CONFIG not supported");
2230 assert(config_len
<= VHOST_USER_MAX_CONFIG_SIZE
);
2232 msg
.payload
.config
.offset
= 0;
2233 msg
.payload
.config
.size
= config_len
;
2234 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2236 error_setg_errno(errp
, -ret
, "vhost_get_config failed");
2240 ret
= vhost_user_read(dev
, &msg
);
2242 error_setg_errno(errp
, -ret
, "vhost_get_config failed");
2246 if (msg
.hdr
.request
!= VHOST_USER_GET_CONFIG
) {
2248 "Received unexpected msg type. Expected %d received %d",
2249 VHOST_USER_GET_CONFIG
, msg
.hdr
.request
);
2253 if (msg
.hdr
.size
!= VHOST_USER_CONFIG_HDR_SIZE
+ config_len
) {
2254 error_setg(errp
, "Received bad msg size.");
2258 memcpy(config
, msg
.payload
.config
.region
, config_len
);
2263 static int vhost_user_set_config(struct vhost_dev
*dev
, const uint8_t *data
,
2264 uint32_t offset
, uint32_t size
, uint32_t flags
)
2268 bool reply_supported
= virtio_has_feature(dev
->protocol_features
,
2269 VHOST_USER_PROTOCOL_F_REPLY_ACK
);
2271 VhostUserMsg msg
= {
2272 .hdr
.request
= VHOST_USER_SET_CONFIG
,
2273 .hdr
.flags
= VHOST_USER_VERSION
,
2274 .hdr
.size
= VHOST_USER_CONFIG_HDR_SIZE
+ size
,
2277 if (!virtio_has_feature(dev
->protocol_features
,
2278 VHOST_USER_PROTOCOL_F_CONFIG
)) {
2282 if (reply_supported
) {
2283 msg
.hdr
.flags
|= VHOST_USER_NEED_REPLY_MASK
;
2286 if (size
> VHOST_USER_MAX_CONFIG_SIZE
) {
2290 msg
.payload
.config
.offset
= offset
,
2291 msg
.payload
.config
.size
= size
,
2292 msg
.payload
.config
.flags
= flags
,
2293 p
= msg
.payload
.config
.region
;
2294 memcpy(p
, data
, size
);
2296 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2301 if (reply_supported
) {
2302 return process_message_reply(dev
, &msg
);
2308 static int vhost_user_crypto_create_session(struct vhost_dev
*dev
,
2310 uint64_t *session_id
)
2313 bool crypto_session
= virtio_has_feature(dev
->protocol_features
,
2314 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
);
2315 CryptoDevBackendSymSessionInfo
*sess_info
= session_info
;
2316 VhostUserMsg msg
= {
2317 .hdr
.request
= VHOST_USER_CREATE_CRYPTO_SESSION
,
2318 .hdr
.flags
= VHOST_USER_VERSION
,
2319 .hdr
.size
= sizeof(msg
.payload
.session
),
2322 assert(dev
->vhost_ops
->backend_type
== VHOST_BACKEND_TYPE_USER
);
2324 if (!crypto_session
) {
2325 error_report("vhost-user trying to send unhandled ioctl");
2329 memcpy(&msg
.payload
.session
.session_setup_data
, sess_info
,
2330 sizeof(CryptoDevBackendSymSessionInfo
));
2331 if (sess_info
->key_len
) {
2332 memcpy(&msg
.payload
.session
.key
, sess_info
->cipher_key
,
2333 sess_info
->key_len
);
2335 if (sess_info
->auth_key_len
> 0) {
2336 memcpy(&msg
.payload
.session
.auth_key
, sess_info
->auth_key
,
2337 sess_info
->auth_key_len
);
2339 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2341 error_report("vhost_user_write() return %d, create session failed",
2346 ret
= vhost_user_read(dev
, &msg
);
2348 error_report("vhost_user_read() return %d, create session failed",
2353 if (msg
.hdr
.request
!= VHOST_USER_CREATE_CRYPTO_SESSION
) {
2354 error_report("Received unexpected msg type. Expected %d received %d",
2355 VHOST_USER_CREATE_CRYPTO_SESSION
, msg
.hdr
.request
);
2359 if (msg
.hdr
.size
!= sizeof(msg
.payload
.session
)) {
2360 error_report("Received bad msg size.");
2364 if (msg
.payload
.session
.session_id
< 0) {
2365 error_report("Bad session id: %" PRId64
"",
2366 msg
.payload
.session
.session_id
);
2369 *session_id
= msg
.payload
.session
.session_id
;
2375 vhost_user_crypto_close_session(struct vhost_dev
*dev
, uint64_t session_id
)
2378 bool crypto_session
= virtio_has_feature(dev
->protocol_features
,
2379 VHOST_USER_PROTOCOL_F_CRYPTO_SESSION
);
2380 VhostUserMsg msg
= {
2381 .hdr
.request
= VHOST_USER_CLOSE_CRYPTO_SESSION
,
2382 .hdr
.flags
= VHOST_USER_VERSION
,
2383 .hdr
.size
= sizeof(msg
.payload
.u64
),
2385 msg
.payload
.u64
= session_id
;
2387 if (!crypto_session
) {
2388 error_report("vhost-user trying to send unhandled ioctl");
2392 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2394 error_report("vhost_user_write() return %d, close session failed",
2402 static bool vhost_user_mem_section_filter(struct vhost_dev
*dev
,
2403 MemoryRegionSection
*section
)
2407 result
= memory_region_get_fd(section
->mr
) >= 0;
2412 static int vhost_user_get_inflight_fd(struct vhost_dev
*dev
,
2413 uint16_t queue_size
,
2414 struct vhost_inflight
*inflight
)
2419 struct vhost_user
*u
= dev
->opaque
;
2420 CharBackend
*chr
= u
->user
->chr
;
2421 VhostUserMsg msg
= {
2422 .hdr
.request
= VHOST_USER_GET_INFLIGHT_FD
,
2423 .hdr
.flags
= VHOST_USER_VERSION
,
2424 .payload
.inflight
.num_queues
= dev
->nvqs
,
2425 .payload
.inflight
.queue_size
= queue_size
,
2426 .hdr
.size
= sizeof(msg
.payload
.inflight
),
2429 if (!virtio_has_feature(dev
->protocol_features
,
2430 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
)) {
2434 ret
= vhost_user_write(dev
, &msg
, NULL
, 0);
2439 ret
= vhost_user_read(dev
, &msg
);
2444 if (msg
.hdr
.request
!= VHOST_USER_GET_INFLIGHT_FD
) {
2445 error_report("Received unexpected msg type. "
2446 "Expected %d received %d",
2447 VHOST_USER_GET_INFLIGHT_FD
, msg
.hdr
.request
);
2451 if (msg
.hdr
.size
!= sizeof(msg
.payload
.inflight
)) {
2452 error_report("Received bad msg size.");
2456 if (!msg
.payload
.inflight
.mmap_size
) {
2460 fd
= qemu_chr_fe_get_msgfd(chr
);
2462 error_report("Failed to get mem fd");
2466 addr
= mmap(0, msg
.payload
.inflight
.mmap_size
, PROT_READ
| PROT_WRITE
,
2467 MAP_SHARED
, fd
, msg
.payload
.inflight
.mmap_offset
);
2469 if (addr
== MAP_FAILED
) {
2470 error_report("Failed to mmap mem fd");
2475 inflight
->addr
= addr
;
2477 inflight
->size
= msg
.payload
.inflight
.mmap_size
;
2478 inflight
->offset
= msg
.payload
.inflight
.mmap_offset
;
2479 inflight
->queue_size
= queue_size
;
2484 static int vhost_user_set_inflight_fd(struct vhost_dev
*dev
,
2485 struct vhost_inflight
*inflight
)
2487 VhostUserMsg msg
= {
2488 .hdr
.request
= VHOST_USER_SET_INFLIGHT_FD
,
2489 .hdr
.flags
= VHOST_USER_VERSION
,
2490 .payload
.inflight
.mmap_size
= inflight
->size
,
2491 .payload
.inflight
.mmap_offset
= inflight
->offset
,
2492 .payload
.inflight
.num_queues
= dev
->nvqs
,
2493 .payload
.inflight
.queue_size
= inflight
->queue_size
,
2494 .hdr
.size
= sizeof(msg
.payload
.inflight
),
2497 if (!virtio_has_feature(dev
->protocol_features
,
2498 VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD
)) {
2502 return vhost_user_write(dev
, &msg
, &inflight
->fd
, 1);
2505 bool vhost_user_init(VhostUserState
*user
, CharBackend
*chr
, Error
**errp
)
2508 error_setg(errp
, "Cannot initialize vhost-user state");
2512 user
->memory_slots
= 0;
2516 void vhost_user_cleanup(VhostUserState
*user
)
2519 VhostUserHostNotifier
*n
;
2524 memory_region_transaction_begin();
2525 for (i
= 0; i
< VIRTIO_QUEUE_MAX
; i
++) {
2526 n
= &user
->notifier
[i
];
2527 vhost_user_host_notifier_remove(user
, NULL
, i
);
2528 object_unparent(OBJECT(&n
->mr
));
2530 memory_region_transaction_commit();
2534 const VhostOps user_ops
= {
2535 .backend_type
= VHOST_BACKEND_TYPE_USER
,
2536 .vhost_backend_init
= vhost_user_backend_init
,
2537 .vhost_backend_cleanup
= vhost_user_backend_cleanup
,
2538 .vhost_backend_memslots_limit
= vhost_user_memslots_limit
,
2539 .vhost_set_log_base
= vhost_user_set_log_base
,
2540 .vhost_set_mem_table
= vhost_user_set_mem_table
,
2541 .vhost_set_vring_addr
= vhost_user_set_vring_addr
,
2542 .vhost_set_vring_endian
= vhost_user_set_vring_endian
,
2543 .vhost_set_vring_num
= vhost_user_set_vring_num
,
2544 .vhost_set_vring_base
= vhost_user_set_vring_base
,
2545 .vhost_get_vring_base
= vhost_user_get_vring_base
,
2546 .vhost_set_vring_kick
= vhost_user_set_vring_kick
,
2547 .vhost_set_vring_call
= vhost_user_set_vring_call
,
2548 .vhost_set_features
= vhost_user_set_features
,
2549 .vhost_get_features
= vhost_user_get_features
,
2550 .vhost_set_owner
= vhost_user_set_owner
,
2551 .vhost_reset_device
= vhost_user_reset_device
,
2552 .vhost_get_vq_index
= vhost_user_get_vq_index
,
2553 .vhost_set_vring_enable
= vhost_user_set_vring_enable
,
2554 .vhost_requires_shm_log
= vhost_user_requires_shm_log
,
2555 .vhost_migration_done
= vhost_user_migration_done
,
2556 .vhost_backend_can_merge
= vhost_user_can_merge
,
2557 .vhost_net_set_mtu
= vhost_user_net_set_mtu
,
2558 .vhost_set_iotlb_callback
= vhost_user_set_iotlb_callback
,
2559 .vhost_send_device_iotlb_msg
= vhost_user_send_device_iotlb_msg
,
2560 .vhost_get_config
= vhost_user_get_config
,
2561 .vhost_set_config
= vhost_user_set_config
,
2562 .vhost_crypto_create_session
= vhost_user_crypto_create_session
,
2563 .vhost_crypto_close_session
= vhost_user_crypto_close_session
,
2564 .vhost_backend_mem_section_filter
= vhost_user_mem_section_filter
,
2565 .vhost_get_inflight_fd
= vhost_user_get_inflight_fd
,
2566 .vhost_set_inflight_fd
= vhost_user_set_inflight_fd
,