/*
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *  Victor Kaplansky <victork@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */
/* this code avoids GLib dependency */
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <stdarg.h>
#include <errno.h>
#include <string.h>
#include <assert.h>
#include <inttypes.h>
#include <fcntl.h>
#include <pthread.h>
#include <endian.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/eventfd.h>
#include <sys/mman.h>

/*
 * Necessary to provide VIRTIO_F_VERSION_1 on systems
 * with older linux headers. Must appear before
 * <linux/vhost.h> below.
 */
#include "standard-headers/linux/virtio_config.h"

#if defined(__linux__)
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

#ifdef __NR_userfaultfd
#include <linux/userfaultfd.h>
#endif

#endif

#include "include/atomic.h"

#include "libvhost-user.h"
/* usually provided by GLib */
#if     __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ > 4)
#if !defined(__clang__) && (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(gnu_printf, format_idx, arg_idx)))
#else
#define G_GNUC_PRINTF(format_idx, arg_idx) \
    __attribute__((__format__(__printf__, format_idx, arg_idx)))
#endif
#else   /* !__GNUC__ */
#define G_GNUC_PRINTF(format_idx, arg_idx)
#endif  /* !__GNUC__ */
#define MIN(x, y) ({                            \
            __typeof__(x) _min1 = (x);          \
            __typeof__(y) _min2 = (y);          \
            (void) (&_min1 == &_min2);          \
            _min1 < _min2 ? _min1 : _min2; })

/* Round number down to multiple */
#define ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* Round number up to multiple */
#define ALIGN_UP(n, m) ALIGN_DOWN((n) + (m) - 1, (m))

#define unlikely(x)   __builtin_expect(!!(x), 0)
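/*
 * Editorial sketch, not part of the upstream file: compile-time checks that
 * illustrate how the alignment helpers round a value to a multiple of 'm'.
 * ALIGN_DOWN(7, 4) truncates toward zero, ALIGN_UP(5, 4) rounds up to the
 * next multiple, and already-aligned values are left unchanged.
 */
_Static_assert(ALIGN_DOWN(7, 4) == 4, "ALIGN_DOWN rounds toward zero");
_Static_assert(ALIGN_UP(5, 4) == 8, "ALIGN_UP rounds to the next multiple");
_Static_assert(ALIGN_UP(8, 4) == 8, "ALIGN_UP keeps aligned values as-is");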
/* Align each region to cache line size in inflight buffer */
#define INFLIGHT_ALIGNMENT 64

/* The version of inflight buffer */
#define INFLIGHT_VERSION 1

/* The version of the protocol we support */
#define VHOST_USER_VERSION 1
#define LIBVHOST_USER_DEBUG 0

#define DPRINT(...)                             \
    do {                                        \
        if (LIBVHOST_USER_DEBUG) {              \
            fprintf(stderr, __VA_ARGS__);       \
        }                                       \
    } while (0)
static inline
bool has_feature(uint64_t features, unsigned int fbit)
{
    return !!(features & (1ULL << fbit));
}

bool vu_has_feature(VuDev *dev,
                    unsigned int fbit)
{
    return has_feature(dev->features, fbit);
}

static inline bool vu_has_protocol_feature(VuDev *dev, unsigned int fbit)
{
    return has_feature(dev->protocol_features, fbit);
}
static const char *
vu_request_to_string(unsigned int req)
{
#define REQ(req) [req] = #req
    static const char *vu_request_str[] = {
        REQ(VHOST_USER_NONE),
        REQ(VHOST_USER_GET_FEATURES),
        REQ(VHOST_USER_SET_FEATURES),
        REQ(VHOST_USER_SET_OWNER),
        REQ(VHOST_USER_RESET_OWNER),
        REQ(VHOST_USER_SET_MEM_TABLE),
        REQ(VHOST_USER_SET_LOG_BASE),
        REQ(VHOST_USER_SET_LOG_FD),
        REQ(VHOST_USER_SET_VRING_NUM),
        REQ(VHOST_USER_SET_VRING_ADDR),
        REQ(VHOST_USER_SET_VRING_BASE),
        REQ(VHOST_USER_GET_VRING_BASE),
        REQ(VHOST_USER_SET_VRING_KICK),
        REQ(VHOST_USER_SET_VRING_CALL),
        REQ(VHOST_USER_SET_VRING_ERR),
        REQ(VHOST_USER_GET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_SET_PROTOCOL_FEATURES),
        REQ(VHOST_USER_GET_QUEUE_NUM),
        REQ(VHOST_USER_SET_VRING_ENABLE),
        REQ(VHOST_USER_SEND_RARP),
        REQ(VHOST_USER_NET_SET_MTU),
        REQ(VHOST_USER_SET_BACKEND_REQ_FD),
        REQ(VHOST_USER_IOTLB_MSG),
        REQ(VHOST_USER_SET_VRING_ENDIAN),
        REQ(VHOST_USER_GET_CONFIG),
        REQ(VHOST_USER_SET_CONFIG),
        REQ(VHOST_USER_POSTCOPY_ADVISE),
        REQ(VHOST_USER_POSTCOPY_LISTEN),
        REQ(VHOST_USER_POSTCOPY_END),
        REQ(VHOST_USER_GET_INFLIGHT_FD),
        REQ(VHOST_USER_SET_INFLIGHT_FD),
        REQ(VHOST_USER_GPU_SET_SOCKET),
        REQ(VHOST_USER_VRING_KICK),
        REQ(VHOST_USER_GET_MAX_MEM_SLOTS),
        REQ(VHOST_USER_ADD_MEM_REG),
        REQ(VHOST_USER_REM_MEM_REG),
    };
#undef REQ

    if (req < VHOST_USER_MAX) {
        return vu_request_str[req];
    } else {
        return "unknown";
    }
}
static void G_GNUC_PRINTF(2, 3)
vu_panic(VuDev *dev, const char *msg, ...)
{
    char *buf = NULL;
    va_list ap;

    va_start(ap, msg);
    if (vasprintf(&buf, msg, ap) < 0) {
        buf = NULL;
    }
    va_end(ap);

    dev->broken = true;
    dev->panic(dev, buf);
    free(buf);

    /*
     * FIXME:
     * find a way to call virtio_error, or perhaps close the connection?
     */
}
/* Translate guest physical address to our virtual address.  */
void *
vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
    unsigned int i;

    if (*plen == 0) {
        return NULL;
    }

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
            if ((guest_addr + *plen) > (r->gpa + r->size)) {
                *plen = r->gpa + r->size - guest_addr;
            }
            return (void *)(uintptr_t)
                guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
/* Translate qemu virtual address to our virtual address.  */
static void *
qva_to_va(VuDev *dev, uint64_t qemu_addr)
{
    unsigned int i;

    /* Find matching memory region.  */
    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];

        if ((qemu_addr >= r->qva) && (qemu_addr < (r->qva + r->size))) {
            return (void *)(uintptr_t)
                qemu_addr - r->qva + r->mmap_addr + r->mmap_offset;
        }
    }

    return NULL;
}
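/*
 * Editorial sketch, not part of the upstream file: how a device backend
 * might use vu_gpa_to_va() to copy a guest buffer out of the mapped memory
 * regions.  A region can end before 'len' bytes, in which case *plen is
 * clamped and the caller continues with the next translation.  The helper
 * 'example_copy_from_guest' is hypothetical, not a library API.
 */
static int
example_copy_from_guest(VuDev *dev, void *dst, uint64_t gpa, uint64_t len)
{
    uint64_t copied = 0;

    while (copied < len) {
        uint64_t chunk = len - copied;
        void *src = vu_gpa_to_va(dev, &chunk, gpa + copied);

        if (!src || chunk == 0) {
            return -1; /* address not covered by any mapped region */
        }
        memcpy((uint8_t *)dst + copied, src, chunk);
        copied += chunk;
    }
    return 0;
}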
static void
vmsg_close_fds(VhostUserMsg *vmsg)
{
    int i;

    for (i = 0; i < vmsg->fd_num; i++) {
        close(vmsg->fds[i]);
    }
}

/* Set reply payload.u64 and clear request flags and fd_num */
static void vmsg_set_reply_u64(VhostUserMsg *vmsg, uint64_t val)
{
    vmsg->flags = 0; /* defaults will be set by vu_send_reply() */
    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->payload.u64 = val;
    vmsg->fd_num = 0;
}
/* A test to see if we have userfault available */
static bool
have_userfault(void)
{
#if defined(__linux__) && defined(__NR_userfaultfd) &&\
        defined(UFFD_FEATURE_MISSING_SHMEM) &&\
        defined(UFFD_FEATURE_MISSING_HUGETLBFS)
    /* Now test the kernel we're running on really has the features */
    int ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    struct uffdio_api api_struct;
    if (ufd < 0) {
        return false;
    }

    api_struct.api = UFFD_API;
    api_struct.features = UFFD_FEATURE_MISSING_SHMEM |
                          UFFD_FEATURE_MISSING_HUGETLBFS;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        close(ufd);
        return false;
    }
    close(ufd);
    return true;
#else
    return false;
#endif
}
static bool
vu_message_read_default(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
        .msg_controllen = sizeof(control),
    };
    size_t fd_size;
    struct cmsghdr *cmsg;
    int rc;

    do {
        rc = recvmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (rc < 0) {
        vu_panic(dev, "Error while recvmsg: %s", strerror(errno));
        return false;
    }

    vmsg->fd_num = 0;
    for (cmsg = CMSG_FIRSTHDR(&msg);
         cmsg != NULL;
         cmsg = CMSG_NXTHDR(&msg, cmsg))
    {
        if (cmsg->cmsg_level == SOL_SOCKET && cmsg->cmsg_type == SCM_RIGHTS) {
            fd_size = cmsg->cmsg_len - CMSG_LEN(0);
            vmsg->fd_num = fd_size / sizeof(int);
            memcpy(vmsg->fds, CMSG_DATA(cmsg), fd_size);
            break;
        }
    }

    if (vmsg->size > sizeof(vmsg->payload)) {
        vu_panic(dev,
                 "Error: too big message request: %d, size: vmsg->size: %u, "
                 "while sizeof(vmsg->payload) = %zu\n",
                 vmsg->request, vmsg->size, sizeof(vmsg->payload));
        goto fail;
    }

    if (vmsg->size) {
        do {
            rc = read(conn_fd, &vmsg->payload, vmsg->size);
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

        if (rc <= 0) {
            vu_panic(dev, "Error while reading: %s", strerror(errno));
            goto fail;
        }

        assert((uint32_t)rc == vmsg->size);
    }

    return true;

fail:
    vmsg_close_fds(vmsg);

    return false;
}
static bool
vu_message_write(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    int rc;
    uint8_t *p = (uint8_t *)vmsg;
    char control[CMSG_SPACE(VHOST_MEMORY_BASELINE_NREGIONS * sizeof(int))] = {};
    struct iovec iov = {
        .iov_base = (char *)vmsg,
        .iov_len = VHOST_USER_HDR_SIZE,
    };
    struct msghdr msg = {
        .msg_iov = &iov,
        .msg_iovlen = 1,
        .msg_control = control,
    };
    struct cmsghdr *cmsg;

    memset(control, 0, sizeof(control));
    assert(vmsg->fd_num <= VHOST_MEMORY_BASELINE_NREGIONS);
    if (vmsg->fd_num > 0) {
        size_t fdsize = vmsg->fd_num * sizeof(int);
        msg.msg_controllen = CMSG_SPACE(fdsize);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_len = CMSG_LEN(fdsize);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        memcpy(CMSG_DATA(cmsg), vmsg->fds, fdsize);
    } else {
        msg.msg_controllen = 0;
    }

    do {
        rc = sendmsg(conn_fd, &msg, 0);
    } while (rc < 0 && (errno == EINTR || errno == EAGAIN));

    if (vmsg->size) {
        do {
            if (vmsg->data) {
                rc = write(conn_fd, vmsg->data, vmsg->size);
            } else {
                rc = write(conn_fd, p + VHOST_USER_HDR_SIZE, vmsg->size);
            }
        } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
    }

    if (rc <= 0) {
        vu_panic(dev, "Error while writing: %s", strerror(errno));
        return false;
    }

    return true;
}
static bool
vu_send_reply(VuDev *dev, int conn_fd, VhostUserMsg *vmsg)
{
    /* Set the version in the flags when sending the reply */
    vmsg->flags &= ~VHOST_USER_VERSION_MASK;
    vmsg->flags |= VHOST_USER_VERSION;
    vmsg->flags |= VHOST_USER_REPLY_MASK;

    return vu_message_write(dev, conn_fd, vmsg);
}
/*
 * Processes a reply on the backend channel.
 * Entered with backend_mutex held and releases it before exit.
 * Returns true on success.
 */
static bool
vu_process_message_reply(VuDev *dev, const VhostUserMsg *vmsg)
{
    VhostUserMsg msg_reply;
    bool result = false;

    if ((vmsg->flags & VHOST_USER_NEED_REPLY_MASK) == 0) {
        result = true;
        goto out;
    }

    if (!vu_message_read_default(dev, dev->backend_fd, &msg_reply)) {
        goto out;
    }

    if (msg_reply.request != vmsg->request) {
        DPRINT("Received unexpected msg type. Expected %d received %d",
               vmsg->request, msg_reply.request);
        goto out;
    }

    result = msg_reply.payload.u64 == 0;

out:
    pthread_mutex_unlock(&dev->backend_mutex);
    return result;
}
/* Kick the log_call_fd if required. */
static void
vu_log_kick(VuDev *dev)
{
    if (dev->log_call_fd != -1) {
        DPRINT("Kicking the QEMU's log...\n");
        if (eventfd_write(dev->log_call_fd, 1) < 0) {
            vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
        }
    }
}

static void
vu_log_page(uint8_t *log_table, uint64_t page)
{
    DPRINT("Logged dirty guest page: %"PRId64"\n", page);
    qatomic_or(&log_table[page / 8], 1 << (page % 8));
}
static void
vu_log_write(VuDev *dev, uint64_t address, uint64_t length)
{
    uint64_t page;

    if (!(dev->features & (1ULL << VHOST_F_LOG_ALL)) ||
        !dev->log_table || !length) {
        return;
    }

    assert(dev->log_size > ((address + length - 1) / VHOST_LOG_PAGE / 8));

    page = address / VHOST_LOG_PAGE;
    while (page * VHOST_LOG_PAGE < address + length) {
        vu_log_page(dev->log_table, page);
        page += 1;
    }

    vu_log_kick(dev);
}
static void
vu_kick_cb(VuDev *dev, int condition, void *data)
{
    int index = (intptr_t)data;
    VuVirtq *vq = &dev->vq[index];
    int sock = vq->kick_fd;
    eventfd_t kick_data;
    ssize_t rc;

    rc = eventfd_read(sock, &kick_data);
    if (rc == -1) {
        vu_panic(dev, "kick eventfd_read(): %s", strerror(errno));
        dev->remove_watch(dev, dev->vq[index].kick_fd);
    } else {
        DPRINT("Got kick_data: %016"PRIx64" handler:%p idx:%d\n",
               kick_data, vq->handler, index);
        if (vq->handler) {
            vq->handler(dev, index);
        }
    }
}
static bool
vu_get_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg->payload.u64 =
        /*
         * The following VIRTIO feature bits are supported by our virtqueue
         * implementation:
         */
        1ULL << VIRTIO_F_NOTIFY_ON_EMPTY |
        1ULL << VIRTIO_RING_F_INDIRECT_DESC |
        1ULL << VIRTIO_RING_F_EVENT_IDX |
        1ULL << VIRTIO_F_VERSION_1 |

        /* vhost-user feature bits */
        1ULL << VHOST_F_LOG_ALL |
        1ULL << VHOST_USER_F_PROTOCOL_FEATURES;

    if (dev->iface->get_features) {
        vmsg->payload.u64 |= dev->iface->get_features(dev);
    }

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    DPRINT("Sending back to guest u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    return true;
}
static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
    uint16_t i;

    for (i = 0; i < dev->max_queues; i++) {
        dev->vq[i].enable = enabled;
    }
}
static bool
vu_set_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    dev->features = vmsg->payload.u64;
    if (!vu_has_feature(dev, VIRTIO_F_VERSION_1)) {
        /*
         * We only support devices conforming to VIRTIO 1.0 or
         * later
         */
        vu_panic(dev, "virtio legacy devices aren't supported by libvhost-user");
        return false;
    }

    if (!(dev->features & VHOST_USER_F_PROTOCOL_FEATURES)) {
        vu_set_enable_all_rings(dev, true);
    }

    if (dev->iface->set_features) {
        dev->iface->set_features(dev, dev->features);
    }

    return false;
}
static bool
vu_set_owner_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    return false;
}
static void
vu_close_log(VuDev *dev)
{
    if (dev->log_table) {
        if (munmap(dev->log_table, dev->log_size) != 0) {
            perror("close log munmap() error");
        }

        dev->log_table = NULL;
    }
    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
        dev->log_call_fd = -1;
    }
}
static bool
vu_reset_device_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vu_set_enable_all_rings(dev, false);

    return false;
}
static bool
map_ring(VuDev *dev, VuVirtq *vq)
{
    vq->vring.desc = qva_to_va(dev, vq->vra.desc_user_addr);
    vq->vring.used = qva_to_va(dev, vq->vra.used_user_addr);
    vq->vring.avail = qva_to_va(dev, vq->vra.avail_user_addr);

    DPRINT("Setting virtq addresses:\n");
    DPRINT("    vring_desc  at %p\n", vq->vring.desc);
    DPRINT("    vring_used  at %p\n", vq->vring.used);
    DPRINT("    vring_avail at %p\n", vq->vring.avail);

    return !(vq->vring.desc && vq->vring.used && vq->vring.avail);
}
static bool
generate_faults(VuDev *dev) {
    unsigned int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *dev_region = &dev->regions[i];
        int ret;
#ifdef UFFDIO_REGISTER
        struct uffdio_register reg_struct;

        /*
         * We should already have an open ufd. Mark each memory
         * range as ufd.
         * Discard any mapping we have here; note I can't use MADV_REMOVE
         * or fallocate to make the hole since I don't want to lose
         * data that's already arrived in the shared process.
         * TODO: How to do hugepage
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_DONTNEED);
        if (ret) {
            fprintf(stderr,
                    "%s: Failed to madvise(DONTNEED) region %d: %s\n",
                    __func__, i, strerror(errno));
        }
        /*
         * Turn off transparent hugepages so we don't lose wakeups
         * in neighbouring pages.
         * TODO: Turn this back on later.
         */
        ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
                      dev_region->size + dev_region->mmap_offset,
                      MADV_NOHUGEPAGE);
        if (ret) {
            /*
             * Note: This can happen legally on kernels that are configured
             * without madvise'able hugepages
             */
            fprintf(stderr,
                    "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
                    __func__, i, strerror(errno));
        }

        reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
        reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
        reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

        if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
            vu_panic(dev, "%s: Failed to userfault region %d "
                          "@%" PRIx64 " + size:%" PRIx64 " offset: %" PRIx64
                          " (ufd=%d) %s\n",
                     __func__, i,
                     dev_region->mmap_addr,
                     dev_region->size, dev_region->mmap_offset,
                     dev->postcopy_ufd, strerror(errno));
            return false;
        }
        if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
            vu_panic(dev, "%s Region (%d) doesn't support COPY",
                     __func__, i);
            return false;
        }
        DPRINT("%s: region %d: Registered userfault for %"
               PRIx64 " + %" PRIx64 "\n", __func__, i,
               (uint64_t)reg_struct.range.start,
               (uint64_t)reg_struct.range.len);
        /* Now it's registered we can let the client at it */
        if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
                     dev_region->size + dev_region->mmap_offset,
                     PROT_READ | PROT_WRITE)) {
            vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
                     i, strerror(errno));
            return false;
        }
        /* TODO: Stash 'zero' support flags somewhere */
#endif
    }

    return true;
}
static bool
vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    int i;
    bool track_ramblocks = dev->postcopy_listening;
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    VuDevRegion *dev_region = &dev->regions[dev->nregions];
    void *mmap_addr;

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG received %d fds - only 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        vu_panic(dev, "VHOST_USER_ADD_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                      VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    if (dev->nregions == VHOST_USER_MAX_RAM_SLOTS) {
        vu_panic(dev, "failing attempt to hot add memory via "
                      "VHOST_USER_ADD_MEM_REG message because the backend has "
                      "no free ram slots available");
        return false;
    }

    /*
     * If we are in postcopy mode and we receive a u64 payload with a 0 value
     * we know all the postcopy client bases have been received, and we
     * should start generating faults.
     */
    if (track_ramblocks &&
        vmsg->size == sizeof(vmsg->payload.u64) &&
        vmsg->payload.u64 == 0) {
        (void)generate_faults(dev);
        return false;
    }

    DPRINT("Adding region: %u\n", dev->nregions);
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    dev_region->gpa = msg_region->guest_phys_addr;
    dev_region->size = msg_region->memory_size;
    dev_region->qva = msg_region->userspace_addr;
    dev_region->mmap_offset = msg_region->mmap_offset;

    /*
     * We don't use offset argument of mmap() since the
     * mapped address has to be page aligned, and we use huge
     * pages.
     */
    if (track_ramblocks) {
        /*
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[0], 0);
    } else {
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[0], 0);
    }

    if (mmap_addr == MAP_FAILED) {
        vu_panic(dev, "region mmap error: %s", strerror(errno));
    } else {
        dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
        DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
               dev_region->mmap_addr);
    }

    close(vmsg->fds[0]);

    if (track_ramblocks) {
        /*
         * Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);

        /* Send the message back to qemu with the addresses filled in. */
        vmsg->fd_num = 0;
        DPRINT("Successfully added new region in postcopy\n");
        dev->nregions++;
        return true;
    } else {
        for (i = 0; i < dev->max_queues; i++) {
            if (dev->vq[i].vring.desc) {
                if (map_ring(dev, &dev->vq[i])) {
                    vu_panic(dev, "remapping queue %d for new memory region",
                             i);
                }
            }
        }

        DPRINT("Successfully added new region\n");
        dev->nregions++;
        return false;
    }
}
static inline bool reg_equal(VuDevRegion *vudev_reg,
                             VhostUserMemoryRegion *msg_reg)
{
    if (vudev_reg->gpa == msg_reg->guest_phys_addr &&
        vudev_reg->qva == msg_reg->userspace_addr &&
        vudev_reg->size == msg_reg->memory_size) {
        return true;
    }

    return false;
}
static bool
vu_rem_mem_reg(VuDev *dev, VhostUserMsg *vmsg) {
    VhostUserMemoryRegion m = vmsg->payload.memreg.region, *msg_region = &m;
    unsigned int i;
    bool found = false;

    if (vmsg->fd_num > 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG received %d fds - at most 1 fd "
                      "should be sent for this message type", vmsg->fd_num);
        return false;
    }

    if (vmsg->size < VHOST_USER_MEM_REG_SIZE) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "VHOST_USER_REM_MEM_REG requires a message size of at "
                      "least %zu bytes and only %d bytes were received",
                      VHOST_USER_MEM_REG_SIZE, vmsg->size);
        return false;
    }

    DPRINT("Removing region:\n");
    DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
           msg_region->guest_phys_addr);
    DPRINT("    memory_size:     0x%016"PRIx64"\n",
           msg_region->memory_size);
    DPRINT("    userspace_addr   0x%016"PRIx64"\n",
           msg_region->userspace_addr);
    DPRINT("    mmap_offset      0x%016"PRIx64"\n",
           msg_region->mmap_offset);

    for (i = 0; i < dev->nregions; i++) {
        if (reg_equal(&dev->regions[i], msg_region)) {
            VuDevRegion *r = &dev->regions[i];
            void *m = (void *) (uintptr_t) r->mmap_addr;

            if (m) {
                munmap(m, r->size + r->mmap_offset);
            }

            /*
             * Shift all affected entries by 1 to close the hole at index i and
             * zero out the last entry.
             */
            memmove(dev->regions + i, dev->regions + i + 1,
                    sizeof(VuDevRegion) * (dev->nregions - i - 1));
            memset(dev->regions + dev->nregions - 1, 0, sizeof(VuDevRegion));
            DPRINT("Successfully removed a region\n");
            dev->nregions--;
            i--;

            found = true;

            /* Continue the search for eventual duplicates. */
        }
    }

    if (!found) {
        vu_panic(dev, "Specified region not found\n");
    }

    vmsg_close_fds(vmsg);

    return false;
}
static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;
    dev->nregions = memory->nregions;

    DPRINT("Nregions: %u\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         * In postcopy we're using PROT_NONE here to catch anyone
         * accessing it before we userfault
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_NONE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        /* Return the address to QEMU so that it can translate the ufd
         * fault addresses back.
         */
        msg_region->userspace_addr = (uintptr_t)(mmap_addr +
                                                 dev_region->mmap_offset);
        close(vmsg->fds[i]);
    }

    /* Send the message back to qemu with the addresses filled in */
    vmsg->fd_num = 0;
    if (!vu_send_reply(dev, dev->sock, vmsg)) {
        vu_panic(dev, "failed to respond to set-mem-table for postcopy");
        return false;
    }

    /* Wait for QEMU to confirm that it's registered the handler for the
     * faults.
     */
    if (!dev->read_msg(dev, dev->sock, vmsg) ||
        vmsg->size != sizeof(vmsg->payload.u64) ||
        vmsg->payload.u64 != 0) {
        vu_panic(dev, "failed to receive valid ack for postcopy set-mem-table");
        return false;
    }

    /* OK, now we can go and register the memory and generate faults */
    (void)generate_faults(dev);

    return false;
}
static bool
vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int i;
    VhostUserMemory m = vmsg->payload.memory, *memory = &m;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;

        if (m) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = memory->nregions;

    if (dev->postcopy_listening) {
        return vu_set_mem_table_exec_postcopy(dev, vmsg);
    }

    DPRINT("Nregions: %u\n", memory->nregions);
    for (i = 0; i < dev->nregions; i++) {
        void *mmap_addr;
        VhostUserMemoryRegion *msg_region = &memory->regions[i];
        VuDevRegion *dev_region = &dev->regions[i];

        DPRINT("Region %d\n", i);
        DPRINT("    guest_phys_addr: 0x%016"PRIx64"\n",
               msg_region->guest_phys_addr);
        DPRINT("    memory_size:     0x%016"PRIx64"\n",
               msg_region->memory_size);
        DPRINT("    userspace_addr   0x%016"PRIx64"\n",
               msg_region->userspace_addr);
        DPRINT("    mmap_offset      0x%016"PRIx64"\n",
               msg_region->mmap_offset);

        dev_region->gpa = msg_region->guest_phys_addr;
        dev_region->size = msg_region->memory_size;
        dev_region->qva = msg_region->userspace_addr;
        dev_region->mmap_offset = msg_region->mmap_offset;

        /* We don't use offset argument of mmap() since the
         * mapped address has to be page aligned, and we use huge
         * pages.
         */
        mmap_addr = mmap(0, dev_region->size + dev_region->mmap_offset,
                         PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE,
                         vmsg->fds[i], 0);

        if (mmap_addr == MAP_FAILED) {
            vu_panic(dev, "region mmap error: %s", strerror(errno));
        } else {
            dev_region->mmap_addr = (uint64_t)(uintptr_t)mmap_addr;
            DPRINT("    mmap_addr:       0x%016"PRIx64"\n",
                   dev_region->mmap_addr);
        }

        close(vmsg->fds[i]);
    }

    for (i = 0; i < dev->max_queues; i++) {
        if (dev->vq[i].vring.desc) {
            if (map_ring(dev, &dev->vq[i])) {
                vu_panic(dev, "remapping queue %d during setmemtable", i);
            }
        }
    }

    return false;
}
static bool
vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd;
    uint64_t log_mmap_size, log_mmap_offset;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.log)) {
        vu_panic(dev, "Invalid log_base message");
        return true;
    }

    fd = vmsg->fds[0];
    log_mmap_offset = vmsg->payload.log.mmap_offset;
    log_mmap_size = vmsg->payload.log.mmap_size;
    DPRINT("Log mmap_offset: %"PRId64"\n", log_mmap_offset);
    DPRINT("Log mmap_size:   %"PRId64"\n", log_mmap_size);

    rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
              log_mmap_offset);
    close(fd);
    if (rc == MAP_FAILED) {
        perror("log mmap error");
    }

    if (dev->log_table) {
        munmap(dev->log_table, dev->log_size);
    }
    dev->log_table = rc;
    dev->log_size = log_mmap_size;

    vmsg->size = sizeof(vmsg->payload.u64);
    vmsg->fd_num = 0;

    return true;
}
static bool
vu_set_log_fd_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid log_fd message");
        return false;
    }

    if (dev->log_call_fd != -1) {
        close(dev->log_call_fd);
    }
    dev->log_call_fd = vmsg->fds[0];
    DPRINT("Got log_call_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_set_vring_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].vring.num = num;

    return false;
}
static bool
vu_set_vring_addr_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    struct vhost_vring_addr addr = vmsg->payload.addr, *vra = &addr;
    unsigned int index = vra->index;
    VuVirtq *vq = &dev->vq[index];

    DPRINT("vhost_vring_addr:\n");
    DPRINT("    index:  %d\n", vra->index);
    DPRINT("    flags:  %d\n", vra->flags);
    DPRINT("    desc_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->desc_user_addr);
    DPRINT("    used_user_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->used_user_addr);
    DPRINT("    avail_user_addr:  0x%016" PRIx64 "\n", (uint64_t)vra->avail_user_addr);
    DPRINT("    log_guest_addr:   0x%016" PRIx64 "\n", (uint64_t)vra->log_guest_addr);

    vq->vra = *vra;
    vq->vring.flags = vra->flags;
    vq->vring.log_guest_addr = vra->log_guest_addr;

    if (map_ring(dev, vq)) {
        vu_panic(dev, "Invalid vring_addr message");
        return false;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);

    if (vq->last_avail_idx != vq->used_idx) {
        bool resume = dev->iface->queue_is_processed_in_order &&
            dev->iface->queue_is_processed_in_order(dev, index);

        DPRINT("Last avail index != used index: %u != %u%s\n",
               vq->last_avail_idx, vq->used_idx,
               resume ? ", resuming" : "");

        vq->shadow_avail_idx = vq->last_avail_idx = vq->used_idx;
    }

    return false;
}
static bool
vu_set_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int num = vmsg->payload.state.num;

    DPRINT("State.index: %u\n", index);
    DPRINT("State.num:   %u\n", num);
    dev->vq[index].shadow_avail_idx = dev->vq[index].last_avail_idx = num;

    return false;
}
static bool
vu_get_vring_base_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    DPRINT("State.index: %u\n", index);
    vmsg->payload.state.num = dev->vq[index].last_avail_idx;
    vmsg->size = sizeof(vmsg->payload.state);

    dev->vq[index].started = false;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, false);
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }
    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    return true;
}
static bool
vu_check_queue_msg_file(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    if (index >= dev->max_queues) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    if (nofd) {
        vmsg_close_fds(vmsg);
        return true;
    }

    if (vmsg->fd_num != 1) {
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Invalid fds in request: %d", vmsg->request);
        return false;
    }

    return true;
}
static int
inflight_desc_compare(const void *a, const void *b)
{
    VuVirtqInflightDesc *desc0 = (VuVirtqInflightDesc *)a,
                        *desc1 = (VuVirtqInflightDesc *)b;

    if (desc1->counter > desc0->counter &&
        (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2) {
        return 1;
    }

    return -1;
}
static int
vu_check_queue_inflights(VuDev *dev, VuVirtq *vq)
{
    int i = 0;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    if (unlikely(!vq->inflight->version)) {
        /* initialize the buffer */
        vq->inflight->version = INFLIGHT_VERSION;
        return 0;
    }

    vq->used_idx = le16toh(vq->vring.used->idx);
    vq->resubmit_num = 0;
    vq->resubmit_list = NULL;
    vq->counter = 0;

    if (unlikely(vq->inflight->used_idx != vq->used_idx)) {
        vq->inflight->desc[vq->inflight->last_batch_head].inflight = 0;

        vq->inflight->used_idx = vq->used_idx;
    }

    for (i = 0; i < vq->inflight->desc_num; i++) {
        if (vq->inflight->desc[i].inflight == 1) {
            vq->inuse++;
        }
    }

    vq->shadow_avail_idx = vq->last_avail_idx = vq->inuse + vq->used_idx;

    if (vq->inuse) {
        vq->resubmit_list = calloc(vq->inuse, sizeof(VuVirtqInflightDesc));
        if (!vq->resubmit_list) {
            return -1;
        }

        for (i = 0; i < vq->inflight->desc_num; i++) {
            if (vq->inflight->desc[i].inflight) {
                vq->resubmit_list[vq->resubmit_num].index = i;
                vq->resubmit_list[vq->resubmit_num].counter =
                    vq->inflight->desc[i].counter;
                vq->resubmit_num++;
            }
        }

        if (vq->resubmit_num > 1) {
            qsort(vq->resubmit_list, vq->resubmit_num,
                  sizeof(VuVirtqInflightDesc), inflight_desc_compare);
        }
        vq->counter = vq->resubmit_list[0].counter + 1;
    }

    /* in case of I/O hang after reconnecting */
    if (eventfd_write(vq->kick_fd, 1)) {
        return -1;
    }

    return 0;
}
static bool
vu_set_vring_kick_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].kick_fd != -1) {
        dev->remove_watch(dev, dev->vq[index].kick_fd);
        close(dev->vq[index].kick_fd);
        dev->vq[index].kick_fd = -1;
    }

    dev->vq[index].kick_fd = nofd ? -1 : vmsg->fds[0];
    DPRINT("Got kick_fd: %d for vq: %d\n", dev->vq[index].kick_fd, index);

    dev->vq[index].started = true;
    if (dev->iface->queue_set_started) {
        dev->iface->queue_set_started(dev, index, true);
    }

    if (dev->vq[index].kick_fd != -1 && dev->vq[index].handler) {
        dev->set_watch(dev, dev->vq[index].kick_fd, VU_WATCH_IN,
                       vu_kick_cb, (void *)(long)index);

        DPRINT("Waiting for kicks on fd: %d for vq: %d\n",
               dev->vq[index].kick_fd, index);
    }

    if (vu_check_queue_inflights(dev, &dev->vq[index])) {
        vu_panic(dev, "Failed to check inflights for vq: %d\n", index);
    }

    return false;
}
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler)
{
    int qidx = vq - dev->vq;

    vq->handler = handler;
    if (vq->kick_fd >= 0) {
        if (handler) {
            dev->set_watch(dev, vq->kick_fd, VU_WATCH_IN,
                           vu_kick_cb, (void *)(long)qidx);
        } else {
            dev->remove_watch(dev, vq->kick_fd);
        }
    }
}
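/*
 * Editorial sketch, not part of the upstream file: a minimal per-queue
 * handler as a device backend might register it with vu_set_queue_handler().
 * It relies on the public vu_get_queue()/vu_queue_pop()/vu_queue_push()/
 * vu_queue_notify() API; the handler itself, 'example_queue_handler', is
 * hypothetical and simply completes every request with a zero-length reply.
 */
static void
example_queue_handler(VuDev *dev, int qidx)
{
    VuVirtq *vq = vu_get_queue(dev, qidx);

    for (;;) {
        VuVirtqElement *elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));

        if (!elem) {
            break;                  /* no more available descriptors */
        }
        /* A real device would parse elem->out_sg / fill elem->in_sg here. */
        vu_queue_push(dev, vq, elem, 0);
        free(elem);
    }
    vu_queue_notify(dev, vq);
}
/* Registration: vu_set_queue_handler(dev, vu_get_queue(dev, 0), example_queue_handler); */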
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset)
{
    int qidx = vq - dev->vq;
    int fd_num = 0;
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG,
        .flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK,
        .size = sizeof(vmsg.payload.area),
        .payload.area = {
            .u64 = qidx & VHOST_USER_VRING_IDX_MASK,
            .size = size,
            .offset = offset,
        },
    };

    if (fd == -1) {
        vmsg.payload.area.u64 |= VHOST_USER_VRING_NOFD_MASK;
    } else {
        vmsg.fds[fd_num++] = fd;
    }

    vmsg.fd_num = fd_num;

    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD)) {
        return false;
    }

    pthread_mutex_lock(&dev->backend_mutex);
    if (!vu_message_write(dev, dev->backend_fd, &vmsg)) {
        pthread_mutex_unlock(&dev->backend_mutex);
        return false;
    }

    /* Also unlocks the backend_mutex */
    return vu_process_message_reply(dev, &vmsg);
}
static bool
vu_set_vring_call_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].call_fd != -1) {
        close(dev->vq[index].call_fd);
        dev->vq[index].call_fd = -1;
    }

    dev->vq[index].call_fd = nofd ? -1 : vmsg->fds[0];

    /* in case of I/O hang after reconnecting */
    if (dev->vq[index].call_fd != -1 && eventfd_write(vmsg->fds[0], 1)) {
        return -1;
    }

    DPRINT("Got call_fd: %d for vq: %d\n", dev->vq[index].call_fd, index);

    return false;
}
static bool
vu_set_vring_err_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
    bool nofd = vmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK;

    DPRINT("u64: 0x%016"PRIx64"\n", vmsg->payload.u64);

    if (!vu_check_queue_msg_file(dev, vmsg)) {
        return false;
    }

    if (dev->vq[index].err_fd != -1) {
        close(dev->vq[index].err_fd);
        dev->vq[index].err_fd = -1;
    }

    dev->vq[index].err_fd = nofd ? -1 : vmsg->fds[0];

    return false;
}
static bool
vu_get_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Note that we support, but intentionally do not set,
     * VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS. This means that
     * a device implementation can return it in its callback
     * (get_protocol_features) if it wants to use this for
     * simulation, but it is otherwise not desirable (if even
     * implemented by the frontend.)
     */
    uint64_t features = 1ULL << VHOST_USER_PROTOCOL_F_MQ |
                        1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_REQ |
                        1ULL << VHOST_USER_PROTOCOL_F_HOST_NOTIFIER |
                        1ULL << VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD |
                        1ULL << VHOST_USER_PROTOCOL_F_REPLY_ACK |
                        1ULL << VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS;

    if (have_userfault()) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_PAGEFAULT;
    }

    if (dev->iface->get_config && dev->iface->set_config) {
        features |= 1ULL << VHOST_USER_PROTOCOL_F_CONFIG;
    }

    if (dev->iface->get_protocol_features) {
        features |= dev->iface->get_protocol_features(dev);
    }

    vmsg_set_reply_u64(vmsg, features);
    return true;
}
static bool
vu_set_protocol_features_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    uint64_t features = vmsg->payload.u64;

    DPRINT("u64: 0x%016"PRIx64"\n", features);

    dev->protocol_features = vmsg->payload.u64;

    if (vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ) ||
         !vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_REPLY_ACK))) {
        /*
         * The use case for using messages for kick/call is simulation, to make
         * the kick and call synchronous. To actually get that behaviour, both
         * of the other features are required.
         * Theoretically, one could use only kick messages, or do them without
         * having F_REPLY_ACK, but too many (possibly pending) messages on the
         * socket will eventually cause the frontend to hang.  To avoid this in
         * scenarios where it is not desired, enforce that the settings
         * actually enable the simulation case.
         */
        vu_panic(dev,
                 "F_IN_BAND_NOTIFICATIONS requires F_BACKEND_REQ && F_REPLY_ACK");
        return false;
    }

    if (dev->iface->set_protocol_features) {
        dev->iface->set_protocol_features(dev, features);
    }

    return false;
}
static bool
vu_get_queue_num_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, dev->max_queues);
    return true;
}
static bool
vu_set_vring_enable_exec(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;
    unsigned int enable = vmsg->payload.state.num;

    DPRINT("State.index:  %u\n", index);
    DPRINT("State.enable: %u\n", enable);

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid vring_enable index: %u", index);
        return false;
    }

    dev->vq[index].enable = enable;
    return false;
}
static bool
vu_set_backend_req_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    if (vmsg->fd_num != 1) {
        vu_panic(dev, "Invalid backend_req_fd message (%d fd's)", vmsg->fd_num);
        return false;
    }

    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
    }
    dev->backend_fd = vmsg->fds[0];
    DPRINT("Got backend_fd: %d\n", vmsg->fds[0]);

    return false;
}
static bool
vu_get_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->get_config) {
        ret = dev->iface->get_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.size);
    }

    if (ret) {
        /* resize to zero to indicate an error to frontend */
        vmsg->size = 0;
    }

    return true;
}
static bool
vu_set_config(VuDev *dev, VhostUserMsg *vmsg)
{
    int ret = -1;

    if (dev->iface->set_config) {
        ret = dev->iface->set_config(dev, vmsg->payload.config.region,
                                     vmsg->payload.config.offset,
                                     vmsg->payload.config.size,
                                     vmsg->payload.config.flags);
        if (ret) {
            vu_panic(dev, "Set virtio configuration space failed");
        }
    }

    return false;
}
static bool
vu_set_postcopy_advise(VuDev *dev, VhostUserMsg *vmsg)
{
#ifdef UFFDIO_API
    struct uffdio_api api_struct;

    dev->postcopy_ufd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
    vmsg->size = 0;
#else
    dev->postcopy_ufd = -1;
#endif

    if (dev->postcopy_ufd == -1) {
        vu_panic(dev, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

#ifdef UFFDIO_API
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(dev->postcopy_ufd, UFFDIO_API, &api_struct)) {
        vu_panic(dev, "Failed UFFDIO_API: %s", strerror(errno));
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        goto out;
    }
    /* TODO: Stash feature flags somewhere */
#endif

out:
    /* Return a ufd to the QEMU */
    vmsg->fd_num = 1;
    vmsg->fds[0] = dev->postcopy_ufd;
    return true; /* = send a reply */
}
static bool
vu_set_postcopy_listen(VuDev *dev, VhostUserMsg *vmsg)
{
    if (dev->nregions) {
        vu_panic(dev, "Regions already registered at postcopy-listen");
        vmsg_set_reply_u64(vmsg, -1);
        return true;
    }
    dev->postcopy_listening = true;

    vmsg_set_reply_u64(vmsg, 0);
    return true;
}
static bool
vu_set_postcopy_end(VuDev *dev, VhostUserMsg *vmsg)
{
    DPRINT("%s: Entry\n", __func__);
    dev->postcopy_listening = false;
    if (dev->postcopy_ufd > 0) {
        close(dev->postcopy_ufd);
        dev->postcopy_ufd = -1;
        DPRINT("%s: Done close\n", __func__);
    }

    vmsg_set_reply_u64(vmsg, 0);
    DPRINT("%s: exit\n", __func__);
    return true;
}
static inline uint64_t
vu_inflight_queue_size(uint16_t queue_size)
{
    return ALIGN_UP(sizeof(VuDescStateSplit) * queue_size +
           sizeof(uint16_t), INFLIGHT_ALIGNMENT);
}
#ifdef MFD_ALLOW_SEALING
static void *
memfd_alloc(const char *name, size_t size, unsigned int flags, int *fd)
{
    void *ptr;
    int ret;

    *fd = memfd_create(name, MFD_ALLOW_SEALING);
    if (*fd < 0) {
        return NULL;
    }

    ret = ftruncate(*fd, size);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ret = fcntl(*fd, F_ADD_SEALS, flags);
    if (ret < 0) {
        close(*fd);
        return NULL;
    }

    ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, *fd, 0);
    if (ptr == MAP_FAILED) {
        close(*fd);
        return NULL;
    }

    return ptr;
}
#endif
static bool
vu_get_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd = -1;
    void *addr = NULL;
    uint64_t mmap_size;
    uint16_t num_queues, queue_size;

    if (vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid get_inflight_fd message:%d", vmsg->size);
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    mmap_size = vu_inflight_queue_size(queue_size) * num_queues;

#ifdef MFD_ALLOW_SEALING
    addr = memfd_alloc("vhost-inflight", mmap_size,
                       F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL,
                       &fd);
#else
    vu_panic(dev, "Not implemented: memfd support is missing");
#endif

    if (!addr) {
        vu_panic(dev, "Failed to alloc vhost inflight area");
        vmsg->payload.inflight.mmap_size = 0;
        return true;
    }

    memset(addr, 0, mmap_size);

    dev->inflight_info.addr = addr;
    dev->inflight_info.size = vmsg->payload.inflight.mmap_size = mmap_size;
    dev->inflight_info.fd = vmsg->fds[0] = fd;
    vmsg->fd_num = 1;
    vmsg->payload.inflight.mmap_offset = 0;

    DPRINT("send inflight mmap_size: %"PRId64"\n",
           vmsg->payload.inflight.mmap_size);
    DPRINT("send inflight mmap offset: %"PRId64"\n",
           vmsg->payload.inflight.mmap_offset);

    return true;
}
static bool
vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    int fd, i;
    uint64_t mmap_size, mmap_offset;
    uint16_t num_queues, queue_size;
    void *rc;

    if (vmsg->fd_num != 1 ||
        vmsg->size != sizeof(vmsg->payload.inflight)) {
        vu_panic(dev, "Invalid set_inflight_fd message size:%d fds:%d",
                 vmsg->size, vmsg->fd_num);
        return false;
    }

    fd = vmsg->fds[0];
    mmap_size = vmsg->payload.inflight.mmap_size;
    mmap_offset = vmsg->payload.inflight.mmap_offset;
    num_queues = vmsg->payload.inflight.num_queues;
    queue_size = vmsg->payload.inflight.queue_size;

    DPRINT("set_inflight_fd mmap_size: %"PRId64"\n", mmap_size);
    DPRINT("set_inflight_fd mmap_offset: %"PRId64"\n", mmap_offset);
    DPRINT("set_inflight_fd num_queues: %"PRId16"\n", num_queues);
    DPRINT("set_inflight_fd queue_size: %"PRId16"\n", queue_size);

    rc = mmap(0, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
              fd, mmap_offset);

    if (rc == MAP_FAILED) {
        vu_panic(dev, "set_inflight_fd mmap error: %s", strerror(errno));
        return false;
    }

    if (dev->inflight_info.fd) {
        close(dev->inflight_info.fd);
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
    }

    dev->inflight_info.fd = fd;
    dev->inflight_info.addr = rc;
    dev->inflight_info.size = mmap_size;

    for (i = 0; i < num_queues; i++) {
        dev->vq[i].inflight = (VuVirtqInflight *)rc;
        dev->vq[i].inflight->desc_num = queue_size;
        rc = (void *)((char *)rc + vu_inflight_queue_size(queue_size));
    }

    return false;
}
static bool
vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
{
    unsigned int index = vmsg->payload.state.index;

    if (index >= dev->max_queues) {
        vu_panic(dev, "Invalid queue index: %u", index);
        return false;
    }

    DPRINT("Got kick message: handler:%p idx:%u\n",
           dev->vq[index].handler, index);

    if (!dev->vq[index].started) {
        dev->vq[index].started = true;

        if (dev->iface->queue_set_started) {
            dev->iface->queue_set_started(dev, index, true);
        }
    }

    if (dev->vq[index].handler) {
        dev->vq[index].handler(dev, index);
    }

    return false;
}
static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
{
    vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS);

    DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS);

    return true;
}
static bool
vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
{
    int do_reply = 0;

    /* Print out generic part of the request. */
    DPRINT("================ Vhost user message ================\n");
    DPRINT("Request: %s (%d)\n", vu_request_to_string(vmsg->request),
           vmsg->request);
    DPRINT("Flags:   0x%x\n", vmsg->flags);
    DPRINT("Size:    %u\n", vmsg->size);

    if (vmsg->fd_num) {
        int i;
        DPRINT("Fds:");
        for (i = 0; i < vmsg->fd_num; i++) {
            DPRINT(" %d", vmsg->fds[i]);
        }
        DPRINT("\n");
    }

    if (dev->iface->process_msg &&
        dev->iface->process_msg(dev, vmsg, &do_reply)) {
        return do_reply;
    }

    switch (vmsg->request) {
    case VHOST_USER_GET_FEATURES:
        return vu_get_features_exec(dev, vmsg);
    case VHOST_USER_SET_FEATURES:
        return vu_set_features_exec(dev, vmsg);
    case VHOST_USER_GET_PROTOCOL_FEATURES:
        return vu_get_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_PROTOCOL_FEATURES:
        return vu_set_protocol_features_exec(dev, vmsg);
    case VHOST_USER_SET_OWNER:
        return vu_set_owner_exec(dev, vmsg);
    case VHOST_USER_RESET_OWNER:
        return vu_reset_device_exec(dev, vmsg);
    case VHOST_USER_SET_MEM_TABLE:
        return vu_set_mem_table_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_BASE:
        return vu_set_log_base_exec(dev, vmsg);
    case VHOST_USER_SET_LOG_FD:
        return vu_set_log_fd_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_NUM:
        return vu_set_vring_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ADDR:
        return vu_set_vring_addr_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_BASE:
        return vu_set_vring_base_exec(dev, vmsg);
    case VHOST_USER_GET_VRING_BASE:
        return vu_get_vring_base_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_KICK:
        return vu_set_vring_kick_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_CALL:
        return vu_set_vring_call_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ERR:
        return vu_set_vring_err_exec(dev, vmsg);
    case VHOST_USER_GET_QUEUE_NUM:
        return vu_get_queue_num_exec(dev, vmsg);
    case VHOST_USER_SET_VRING_ENABLE:
        return vu_set_vring_enable_exec(dev, vmsg);
    case VHOST_USER_SET_BACKEND_REQ_FD:
        return vu_set_backend_req_fd(dev, vmsg);
    case VHOST_USER_GET_CONFIG:
        return vu_get_config(dev, vmsg);
    case VHOST_USER_SET_CONFIG:
        return vu_set_config(dev, vmsg);
    case VHOST_USER_NONE:
        /* if you need processing before exit, override iface->process_msg */
        exit(0);
    case VHOST_USER_POSTCOPY_ADVISE:
        return vu_set_postcopy_advise(dev, vmsg);
    case VHOST_USER_POSTCOPY_LISTEN:
        return vu_set_postcopy_listen(dev, vmsg);
    case VHOST_USER_POSTCOPY_END:
        return vu_set_postcopy_end(dev, vmsg);
    case VHOST_USER_GET_INFLIGHT_FD:
        return vu_get_inflight_fd(dev, vmsg);
    case VHOST_USER_SET_INFLIGHT_FD:
        return vu_set_inflight_fd(dev, vmsg);
    case VHOST_USER_VRING_KICK:
        return vu_handle_vring_kick(dev, vmsg);
    case VHOST_USER_GET_MAX_MEM_SLOTS:
        return vu_handle_get_max_memslots(dev, vmsg);
    case VHOST_USER_ADD_MEM_REG:
        return vu_add_mem_reg(dev, vmsg);
    case VHOST_USER_REM_MEM_REG:
        return vu_rem_mem_reg(dev, vmsg);
    default:
        vmsg_close_fds(vmsg);
        vu_panic(dev, "Unhandled request: %d", vmsg->request);
    }

    return false;
}
bool
vu_dispatch(VuDev *dev)
{
    VhostUserMsg vmsg = { 0, };
    int reply_requested;
    bool need_reply, success = false;

    if (!dev->read_msg(dev, dev->sock, &vmsg)) {
        goto end;
    }

    need_reply = vmsg.flags & VHOST_USER_NEED_REPLY_MASK;

    reply_requested = vu_process_message(dev, &vmsg);
    if (!reply_requested && need_reply) {
        vmsg_set_reply_u64(&vmsg, 0);
        reply_requested = 1;
    }

    if (!reply_requested) {
        success = true;
        goto end;
    }

    if (!vu_send_reply(dev, dev->sock, &vmsg)) {
        goto end;
    }

    success = true;

end:
    free(vmsg.data);
    return success;
}
void
vu_deinit(VuDev *dev)
{
    unsigned int i;

    for (i = 0; i < dev->nregions; i++) {
        VuDevRegion *r = &dev->regions[i];
        void *m = (void *) (uintptr_t) r->mmap_addr;
        if (m != MAP_FAILED) {
            munmap(m, r->size + r->mmap_offset);
        }
    }
    dev->nregions = 0;

    for (i = 0; i < dev->max_queues; i++) {
        VuVirtq *vq = &dev->vq[i];

        if (vq->call_fd != -1) {
            close(vq->call_fd);
            vq->call_fd = -1;
        }

        if (vq->kick_fd != -1) {
            dev->remove_watch(dev, vq->kick_fd);
            close(vq->kick_fd);
            vq->kick_fd = -1;
        }

        if (vq->err_fd != -1) {
            close(vq->err_fd);
            vq->err_fd = -1;
        }

        if (vq->resubmit_list) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        vq->inflight = NULL;
    }

    if (dev->inflight_info.addr) {
        munmap(dev->inflight_info.addr, dev->inflight_info.size);
        dev->inflight_info.addr = NULL;
    }

    if (dev->inflight_info.fd > 0) {
        close(dev->inflight_info.fd);
        dev->inflight_info.fd = -1;
    }

    vu_close_log(dev);
    if (dev->backend_fd != -1) {
        close(dev->backend_fd);
        dev->backend_fd = -1;
    }
    pthread_mutex_destroy(&dev->backend_mutex);

    if (dev->sock != -1) {
        close(dev->sock);
    }

    free(dev->vq);
    dev->vq = NULL;
}
bool
vu_init(VuDev *dev,
        uint16_t max_queues,
        int socket,
        vu_panic_cb panic,
        vu_read_msg_cb read_msg,
        vu_set_watch_cb set_watch,
        vu_remove_watch_cb remove_watch,
        const VuDevIface *iface)
{
    uint16_t i;

    assert(max_queues > 0);
    assert(socket >= 0);
    assert(set_watch);
    assert(remove_watch);
    assert(iface);
    assert(panic);

    memset(dev, 0, sizeof(*dev));

    dev->sock = socket;
    dev->panic = panic;
    dev->read_msg = read_msg ? read_msg : vu_message_read_default;
    dev->set_watch = set_watch;
    dev->remove_watch = remove_watch;
    dev->iface = iface;
    dev->log_call_fd = -1;
    pthread_mutex_init(&dev->backend_mutex, NULL);
    dev->backend_fd = -1;
    dev->max_queues = max_queues;

    dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
    if (!dev->vq) {
        DPRINT("%s: failed to malloc virtqueues\n", __func__);
        return false;
    }

    for (i = 0; i < max_queues; i++) {
        dev->vq[i] = (VuVirtq) {
            .call_fd = -1, .kick_fd = -1, .err_fd = -1,
            .notification = true,
        };
    }

    return true;
}
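/*
 * Editorial sketch, not part of the upstream file: the minimal life cycle a
 * backend goes through with this library - vu_init() on an accepted
 * vhost-user socket, vu_dispatch() per message, vu_deinit() at the end.
 * The stub callbacks and 'example_serve' are hypothetical; real backends
 * wire the watch callbacks into their event loop (see vhost-user-bridge or
 * the GLib glue in libvhost-user-glib).
 */
static void example_panic(VuDev *dev, const char *err)
{
    fprintf(stderr, "vu panic: %s\n", err);
}

static void example_set_watch(VuDev *dev, int fd, int condition,
                              vu_watch_cb cb, void *data)
{
    /* no-op stub: a real backend registers fd with its event loop here */
}

static void example_remove_watch(VuDev *dev, int fd)
{
    /* no-op stub: a real backend unregisters fd here */
}

static void example_serve(int conn_fd, const VuDevIface *iface)
{
    VuDev dev;

    if (!vu_init(&dev, 1, conn_fd, example_panic, NULL /* default read */,
                 example_set_watch, example_remove_watch, iface)) {
        return;
    }
    /* Each vu_dispatch() call reads and handles one vhost-user message. */
    while (vu_dispatch(&dev)) {
        /* keep going until the connection breaks or a panic occurs */
    }
    vu_deinit(&dev);
}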
VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
    assert(qidx < dev->max_queues);
    return &dev->vq[qidx];
}

bool
vu_queue_enabled(VuDev *dev, VuVirtq *vq)
{
    return vq->enable;
}

bool
vu_queue_started(const VuDev *dev, const VuVirtq *vq)
{
    return vq->started;
}
static inline uint16_t
vring_avail_flags(VuVirtq *vq)
{
    return le16toh(vq->vring.avail->flags);
}

static inline uint16_t
vring_avail_idx(VuVirtq *vq)
{
    vq->shadow_avail_idx = le16toh(vq->vring.avail->idx);

    return vq->shadow_avail_idx;
}

static inline uint16_t
vring_avail_ring(VuVirtq *vq, int i)
{
    return le16toh(vq->vring.avail->ring[i]);
}

static inline uint16_t
vring_get_used_event(VuVirtq *vq)
{
    return vring_avail_ring(vq, vq->vring.num);
}
static int
virtqueue_num_heads(VuDev *dev, VuVirtq *vq, unsigned int idx)
{
    uint16_t num_heads = vring_avail_idx(vq) - idx;

    /* Check it isn't doing very strange things with descriptor numbers. */
    if (num_heads > vq->vring.num) {
        vu_panic(dev, "Guest moved used index from %u to %u",
                 idx, vq->shadow_avail_idx);
        return -1;
    }
    if (num_heads) {
        /* On success, callers read a descriptor at vq->last_avail_idx.
         * Make sure descriptor read does not bypass avail index read. */
        smp_rmb();
    }

    return num_heads;
}
static bool
virtqueue_get_head(VuDev *dev, VuVirtq *vq,
                   unsigned int idx, unsigned int *head)
{
    /* Grab the next descriptor number they're advertising, and increment
     * the index we've seen. */
    *head = vring_avail_ring(vq, idx % vq->vring.num);

    /* If their number is silly, that's a fatal mistake. */
    if (*head >= vq->vring.num) {
        vu_panic(dev, "Guest says index %u is available", *head);
        return false;
    }

    return true;
}
static int
virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
                             uint64_t addr, size_t len)
{
    struct vring_desc *ori_desc;
    uint64_t read_len;

    if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
        return -1;
    }

    if (len == 0) {
        return -1;
    }

    while (len) {
        read_len = len;
        ori_desc = vu_gpa_to_va(dev, &read_len, addr);
        if (!ori_desc) {
            return -1;
        }

        memcpy(desc, ori_desc, read_len);
        len -= read_len;
        addr += read_len;
        desc += read_len / sizeof(struct vring_desc);
    }

    return 0;
}
enum {
    VIRTQUEUE_READ_DESC_ERROR = -1,
    VIRTQUEUE_READ_DESC_DONE = 0,   /* end of chain */
    VIRTQUEUE_READ_DESC_MORE = 1,   /* more buffers in chain */
};
static int
virtqueue_read_next_desc(VuDev *dev, struct vring_desc *desc,
                         int i, unsigned int max, unsigned int *next)
{
    /* If this descriptor says it doesn't chain, we're done. */
    if (!(le16toh(desc[i].flags) & VRING_DESC_F_NEXT)) {
        return VIRTQUEUE_READ_DESC_DONE;
    }

    /* Check they're not leading us off end of descriptors. */
    *next = le16toh(desc[i].next);
    /* Make sure compiler knows to grab that: we don't want it changing! */
    smp_wmb();

    if (*next >= max) {
        vu_panic(dev, "Desc next is %u", *next);
        return VIRTQUEUE_READ_DESC_ERROR;
    }

    return VIRTQUEUE_READ_DESC_MORE;
}
void
vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
                         unsigned int *out_bytes,
                         unsigned max_in_bytes, unsigned max_out_bytes)
{
    unsigned int idx;
    unsigned int total_bufs, in_total, out_total;
    int rc;

    idx = vq->last_avail_idx;

    total_bufs = in_total = out_total = 0;
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        goto done;
    }

    while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
        unsigned int max, desc_len, num_bufs, indirect = 0;
        uint64_t desc_addr, read_len;
        struct vring_desc *desc;
        struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
        unsigned int i;

        max = vq->vring.num;
        num_bufs = total_bufs;
        if (!virtqueue_get_head(dev, vq, idx++, &i)) {
            goto err;
        }
        desc = vq->vring.desc;

        if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
            if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
                vu_panic(dev, "Invalid size for indirect buffer table");
                goto err;
            }

            /* If we've got too many, that implies a descriptor loop. */
            if (num_bufs >= max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            /* loop over the indirect descriptor table */
            indirect = 1;
            desc_addr = le64toh(desc[i].addr);
            desc_len = le32toh(desc[i].len);
            max = desc_len / sizeof(struct vring_desc);
            read_len = desc_len;
            desc = vu_gpa_to_va(dev, &read_len, desc_addr);
            if (unlikely(desc && read_len != desc_len)) {
                /* Failed to use zero copy */
                desc = NULL;
                if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                                  desc_addr,
                                                  desc_len)) {
                    desc = desc_buf;
                }
            }
            if (!desc) {
                vu_panic(dev, "Invalid indirect buffer table");
                goto err;
            }
            num_bufs = i = 0;
        }

        do {
            /* If we've got too many, that implies a descriptor loop. */
            if (++num_bufs > max) {
                vu_panic(dev, "Looped descriptor");
                goto err;
            }

            if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
                in_total += le32toh(desc[i].len);
            } else {
                out_total += le32toh(desc[i].len);
            }
            if (in_total >= max_in_bytes && out_total >= max_out_bytes) {
                goto done;
            }
            rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
        } while (rc == VIRTQUEUE_READ_DESC_MORE);

        if (rc == VIRTQUEUE_READ_DESC_ERROR) {
            goto err;
        }

        if (!indirect) {
            total_bufs = num_bufs;
        } else {
            total_bufs++;
        }
    }
    if (rc < 0) {
        goto err;
    }
done:
    if (in_bytes) {
        *in_bytes = in_total;
    }
    if (out_bytes) {
        *out_bytes = out_total;
    }
    return;

err:
    in_total = out_total = 0;
    goto done;
}
bool
vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                     unsigned int out_bytes)
{
    unsigned int in_total, out_total;

    vu_queue_get_avail_bytes(dev, vq, &in_total, &out_total,
                             in_bytes, out_bytes);

    return in_bytes <= in_total && out_bytes <= out_total;
}
/* Fetch avail_idx from VQ memory only when we really need to know if
 * guest has added some buffers. */
bool
vu_queue_empty(VuDev *dev, VuVirtq *vq)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return true;
    }

    if (vq->shadow_avail_idx != vq->last_avail_idx) {
        return false;
    }

    return vring_avail_idx(vq) == vq->last_avail_idx;
}
static bool
vring_notify(VuDev *dev, VuVirtq *vq)
{
    uint16_t old, new;
    bool v;

    /* We need to expose used array entries before checking used event. */
    smp_mb();

    /* Always notify when queue is empty (when feature acknowledge) */
    if (vu_has_feature(dev, VIRTIO_F_NOTIFY_ON_EMPTY) &&
        !vq->inuse && vu_queue_empty(dev, vq)) {
        return true;
    }

    if (!vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        return !(vring_avail_flags(vq) & VRING_AVAIL_F_NO_INTERRUPT);
    }

    v = vq->signalled_used_valid;
    vq->signalled_used_valid = true;
    old = vq->signalled_used;
    new = vq->signalled_used = vq->used_idx;
    return !v || vring_need_event(vring_get_used_event(vq), new, old);
}
static void _vu_queue_notify(VuDev *dev, VuVirtq *vq, bool sync)
{
    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    if (!vring_notify(dev, vq)) {
        DPRINT("skipped notify...\n");
        return;
    }

    if (vq->call_fd < 0 &&
        vu_has_protocol_feature(dev,
                                VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS) &&
        vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_BACKEND_REQ)) {
        VhostUserMsg vmsg = {
            .request = VHOST_USER_BACKEND_VRING_CALL,
            .flags = VHOST_USER_VERSION,
            .size = sizeof(vmsg.payload.state),
            .payload.state = {
                .index = vq - dev->vq,
            },
        };
        bool ack = sync &&
                   vu_has_protocol_feature(dev,
                                           VHOST_USER_PROTOCOL_F_REPLY_ACK);

        if (ack) {
            vmsg.flags |= VHOST_USER_NEED_REPLY_MASK;
        }

        vu_message_write(dev, dev->backend_fd, &vmsg);
        if (ack) {
            vu_message_read_default(dev, dev->backend_fd, &vmsg);
        }
        return;
    }

    if (eventfd_write(vq->call_fd, 1) < 0) {
        vu_panic(dev, "Error writing eventfd: %s", strerror(errno));
    }
}
void vu_queue_notify(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, false);
}

void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq)
{
    _vu_queue_notify(dev, vq, true);
}
void vu_config_change_msg(VuDev *dev)
{
    VhostUserMsg vmsg = {
        .request = VHOST_USER_BACKEND_CONFIG_CHANGE_MSG,
        .flags = VHOST_USER_VERSION,
    };

    vu_message_write(dev, dev->backend_fd, &vmsg);
}
static inline void
vring_used_flags_set_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) | mask);
}

static inline void
vring_used_flags_unset_bit(VuVirtq *vq, int mask)
{
    uint16_t *flags;

    flags = (uint16_t *)((char*)vq->vring.used +
                         offsetof(struct vring_used, flags));
    *flags = htole16(le16toh(*flags) & ~mask);
}

static inline void
vring_set_avail_event(VuVirtq *vq, uint16_t val)
{
    uint16_t val_le = htole16(val);

    if (!vq->notification) {
        return;
    }

    memcpy(&vq->vring.used->ring[vq->vring.num], &val_le, sizeof(uint16_t));
}
void
vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable)
{
    vq->notification = enable;
    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vring_avail_idx(vq));
    } else if (enable) {
        vring_used_flags_unset_bit(vq, VRING_USED_F_NO_NOTIFY);
    } else {
        vring_used_flags_set_bit(vq, VRING_USED_F_NO_NOTIFY);
    }

    if (enable) {
        /* Expose avail event/used flags before caller checks the avail idx. */
        smp_mb();
    }
}
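/*
 * Illustrative usage sketch: one common pattern is to keep guest
 * notifications disabled while draining the ring, so the backend is not
 * kicked for every buffer, and to re-enable them (then re-check) once the
 * ring appears empty. process() below is a hypothetical per-device handler:
 *
 *     vu_queue_set_notification(dev, vq, 0);
 *     while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *         process(elem);
 *         free(elem);
 *     }
 *     vu_queue_set_notification(dev, vq, 1);
 *     if (!vu_queue_empty(dev, vq)) {
 *         ... buffers arrived while re-enabling; drain again ...
 *     }
 */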
static bool
virtqueue_map_desc(VuDev *dev,
                   unsigned int *p_num_sg, struct iovec *iov,
                   unsigned int max_num_sg, bool is_write,
                   uint64_t pa, size_t sz)
{
    unsigned num_sg = *p_num_sg;

    assert(num_sg <= max_num_sg);

    if (!sz) {
        vu_panic(dev, "virtio: zero sized buffers are not allowed");
        return false;
    }

    while (sz) {
        uint64_t len = sz;

        if (num_sg == max_num_sg) {
            vu_panic(dev, "virtio: too many descriptors in indirect table");
            return false;
        }

        iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
        if (iov[num_sg].iov_base == NULL) {
            vu_panic(dev, "virtio: invalid address for buffers");
            return false;
        }
        iov[num_sg].iov_len = len;
        num_sg++;
        sz -= len;
        pa += len;
    }

    *p_num_sg = num_sg;
    return true;
}
static void *
virtqueue_alloc_element(size_t sz,
                        unsigned out_num, unsigned in_num)
{
    VuVirtqElement *elem;
    size_t in_sg_ofs = ALIGN_UP(sz, __alignof__(elem->in_sg[0]));
    size_t out_sg_ofs = in_sg_ofs + in_num * sizeof(elem->in_sg[0]);
    size_t out_sg_end = out_sg_ofs + out_num * sizeof(elem->out_sg[0]);

    assert(sz >= sizeof(VuVirtqElement));
    elem = malloc(out_sg_end);
    if (!elem) {
        DPRINT("%s: failed to malloc virtqueue element\n", __func__);
        return NULL;
    }
    elem->out_num = out_num;
    elem->in_num = in_num;
    elem->in_sg = (void *)elem + in_sg_ofs;
    elem->out_sg = (void *)elem + out_sg_ofs;
    return elem;
}
static void *
vu_queue_map_desc(VuDev *dev, VuVirtq *vq, unsigned int idx, size_t sz)
{
    struct vring_desc *desc = vq->vring.desc;
    uint64_t desc_addr, read_len;
    unsigned int desc_len;
    unsigned int max = vq->vring.num;
    unsigned int i = idx;
    VuVirtqElement *elem;
    unsigned int out_num = 0, in_num = 0;
    struct iovec iov[VIRTQUEUE_MAX_SIZE];
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    int rc;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return NULL;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return NULL;
        }
        i = 0;
    }

    /* Collect all the descriptors */
    do {
        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            if (!virtqueue_map_desc(dev, &in_num, iov + out_num,
                                    VIRTQUEUE_MAX_SIZE - out_num, true,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        } else {
            if (in_num) {
                vu_panic(dev, "Incorrect order for descriptors");
                return NULL;
            }
            if (!virtqueue_map_desc(dev, &out_num, iov,
                                    VIRTQUEUE_MAX_SIZE, false,
                                    le64toh(desc[i].addr),
                                    le32toh(desc[i].len))) {
                return NULL;
            }
        }

        /* If we've got too many, that implies a descriptor loop. */
        if ((in_num + out_num) > max) {
            vu_panic(dev, "Looped descriptor");
            return NULL;
        }
        rc = virtqueue_read_next_desc(dev, desc, i, max, &i);
    } while (rc == VIRTQUEUE_READ_DESC_MORE);

    if (rc == VIRTQUEUE_READ_DESC_ERROR) {
        vu_panic(dev, "read descriptor error");
        return NULL;
    }

    /* Now copy what we have collected and mapped */
    elem = virtqueue_alloc_element(sz, out_num, in_num);
    if (!elem) {
        return NULL;
    }
    elem->index = idx;
    for (i = 0; i < out_num; i++) {
        elem->out_sg[i] = iov[i];
    }
    for (i = 0; i < in_num; i++) {
        elem->in_sg[i] = iov[out_num + i];
    }

    return elem;
}
static int
vu_queue_inflight_get(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->desc[desc_idx].counter = vq->counter++;
    vq->inflight->desc[desc_idx].inflight = 1;

    return 0;
}
static int
vu_queue_inflight_pre_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    vq->inflight->last_batch_head = desc_idx;

    return 0;
}
static int
vu_queue_inflight_post_put(VuDev *dev, VuVirtq *vq, int desc_idx)
{
    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)) {
        return 0;
    }

    if (unlikely(!vq->inflight)) {
        return -1;
    }

    barrier();

    vq->inflight->desc[desc_idx].inflight = 0;

    barrier();

    vq->inflight->used_idx = vq->used_idx;

    return 0;
}
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
    int i;
    unsigned int head;
    VuVirtqElement *elem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return NULL;
    }

    if (unlikely(vq->resubmit_list && vq->resubmit_num > 0)) {
        i = (--vq->resubmit_num);
        elem = vu_queue_map_desc(dev, vq, vq->resubmit_list[i].index, sz);

        if (!vq->resubmit_num) {
            free(vq->resubmit_list);
            vq->resubmit_list = NULL;
        }

        return elem;
    }

    if (vu_queue_empty(dev, vq)) {
        return NULL;
    }
    /*
     * Needed after virtio_queue_empty(), see comment in
     * virtqueue_num_heads().
     */
    smp_rmb();

    if (vq->inuse >= vq->vring.num) {
        vu_panic(dev, "Virtqueue size exceeded");
        return NULL;
    }

    if (!virtqueue_get_head(dev, vq, vq->last_avail_idx++, &head)) {
        return NULL;
    }

    if (vu_has_feature(dev, VIRTIO_RING_F_EVENT_IDX)) {
        vring_set_avail_event(vq, vq->last_avail_idx);
    }

    elem = vu_queue_map_desc(dev, vq, head, sz);

    if (!elem) {
        return NULL;
    }

    vq->inuse++;

    vu_queue_inflight_get(dev, vq, head);

    return elem;
}
void
vu_queue_detach_element(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                        size_t len)
{
    vq->inuse--;
    /* unmap, when DMA support is added */
}

void
vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
               size_t len)
{
    vq->last_avail_idx--;
    vu_queue_detach_element(dev, vq, elem, len);
}
bool
vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num)
{
    if (num > vq->inuse) {
        return false;
    }
    vq->last_avail_idx -= num;
    vq->inuse -= num;
    return true;
}
static inline
void vring_used_write(VuDev *dev, VuVirtq *vq,
                      struct vring_used_elem *uelem, int i)
{
    struct vring_used *used = vq->vring.used;

    used->ring[i] = *uelem;
    vu_log_write(dev, vq->vring.log_guest_addr +
                 offsetof(struct vring_used, ring[i]),
                 sizeof(used->ring[i]));
}
static void
vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
                  const VuVirtqElement *elem,
                  unsigned int len)
{
    struct vring_desc *desc = vq->vring.desc;
    unsigned int i, max, min, desc_len;
    uint64_t desc_addr, read_len;
    struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
    unsigned num_bufs = 0;

    max = vq->vring.num;
    i = elem->index;

    if (le16toh(desc[i].flags) & VRING_DESC_F_INDIRECT) {
        if (le32toh(desc[i].len) % sizeof(struct vring_desc)) {
            vu_panic(dev, "Invalid size for indirect buffer table");
            return;
        }

        /* loop over the indirect descriptor table */
        desc_addr = le64toh(desc[i].addr);
        desc_len = le32toh(desc[i].len);
        max = desc_len / sizeof(struct vring_desc);
        read_len = desc_len;
        desc = vu_gpa_to_va(dev, &read_len, desc_addr);
        if (unlikely(desc && read_len != desc_len)) {
            /* Failed to use zero copy */
            desc = NULL;
            if (!virtqueue_read_indirect_desc(dev, desc_buf,
                                              desc_addr,
                                              desc_len)) {
                desc = desc_buf;
            }
        }
        if (!desc) {
            vu_panic(dev, "Invalid indirect buffer table");
            return;
        }
        i = 0;
    }

    do {
        if (++num_bufs > max) {
            vu_panic(dev, "Looped descriptor");
            return;
        }

        if (le16toh(desc[i].flags) & VRING_DESC_F_WRITE) {
            min = MIN(le32toh(desc[i].len), len);
            vu_log_write(dev, le64toh(desc[i].addr), min);
            len -= min;
        }
    } while (len > 0 &&
             (virtqueue_read_next_desc(dev, desc, i, max, &i)
              == VIRTQUEUE_READ_DESC_MORE));
}
void
vu_queue_fill(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem,
              unsigned int len, unsigned int idx)
{
    struct vring_used_elem uelem;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    vu_log_queue_fill(dev, vq, elem, len);

    idx = (idx + vq->used_idx) % vq->vring.num;

    uelem.id = htole32(elem->index);
    uelem.len = htole32(len);
    vring_used_write(dev, vq, &uelem, idx);
}
static inline
void vring_used_idx_set(VuDev *dev, VuVirtq *vq, uint16_t val)
{
    vq->vring.used->idx = htole16(val);
    vu_log_write(dev,
                 vq->vring.log_guest_addr + offsetof(struct vring_used, idx),
                 sizeof(vq->vring.used->idx));

    vq->used_idx = val;
}
void
vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int count)
{
    uint16_t old, new;

    if (unlikely(dev->broken) ||
        unlikely(!vq->vring.avail)) {
        return;
    }

    /* Make sure buffer is written before we update index. */
    smp_wmb();

    old = vq->used_idx;
    new = old + count;
    vring_used_idx_set(dev, vq, new);
    vq->inuse -= count;
    if (unlikely((int16_t)(new - vq->signalled_used) < (uint16_t)(new - old))) {
        vq->signalled_used_valid = false;
    }
}
void
vu_queue_push(VuDev *dev, VuVirtq *vq,
              const VuVirtqElement *elem, unsigned int len)
{
    vu_queue_fill(dev, vq, elem, len, 0);
    vu_queue_inflight_pre_put(dev, vq, elem->index);
    vu_queue_flush(dev, vq, 1);
    vu_queue_inflight_post_put(dev, vq, elem->index);
}
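/*
 * Illustrative sketch of a typical request loop in a device backend's
 * queue handler; process_request() is a hypothetical function that
 * consumes elem->out_sg, writes the reply into elem->in_sg and returns
 * the number of bytes written:
 *
 *     static void handle_queue(VuDev *dev, int qidx)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             unsigned int written = process_request(elem);
 *             vu_queue_push(dev, vq, elem, written);
 *             free(elem);
 *         }
 *         vu_queue_notify(dev, vq);
 *     }
 */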