#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <stdbool.h>
#include <unistd.h>
#include <sys/mman.h>
+#include <asm/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <assert.h>
#include "vhost.h"
#include "vhost_user.h"
+#define VIRTIO_MIN_MTU 68
+#define VIRTIO_MAX_MTU 65535
+
static const char *vhost_message_str[VHOST_USER_MAX] = {
[VHOST_USER_NONE] = "VHOST_USER_NONE",
[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
[VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
[VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
[VHOST_USER_SEND_RARP] = "VHOST_USER_SEND_RARP",
+ [VHOST_USER_NET_SET_MTU] = "VHOST_USER_NET_SET_MTU",
+ [VHOST_USER_GET_CONFIG] = "VHOST_USER_GET_CONFIG",
+ [VHOST_USER_SET_CONFIG] = "VHOST_USER_SET_CONFIG",
+ [VHOST_USER_NVME_ADMIN] = "VHOST_USER_NVME_ADMIN",
+ [VHOST_USER_NVME_SET_CQ_CALL] = "VHOST_USER_NVME_SET_CQ_CALL",
+ [VHOST_USER_NVME_GET_CAP] = "VHOST_USER_NVME_GET_CAP",
+ [VHOST_USER_NVME_START_STOP] = "VHOST_USER_NVME_START_STOP",
+ [VHOST_USER_NVME_IO_CMD] = "VHOST_USER_NVME_IO_CMD"
};
static void
free_mem_region(struct virtio_net *dev)
{
uint32_t i;
- struct virtio_memory_region *reg;
+ struct rte_vhost_mem_region *reg;
if (!dev || !dev->mem)
return;
void
vhost_backend_cleanup(struct virtio_net *dev)
{
+ uint32_t i;
+
if (dev->mem) {
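+		/* Close region fds that were received but never mapped. */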
+ if (dev->has_new_mem_table) {
+ for (i = 0; i < dev->mem->nregions; i++) {
+ close(dev->mem_table_fds[i]);
+ }
+ dev->has_new_mem_table = 0;
+ }
free_mem_region(dev);
rte_free(dev->mem);
dev->mem = NULL;
}
+
+ free(dev->guest_pages);
+ dev->guest_pages = NULL;
+
if (dev->log_addr) {
munmap((void *)(uintptr_t)dev->log_addr, dev->log_size);
dev->log_addr = 0;
{
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev->vid);
+ dev->notify_ops->destroy_device(dev->vid);
}
cleanup_device(dev, 0);
* The features that we support are requested.
*/
static uint64_t
-vhost_user_get_features(void)
+vhost_user_get_features(struct virtio_net *dev)
{
- return VHOST_FEATURES;
+ return dev->features;
}
/*
static int
vhost_user_set_features(struct virtio_net *dev, uint64_t features)
{
- if (features & ~VHOST_FEATURES)
+ uint64_t vhost_features = 0;
+
+ vhost_features = vhost_user_get_features(dev);
+ if (features & ~vhost_features) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "(%d) received invalid negotiated features.\n",
+ dev->vid);
return -1;
+ }
+
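+	/* Renegotiation on a running device: let the driver adapt via
+	 * features_changed(), or restart the device if no such callback exists.
+	 */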
+ if ((dev->flags & VIRTIO_DEV_RUNNING) && dev->negotiated_features != features) {
+ if (dev->notify_ops->features_changed) {
+ dev->notify_ops->features_changed(dev->vid, features);
+ } else {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
+ }
- dev->features = features;
- if (dev->features &
+ dev->negotiated_features = features;
+ if (dev->negotiated_features &
((1 << VIRTIO_NET_F_MRG_RXBUF) | (1ULL << VIRTIO_F_VERSION_1))) {
dev->vhost_hlen = sizeof(struct virtio_net_hdr_mrg_rxbuf);
} else {
dev->vhost_hlen = sizeof(struct virtio_net_hdr);
}
- LOG_DEBUG(VHOST_CONFIG,
+ VHOST_LOG_DEBUG(VHOST_CONFIG,
"(%d) mergeable RX buffers %s, virtio 1 %s\n",
dev->vid,
- (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
- (dev->features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
+ (dev->negotiated_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ? "on" : "off",
+ (dev->negotiated_features & (1ULL << VIRTIO_F_VERSION_1)) ? "on" : "off");
return 0;
}
struct vhost_virtqueue *old_vq, *vq;
int ret;
- /*
- * vq is allocated on pairs, we should try to do realloc
- * on first queue of one queue pair only.
- */
- if (index % VIRTIO_QNUM != 0)
- return dev;
-
old_dev = dev;
vq = old_vq = dev->virtqueue[index];
if (oldnode != newnode) {
RTE_LOG(INFO, VHOST_CONFIG,
"reallocate vq from %d to %d node\n", oldnode, newnode);
- vq = rte_malloc_socket(NULL, sizeof(*vq) * VIRTIO_QNUM, 0,
- newnode);
+ vq = rte_malloc_socket(NULL, sizeof(*vq), 0, newnode);
if (!vq)
return dev;
- memcpy(vq, old_vq, sizeof(*vq) * VIRTIO_QNUM);
+ memcpy(vq, old_vq, sizeof(*vq));
rte_free(old_vq);
}
out:
dev->virtqueue[index] = vq;
- dev->virtqueue[index + 1] = vq + 1;
vhost_devices[dev->vid] = dev;
return dev;
* used to convert the ring addresses to our address space.
*/
static uint64_t
-qva_to_vva(struct virtio_net *dev, uint64_t qva)
+qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
{
- struct virtio_memory_region *reg;
+ struct rte_vhost_mem_region *reg;
uint32_t i;
/* Find the region where the address lives. */
if (qva >= reg->guest_user_addr &&
qva < reg->guest_user_addr + reg->size) {
+
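+			/* Clamp *len so the returned mapping never extends
+			 * past the end of this region.
+			 */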
+ if (unlikely(*len > reg->guest_user_addr + reg->size - qva))
+ *len = reg->guest_user_addr + reg->size - qva;
+
return qva - reg->guest_user_addr +
reg->host_user_addr;
}
vhost_user_set_vring_addr(struct virtio_net *dev, VhostUserMsg *msg)
{
struct vhost_virtqueue *vq;
+ uint64_t len;
+
+ /* Remove from the data plane. */
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
if (dev->has_new_mem_table) {
vhost_setup_mem_table(dev);
dev->has_new_mem_table = 0;
}
-
if (dev->mem == NULL)
return -1;
vq = dev->virtqueue[msg->payload.addr.index];
/* The addresses are converted from QEMU virtual to Vhost virtual. */
+ len = sizeof(struct vring_desc) * vq->size;
vq->desc = (struct vring_desc *)(uintptr_t)qva_to_vva(dev,
- msg->payload.addr.desc_user_addr);
- if (vq->desc == 0) {
+ msg->payload.addr.desc_user_addr, &len);
+ if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
RTE_LOG(ERR, VHOST_CONFIG,
- "(%d) failed to find desc ring address.\n",
+ "(%d) failed to map desc ring.\n",
dev->vid);
return -1;
}
dev = numa_realloc(dev, msg->payload.addr.index);
vq = dev->virtqueue[msg->payload.addr.index];
+ len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
vq->avail = (struct vring_avail *)(uintptr_t)qva_to_vva(dev,
- msg->payload.addr.avail_user_addr);
- if (vq->avail == 0) {
+ msg->payload.addr.avail_user_addr, &len);
+ if (vq->avail == 0 ||
+ len != sizeof(struct vring_avail)
+ + sizeof(uint16_t) * vq->size) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find avail ring address.\n",
dev->vid);
return -1;
}
+ len = sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size;
vq->used = (struct vring_used *)(uintptr_t)qva_to_vva(dev,
- msg->payload.addr.used_user_addr);
- if (vq->used == 0) {
+ msg->payload.addr.used_user_addr, &len);
+ if (vq->used == 0 || len != sizeof(struct vring_used) +
+ sizeof(struct vring_used_elem) * vq->size) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to find used ring address.\n",
dev->vid);
vq->log_guest_addr = msg->payload.addr.log_guest_addr;
- LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address desc: %p\n",
dev->vid, vq->desc);
- LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address avail: %p\n",
dev->vid, vq->avail);
- LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) mapped address used: %p\n",
dev->vid, vq->used);
- LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
+ VHOST_LOG_DEBUG(VHOST_CONFIG, "(%d) log_guest_addr: %" PRIx64 "\n",
dev->vid, vq->log_guest_addr);
return 0;
vhost_user_set_vring_base(struct virtio_net *dev,
VhostUserMsg *msg)
{
+ /* Remove from the data plane. */
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
+
dev->virtqueue[msg->payload.state.index]->last_used_idx = msg->payload.state.num;
dev->virtqueue[msg->payload.state.index]->last_avail_idx = msg->payload.state.num;
{
struct guest_page *page, *last_page;
- if (dev->nr_guest_pages == dev->max_guest_pages &&
- dev->nr_guest_pages > 0) {
- dev->max_guest_pages *= 2;
+ if (dev->nr_guest_pages == dev->max_guest_pages) {
+ dev->max_guest_pages = RTE_MAX(8U, dev->max_guest_pages * 2);
dev->guest_pages = realloc(dev->guest_pages,
dev->max_guest_pages * sizeof(*page));
		if (!dev->guest_pages) {
			RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
			abort();
		}
}
if (dev->nr_guest_pages > 0) {
}
static void
-add_guest_pages(struct virtio_net *dev, struct virtio_memory_region *reg,
+add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
uint64_t page_size)
{
uint64_t reg_size = reg->size;
reg_size -= size;
while (reg_size > 0) {
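+		/* The tail of the region may be shorter than a full page. */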
+ size = RTE_MIN(reg_size, page_size);
host_phys_addr = rte_mem_virt2phy((void *)(uintptr_t)
host_user_addr);
- add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
- page_size);
+ add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
- host_user_addr += page_size;
- guest_phys_addr += page_size;
- reg_size -= page_size;
+ host_user_addr += size;
+ guest_phys_addr += size;
+ reg_size -= size;
}
}
memcpy(&dev->mem_table, &pmsg->payload.memory, sizeof(dev->mem_table));
memcpy(dev->mem_table_fds, pmsg->fds, sizeof(dev->mem_table_fds));
dev->has_new_mem_table = 1;
+	/* vhost-user-nvme does not send a SET_VRING_ADDR message,
+	 * so apply the new memory table right away.
+	 */
+ if (dev->has_new_mem_table && dev->is_nvme) {
+ vhost_setup_mem_table(dev);
+ dev->has_new_mem_table = 0;
+ }
return 0;
}
static int
vhost_setup_mem_table(struct virtio_net *dev)
{
struct VhostUserMemory memory = dev->mem_table;
- struct virtio_memory_region *reg;
+ struct rte_vhost_mem_region *reg;
+ struct vhost_virtqueue *vq;
void *mmap_addr;
uint64_t mmap_size;
uint64_t mmap_offset;
dev->mem = NULL;
}
+ for (i = 0; i < dev->nr_vring; i++) {
+ vq = dev->virtqueue[i];
+		/* These addresses are no longer valid in the host address
+		 * space once the new memory table is applied; the initiator
+		 * must resend them.
+		 */
+ vq->desc = NULL;
+ vq->avail = NULL;
+ vq->used = NULL;
+ }
+
dev->nr_guest_pages = 0;
if (!dev->guest_pages) {
dev->max_guest_pages = 8;
sizeof(struct guest_page));
}
- dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct virtio_memory) +
- sizeof(struct virtio_memory_region) * memory.nregions, 0);
+ dev->mem = rte_zmalloc("vhost-mem-table", sizeof(struct rte_vhost_memory) +
+ sizeof(struct rte_vhost_mem_region) * memory.nregions, 0);
if (dev->mem == NULL) {
RTE_LOG(ERR, VHOST_CONFIG,
"(%d) failed to allocate memory for dev->mem\n",
goto err_mmap;
}
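+		/* Guest memory regions can be huge; keep them out of core dumps. */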
+ if (madvise(mmap_addr, mmap_size, MADV_DONTDUMP) != 0) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "MADV_DONTDUMP advice setting failed.\n");
+ }
+
reg->mmap_addr = mmap_addr;
reg->mmap_size = mmap_size;
reg->host_user_addr = (uint64_t)(uintptr_t)mmap_addr +
mmap_offset;
- add_guest_pages(dev, reg, alignment);
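+		/* The GPA -> HPA page map is only needed for dequeue zero-copy. */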
+ if (dev->dequeue_zero_copy)
+ add_guest_pages(dev, reg, alignment);
RTE_LOG(INFO, VHOST_CONFIG,
"guest memory region %u, size: 0x%" PRIx64 "\n"
{
return vq && vq->desc &&
vq->kickfd != VIRTIO_UNINITIALIZED_EVENTFD &&
- vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD;
+ vq->callfd != VIRTIO_UNINITIALIZED_EVENTFD &&
+ vq->kickfd != VIRTIO_INVALID_EVENTFD &&
+ vq->callfd != VIRTIO_INVALID_EVENTFD;
}
static int
struct vhost_virtqueue *vq;
uint32_t i;
- for (i = 0; i < dev->num_queues; i++) {
+ if (dev->nr_vring == 0)
+ return 0;
+
+ for (i = 0; i < dev->nr_vring; i++) {
vq = dev->virtqueue[i];
-		if (!vq_is_ready(vq)) {
-			RTE_LOG(INFO, VHOST_CONFIG,
-				"virtio is not ready for processing.\n");
+		if (!vq_is_ready(vq))
			return 0;
-		}
	}

	RTE_LOG(INFO, VHOST_CONFIG,
		"virtio is now ready for processing.\n");
	return 1;
}
static void
{
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
- uint32_t cur_qp_idx;
+
+ /* Remove from the data plane. */
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
RTE_LOG(INFO, VHOST_CONFIG,
"vring call idx:%d file:%d\n", file.index, file.fd);
- if (file.index + 1 > dev->num_queues) {
- dev->num_queues = file.index + 1;
- }
-
- /*
- * FIXME: VHOST_SET_VRING_CALL is the first per-vring message
- * we get, so we do vring queue pair allocation here.
- */
- cur_qp_idx = file.index / VIRTIO_QNUM;
- if (cur_qp_idx + 1 > dev->virt_qp_nb) {
- if (alloc_vring_queue_pair(dev, cur_qp_idx) < 0)
- return;
- }
-
vq = dev->virtqueue[file.index];
- assert(vq != NULL);
-
if (vq->callfd >= 0)
close(vq->callfd);
vq->callfd = file.fd;
-
- if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
- notify_ops->new_device(dev->vid);
- }
}
-/*
- * In vhost-user, when we receive kick message, will test whether virtio
- * device is ready for packet processing.
- */
static void
vhost_user_set_vring_kick(struct virtio_net *dev, struct VhostUserMsg *pmsg)
{
struct vhost_vring_file file;
struct vhost_virtqueue *vq;
+ /* Remove from the data plane. */
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
+
file.index = pmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
if (pmsg->payload.u64 & VHOST_USER_VRING_NOFD_MASK)
file.fd = VIRTIO_INVALID_EVENTFD;
if (vq->kickfd >= 0)
close(vq->kickfd);
vq->kickfd = file.fd;
-
- if (virtio_is_ready(dev) && !(dev->flags & VIRTIO_DEV_RUNNING)) {
- if (dev->dequeue_zero_copy) {
- RTE_LOG(INFO, VHOST_CONFIG,
- "dequeue zero copy is enabled\n");
- }
-
- if (notify_ops->new_device(dev->vid) == 0)
- dev->flags |= VIRTIO_DEV_RUNNING;
- }
}
static void
/* We have to stop the queue (virtio) if it is running. */
if (dev->flags & VIRTIO_DEV_RUNNING) {
dev->flags &= ~VIRTIO_DEV_RUNNING;
- notify_ops->destroy_device(dev->vid);
+ dev->notify_ops->destroy_device(dev->vid);
}
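+	/* The rings must be set up again before the device can run. */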
+ dev->flags &= ~VIRTIO_DEV_READY;
+
/* Here we are safe to get the last used index */
msg->payload.state.num = vq->last_used_idx;
close(vq->kickfd);
vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD;
+
+ if (vq->callfd >= 0)
+ close(vq->callfd);
+
vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD;
if (dev->dequeue_zero_copy)
"set queue enable: %d to qp idx: %d\n",
enable, msg->payload.state.index);
- if (notify_ops->vring_state_changed)
- notify_ops->vring_state_changed(dev->vid, msg->payload.state.index, enable);
+ if (dev->notify_ops->vring_state_changed)
+ dev->notify_ops->vring_state_changed(dev->vid, msg->payload.state.index, enable);
dev->virtqueue[msg->payload.state.index]->enabled = enable;
if (protocol_features & ~VHOST_USER_PROTOCOL_FEATURES)
return;
+ /* Remove from the data plane. */
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
+
dev->protocol_features = protocol_features;
}
return -1;
}
+ /* Remove from the data plane. */
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
+
size = msg->payload.log.mmap_size;
off = msg->payload.log.mmap_offset;
RTE_LOG(INFO, VHOST_CONFIG,
 * mmap from 0 to work around a hugepage mmap bug: mmap will
* fail when offset is not page size aligned.
*/
- addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
close(fd);
if (addr == MAP_FAILED) {
RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n");
return 0;
}
+static int
+vhost_user_net_set_mtu(struct virtio_net *dev, struct VhostUserMsg *msg)
+{
+ if (msg->payload.u64 < VIRTIO_MIN_MTU ||
+ msg->payload.u64 > VIRTIO_MAX_MTU) {
+ RTE_LOG(ERR, VHOST_CONFIG, "Invalid MTU size (%"PRIu64")\n",
+ msg->payload.u64);
+
+ return -1;
+ }
+
+ dev->mtu = msg->payload.u64;
+
+ return 0;
+}
+
/* return bytes# of read on success or negative val on failure. */
static int
read_vhost_message(int sockfd, struct VhostUserMsg *msg)
return 0;
msg->flags &= ~VHOST_USER_VERSION_MASK;
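+	/* Never echo the sender's need-reply bit back in a reply. */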
+ msg->flags &= ~VHOST_USER_NEED_REPLY;
msg->flags |= VHOST_USER_VERSION;
msg->flags |= VHOST_USER_REPLY_MASK;
return ret;
}
+/*
+ * Allocate the vring referenced by this message, if it hasn't
+ * been allocated yet.
+ */
+static int
+vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg)
+{
+ uint16_t vring_idx;
+
+ switch (msg->request) {
+ case VHOST_USER_SET_VRING_KICK:
+ case VHOST_USER_SET_VRING_CALL:
+ case VHOST_USER_SET_VRING_ERR:
+ vring_idx = msg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ break;
+ case VHOST_USER_SET_VRING_NUM:
+ case VHOST_USER_SET_VRING_BASE:
+ case VHOST_USER_SET_VRING_ENABLE:
+ vring_idx = msg->payload.state.index;
+ break;
+ case VHOST_USER_SET_VRING_ADDR:
+ vring_idx = msg->payload.addr.index;
+ break;
+ default:
+ return 0;
+ }
+
+ if (vring_idx >= VHOST_MAX_VRING) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "invalid vring index: %u\n", vring_idx);
+ return -1;
+ }
+
+ if (dev->virtqueue[vring_idx])
+ return 0;
+
+ return alloc_vring_queue(dev, vring_idx);
+}
+
+static int
+vhost_user_nvme_io_request_passthrough(struct virtio_net *dev,
+ uint16_t qid, uint16_t tail_head,
+ bool is_submission_queue)
+{
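+	/* Socket-based I/O doorbell passthrough is not supported; always fail. */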
+ return -1;
+}
+
+static int
+vhost_user_nvme_admin_passthrough(struct virtio_net *dev,
+ void *cmd, void *cqe, void *buf)
+{
+ if (dev->notify_ops->vhost_nvme_admin_passthrough) {
+ return dev->notify_ops->vhost_nvme_admin_passthrough(dev->vid, cmd, cqe, buf);
+ }
+
+ return -1;
+}
+
+static int
+vhost_user_nvme_set_cq_call(struct virtio_net *dev, uint16_t qid, int fd)
+{
+ if (dev->notify_ops->vhost_nvme_set_cq_call) {
+ return dev->notify_ops->vhost_nvme_set_cq_call(dev->vid, qid, fd);
+ }
+
+ return -1;
+}
+
+static int
+vhost_user_nvme_get_cap(struct virtio_net *dev, uint64_t *cap)
+{
+ if (dev->notify_ops->vhost_nvme_get_cap) {
+ return dev->notify_ops->vhost_nvme_get_cap(dev->vid, cap);
+ }
+
+ return -1;
+}
+
int
vhost_user_msg_handler(int vid, int fd)
{
struct virtio_net *dev;
struct VhostUserMsg msg;
+ struct vhost_vring_file file;
int ret;
+ uint64_t cap;
+ uint64_t enable;
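+	/* Scratch space for NVMe admin passthrough: 16-byte completion,
+	 * 64-byte command, 4 KiB data buffer.
+	 */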
+ uint8_t cqe[16];
+ uint8_t cmd[64];
+ uint8_t buf[4096];
+ uint16_t qid, tail_head;
+ bool is_submission_queue;
dev = get_device(vid);
if (dev == NULL)
return -1;
+ if (!dev->notify_ops) {
+ dev->notify_ops = vhost_driver_callback_get(dev->ifname);
+ if (!dev->notify_ops) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to get callback ops for driver %s\n",
+ dev->ifname);
+ return -1;
+ }
+ }
+
ret = read_vhost_message(fd, &msg);
if (ret <= 0 || msg.request >= VHOST_USER_MAX) {
if (ret < 0)
return -1;
}
- RTE_LOG(INFO, VHOST_CONFIG, "read message %s\n",
- vhost_message_str[msg.request]);
+ RTE_LOG(INFO, VHOST_CONFIG, "%s: read message %s\n",
+ dev->ifname, vhost_message_str[msg.request]);
+
+ ret = vhost_user_check_and_alloc_queue_pair(dev, &msg);
+ if (ret < 0) {
+ RTE_LOG(ERR, VHOST_CONFIG,
+ "failed to alloc queue\n");
+ return -1;
+ }
+
switch (msg.request) {
+ case VHOST_USER_GET_CONFIG:
+		if (dev->notify_ops->get_config &&
+		    dev->notify_ops->get_config(dev->vid,
+			msg.payload.config.region,
+			msg.payload.config.size) != 0) {
+ msg.size = sizeof(uint64_t);
+ }
+ send_vhost_message(fd, &msg);
+ break;
+ case VHOST_USER_SET_CONFIG:
+		if (dev->notify_ops->set_config &&
+		    dev->notify_ops->set_config(dev->vid,
+			msg.payload.config.region,
+			msg.payload.config.offset,
+			msg.payload.config.size,
+			msg.payload.config.flags) == 0) {
+			ret = 0;
+		} else {
+			ret = 1;
+		}
+ break;
+ case VHOST_USER_NVME_ADMIN:
+		dev->is_nvme = 1;
+ memcpy(cmd, msg.payload.nvme.cmd.req, sizeof(cmd));
+ ret = vhost_user_nvme_admin_passthrough(dev, cmd, cqe, buf);
+ memcpy(msg.payload.nvme.cmd.cqe, cqe, sizeof(cqe));
+ msg.size = sizeof(cqe);
+		/* NVMe Identify command (opcode 0x06) returns a 4 KiB data buffer */
+ if (cmd[0] == 0x06) {
+			memcpy(msg.payload.nvme.buf, buf, sizeof(buf));
+ msg.size += 4096;
+ }
+ send_vhost_message(fd, &msg);
+ break;
+ case VHOST_USER_NVME_SET_CQ_CALL:
+ file.index = msg.payload.u64 & VHOST_USER_VRING_IDX_MASK;
+ file.fd = msg.fds[0];
+ ret = vhost_user_nvme_set_cq_call(dev, file.index, file.fd);
+ break;
+ case VHOST_USER_NVME_GET_CAP:
+ ret = vhost_user_nvme_get_cap(dev, &cap);
+ if (!ret)
+ msg.payload.u64 = cap;
+ else
+ msg.payload.u64 = 0;
+ msg.size = sizeof(msg.payload.u64);
+ send_vhost_message(fd, &msg);
+ break;
+ case VHOST_USER_NVME_START_STOP:
+ enable = msg.payload.u64;
+		/* the device must be started before the CQ call fd is set */
+ if (enable) {
+ if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
+ if (dev->notify_ops->new_device(dev->vid) == 0)
+ dev->flags |= VIRTIO_DEV_RUNNING;
+ }
+ } else {
+ if (dev->flags & VIRTIO_DEV_RUNNING) {
+ dev->flags &= ~VIRTIO_DEV_RUNNING;
+ dev->notify_ops->destroy_device(dev->vid);
+ }
+ }
+ break;
+ case VHOST_USER_NVME_IO_CMD:
+ qid = msg.payload.nvme_io.qid;
+ tail_head = msg.payload.nvme_io.tail_head;
+		is_submission_queue = (msg.payload.nvme_io.queue_type == VHOST_USER_NVME_SUBMISSION_QUEUE);
+ vhost_user_nvme_io_request_passthrough(dev, qid, tail_head, is_submission_queue);
+ break;
case VHOST_USER_GET_FEATURES:
- msg.payload.u64 = vhost_user_get_features();
+ msg.payload.u64 = vhost_user_get_features(dev);
msg.size = sizeof(msg.payload.u64);
send_vhost_message(fd, &msg);
break;
break;
case VHOST_USER_SET_MEM_TABLE:
- vhost_user_set_mem_table(dev, &msg);
+ ret = vhost_user_set_mem_table(dev, &msg);
break;
case VHOST_USER_SET_LOG_BASE:
vhost_user_send_rarp(dev, &msg);
break;
+ case VHOST_USER_NET_SET_MTU:
+ ret = vhost_user_net_set_mtu(dev, &msg);
+ break;
+
default:
+ ret = -1;
break;
}
+ if (msg.flags & VHOST_USER_NEED_REPLY) {
+ msg.payload.u64 = !!ret;
+ msg.size = sizeof(msg.payload.u64);
+ send_vhost_message(fd, &msg);
+ }
+
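+	/* Once every allocated vring is ready, mark the device ready and
+	 * hand it over to the driver.
+	 */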
+ if (!(dev->flags & VIRTIO_DEV_RUNNING) && virtio_is_ready(dev)) {
+ dev->flags |= VIRTIO_DEV_READY;
+
+ if (!(dev->flags & VIRTIO_DEV_RUNNING)) {
+ if (dev->dequeue_zero_copy) {
+ RTE_LOG(INFO, VHOST_CONFIG,
+ "dequeue zero copy is enabled\n");
+ }
+
+ if (dev->notify_ops->new_device(dev->vid) == 0)
+ dev->flags |= VIRTIO_DEV_RUNNING;
+ }
+ }
+
return 0;
}