Currently libvhost-user is hardcoded to at most 8 virtqueues. The
device backend should decide the number of virtqueues, not
libvhost-user. This is important for multiqueue device backends where
the guest driver needs an accurate number of virtqueues.
This change breaks libvhost-user and libvhost-user-glib API stability.
There is no stability guarantee yet, so make this change now and update
all in-tree library users.
This patch updates vhost-user-blk, vhost-user-gpu, vhost-user-input,
vhost-user-scsi, and vhost-user-bridge. If the device has a fixed
number of queues, that exact number is used. Otherwise the previous
default of 8 virtqueues is used.
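For example (summarizing the hunks below), vhost-user-gpu has exactly
two virtqueues (control and cursor), so it passes 2, while
vhost-user-bridge keeps the previous default:

    /* Fixed queue count vs. legacy default, per backend */
    enum { VHOST_USER_GPU_MAX_QUEUES    = 2 };
    enum { VHOST_USER_BRIDGE_MAX_QUEUES = 8 };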
vu_init() and vug_init() can now fail if malloc() returns NULL. I
considered aborting with an error inside libvhost-user, but it should be
possible to instantiate new vhost-user instances at runtime without
risking termination of the whole process. Callers therefore need to
handle vu_init() failure now.
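As an illustration only (MY_BACKEND_MAX_QUEUES, socket_fd, my_panic_cb,
and my_iface are placeholders for this sketch, not symbols added by the
patch), a glib-based backend is now expected to check the return value
instead of assuming success:

    VugDev dev;

    /* vug_init() may now fail, e.g. if allocating the virtqueue array fails */
    if (!vug_init(&dev, MY_BACKEND_MAX_QUEUES, socket_fd,
                  my_panic_cb, &my_iface)) {
        fprintf(stderr, "Failed to initialize libvhost-user-glib\n");
        return EXIT_FAILURE;
    }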
vhost-user-blk and vhost-user-scsi duplicate virtqueue index checks
that libvhost-user already performs. That code would need to be
modified to use max_queues, but instead remove it completely since it
is redundant.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
Message-Id: <20190626074815.19994-3-stefanha@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
}
}
-void
-vug_init(VugDev *dev, int socket,
+bool
+vug_init(VugDev *dev, uint16_t max_queues, int socket,
vu_panic_cb panic, const VuDevIface *iface)
{
g_assert(dev);
g_assert(iface);
- vu_init(&dev->parent, socket, panic, set_watch, remove_watch, iface);
+ if (!vu_init(&dev->parent, max_queues, socket, panic, set_watch,
+ remove_watch, iface)) {
+ return false;
+ }
+
dev->fdmap = g_hash_table_new_full(NULL, NULL, NULL,
(GDestroyNotify) g_source_destroy);
dev->src = vug_source_new(dev, socket, G_IO_IN, vug_watch, NULL);
+
+ return true;
}
void
GSource *src;
} VugDev;
-void vug_init(VugDev *dev, int socket,
+bool vug_init(VugDev *dev, uint16_t max_queues, int socket,
vu_panic_cb panic, const VuDevIface *iface);
void vug_deinit(VugDev *dev);
static void
vu_set_enable_all_rings(VuDev *dev, bool enabled)
{
- int i;
+ uint16_t i;
- for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+ for (i = 0; i < dev->max_queues; i++) {
dev->vq[i].enable = enabled;
}
}
{
int index = vmsg->payload.u64 & VHOST_USER_VRING_IDX_MASK;
- if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+ if (index >= dev->max_queues) {
vmsg_close_fds(vmsg);
vu_panic(dev, "Invalid queue index: %u", index);
return false;
DPRINT("State.index: %d\n", index);
DPRINT("State.enable: %d\n", enable);
- if (index >= VHOST_MAX_NR_VIRTQUEUE) {
+ if (index >= dev->max_queues) {
vu_panic(dev, "Invalid vring_enable index: %u", index);
return false;
}
}
dev->nregions = 0;
- for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+ for (i = 0; i < dev->max_queues; i++) {
VuVirtq *vq = &dev->vq[i];
if (vq->call_fd != -1) {
if (dev->sock != -1) {
close(dev->sock);
}
+
+ free(dev->vq);
+ dev->vq = NULL;
}
-void
+bool
vu_init(VuDev *dev,
+ uint16_t max_queues,
int socket,
vu_panic_cb panic,
vu_set_watch_cb set_watch,
vu_remove_watch_cb remove_watch,
const VuDevIface *iface)
{
- int i;
+ uint16_t i;
+ assert(max_queues > 0);
assert(socket >= 0);
assert(set_watch);
assert(remove_watch);
dev->iface = iface;
dev->log_call_fd = -1;
dev->slave_fd = -1;
- for (i = 0; i < VHOST_MAX_NR_VIRTQUEUE; i++) {
+ dev->max_queues = max_queues;
+
+ dev->vq = malloc(max_queues * sizeof(dev->vq[0]));
+ if (!dev->vq) {
+ DPRINT("%s: failed to malloc virtqueues\n", __func__);
+ return false;
+ }
+
+ for (i = 0; i < max_queues; i++) {
dev->vq[i] = (VuVirtq) {
.call_fd = -1, .kick_fd = -1, .err_fd = -1,
.notification = true,
};
}
+
+ return true;
}
VuVirtq *
vu_get_queue(VuDev *dev, int qidx)
{
- assert(qidx < VHOST_MAX_NR_VIRTQUEUE);
+ assert(qidx < dev->max_queues);
return &dev->vq[qidx];
}
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_LOG_PAGE 4096
-#define VHOST_MAX_NR_VIRTQUEUE 8
#define VIRTQUEUE_MAX_SIZE 1024
#define VHOST_MEMORY_MAX_NREGIONS 8
int sock;
uint32_t nregions;
VuDevRegion regions[VHOST_MEMORY_MAX_NREGIONS];
- VuVirtq vq[VHOST_MAX_NR_VIRTQUEUE];
+ VuVirtq *vq;
VuDevInflightInfo inflight_info;
int log_call_fd;
int slave_fd;
uint64_t features;
uint64_t protocol_features;
bool broken;
+ uint16_t max_queues;
/* @set_watch: add or update the given fd to the watch set,
* call cb when condition is met */
/**
* vu_init:
* @dev: a VuDev context
+ * @max_queues: maximum number of virtqueues
* @socket: the socket connected to vhost-user master
* @panic: a panic callback
* @set_watch: a set_watch callback
* @iface: a VuDevIface structure with vhost-user device callbacks
*
 * Initializes a VuDev vhost-user context.
+ *
+ * Returns: true on success, false on failure.
**/
-void vu_init(VuDev *dev,
+bool vu_init(VuDev *dev,
+ uint16_t max_queues,
int socket,
vu_panic_cb panic,
vu_set_watch_cb set_watch,
#include <sys/ioctl.h>
#endif
+enum {
+ VHOST_USER_BLK_MAX_QUEUES = 8,
+};
+
struct virtio_blk_inhdr {
unsigned char status;
};
VuVirtq *vq;
int ret;
- if ((idx < 0) || (idx >= VHOST_MAX_NR_VIRTQUEUE)) {
- fprintf(stderr, "VQ Index out of range: %d\n", idx);
- vub_panic_cb(vu_dev, NULL);
- return;
- }
-
gdev = container_of(vu_dev, VugDev, parent);
vdev_blk = container_of(gdev, VubDev, parent);
assert(vdev_blk);
vdev_blk->enable_ro = true;
}
- vug_init(&vdev_blk->parent, csock, vub_panic_cb, &vub_iface);
+ if (!vug_init(&vdev_blk->parent, VHOST_USER_BLK_MAX_QUEUES, csock,
+ vub_panic_cb, &vub_iface)) {
+ fprintf(stderr, "Failed to initialized libvhost-user-glib\n");
+ goto err;
+ }
g_main_loop_run(vdev_blk->loop);
#include "virgl.h"
#include "vugbm.h"
+enum {
+ VHOST_USER_GPU_MAX_QUEUES = 2,
+};
+
struct virtio_gpu_simple_resource {
uint32_t resource_id;
uint32_t width;
exit(EXIT_FAILURE);
}
- vug_init(&g.dev, fd, vg_panic, &vuiface);
+ if (!vug_init(&g.dev, VHOST_USER_GPU_MAX_QUEUES, fd, vg_panic, &vuiface)) {
+ g_printerr("Failed to initialize libvhost-user-glib.\n");
+ exit(EXIT_FAILURE);
+ }
loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(loop);
#include "standard-headers/linux/virtio_input.h"
#include "qapi/error.h"
+enum {
+ VHOST_USER_INPUT_MAX_QUEUES = 2,
+};
+
typedef struct virtio_input_event virtio_input_event;
typedef struct virtio_input_config virtio_input_config;
g_printerr("Invalid vhost-user socket.\n");
exit(EXIT_FAILURE);
}
- vug_init(&vi.dev, fd, vi_panic, &vuiface);
+
+ if (!vug_init(&vi.dev, VHOST_USER_INPUT_MAX_QUEUES, fd, vi_panic,
+ &vuiface)) {
+ g_printerr("Failed to initialize libvhost-user-glib.\n");
+ exit(EXIT_FAILURE);
+ }
loop = g_main_loop_new(NULL, FALSE);
g_main_loop_run(loop);
#define VUS_ISCSI_INITIATOR "iqn.2016-11.com.nutanix:vhost-user-scsi"
+enum {
+ VHOST_USER_SCSI_MAX_QUEUES = 8,
+};
+
typedef struct VusIscsiLun {
struct iscsi_context *iscsi_ctx;
int iscsi_lun;
gdev = container_of(vu_dev, VugDev, parent);
vdev_scsi = container_of(gdev, VusDev, parent);
- if (idx < 0 || idx >= VHOST_MAX_NR_VIRTQUEUE) {
- g_warning("VQ Index out of range: %d", idx);
- vus_panic_cb(vu_dev, NULL);
- return;
- }
vq = vu_get_queue(vu_dev, idx);
if (!vq) {
assert(vu_dev);
- if (idx < 0 || idx >= VHOST_MAX_NR_VIRTQUEUE) {
- g_warning("VQ Index out of range: %d", idx);
- vus_panic_cb(vu_dev, NULL);
- return;
- }
-
vq = vu_get_queue(vu_dev, idx);
if (idx == 0 || idx == 1) {
goto err;
}
- vug_init(&vdev_scsi->parent, csock, vus_panic_cb, &vus_iface);
+ if (!vug_init(&vdev_scsi->parent, VHOST_USER_SCSI_MAX_QUEUES, csock,
+ vus_panic_cb, &vus_iface)) {
+ g_printerr("Failed to initialize libvhost-user-glib\n");
+ goto err;
+ }
g_main_loop_run(vdev_scsi->loop);
} \
} while (0)
+enum {
+ VHOST_USER_BRIDGE_MAX_QUEUES = 8,
+};
+
typedef void (*CallbackFunc)(int sock, void *ctx);
typedef struct Event {
}
DPRINT("Got connection from remote peer on sock %d\n", conn_fd);
- vu_init(&dev->vudev,
- conn_fd,
- vubr_panic,
- vubr_set_watch,
- vubr_remove_watch,
- &vuiface);
+ if (!vu_init(&dev->vudev,
+ VHOST_USER_BRIDGE_MAX_QUEUES,
+ conn_fd,
+ vubr_panic,
+ vubr_set_watch,
+ vubr_remove_watch,
+ &vuiface)) {
+ fprintf(stderr, "Failed to initialize libvhost-user\n");
+ exit(1);
+ }
dispatcher_add(&dev->dispatcher, conn_fd, ctx, vubr_receive_cb);
dispatcher_remove(&dev->dispatcher, sock);
if (connect(dev->sock, (struct sockaddr *)&un, len) == -1) {
vubr_die("connect");
}
- vu_init(&dev->vudev,
- dev->sock,
- vubr_panic,
- vubr_set_watch,
- vubr_remove_watch,
- &vuiface);
+
+ if (!vu_init(&dev->vudev,
+ VHOST_USER_BRIDGE_MAX_QUEUES,
+ dev->sock,
+ vubr_panic,
+ vubr_set_watch,
+ vubr_remove_watch,
+ &vuiface)) {
+ fprintf(stderr, "Failed to initialize libvhost-user\n");
+ exit(1);
+ }
+
cb = vubr_receive_cb;
}
int qidx;
while (true) {
- for (qidx = 0; qidx < VHOST_MAX_NR_VIRTQUEUE; qidx++) {
+ for (qidx = 0; qidx < VHOST_USER_BRIDGE_MAX_QUEUES; qidx++) {
uint16_t *n = vubr->notifier.addr + pagesize * qidx;
if (*n == qidx) {
void *addr;
int fd;
- length = getpagesize() * VHOST_MAX_NR_VIRTQUEUE;
+ length = getpagesize() * VHOST_USER_BRIDGE_MAX_QUEUES;
fd = mkstemp(template);
if (fd < 0) {