return 0;
}
+static int vhost_vdpa_net_data_load(NetClientState *nc)
+{
+ VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
+ struct vhost_vdpa *v = &s->vhost_vdpa;
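+ /* data vqs come in rx/tx pairs, so an odd vq_index_end implies a CVQ */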
+ bool has_cvq = v->dev->vq_index_end % 2;
+
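+ /*
+  * If the device has a CVQ, vhost_vdpa_net_cvq_load() will enable the
+  * vrings after restoring the device state through CVQ, so there is
+  * nothing to do here.
+  */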
+ if (has_cvq) {
+ return 0;
+ }
+
+ for (int i = 0; i < v->dev->nvqs; ++i) {
+ vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
+ }
+ return 0;
+}
+
static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
struct vhost_dev *dev = s->vhost_vdpa.dev;
if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
+ } else {
+ s->vhost_vdpa.iova_tree = NULL;
}
}
.size = sizeof(VhostVDPAState),
.receive = vhost_vdpa_receive,
.start = vhost_vdpa_net_data_start,
+ .load = vhost_vdpa_net_data_load,
.stop = vhost_vdpa_net_client_stop,
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
s0 = vhost_vdpa_net_first_nc_vdpa(s);
v->shadow_data = s0->vhost_vdpa.shadow_vqs_enabled;
- v->shadow_vqs_enabled = s->always_svq;
+ v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;
if (s->vhost_vdpa.shadow_data) {
vhost_vdpa_net_client_stop(nc);
}
-static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len,
- size_t in_len)
+static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
+ const struct iovec *out_sg, size_t out_num,
+ const struct iovec *in_sg, size_t in_num)
{
- /* Buffers for the device */
- const struct iovec out = {
- .iov_base = s->cvq_cmd_out_buffer,
- .iov_len = out_len,
- };
- const struct iovec in = {
- .iov_base = s->status,
- .iov_len = sizeof(virtio_net_ctrl_ack),
- };
VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
int r;
- r = vhost_svq_add(svq, &out, 1, &in, 1, NULL);
+ r = vhost_svq_add(svq, out_sg, out_num, in_sg, in_num, NULL);
if (unlikely(r != 0)) {
if (unlikely(r == -ENOSPC)) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
* descriptor. Also, we need to take the answer before SVQ pulls by itself,
* when BQL is released
*/
- return vhost_svq_poll(svq);
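+ /* poll for exactly one used descriptor: the ack written by the device */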
+ return vhost_svq_poll(svq, 1);
}
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class,
.cmd = cmd,
};
size_t data_size = iov_size(data_sg, data_num);
+ /* Buffers for the device */
+ const struct iovec out = {
+ .iov_base = s->cvq_cmd_out_buffer,
+ .iov_len = sizeof(ctrl) + data_size,
+ };
+ const struct iovec in = {
+ .iov_base = s->status,
+ .iov_len = sizeof(*s->status),
+ };
assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
iov_to_buf(data_sg, data_num, 0,
s->cvq_cmd_out_buffer + sizeof(ctrl), data_size);
- return vhost_vdpa_net_cvq_add(s, data_size + sizeof(ctrl),
- sizeof(virtio_net_ctrl_ack));
+ return vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
}
static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n)
.iov_base = &on,
.iov_len = sizeof(on),
};
- return vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
- cmd, &data, 1);
+ ssize_t dev_written;
+
+ dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_RX,
+ cmd, &data, 1);
+ if (unlikely(dev_written < 0)) {
+ return dev_written;
+ }
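+ /* the device writes its ack into s->status; anything but OK is an error */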
+ if (*s->status != VIRTIO_NET_OK) {
+ return -EIO;
+ }
+
+ return 0;
}
static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
const VirtIONet *n)
{
- ssize_t dev_written;
+ ssize_t r;
if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
return 0;
* configuration only at live migration.
*/
if (!n->mac_table.uni_overflow && !n->promisc) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_PROMISC, 0);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 0);
+ if (unlikely(r < 0)) {
+ return r;
}
}
* configuration only at live migration.
*/
if (n->mac_table.multi_overflow || n->allmulti) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
- if (unlikely(dev_written < 0)) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
* configuration only at live migration.
*/
if (n->alluni) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_ALLUNI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_ALLUNI, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
* configuration only at live migration.
*/
if (n->nomulti) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOMULTI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOMULTI, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
* configuration only at live migration.
*/
if (n->nouni) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOUNI, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOUNI, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
* configuration only at live migration.
*/
if (n->nobcast) {
- dev_written = vhost_vdpa_net_load_rx_mode(s,
- VIRTIO_NET_CTRL_RX_NOBCAST, 1);
- if (dev_written < 0) {
- return dev_written;
- }
- if (*s->status != VIRTIO_NET_OK) {
- return -EIO;
+ r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_NOBCAST, 1);
+ if (unlikely(r < 0)) {
+ return r;
}
}
return 0;
}
-static int vhost_vdpa_net_load(NetClientState *nc)
+static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
struct vhost_vdpa *v = &s->vhost_vdpa;
assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
- if (!v->shadow_vqs_enabled) {
- return 0;
- }
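+ /* enable the CVQ ring first so the device state can be loaded through it */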
+ vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
- n = VIRTIO_NET(v->dev->vdev);
- r = vhost_vdpa_net_load_mac(s, n);
- if (unlikely(r < 0)) {
- return r;
- }
- r = vhost_vdpa_net_load_mq(s, n);
- if (unlikely(r)) {
- return r;
- }
- r = vhost_vdpa_net_load_offloads(s, n);
- if (unlikely(r)) {
- return r;
- }
- r = vhost_vdpa_net_load_rx(s, n);
- if (unlikely(r)) {
- return r;
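+ /* replay the guest-visible device state only when SVQ intercepts CVQ */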
+ if (v->shadow_vqs_enabled) {
+ n = VIRTIO_NET(v->dev->vdev);
+ r = vhost_vdpa_net_load_mac(s, n);
+ if (unlikely(r < 0)) {
+ return r;
+ }
+ r = vhost_vdpa_net_load_mq(s, n);
+ if (unlikely(r)) {
+ return r;
+ }
+ r = vhost_vdpa_net_load_offloads(s, n);
+ if (unlikely(r)) {
+ return r;
+ }
+ r = vhost_vdpa_net_load_rx(s, n);
+ if (unlikely(r)) {
+ return r;
+ }
+ r = vhost_vdpa_net_load_vlan(s, n);
+ if (unlikely(r)) {
+ return r;
+ }
}
- r = vhost_vdpa_net_load_vlan(s, n);
- if (unlikely(r)) {
- return r;
+
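+ /* with the device state restored, enable the data vrings */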
+ for (int i = 0; i < v->dev->vq_index; ++i) {
+ vhost_vdpa_set_vring_ready(v, i);
}
return 0;
.size = sizeof(VhostVDPAState),
.receive = vhost_vdpa_receive,
.start = vhost_vdpa_net_cvq_start,
- .load = vhost_vdpa_net_load,
+ .load = vhost_vdpa_net_cvq_load,
.stop = vhost_vdpa_net_cvq_stop,
.cleanup = vhost_vdpa_cleanup,
.has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
*/
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
VirtQueueElement *elem,
- struct iovec *out)
+ struct iovec *out,
+ const struct iovec *in)
{
struct virtio_net_ctrl_mac mac_data, *mac_ptr;
struct virtio_net_ctrl_hdr *hdr_ptr;
uint32_t cursor;
ssize_t r;
+ uint8_t on = 1;
/* parse the non-multicast MAC address entries from CVQ command */
cursor = sizeof(*hdr_ptr);
* filter table to the vdpa device, it should send the
* VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
*/
- r = vhost_vdpa_net_load_rx_mode(s, VIRTIO_NET_CTRL_RX_PROMISC, 1);
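+ /*
+  * Build the VIRTIO_NET_CTRL_RX_PROMISC command in place in the out
+  * buffer and send it to the vdpa device.
+  */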
+ hdr_ptr = out->iov_base;
+ out->iov_len = sizeof(*hdr_ptr) + sizeof(on);
+
+ hdr_ptr->class = VIRTIO_NET_CTRL_RX;
+ hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
+ iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
+ r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
if (unlikely(r < 0)) {
return r;
}
.iov_base = s->cvq_cmd_out_buffer,
};
/* in buffer used for device model */
- const struct iovec in = {
+ const struct iovec model_in = {
.iov_base = &status,
.iov_len = sizeof(status),
};
+ /* in buffer used for vdpa device */
+ const struct iovec vdpa_in = {
+ .iov_base = s->status,
+ .iov_len = sizeof(*s->status),
+ };
ssize_t dev_written = -EINVAL;
out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
* the CVQ command directly.
*/
dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
- &out);
+ &out, &vdpa_in);
if (unlikely(dev_written < 0)) {
goto out;
}
} else {
- dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status));
+ dev_written = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
if (unlikely(dev_written < 0)) {
goto out;
}
}
status = VIRTIO_NET_ERR;
- virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1);
+ virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
if (status != VIRTIO_NET_OK) {
error_report("Bad CVQ processing in model");
}
uint64_t backend_features;
int64_t cvq_group;
uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
- VIRTIO_CONFIG_S_DRIVER |
- VIRTIO_CONFIG_S_FEATURES_OK;
+ VIRTIO_CONFIG_S_DRIVER;
int r;
ERRP_GUARD();
return 0;
}
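+ /*
+  * The virtio spec requires the driver to acknowledge the device
+  * (ACKNOWLEDGE | DRIVER) before feature negotiation, and to set
+  * FEATURES_OK only after writing the accepted features.
+  */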
+ r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
+ if (unlikely(r)) {
+ error_setg_errno(errp, -r, "Cannot set device status");
+ goto out;
+ }
+
r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
if (unlikely(r)) {
- error_setg_errno(errp, errno, "Cannot set features");
+ error_setg_errno(errp, -r, "Cannot set features");
+ goto out;
}
+ status |= VIRTIO_CONFIG_S_FEATURES_OK;
r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
if (unlikely(r)) {
- error_setg_errno(errp, -r, "Cannot set device features");
+ error_setg_errno(errp, -r, "Cannot set device status");
goto out;
}
VhostVDPAState *s;
int ret = 0;
assert(name);
- int cvq_isolated;
+ int cvq_isolated = 0;
if (is_datapath) {
nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
s->vhost_vdpa.shadow_vq_ops_opaque = s;
s->cvq_isolated = cvq_isolated;
-
- /*
- * TODO: We cannot migrate devices with CVQ and no x-svq enabled as
- * there is no way to set the device state (MAC, MQ, etc) before
- * starting the datapath.
- *
- * Migration blocker ownership now belongs to s->vhost_vdpa.
- */
- if (!svq) {
- error_setg(&s->vhost_vdpa.migration_blocker,
- "net vdpa cannot migrate with CVQ feature");
- }
}
ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
if (ret) {