g_free(caches);
}
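+/* Drop this queue's region caches: clear the pointer so new readers see
+ * NULL, then free the old caches via call_rcu once current readers are
+ * done with them. */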
+static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
+{
+ VRingMemoryRegionCaches *caches;
+
+ caches = atomic_read(&vq->vring.caches);
+ atomic_rcu_set(&vq->vring.caches, NULL);
+ if (caches) {
+ call_rcu(caches, virtio_free_region_cache, rcu);
+ }
+}
+
static void virtio_init_region_cache(VirtIODevice *vdev, int n)
{
VirtQueue *vq = &vdev->vq[n];
VRingMemoryRegionCaches *old = vq->vring.caches;
- VRingMemoryRegionCaches *new;
+ VRingMemoryRegionCaches *new = NULL;
hwaddr addr, size;
int event_size;
int64_t len;
addr = vq->vring.desc;
if (!addr) {
- return;
+ goto out_no_cache;
}
new = g_new0(VRingMemoryRegionCaches, 1);
size = virtio_queue_get_desc_size(vdev, n);
return;
err_avail:
- address_space_cache_destroy(&new->used);
+ address_space_cache_destroy(&new->avail);
err_used:
- address_space_cache_destroy(&new->desc);
+ address_space_cache_destroy(&new->used);
err_desc:
+ address_space_cache_destroy(&new->desc);
+out_no_cache:
g_free(new);
+ virtio_virtqueue_reset_region_cache(vq);
}
/* virt queue functions */
/* Called within rcu_read_lock(). */
static int virtio_queue_empty_rcu(VirtQueue *vq)
{
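+ /* A broken device always reports its ring as empty. */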
+ if (unlikely(vq->vdev->broken)) {
+ return 1;
+ }
+
if (unlikely(!vq->vring.avail)) {
return 1;
}
int virtio_queue_empty(VirtQueue *vq)
{
bool empty;
+ if (unlikely(vq->vdev->broken)) {
+ return 1;
+ }
+
if (unlikely(!vq->vring.avail)) {
return 1;
}
vring_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
- if (desc.len % sizeof(VRingDesc)) {
+ if (!desc.len || (desc.len % sizeof(VRingDesc))) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto err;
}
}
static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg,
- hwaddr *addr, unsigned int *num_sg,
+ hwaddr *addr, unsigned int num_sg,
int is_write)
{
unsigned int i;
hwaddr len;
- for (i = 0; i < *num_sg; i++) {
+ for (i = 0; i < num_sg; i++) {
len = sg[i].iov_len;
sg[i].iov_base = dma_memory_map(vdev->dma_as,
addr[i], &len, is_write ?
void virtqueue_map(VirtIODevice *vdev, VirtQueueElement *elem)
{
- virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, &elem->in_num, 1);
- virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, &elem->out_num, 0);
+ virtqueue_map_iovec(vdev, elem->in_sg, elem->in_addr, elem->in_num, 1);
+ virtqueue_map_iovec(vdev, elem->out_sg, elem->out_addr, elem->out_num, 0);
}
static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
desc_cache = &caches->desc;
vring_desc_read(vdev, &desc, desc_cache, i);
if (desc.flags & VRING_DESC_F_INDIRECT) {
- if (desc.len % sizeof(VRingDesc)) {
+ if (!desc.len || (desc.len % sizeof(VRingDesc))) {
virtio_error(vdev, "Invalid size for indirect buffer table");
goto done;
}
}
}
}
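+ /* DRIVER_OK marks the device as started; once the driver has started
+ * it explicitly, a pending start-on-kick is no longer needed. */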
+ vdev->started = val & VIRTIO_CONFIG_S_DRIVER_OK;
+ if (unlikely(vdev->start_on_kick && vdev->started)) {
+ vdev->start_on_kick = false;
+ }
+
if (k->set_status) {
k->set_status(vdev, val);
}
vdev->status = val;
+
return 0;
}
-bool target_words_bigendian(void);
static enum virtio_device_endian virtio_default_endian(void)
{
if (target_words_bigendian()) {
}
}
-static void virtio_virtqueue_reset_region_cache(struct VirtQueue *vq)
-{
- VRingMemoryRegionCaches *caches;
-
- caches = atomic_read(&vq->vring.caches);
- atomic_rcu_set(&vq->vring.caches, NULL);
- if (caches) {
- call_rcu(caches, virtio_free_region_cache, rcu);
- }
-}
-
void virtio_reset(void *opaque)
{
VirtIODevice *vdev = opaque;
k->reset(vdev);
}
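+ /* Transitional devices need starting on the first kick when the host
+ * offers VIRTIO_F_VERSION_1 but a legacy driver has not acked it. */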
+ vdev->start_on_kick = (virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1));
+ vdev->started = false;
vdev->broken = false;
vdev->guest_features = 0;
vdev->queue_sel = 0;
static bool virtio_queue_notify_aio_vq(VirtQueue *vq)
{
+ bool ret = false;
+
if (vq->vring.desc && vq->handle_aio_output) {
VirtIODevice *vdev = vq->vdev;
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
- return vq->handle_aio_output(vdev, vq);
+ ret = vq->handle_aio_output(vdev, vq);
+
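+ /* A legacy driver may kick before setting DRIVER_OK; treat the first
+ * notification as the device being started. */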
+ if (unlikely(vdev->start_on_kick)) {
+ vdev->started = true;
+ vdev->start_on_kick = false;
+ }
}
- return false;
+ return ret;
}
static void virtio_queue_notify_vq(VirtQueue *vq)
trace_virtio_queue_notify(vdev, vq - vdev->vq, vq);
vq->handle_output(vdev, vq);
+
+ if (unlikely(vdev->start_on_kick)) {
+ vdev->started = true;
+ vdev->start_on_kick = false;
+ }
}
}
} else if (vq->handle_output) {
vq->handle_output(vdev, vq);
}
+
+ if (unlikely(vdev->start_on_kick)) {
+ vdev->started = true;
+ vdev->start_on_kick = false;
+ }
}
uint16_t virtio_queue_vector(VirtIODevice *vdev, int n)
vdev->vq[n].vring.num = 0;
vdev->vq[n].vring.num_default = 0;
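+ /* Forget the handlers as well, so a deleted queue cannot be kicked. */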
+ vdev->vq[n].handle_output = NULL;
+ vdev->vq[n].handle_aio_output = NULL;
}
static void virtio_set_isr(VirtIODevice *vdev, int value)
return vdev->broken;
}
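+/* Only migrate the "started" subsection when the flag is set, keeping
+ * the stream compatible with QEMUs that do not know it. */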
+static bool virtio_started_needed(void *opaque)
+{
+ VirtIODevice *vdev = opaque;
+
+ return vdev->started;
+}
+
static const VMStateDescription vmstate_virtqueue = {
.name = "virtqueue_state",
.version_id = 1,
};
static int get_extra_state(QEMUFile *f, void *pv, size_t size,
- VMStateField *field)
+ const VMStateField *field)
{
VirtIODevice *vdev = pv;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
}
static int put_extra_state(QEMUFile *f, void *pv, size_t size,
- VMStateField *field, QJSON *vmdesc)
+ const VMStateField *field, QJSON *vmdesc)
{
VirtIODevice *vdev = pv;
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
}
};
+static const VMStateDescription vmstate_virtio_started = {
+ .name = "virtio/started",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = &virtio_started_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_BOOL(started, VirtIODevice),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_virtio = {
.name = "virtio",
.version_id = 1,
&vmstate_virtio_ringsize,
&vmstate_virtio_broken,
&vmstate_virtio_extra_state,
+ &vmstate_virtio_started,
NULL
}
};
/* A wrapper for use as a VMState .put function */
static int virtio_device_put(QEMUFile *f, void *opaque, size_t size,
- VMStateField *field, QJSON *vmdesc)
+ const VMStateField *field, QJSON *vmdesc)
{
return virtio_save(VIRTIO_DEVICE(opaque), f);
}
/* A wrapper for use as a VMState .get function */
static int virtio_device_get(QEMUFile *f, void *opaque, size_t size,
- VMStateField *field)
+ const VMStateField *field)
{
VirtIODevice *vdev = VIRTIO_DEVICE(opaque);
DeviceClass *dc = DEVICE_CLASS(VIRTIO_DEVICE_GET_CLASS(vdev));
int virtio_set_features(VirtIODevice *vdev, uint64_t val)
{
- /*
+ int ret;
+ /*
* The driver must not attempt to set features after feature negotiation
* has finished.
*/
if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
return -EINVAL;
}
- return virtio_set_features_nocheck(vdev, val);
+ ret = virtio_set_features_nocheck(vdev, val);
+ if (!ret && virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
+ /* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
+ int i;
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num != 0) {
+ virtio_init_region_cache(vdev, i);
+ }
+ }
+ }
+ return ret;
+}
+
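+/* Size the config space to the highest "end" offset among the
+ * feature-dependent fields enabled in host_features. */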
+size_t virtio_feature_get_config_size(VirtIOFeature *feature_sizes,
+ uint64_t host_features)
+{
+ size_t config_size = 0;
+ int i;
+
+ for (i = 0; feature_sizes[i].flags != 0; i++) {
+ if (host_features & feature_sizes[i].flags) {
+ config_size = MAX(feature_sizes[i].end, config_size);
+ }
+ }
+
+ return config_size;
}
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
g_malloc0(sizeof(*vdev->vector_queues) * nvectors);
}
+ vdev->start_on_kick = (virtio_host_has_feature(vdev, VIRTIO_F_VERSION_1) &&
+ !virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1));
+ vdev->started = false;
vdev->device_id = device_id;
vdev->status = 0;
atomic_set(&vdev->isr, 0);
return vdev->vq[n].vring.desc;
}
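+/* A queue is enabled once the guest has configured its descriptor ring
+ * address; an address of 0 marks the queue as unused. */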
+bool virtio_queue_enabled(VirtIODevice *vdev, int n)
+{
+ return virtio_queue_get_desc_addr(vdev, n) != 0;
+}
+
hwaddr virtio_queue_get_avail_addr(VirtIODevice *vdev, int n)
{
return vdev->vq[n].vring.avail;
return &vq->host_notifier;
}
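+/* Hand the memory-region based host notifier to the transport; buses
+ * that do not implement the hook report failure with -1. */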
+int virtio_queue_set_host_notifier_mr(VirtIODevice *vdev, int n,
+ MemoryRegion *mr, bool assign)
+{
+ BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+
+ if (k->set_host_notifier_mr) {
+ return k->set_host_notifier_mr(qbus->parent, n, mr, assign);
+ }
+
+ return -1;
+}
+
void virtio_device_set_child_bus_name(VirtIODevice *vdev, char *bus_name)
{
g_free(vdev->bus_name);
static int virtio_device_start_ioeventfd_impl(VirtIODevice *vdev)
{
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
- int n, r, err;
+ int i, n, r, err;
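+ /* Batch the host-notifier updates so the memory map is committed once
+ * for all queues rather than once per queue. */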
+ memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
}
event_notifier_set(&vq->host_notifier);
}
+ memory_region_transaction_commit();
return 0;
assign_error:
+ i = n; /* save n for a second iteration after transaction is committed. */
while (--n >= 0) {
VirtQueue *vq = &vdev->vq[n];
if (!virtio_queue_get_num(vdev, n)) {
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
+ memory_region_transaction_commit();
+
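+ /* Clean up the notifiers only after the commit above has taken the
+ * ioeventfds out of use. */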
+ while (--i >= 0) {
+ if (!virtio_queue_get_num(vdev, i)) {
+ continue;
+ }
+ virtio_bus_cleanup_host_notifier(qbus, i);
+ }
return err;
}
VirtioBusState *qbus = VIRTIO_BUS(qdev_get_parent_bus(DEVICE(vdev)));
int n, r;
+ memory_region_transaction_begin();
for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
VirtQueue *vq = &vdev->vq[n];
r = virtio_bus_set_host_notifier(qbus, n, false);
assert(r >= 0);
}
+ memory_region_transaction_commit();
+
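+ /* As in the error path above, cleanup must wait until the commit has
+ * disconnected the ioeventfds. */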
+ for (n = 0; n < VIRTIO_QUEUE_MAX; n++) {
+ if (!virtio_queue_get_num(vdev, n)) {
+ continue;
+ }
+ virtio_bus_cleanup_host_notifier(qbus, n);
+ }
}
void virtio_device_stop_ioeventfd(VirtIODevice *vdev)