VRingUsedElem ring[0];
} VRingUsed;
+/* Cached host mappings of the three vring areas.  Installed with
+ * atomic_rcu_set() and reclaimed through call_rcu(), so concurrent
+ * readers may keep using an old set of caches across an update.
+ */
+typedef struct VRingMemoryRegionCaches {
+ struct rcu_head rcu; /* linkage for deferred free via call_rcu() */
+ MemoryRegionCache desc; /* descriptor table */
+ MemoryRegionCache avail; /* available (driver) ring */
+ MemoryRegionCache used; /* used (device) ring */
+} VRingMemoryRegionCaches;
+
typedef struct VRing
{
unsigned int num;
hwaddr desc;
hwaddr avail;
hwaddr used;
+ VRingMemoryRegionCaches *caches; /* RCU-protected; read with atomic_rcu_read */
} VRing;
struct VirtQueue
QLIST_ENTRY(VirtQueue) node;
};
+/* Destroy all three region caches and free the container.
+ * Used both as the call_rcu() callback and for direct, synchronous
+ * teardown.  A NULL argument is tolerated so callers need not check.
+ */
+static void virtio_free_region_cache(VRingMemoryRegionCaches *caches)
+{
+ if (!caches) {
+ return;
+ }
+
+ address_space_cache_destroy(&caches->desc);
+ address_space_cache_destroy(&caches->avail);
+ address_space_cache_destroy(&caches->used);
+ g_free(caches);
+}
+
+/* (Re)build the MemoryRegionCaches for queue @n from its current vring
+ * addresses and publish them with RCU semantics.  The previous caches,
+ * if any, are freed only after a grace period so that in-flight readers
+ * are not disturbed.  If the descriptor address is 0 (queue not set up
+ * or being torn down), any stale caches are dropped rather than left
+ * pointing at the old guest addresses.
+ */
+static void virtio_init_region_cache(VirtIODevice *vdev, int n)
+{
+    VirtQueue *vq = &vdev->vq[n];
+    VRingMemoryRegionCaches *old = vq->vring.caches;
+    VRingMemoryRegionCaches *new;
+    hwaddr addr, size;
+    int event_size;
+
+    /* The avail/used rings carry one extra 16-bit event index word when
+     * VIRTIO_RING_F_EVENT_IDX has been negotiated.
+     */
+    event_size = virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
+
+    addr = vq->vring.desc;
+    if (!addr) {
+        /* Queue not configured: publish NULL so readers stop using the
+         * stale caches, which are reclaimed below.
+         */
+        new = NULL;
+        goto out;
+    }
+    new = g_new0(VRingMemoryRegionCaches, 1);
+    size = virtio_queue_get_desc_size(vdev, n);
+    address_space_cache_init(&new->desc, vdev->dma_as,
+                             addr, size, false);
+
+    size = virtio_queue_get_used_size(vdev, n) + event_size;
+    address_space_cache_init(&new->used, vdev->dma_as,
+                             vq->vring.used, size, true);
+
+    size = virtio_queue_get_avail_size(vdev, n) + event_size;
+    address_space_cache_init(&new->avail, vdev->dma_as,
+                             vq->vring.avail, size, false);
+
+out:
+    atomic_rcu_set(&vq->vring.caches, new);
+    if (old) {
+        call_rcu(old, virtio_free_region_cache, rcu);
+    }
+}
+
/* virt queue functions */
void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
vring->used = vring_align(vring->avail +
offsetof(VRingAvail, ring[vring->num]),
vring->align);
+ virtio_init_region_cache(vdev, n);
}
static void vring_desc_read(VirtIODevice *vdev, VRingDesc *desc,
vdev->vq[n].vring.desc = desc;
vdev->vq[n].vring.avail = avail;
vdev->vq[n].vring.used = used;
+ virtio_init_region_cache(vdev, n);
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
void virtio_cleanup(VirtIODevice *vdev)
{
qemu_del_vm_change_state_handler(vdev->vmstate);
- g_free(vdev->config);
- g_free(vdev->vq);
- g_free(vdev->vector_queues);
+ /* config, vq and vector_queues are now freed in the object's
+ * instance_finalize, after the memory listener is unregistered,
+ * so a listener callback can never see freed virtqueues. */
}
static void virtio_vmstate_change(void *opaque, int running, RunState state)
}
}
+/* Memory-listener commit hook: the address space layout changed, so the
+ * cached vring mappings may be invalid — rebuild them for every active
+ * queue.  NOTE(review): the early break assumes active queues occupy a
+ * contiguous prefix of vdev->vq[]; confirm against queue allocation.
+ */
+static void virtio_memory_listener_commit(MemoryListener *listener)
+{
+ VirtIODevice *vdev = container_of(listener, VirtIODevice, listener);
+ int i;
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ if (vdev->vq[i].vring.num == 0) {
+ break;
+ }
+ virtio_init_region_cache(vdev, i);
+ }
+}
+
+
static void virtio_device_realize(DeviceState *dev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(dev);
error_propagate(errp, err);
return;
}
+
+ vdev->listener.commit = virtio_memory_listener_commit;
+ memory_listener_register(&vdev->listener, vdev->dma_as);
}
static void virtio_device_unrealize(DeviceState *dev, Error **errp)
vdev->bus_name = NULL;
}
+/* Release every active queue's region caches and the vq array itself.
+ * The caches are freed synchronously (no call_rcu) — presumably safe
+ * only because this runs from instance_finalize, after the memory
+ * listener has been unregistered and no readers remain; verify callers.
+ */
+static void virtio_device_free_virtqueues(VirtIODevice *vdev)
+{
+ int i;
+ if (!vdev->vq) {
+ return;
+ }
+
+ for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
+ VRingMemoryRegionCaches *caches;
+ if (vdev->vq[i].vring.num == 0) {
+ break;
+ }
+ caches = atomic_read(&vdev->vq[i].vring.caches);
+ atomic_set(&vdev->vq[i].vring.caches, NULL);
+ virtio_free_region_cache(caches);
+ }
+ g_free(vdev->vq);
+}
+
+/* QOM instance_finalize: unregister the listener first so no further
+ * commit callbacks can touch the queues, then free the virtqueues and
+ * remaining per-device allocations (previously freed in virtio_cleanup).
+ */
+static void virtio_device_instance_finalize(Object *obj)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(obj);
+
+ memory_listener_unregister(&vdev->listener);
+ virtio_device_free_virtqueues(vdev);
+
+ g_free(vdev->config);
+ g_free(vdev->vector_queues);
+}
+
static Property virtio_properties[] = {
DEFINE_VIRTIO_COMMON_FEATURES(VirtIODevice, host_features),
DEFINE_PROP_END_OF_LIST(),
.parent = TYPE_DEVICE,
.instance_size = sizeof(VirtIODevice),
.class_init = virtio_device_class_init,
+ .instance_finalize = virtio_device_instance_finalize,
.abstract = true,
.class_size = sizeof(VirtioDeviceClass),
};