mem_region.size = int128_get64(llsize) - 1,
mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly),
- r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
+ r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &mem_region);
if (unlikely(r != IOVA_OK)) {
error_report("Can't allocate a mapping (%d)", r);
goto fail;
fail_map:
if (v->shadow_data) {
- vhost_iova_tree_remove(v->iova_tree, mem_region);
+ vhost_iova_tree_remove(v->shared->iova_tree, mem_region);
}
fail:
.size = int128_get64(llsize) - 1,
};
- result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
+ result = vhost_iova_tree_find_iova(v->shared->iova_tree, &mem_region);
if (!result) {
/* The memory listener map wasn't mapped */
return;
}
iova = result->iova;
- vhost_iova_tree_remove(v->iova_tree, *result);
+ vhost_iova_tree_remove(v->shared->iova_tree, *result);
}
vhost_vdpa_iotlb_batch_begin_once(v);
/*
const DMAMap needle = {
.translated_addr = addr,
};
- const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
+ const DMAMap *result = vhost_iova_tree_find_iova(v->shared->iova_tree,
+ &needle);
hwaddr size;
int r;
return;
}
- vhost_iova_tree_remove(v->iova_tree, *result);
+ vhost_iova_tree_remove(v->shared->iova_tree, *result);
}
static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
{
int r;
- r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
+ r = vhost_iova_tree_map_alloc(v->shared->iova_tree, needle);
if (unlikely(r != IOVA_OK)) {
error_setg(errp, "Cannot allocate iova (%d)", r);
return false;
needle->perm == IOMMU_RO);
if (unlikely(r != 0)) {
error_setg_errno(errp, -r, "Cannot map region to device");
- vhost_iova_tree_remove(v->iova_tree, *needle);
+ vhost_iova_tree_remove(v->shared->iova_tree, *needle);
}
return r == 0;
goto err;
}
- vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
+ vhost_svq_start(svq, dev->vdev, vq, v->shared->iova_tree);
ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
if (unlikely(!ok)) {
goto err_map;
migration_add_notifier(&s->migration_state,
vdpa_net_migration_state_notifier);
if (v->shadow_vqs_enabled) {
- v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
- v->iova_range.last);
+ v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
}
}
return 0;
}
- if (v->shadow_vqs_enabled) {
- VhostVDPAState *s0 = vhost_vdpa_net_first_nc_vdpa(s);
- v->iova_tree = s0->vhost_vdpa.iova_tree;
- }
-
return 0;
}
dev = s->vhost_vdpa.dev;
if (dev->vq_index + dev->nvqs == dev->vq_index_end) {
- g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete);
- } else {
- s->vhost_vdpa.iova_tree = NULL;
+ g_clear_pointer(&s->vhost_vdpa.shared->iova_tree,
+ vhost_iova_tree_delete);
}
}
static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
- VhostIOVATree *tree = v->iova_tree;
+ VhostIOVATree *tree = v->shared->iova_tree;
DMAMap needle = {
/*
* No need to specify size or to look for more translations since
map.translated_addr = (hwaddr)(uintptr_t)buf;
map.size = size - 1;
map.perm = write ? IOMMU_RW : IOMMU_RO,
- r = vhost_iova_tree_map_alloc(v->iova_tree, &map);
+ r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map);
if (unlikely(r != IOVA_OK)) {
error_report("Cannot map injected element");
return r;
return 0;
dma_map_err:
- vhost_iova_tree_remove(v->iova_tree, map);
+ vhost_iova_tree_remove(v->shared->iova_tree, map);
return r;
}
return 0;
}
- if (s0->vhost_vdpa.iova_tree) {
- /*
- * SVQ is already configured for all virtqueues. Reuse IOVA tree for
- * simplicity, whether CVQ shares ASID with guest or not, because:
- * - Memory listener need access to guest's memory addresses allocated
- * in the IOVA tree.
- * - There should be plenty of IOVA address space for both ASID not to
- * worry about collisions between them. Guest's translations are
- * still validated with virtio virtqueue_pop so there is no risk for
- * the guest to access memory that it shouldn't.
- *
- * To allocate a iova tree per ASID is doable but it complicates the
- * code and it is not worth it for the moment.
- */
- v->iova_tree = s0->vhost_vdpa.iova_tree;
- } else {
- v->iova_tree = vhost_iova_tree_new(v->iova_range.first,
- v->iova_range.last);
+ /*
+  * If another vhost_vdpa instance already has an iova_tree, reuse it for
+  * simplicity, whether CVQ shares the ASID with the guest or not, because:
+  * - The memory listener needs access to the guest's memory addresses
+  *   allocated in the IOVA tree.
+  * - There should be plenty of IOVA address space for both ASIDs, so there
+  *   is no need to worry about collisions between them. The guest's
+  *   translations are still validated with virtio virtqueue_pop, so there
+  *   is no risk of the guest accessing memory it shouldn't.
+  *
+  * Allocating an iova tree per ASID is doable, but it complicates the code
+  * and is not worth it for the moment.
+  */
+ if (!v->shared->iova_tree) {
+ v->shared->iova_tree = vhost_iova_tree_new(v->iova_range.first,
+ v->iova_range.last);
}
r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
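
The comment above captures the crux of the whole series: every vhost_vdpa instance of a device now dereferences one lazily-allocated IOVA tree through v->shared instead of owning (or NULL-ing) a per-queue copy. The standalone sketch below is not part of the patch; it only illustrates that lazy shared-allocation pattern. IOVATree, VhostVDPAShared, VhostVDPA and ensure_iova_tree() are simplified stand-ins invented for this illustration, not the QEMU definitions, and the sketch assumes the shared struct is created once per device and handed to every queue's vhost_vdpa.

/*
 * Standalone illustration (not part of the patch) of the shared,
 * lazily-allocated IOVA tree pattern. All types are simplified stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned long first, last;    /* usable IOVA range */
} IOVATree;                       /* stand-in for VhostIOVATree */

typedef struct {
    IOVATree *iova_tree;          /* one tree shared by all queues/ASIDs */
} VhostVDPAShared;                /* stand-in for the new shared struct */

typedef struct {
    VhostVDPAShared *shared;      /* every vhost_vdpa points at the same object */
    unsigned long range_first, range_last;
} VhostVDPA;

static IOVATree *iova_tree_new(unsigned long first, unsigned long last)
{
    IOVATree *t = calloc(1, sizeof(*t));
    t->first = first;
    t->last = last;
    return t;
}

/* Mirrors the "allocate only if nobody did it yet" check in cvq_start above */
static void ensure_iova_tree(VhostVDPA *v)
{
    if (!v->shared->iova_tree) {
        v->shared->iova_tree = iova_tree_new(v->range_first, v->range_last);
    }
}

int main(void)
{
    VhostVDPAShared shared = { 0 };
    VhostVDPA dataq = { .shared = &shared, .range_first = 0, .range_last = ~0UL };
    VhostVDPA cvq   = { .shared = &shared, .range_first = 0, .range_last = ~0UL };

    ensure_iova_tree(&dataq);     /* first caller allocates the tree */
    ensure_iova_tree(&cvq);       /* second caller reuses the same tree */

    printf("same tree: %s\n",
           dataq.shared->iova_tree == cvq.shared->iova_tree ? "yes" : "no");
    free(shared.iova_tree);
    return 0;
}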