struct virtio_gpu_object {
struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
-
- struct sg_table *pages;
- uint32_t mapped;
-
bool dumb;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem {
+ struct virtio_gpu_object base;
+ struct sg_table *pages;
+ uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+ container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
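The new wrapper embeds struct virtio_gpu_object as its first member, so container_of() can walk back from the base pointer that the rest of the driver keeps passing around. A minimal sketch of that pattern, assuming a hypothetical demo allocator and user that are not part of the patch:

/* Illustrative only; virtio_gpu_demo_alloc()/virtio_gpu_demo_use() are
 * hypothetical helpers, not driver code from this patch. */
static struct virtio_gpu_object *virtio_gpu_demo_alloc(void)
{
	struct virtio_gpu_object_shmem *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	/* Generic code keeps dealing in struct virtio_gpu_object ... */
	return &shmem->base;
}

static void virtio_gpu_demo_use(struct virtio_gpu_object *bo)
{
	/* ... and shmem-aware code downcasts with the new helper macro. */
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	shmem->pages = NULL;
	shmem->mapped = 0;
}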
void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
- if (bo->pages) {
- if (bo->mapped) {
+ if (shmem->pages) {
+ if (shmem->mapped) {
dma_unmap_sg(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->mapped,
+ shmem->pages->sgl, shmem->mapped,
DMA_TO_DEVICE);
- bo->mapped = 0;
+ shmem->mapped = 0;
}
- sg_free_table(bo->pages);
- bo->pages = NULL;
+ sg_free_table(shmem->pages);
+ shmem->pages = NULL;
drm_gem_shmem_unpin(&bo->base.base);
}
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
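Untangling the interleaved -/+ lines, the teardown order in the hunk above is: undo the DMA mapping if one exists, free the sg_table, then drop the pin taken earlier in the object's init path. The same sequence written out flat, as an illustrative helper rather than additional driver code:

/* Illustrative only: the cleanup hunk above in un-diffed form. */
static void demo_shmem_teardown(struct virtio_gpu_device *vgdev,
				struct virtio_gpu_object *bo)
{
	struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);

	if (!shmem->pages)
		return;

	if (shmem->mapped) {
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     shmem->pages->sgl, shmem->mapped,
			     DMA_TO_DEVICE);
		shmem->mapped = 0;
	}

	sg_free_table(shmem->pages);
	shmem->pages = NULL;
	drm_gem_shmem_unpin(&bo->base.base);
}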
unsigned int *nents)
{
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
struct scatterlist *sg;
int si, ret;
if (ret < 0)
return -EINVAL;
- bo->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
- if (!bo->pages) {
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ if (!shmem->pages) {
drm_gem_shmem_unpin(&bo->base.base);
return -EINVAL;
}
if (use_dma_api) {
- bo->mapped = dma_map_sg(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
- DMA_TO_DEVICE);
- *nents = bo->mapped;
+ shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl,
+ shmem->pages->nents,
+ DMA_TO_DEVICE);
+ *nents = shmem->mapped;
} else {
- *nents = bo->pages->nents;
+ *nents = shmem->pages->nents;
}
*ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
return -ENOMEM;
}
- for_each_sg(bo->pages->sgl, sg, *nents, si) {
+ for_each_sg(shmem->pages->sgl, sg, *nents, si) {
(*ents)[si].addr = cpu_to_le64(use_dma_api
? sg_dma_address(sg)
: sg_phys(sg));
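Which address lands in each entry depends on use_dma_api: when the virtio transport honours the DMA API, the entries carry the addresses produced by dma_map_sg() above (and *nents comes from its return value, since mapping can coalesce scatterlist entries); otherwise the host is handed guest-physical addresses straight from the scatterlist. A tiny illustrative helper (hypothetical name) capturing just that choice:

/* Illustrative only: per-entry address selection, mirroring the ternary
 * in the loop above. */
static u64 demo_entry_addr(struct scatterlist *sg, bool use_dma_api)
{
	return use_dma_api ? sg_dma_address(sg) : sg_phys(sg);
}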
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
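Both transfer hunks follow the same pattern: when the DMA API is in use, CPU writes to the shmem backing pages are pushed out with dma_sync_sg_for_device() before the transfer command is queued, so the host never reads stale data. Schematically (a sketch of the shared pattern, not additional driver code):

	/* Illustrative pattern shared by the 2D and 3D transfer paths above. */
	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       shmem->pages->sgl,
				       shmem->pages->nents,
				       DMA_TO_DEVICE);
	/* ... then allocate, fill and queue the virtio command buffer ... */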