struct list_head va;
/* Constant after initialization */
struct radeon_device *rdev;
- struct drm_gem_object gem_base;
struct ttm_bo_kmap_obj dma_buf_vmap;
pid_t pid;
struct radeon_mn *mn;
struct list_head mn_list;
};
-#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, gem_base)
+#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)
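/*
 * Illustrative sketch, not part of the patch: once the dedicated gem_base
 * member is dropped, gem_to_radeon_bo() resolves the containing radeon_bo
 * through the GEM object embedded in the TTM buffer object (tbo.base).
 * The struct definitions below are simplified stand-ins for the kernel's
 * drm_gem_object, ttm_buffer_object and radeon_bo (only the nesting
 * relationship matters), and container_of() is redefined locally so the
 * sketch builds as a stand-alone userspace program.
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct drm_gem_object { size_t size; };                    /* stand-in */
struct ttm_buffer_object { struct drm_gem_object base; };  /* GEM object embedded in the TTM BO */
struct radeon_bo { struct ttm_buffer_object tbo; int surface_reg; };

#define gem_to_radeon_bo(gobj) container_of((gobj), struct radeon_bo, tbo.base)

int main(void)
{
	struct radeon_bo bo = { .surface_reg = -1 };
	struct drm_gem_object *gobj = &bo.tbo.base;  /* handle the GEM code passes around */

	/* offsetof(struct radeon_bo, tbo.base) walks back to the enclosing bo */
	printf("round trip ok: %d\n", gem_to_radeon_bo(gobj) == &bo);
	return 0;
}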
int radeon_gem_debugfs_init(struct radeon_device *rdev);
mutex_unlock(&bo->rdev->gem.mutex);
radeon_bo_clear_surface_reg(bo);
WARN_ON_ONCE(!list_empty(&bo->va));
- if (bo->gem_base.import_attach)
- drm_prime_gem_destroy(&bo->gem_base, bo->tbo.sg);
- drm_gem_object_release(&bo->gem_base);
+ if (bo->tbo.base.import_attach)
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
kfree(bo);
}
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
- drm_gem_private_object_init(rdev->ddev, &bo->gem_base, size);
+ drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size);
bo->rdev = rdev;
bo->surface_reg = -1;
INIT_LIST_HEAD(&bo->list);
r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, page_align, !kernel, acc_size,
sg, resv, &radeon_ttm_bo_destroy);
- bo->gem_base.resv = bo->tbo.resv;
+ bo->tbo.base.resv = bo->tbo.resv;
up_read(&rdev->pm.mclk_lock);
if (unlikely(r != 0)) {
return r;
}
dev_err(rdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
dev_err(rdev->dev, "%p %p %lu %lu force free\n",
- &bo->gem_base, bo, (unsigned long)bo->gem_base.size,
- *((unsigned long *)&bo->gem_base.refcount));
+ &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
+ *((unsigned long *)&bo->tbo.base.refcount));
mutex_lock(&bo->rdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&bo->rdev->gem.mutex);
/* this should unref the ttm bo */
- drm_gem_object_put_unlocked(&bo->gem_base);
+ drm_gem_object_put_unlocked(&bo->tbo.base);
}
}