return &gtt->ttm.ttm;
}
-static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int amdgpu_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(ttm->bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, adev->dev);
+ return ttm_dma_populate(&gtt->ttm, adev->dev, ctx);
}
#endif
- return ttm_populate_and_map_pages(adev->dev, &gtt->ttm);
+ return ttm_populate_and_map_pages(adev->dev, &gtt->ttm, ctx);
}
static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
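The hunks that follow all thread the same context object from the caller down into the page allocators. Only the two members that the new stack initializers set are visible in this patch; a rough sketch of that shape is below, with the caveat that the real struct ttm_operation_ctx in the TTM headers may carry additional fields.

/*
 * Sketch of the context being plumbed through ttm_tt_populate(),
 * limited to the two members this patch initializes; the real
 * struct ttm_operation_ctx may define more.
 */
struct ttm_operation_ctx {
        bool interruptible;     /* waits may be interrupted by signals */
        bool no_wait_gpu;       /* do not wait for the GPU, fail instead */
};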
return tt;
}
-static int ast_ttm_tt_populate(struct ttm_tt *ttm)
+static int ast_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void ast_ttm_tt_unpopulate(struct ttm_tt *ttm)
return tt;
}
-static int cirrus_ttm_tt_populate(struct ttm_tt *ttm)
+static int cirrus_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void cirrus_ttm_tt_unpopulate(struct ttm_tt *ttm)
return tt;
}
-static int hibmc_ttm_tt_populate(struct ttm_tt *ttm)
+static int hibmc_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void hibmc_ttm_tt_unpopulate(struct ttm_tt *ttm)
return tt;
}
-static int mgag200_ttm_tt_populate(struct ttm_tt *ttm)
+static int mgag200_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void mgag200_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
static int
-nouveau_ttm_tt_populate(struct ttm_tt *ttm)
+nouveau_ttm_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
struct nouveau_drm *drm;
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
- return ttm_agp_tt_populate(ttm);
+ return ttm_agp_tt_populate(ttm, ctx);
}
#endif
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate((void *)ttm, dev);
+ return ttm_dma_populate((void *)ttm, dev, ctx);
}
#endif
- r = ttm_pool_populate(ttm);
+ r = ttm_pool_populate(ttm, ctx);
if (r) {
return r;
}
.destroy = &qxl_ttm_backend_destroy,
};
-static int qxl_ttm_tt_populate(struct ttm_tt *ttm)
+static int qxl_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
int r;
if (ttm->state != tt_unpopulated)
return 0;
- r = ttm_pool_populate(ttm);
+ r = ttm_pool_populate(ttm, ctx);
if (r)
return r;
return (struct radeon_ttm_tt *)ttm;
}
-static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
+static int radeon_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);
struct radeon_device *rdev;
rdev = radeon_get_rdev(ttm->bdev);
#if IS_ENABLED(CONFIG_AGP)
if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_populate(ttm);
+ return ttm_agp_tt_populate(ttm, ctx);
}
#endif
#ifdef CONFIG_SWIOTLB
if (swiotlb_nr_tbl()) {
- return ttm_dma_populate(&gtt->ttm, rdev->dev);
+ return ttm_dma_populate(&gtt->ttm, rdev->dev, ctx);
}
#endif
- return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm);
+ return ttm_populate_and_map_pages(rdev->dev, &gtt->ttm, ctx);
}
static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
EXPORT_SYMBOL(ttm_agp_tt_create);
-int ttm_agp_tt_populate(struct ttm_tt *ttm)
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
if (ttm->state != tt_unpopulated)
return 0;
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
EXPORT_SYMBOL(ttm_agp_tt_populate);
* TTM might be null for moves within the same region.
*/
if (ttm && ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm, ctx);
if (ret)
goto out1;
}
unsigned long num_pages,
struct ttm_bo_kmap_obj *map)
{
- struct ttm_mem_reg *mem = &bo->mem; pgprot_t prot;
+ struct ttm_mem_reg *mem = &bo->mem;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
struct ttm_tt *ttm = bo->ttm;
+ pgprot_t prot;
int ret;
BUG_ON(!ttm);
if (ttm->state == tt_unpopulated) {
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
if (ret)
return ret;
}
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
} else {
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
ttm = bo->ttm;
cvma.vm_page_prot = ttm_io_prot(bo->mem.placement,
cvma.vm_page_prot);
/* Allocate all page at once, most common usage */
- if (ttm->bdev->driver->ttm_tt_populate(ttm)) {
+ if (ttm->bdev->driver->ttm_tt_populate(ttm, &ctx)) {
retval = VM_FAULT_OOM;
goto out_io_unlock;
}
_manager = NULL;
}
-int ttm_pool_populate(struct ttm_tt *ttm)
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
unsigned i;
int ret;
for (i = 0; i < ttm->num_pages; ++i) {
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- PAGE_SIZE, &ctx);
+ PAGE_SIZE, ctx);
if (unlikely(ret != 0)) {
ttm_pool_unpopulate(ttm);
return -ENOMEM;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+ struct ttm_operation_ctx *ctx)
{
unsigned i, j;
int r;
- r = ttm_pool_populate(&tt->ttm);
+ r = ttm_pool_populate(&tt->ttm, ctx);
if (r)
return r;
* On success pages list will hold count number of correctly
* cached pages. On failure will hold the negative return value (-ENOMEM, etc).
*/
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+ struct ttm_operation_ctx *ctx)
{
struct ttm_tt *ttm = &ttm_dma->ttm;
struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
- struct ttm_operation_ctx ctx = {
- .interruptible = false,
- .no_wait_gpu = false
- };
unsigned long num_pages = ttm->num_pages;
struct dma_pool *pool;
enum pool_type type;
break;
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size, &ctx);
+ pool->size, ctx);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
}
ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
- pool->size, &ctx);
+ pool->size, ctx);
if (unlikely(ret != 0)) {
ttm_dma_unpopulate(ttm_dma, dev);
return -ENOMEM;
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
int ret = 0;
if (!ttm)
if (ttm->state == tt_bound)
return 0;
- ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+ ret = ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
if (ret)
return ret;
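The core call sites converted above (ttm_bo_kmap_ttm, the VM fault path, ttm_tt_bind) all build a non-interruptible context on the stack right before invoking the driver hook. A minimal caller-side sketch of that pattern, using a hypothetical helper name:

/* Hypothetical helper illustrating the caller-side pattern: build a
 * context (or reuse one handed down from above) and pass it to the
 * driver's populate hook. */
static int example_populate(struct ttm_tt *ttm)
{
        struct ttm_operation_ctx ctx = {
                .interruptible = false,
                .no_wait_gpu = false
        };

        if (ttm->state != tt_unpopulated)
                return 0;

        return ttm->bdev->driver->ttm_tt_populate(ttm, &ctx);
}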
int ret;
struct page **pages = bo->tbo.ttm->pages;
int nr_pages = bo->tbo.num_pages;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
/* wtf swapping */
if (bo->pages)
return 0;
if (bo->tbo.ttm->state == tt_unpopulated)
- bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm);
+ bo->tbo.ttm->bdev->driver->ttm_tt_populate(bo->tbo.ttm, &ctx);
bo->pages = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!bo->pages)
goto out;
.destroy = &virtio_gpu_ttm_backend_destroy,
};
-static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm)
+static int virtio_gpu_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
if (ttm->state != tt_unpopulated)
return 0;
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void virtio_gpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
-static int vmw_ttm_populate(struct ttm_tt *ttm)
+static int vmw_ttm_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct vmw_ttm_tt *vmw_tt =
container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm);
struct vmw_private *dev_priv = vmw_tt->dev_priv;
struct ttm_mem_global *glob = vmw_mem_glob(dev_priv);
- struct ttm_operation_ctx ctx = {
- .interruptible = true,
- .no_wait_gpu = false
- };
int ret;
if (ttm->state != tt_unpopulated)
if (dev_priv->map_mode == vmw_dma_alloc_coherent) {
size_t size =
ttm_round_pot(ttm->num_pages * sizeof(dma_addr_t));
- ret = ttm_mem_global_alloc(glob, size, &ctx);
+ ret = ttm_mem_global_alloc(glob, size, ctx);
if (unlikely(ret != 0))
return ret;
- ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev);
+ ret = ttm_dma_populate(&vmw_tt->dma_ttm, dev_priv->dev->dev,
+ ctx);
if (unlikely(ret != 0))
ttm_mem_global_free(glob, size);
} else
- ret = ttm_pool_populate(ttm);
+ ret = ttm_pool_populate(ttm, ctx);
return ret;
}
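Note the behavioural nuance in vmw_ttm_populate(): the removed local context hard-coded interruptible = true, whereas the contexts built at the vmwgfx call sites below (and in the core TTM paths above) are non-interruptible. A vmwgfx-internal caller that wanted to keep the old interruptible waits would now have to ask for them explicitly, roughly like this:

/* Hypothetical call site: the interruptible behaviour that used to be
 * hard-coded inside vmw_ttm_populate() must now come from the caller. */
struct ttm_operation_ctx ctx = {
        .interruptible = true,
        .no_wait_gpu = false
};

ret = vmw_ttm_populate(ttm, &ctx);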
unsigned long offset;
unsigned long bo_size;
struct vmw_otable *otables = batch->otables;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
SVGAOTableType i;
int ret;
ret = ttm_bo_reserve(batch->otable_bo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm, &ctx);
if (unlikely(ret != 0))
goto out_unreserve;
ret = vmw_bo_map_dma(batch->otable_bo);
struct vmw_mob *mob)
{
int ret;
+ struct ttm_operation_ctx ctx = {
+ .interruptible = false,
+ .no_wait_gpu = false
+ };
+
BUG_ON(mob->pt_bo != NULL);
ret = ttm_bo_create(&dev_priv->bdev, mob->num_pages * PAGE_SIZE,
ret = ttm_bo_reserve(mob->pt_bo, false, true, NULL);
BUG_ON(ret != 0);
- ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm);
+ ret = vmw_bo_driver.ttm_tt_populate(mob->pt_bo->ttm, &ctx);
if (unlikely(ret != 0))
goto out_unreserve;
ret = vmw_bo_map_dma(mob->pt_bo);
return tt;
}
-static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
+static int vbox_ttm_tt_populate(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx)
{
- return ttm_pool_populate(ttm);
+ return ttm_pool_populate(ttm, ctx);
}
static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
* Returns:
* -ENOMEM: Out of memory.
*/
- int (*ttm_tt_populate)(struct ttm_tt *ttm);
+ int (*ttm_tt_populate)(struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate
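With the driver-facing prototype above now taking the context, converting a simple pool-backed driver (the ast/cirrus/hibmc/mgag200/vbox pattern earlier in this patch) comes down to accepting the argument and forwarding it. A minimal sketch with a placeholder driver name, wired up through the .ttm_tt_populate member shown above:

/* Placeholder "mydrv" driver hook under the new two-argument signature;
 * it simply forwards the caller's context to the page pool, mirroring
 * the simple drivers converted in this patch. */
static int mydrv_ttm_tt_populate(struct ttm_tt *ttm,
                                 struct ttm_operation_ctx *ctx)
{
        return ttm_pool_populate(ttm, ctx);
}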
struct agp_bridge_data *bridge,
unsigned long size, uint32_t page_flags,
struct page *dummy_read_page);
-int ttm_agp_tt_populate(struct ttm_tt *ttm);
+int ttm_agp_tt_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
void ttm_agp_tt_unpopulate(struct ttm_tt *ttm);
#endif
*
* Add backing pages to all of @ttm
*/
-int ttm_pool_populate(struct ttm_tt *ttm);
+int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/**
* ttm_pool_unpopulate:
/**
* Populates and DMA maps pages to fulfil a ttm_dma_populate() request
*/
-int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt);
+int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt,
+ struct ttm_operation_ctx *ctx);
/**
* Unpopulates and DMA unmaps pages as part of a
*/
int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data);
-int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev);
+int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev,
+ struct ttm_operation_ctx *ctx);
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev);
#else
return 0;
}
static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma,
- struct device *dev)
+ struct device *dev,
+ struct ttm_operation_ctx *ctx)
{
return -ENOMEM;
}