Instead of signaling failure by setting the node pointer to
NULL, do so by returning -ENOSPC.
v2: add memset() to make sure that mem is always initialized.
v3: drop the memset(), only set mm_node = NULL and move the mm_node init into amdgpu.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Michael J. Ruhl <michael.j.ruhl@intel.com>
Link: https://patchwork.freedesktop.org/patch/373181/
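
To make the new convention concrete before the individual hunks: under the old scheme a caller had to check both the return code and mem->mm_node, under the new one -ENOSPC alone means "this manager has no space". Below is a minimal userspace model of that contract; the toy_mem/toy_get_node names are invented stand-ins, not the TTM types.

  /* Toy model of the get_node contract; toy_* names are illustrative,
   * not TTM API. Build with: cc -Wall contract.c */
  #include <errno.h>
  #include <stddef.h>
  #include <stdio.h>

  struct toy_mem {
          void *mm_node;
  };

  /* New convention: report a full heap with -ENOSPC instead of
   * returning 0 with mm_node left at NULL. */
  static int toy_get_node(struct toy_mem *mem, size_t avail, size_t want)
  {
          if (want > avail)
                  return -ENOSPC;
          mem->mm_node = mem;     /* stand-in for a real allocation */
          return 0;
  }

  int main(void)
  {
          struct toy_mem mem = { .mm_node = NULL };
          int ret = toy_get_node(&mem, 4, 8);

          if (ret == -ENOSPC)     /* old check: ret == 0 && !mem.mm_node */
                  printf("no space, try eviction or the next placement\n");
          else if (ret)
                  printf("hard failure: %d\n", ret);
          else
                  printf("allocated\n");
          return 0;
  }

The same three-way split (success, -ENOSPC, hard error) is what every hunk below implements on one side of the interface or the other.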
if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
atomic64_read(&mgr->available) < mem->num_pages) {
spin_unlock(&mgr->lock);
- return 0;
+ return -ENOSPC;
}
atomic64_sub(mem->num_pages, &mgr->available);
spin_unlock(&mgr->lock);
if (unlikely(r)) {
kfree(node);
mem->mm_node = NULL;
- r = 0;
goto err_out;
}
} else {
mem_bytes = (u64)mem->num_pages << PAGE_SHIFT;
if (atomic64_add_return(mem_bytes, &mgr->usage) > max_bytes) {
atomic64_sub(mem_bytes, &mgr->usage);
- mem->mm_node = NULL;
- return 0;
+ return -ENOSPC;
}
if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
kvfree(nodes);
- return r == -ENOSPC ? 0 : r;
+ return r;
}
ret = nouveau_mem_vram(reg, nvbo->contig, nvbo->page);
if (ret) {
nouveau_mem_del(reg);
- if (ret == -ENOSPC) {
- reg->mm_node = NULL;
- return 0;
- }
return ret;
}
reg->num_pages << PAGE_SHIFT, &mem->vma[0]);
if (ret) {
nouveau_mem_del(reg);
- if (ret == -ENOSPC) {
- reg->mm_node = NULL;
- return 0;
- }
return ret;
}
ticket = dma_resv_locking_ctx(bo->base.resv);
do {
ret = (*man->func->get_node)(man, bo, place, mem);
- if (unlikely(ret != 0))
- return ret;
- if (mem->mm_node)
+ if (likely(!ret))
break;
+ if (unlikely(ret != -ENOSPC))
+ return ret;
ret = ttm_mem_evict_first(bdev, mem->mem_type, place, ctx,
ticket);
if (unlikely(ret != 0))
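
This is the eviction path: per iteration there are now exactly three outcomes. A compilable sketch of that control flow follows, where toy_get_node() and toy_evict_one() are invented stand-ins for the get_node callback and ttm_mem_evict_first(), not TTM functions.

  /* Sketch of the force-space retry loop under the new convention. */
  #include <errno.h>
  #include <stdio.h>

  static int avail = 2;           /* free pages in the domain */
  static int evictable = 3;       /* buffers that could be evicted */

  static int toy_get_node(int want)
  {
          return want > avail ? -ENOSPC : 0;
  }

  static int toy_evict_one(void)
  {
          if (!evictable)
                  return -EBUSY;  /* nothing left to evict */
          evictable--;
          avail++;                /* each eviction frees one page */
          return 0;
  }

  int main(void)
  {
          int want = 4, ret;

          do {
                  ret = toy_get_node(want);
                  if (!ret)
                          break;                  /* got our node */
                  if (ret != -ENOSPC)
                          return 1;               /* hard error: give up */
                  ret = toy_evict_one();          /* make room, then retry */
          } while (!ret);

          if (ret)
                  printf("eviction failed: %d\n", ret);
          else
                  printf("allocated after evicting\n");
          return 0;
  }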
man = &bdev->man[mem->mem_type];
ret = (*man->func->get_node)(man, bo, place, mem);
+ if (ret == -ENOSPC)
+ continue;
if (unlikely(ret))
goto error;
- if (!mem->mm_node)
- continue;
-
ret = ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu);
if (unlikely(ret)) {
(*man->func->put_node)(man, mem);
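
In ttm_bo_mem_space() the same code now drives placement iteration: -ENOSPC means "this placement is full, try the next one", anything else aborts. A small model of that loop; free_pages[] and toy_get_node() are assumed stand-ins for the per-manager state and callback.

  /* Toy model of placement iteration under the new convention. */
  #include <errno.h>
  #include <stdio.h>

  #define NUM_PLACEMENTS 3

  /* Free pages per placement; the first two domains are full. */
  static int free_pages[NUM_PLACEMENTS] = { 0, 0, 16 };

  static int toy_get_node(int idx, int want)
  {
          return want > free_pages[idx] ? -ENOSPC : 0;
  }

  int main(void)
  {
          int want = 8, ret = -ENOSPC, i;

          for (i = 0; i < NUM_PLACEMENTS; i++) {
                  ret = toy_get_node(i, want);
                  if (ret == -ENOSPC)
                          continue;       /* placement full: try the next */
                  if (ret)
                          return 1;       /* hard error: abort */
                  printf("placed in domain %d\n", i);
                  break;
          }
          if (ret == -ENOSPC)
                  printf("all placements full, caller may force space\n");
          return 0;
  }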
mem.page_alignment = bo->mem.page_alignment;
mem.bus.io_reserved_vm = false;
mem.bus.io_reserved_count = 0;
+ mem.mm_node = NULL;
+
/*
* Determine where to move the buffer.
*/
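
The mem.mm_node = NULL init added here is the counterpart of the driver-side deletions: since get_node no longer clears the pointer on failure, the core has to hand it in pre-cleared, or cleanup would see stack garbage. A tiny illustration of the hazard, again with an invented toy_mem rather than ttm_mem_reg:

  /* Why the core now clears mm_node up front: a failed get_node no
   * longer touches it, so an uninitialized field would be garbage. */
  #include <errno.h>
  #include <stdio.h>

  struct toy_mem {
          void *mm_node;
  };

  static int toy_get_node(struct toy_mem *mem)
  {
          (void)mem;              /* failure path: mm_node left untouched */
          return -ENOSPC;
  }

  int main(void)
  {
          struct toy_mem mem = { .mm_node = NULL };   /* the added init */
          int ret = toy_get_node(&mem);

          if (ret && !mem.mm_node)
                  printf("cleanup safely sees no node (ret=%d)\n", ret);
          return 0;
  }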
mem->start = node->start;
}
- return 0;
+ return ret;
}
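
The generic range manager needs no translation at all: drm_mm_insert_node_in_range() already reports a full range as -ENOSPC, so forwarding ret is enough where the old code swallowed the error and unconditionally returned 0. A sketch of that pass-through shape, with toy_mm_insert() mimicking (not reproducing) the drm_mm helper:

  /* Pass-through sketch: the allocator itself reports -ENOSPC. */
  #include <errno.h>
  #include <stdio.h>

  struct toy_node {
          int start;
  };

  static int toy_mm_insert(struct toy_node *node, int want, int range)
  {
          if (want > range)
                  return -ENOSPC;
          node->start = 0;
          return 0;
  }

  static int toy_man_get_node(struct toy_node *node, int want)
  {
          int ret = toy_mm_insert(node, want, 8);

          /* Old code swallowed the error and returned 0; now the
           * code travels up to the TTM core unmodified. */
          return ret;
  }

  int main(void)
  {
          struct toy_node node;

          printf("%d\n", toy_man_get_node(&node, 16));    /* prints -28 */
          return 0;
  }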
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
(struct vmwgfx_gmrid_man *)man->priv;
int id;
- mem->mm_node = NULL;
-
id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
if (id < 0)
- return (id != -ENOMEM ? 0 : id);
+ return id;
gman->used_gmr_pages -= bo->num_pages;
spin_unlock(&gman->lock);
ida_free(&gman->gmr_ida, id);
- return 0;
+ return -ENOSPC;
}
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,