         struct drm_mm_node *nodes;
         enum drm_mm_insert_mode mode;
         unsigned long lpfn, num_nodes, pages_per_node, pages_left;
-        uint64_t usage = 0, vis_usage = 0;
+        uint64_t vis_usage = 0;
         unsigned i;
         int r;
         if (!lpfn)
                 lpfn = man->size;
+        /* bail out quickly if there's likely not enough VRAM for this BO */
+        if (atomic64_add_return(mem->num_pages << PAGE_SHIFT, &mgr->usage) >
+            adev->gmc.mc_vram_size) {
+                atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
+                mem->mm_node = NULL;
+                return 0;
+        }
+
         if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
                 pages_per_node = ~0ul;
                 num_nodes = 1;
         nodes = kvmalloc_array((uint32_t)num_nodes, sizeof(*nodes),
                                GFP_KERNEL | __GFP_ZERO);
-        if (!nodes)
+        if (!nodes) {
+                atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
                 return -ENOMEM;
+        }
         mode = DRM_MM_INSERT_BEST;
         if (place->flags & TTM_PL_FLAG_TOPDOWN)
                 if (unlikely(r))
                         break;
-                usage += nodes[i].size << PAGE_SHIFT;
                 vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
                 amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
                 pages_left -= pages;
                 if (unlikely(r))
                         goto error;
-                usage += nodes[i].size << PAGE_SHIFT;
                 vis_usage += amdgpu_vram_mgr_vis_size(adev, &nodes[i]);
                 amdgpu_vram_mgr_virt_start(mem, &nodes[i]);
                 pages_left -= pages;
         }
         spin_unlock(&mgr->lock);
-        atomic64_add(usage, &mgr->usage);
         atomic64_add(vis_usage, &mgr->vis_usage);
         mem->mm_node = nodes;
         while (i--)
                 drm_mm_remove_node(&nodes[i]);
         spin_unlock(&mgr->lock);
+        atomic64_sub(mem->num_pages << PAGE_SHIFT, &mgr->usage);
         kvfree(nodes);
         return r == -ENOSPC ? 0 : r;
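
Outside the kernel tree, the accounting pattern these hunks rely on can be sketched with plain C11 atomics: charge the requested bytes up front, back the charge out if the running total would exceed capacity, and back it out again whenever a later step fails. This is only a sketch under assumed names: vram_usage, vram_capacity, try_reserve() and unreserve() are hypothetical stand-ins for mgr->usage, adev->gmc.mc_vram_size and the atomic64_add_return()/atomic64_sub() calls in the patch.

/*
 * Minimal user-space sketch (hypothetical names) of the reserve-then-rollback
 * accounting above: atomically charge a request, undo the charge when the
 * total would overshoot capacity or when a later allocation step fails.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static atomic_uint_fast64_t vram_usage = 0;           /* bytes currently charged */
static const uint64_t vram_capacity = 8ull << 30;     /* assumed 8 GiB of VRAM */

/* Charge @bytes eagerly; roll the charge back if that would exceed capacity. */
static bool try_reserve(uint64_t bytes)
{
        uint64_t total = atomic_fetch_add(&vram_usage, bytes) + bytes;

        if (total > vram_capacity) {
                atomic_fetch_sub(&vram_usage, bytes); /* likely no space, bail out */
                return false;
        }
        return true;
}

/* Undo a charge on the error or free path. */
static void unreserve(uint64_t bytes)
{
        atomic_fetch_sub(&vram_usage, bytes);
}

int main(void)
{
        uint64_t request = 1ull << 30;                /* 1 GiB request */

        if (!try_reserve(request)) {
                puts("not enough VRAM, caller falls back or evicts");
                return 0;
        }

        /* ... range allocation would happen here; pretend one step can fail ... */
        bool carve_failed = false;
        if (carve_failed)
                unreserve(request);                   /* mirrors the goto error path */

        printf("charged %llu bytes, usage now %llu\n",
               (unsigned long long)request,
               (unsigned long long)atomic_load(&vram_usage));
        return 0;
}

Charging before the drm_mm search keeps the obviously-over-capacity rejection lock-free, but it also means every failure path has to remember the rollback, which is exactly what the added atomic64_sub() calls in the -ENOMEM and error paths above do.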