git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/commitdiff
drm/amdgpu: fix and cleanup amdgpu_gem_object_close v4
author: Christian König <christian.koenig@amd.com>
Thu, 12 Mar 2020 11:03:34 +0000 (12:03 +0100)
committer: Alex Deucher <alexander.deucher@amd.com>
Wed, 1 Apr 2020 18:44:46 +0000 (14:44 -0400)
The problem is that we can't add the clear fence to the BO
when there is an exclusive fence on it since we can't
guarantee the clear fence will complete after the
exclusive one.

To fix this refactor the function and also add the exclusive
fence as shared to the resv object.

v2: fix warning
v3: add excl fence as shared instead
v4: squash in fix for fence handling in amdgpu_gem_object_close

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: xinhui pan <xinhui.pan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c

index 4277125a79ee45ef61a8f9ad42f903e9b0a0fe7f..32f36c940abb5a9eb922c79cf9af97192ec85c1a 100644 (file)
@@ -161,16 +161,17 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
 
        struct amdgpu_bo_list_entry vm_pd;
        struct list_head list, duplicates;
+       struct dma_fence *fence = NULL;
        struct ttm_validate_buffer tv;
        struct ww_acquire_ctx ticket;
        struct amdgpu_bo_va *bo_va;
-       int r;
+       long r;
 
        INIT_LIST_HEAD(&list);
        INIT_LIST_HEAD(&duplicates);
 
        tv.bo = &bo->tbo;
-       tv.num_shared = 1;
+       tv.num_shared = 2;
        list_add(&tv.head, &list);
 
        amdgpu_vm_get_pd_bo(vm, &list, &vm_pd);
@@ -178,28 +179,34 @@ void amdgpu_gem_object_close(struct drm_gem_object *obj,
        r = ttm_eu_reserve_buffers(&ticket, &list, false, &duplicates);
        if (r) {
                dev_err(adev->dev, "leaking bo va because "
-                       "we fail to reserve bo (%d)\n", r);
+                       "we fail to reserve bo (%ld)\n", r);
                return;
        }
        bo_va = amdgpu_vm_bo_find(vm, bo);
-       if (bo_va && --bo_va->ref_count == 0) {
-               amdgpu_vm_bo_rmv(adev, bo_va);
-
-               if (amdgpu_vm_ready(vm)) {
-                       struct dma_fence *fence = NULL;
+       if (!bo_va || --bo_va->ref_count)
+               goto out_unlock;
 
-                       r = amdgpu_vm_clear_freed(adev, vm, &fence);
-                       if (unlikely(r)) {
-                               dev_err(adev->dev, "failed to clear page "
-                                       "tables on GEM object close (%d)\n", r);
-                       }
+       amdgpu_vm_bo_rmv(adev, bo_va);
+       if (!amdgpu_vm_ready(vm))
+               goto out_unlock;
 
-                       if (fence) {
-                               amdgpu_bo_fence(bo, fence, true);
-                               dma_fence_put(fence);
-                       }
-               }
+       fence = dma_resv_get_excl(bo->tbo.base.resv);
+       if (fence) {
+               amdgpu_bo_fence(bo, fence, true);
+               fence = NULL;
        }
+
+       r = amdgpu_vm_clear_freed(adev, vm, &fence);
+       if (r || !fence)
+               goto out_unlock;
+
+       amdgpu_bo_fence(bo, fence, true);
+       dma_fence_put(fence);
+
+out_unlock:
+       if (unlikely(r < 0))
+               dev_err(adev->dev, "failed to clear page "
+                       "tables on GEM object close (%ld)\n", r);
        ttm_eu_backoff_reservation(&ticket, &list);
 }