drm/nouveau: stop using TTM's fault callback
author     Christian König <christian.koenig@amd.com>
           Fri, 25 Sep 2020 13:42:04 +0000 (15:42 +0200)
committer  Christian König <christian.koenig@amd.com>
           Mon, 28 Sep 2020 10:37:41 +0000 (12:37 +0200)
We already implement the fault handler ourselves, so just open code
what is necessary here.
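
For reference, the fault path that results from this change looks roughly
like the sketch below. It is assembled from the hunks that follow, not a
literal copy of the file: the opening of the function (local declarations
and the ttm_bo_vm_reserve() call) is outside the hunks shown here and is
reproduced only approximately.

/* Sketch of nouveau_ttm_fault() after this patch (illustrative only). */
static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct ttm_buffer_object *bo = vma->vm_private_data;
	pgprot_t prot;
	vm_fault_t ret;

	/* Reserve the BO; may itself return a vm_fault_t retry code. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Formerly TTM's fault_reserve_notify callback, now called directly. */
	ret = nouveau_ttm_fault_reserve_notify(bo);
	if (ret)
		goto error_unlock;

	nouveau_bo_del_io_reserve_lru(bo);
	prot = vm_get_page_prot(vma->vm_flags);
	ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
	nouveau_bo_add_io_reserve_lru(bo);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;

error_unlock:
	dma_resv_unlock(bo->base.resv);
	return ret;
}

Note that the helper itself already maps -EBUSY/-ERESTARTSYS from validation
to VM_FAULT_NOPAGE and any other error to VM_FAULT_SIGBUS, so its non-zero
returns can be propagated as vm_fault_t codes without further translation.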

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Dave Airlie <airlied@redhat.com>
Link: https://patchwork.freedesktop.org/patch/392323/
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_bo.h
drivers/gpu/drm/nouveau/nouveau_ttm.c

diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 8d51cfca07c854baaa255575e611664303274571..1d4b16c0e353bd00cf6f9919c15b387111f72ec4 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1226,8 +1226,7 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
        mutex_unlock(&drm->ttm.io_reserve_mutex);
 }
 
-static int
-nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
        struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -1243,34 +1242,38 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
                    !nvbo->kind)
                        return 0;
 
-               if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-                       nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
-                                                0);
+               if (bo->mem.mem_type != TTM_PL_SYSTEM)
+                       return 0;
+
+               nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
+
+       } else {
+               /* make sure bo is in mappable vram */
+               if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
+                   bo->mem.start + bo->mem.num_pages < mappable)
+                       return 0;
 
-                       ret = nouveau_bo_validate(nvbo, false, false);
-                       if (ret)
-                               return ret;
+               for (i = 0; i < nvbo->placement.num_placement; ++i) {
+                       nvbo->placements[i].fpfn = 0;
+                       nvbo->placements[i].lpfn = mappable;
                }
-               return 0;
-       }
 
-       /* make sure bo is in mappable vram */
-       if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-           bo->mem.start + bo->mem.num_pages < mappable)
-               return 0;
+               for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
+                       nvbo->busy_placements[i].fpfn = 0;
+                       nvbo->busy_placements[i].lpfn = mappable;
+               }
 
-       for (i = 0; i < nvbo->placement.num_placement; ++i) {
-               nvbo->placements[i].fpfn = 0;
-               nvbo->placements[i].lpfn = mappable;
+               nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
        }
 
-       for (i = 0; i < nvbo->placement.num_busy_placement; ++i) {
-               nvbo->busy_placements[i].fpfn = 0;
-               nvbo->busy_placements[i].lpfn = mappable;
-       }
+       ret = nouveau_bo_validate(nvbo, false, false);
+       if (unlikely(ret == -EBUSY || ret == -ERESTARTSYS))
+               return VM_FAULT_NOPAGE;
+       else if (unlikely(ret))
+               return VM_FAULT_SIGBUS;
 
-       nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_VRAM, 0);
-       return nouveau_bo_validate(nvbo, false, false);
+       ttm_bo_move_to_lru_tail_unlocked(bo);
+       return 0;
 }
 
 static int
@@ -1381,7 +1384,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
        .move_notify = nouveau_bo_move_ntfy,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
-       .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
 };
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.h b/drivers/gpu/drm/nouveau/nouveau_bo.h
index ff68ded8d590dec84c2d4f75118e235705127154..641ef6298a0ecdfb1e0abb2706bcdcea9a64e0b9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.h
@@ -89,6 +89,7 @@ void nouveau_bo_placement_set(struct nouveau_bo *, u32 type, u32 busy);
 void nouveau_bo_wr16(struct nouveau_bo *, unsigned index, u16 val);
 u32  nouveau_bo_rd32(struct nouveau_bo *, unsigned index);
 void nouveau_bo_wr32(struct nouveau_bo *, unsigned index, u32 val);
+vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo);
 void nouveau_bo_fence(struct nouveau_bo *, struct nouveau_fence *, bool exclusive);
 int  nouveau_bo_validate(struct nouveau_bo *, bool interruptible,
                         bool no_wait_gpu);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 4273417534410cea259057dda4a8500d9d024280..edf3bb89a47f83cb980824d90f03dc12c12c123e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -134,17 +134,19 @@ static vm_fault_t nouveau_ttm_fault(struct vm_fault *vmf)
        if (ret)
                return ret;
 
-       nouveau_bo_del_io_reserve_lru(bo);
+       ret = nouveau_ttm_fault_reserve_notify(bo);
+       if (ret)
+               goto error_unlock;
 
+       nouveau_bo_del_io_reserve_lru(bo);
        prot = vm_get_page_prot(vma->vm_flags);
        ret = ttm_bo_vm_fault_reserved(vmf, prot, TTM_BO_VM_NUM_PREFAULT, 1);
+       nouveau_bo_add_io_reserve_lru(bo);
        if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
                return ret;
 
-       nouveau_bo_add_io_reserve_lru(bo);
-
+error_unlock:
        dma_resv_unlock(bo->base.resv);
-
        return ret;
 }