gpu: drm: ttm: Add new return type vm_fault_t
author     Souptick Joarder <jrdr.linux@gmail.com>
           Fri, 1 Jun 2018 19:27:24 +0000 (00:57 +0530)
committer  Alex Deucher <alexander.deucher@amd.com>
           Tue, 19 Jun 2018 18:17:38 +0000 (13:17 -0500)
Use the new return type vm_fault_t for the fault handler. For
now, this just documents that the function returns a VM_FAULT_*
value rather than an errno. Once all instances are converted,
vm_fault_t will become a distinct type.

Ref: commit 1c8f422059ae ("mm: change return type to vm_fault_t")
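
As a hedged illustration (not part of this patch), a fault handler
written against the new type can return the vmf_insert_* result
directly. The handler name and the pfn derivation below are
hypothetical; vm_fault_t, struct vm_fault, and vmf_insert_pfn()
are the real kernel interfaces:

  /* Sketch only: example_vm_fault() and the pfn math are made up. */
  static vm_fault_t example_vm_fault(struct vm_fault *vmf)
  {
          /* Hypothetical driver: mapping backed by one contiguous
           * region whose first pfn is stashed in vm_private_data. */
          unsigned long base_pfn =
                  (unsigned long)vmf->vma->vm_private_data;

          /* vmf_insert_pfn() already returns a vm_fault_t code
           * (VM_FAULT_NOPAGE on success), so no errno mapping is
           * needed in the driver. */
          return vmf_insert_pfn(vmf->vma, vmf->address,
                                base_pfn + vmf->pgoff);
  }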

Previously, vm_insert_{mixed,pfn} returned an errno, which each
driver then mapped to a VM_FAULT_* value. The new functions
vmf_insert_{mixed,pfn} remove this indirection by returning a
VM_FAULT_* value directly.
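
For context, a minimal before/after sketch of that change (the
variable declarations are simplified; the errno translation shown
is the one the removed lines below performed):

  vm_fault_t ret;
  int err;

  /* Before: vm_insert_pfn() returned an errno that each caller
   * translated into a VM_FAULT_* code by hand. */
  err = vm_insert_pfn(vma, address, pfn);
  if (unlikely(err != 0))
          ret = (err == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
  else
          ret = VM_FAULT_NOPAGE;

  /* After: vmf_insert_pfn() performs that translation internally
   * and returns a vm_fault_t directly. */
  ret = vmf_insert_pfn(vma, address, pfn);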

Signed-off-by: Souptick Joarder <jrdr.linux@gmail.com>
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
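
One behavioral detail visible in the diff below: the prefault loop
now detects failure with the VM_FAULT_ERROR mask instead of
comparing errnos. A minimal sketch of that idiom (variable name
assumed):

  /* VM_FAULT_ERROR is a mask of the VM_FAULT_* failure bits
   * (VM_FAULT_OOM, VM_FAULT_SIGBUS, VM_FAULT_SIGSEGV, ...), so a
   * single bitwise test replaces per-errno comparisons. */
  if (ret & VM_FAULT_ERROR)
          return ret;     /* propagate the fault code unchanged */
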
drivers/gpu/drm/ttm/ttm_bo_vm.c

index c7ece7613a6aa4c0e5d64bcd7054b8ad34c37e4f..0ca0ec47334e21b6decfdc0acaa36b1c5d041bae 100644
 
 #define TTM_BO_VM_NUM_PREFAULT 16
 
-static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
+static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
                                struct vm_fault *vmf)
 {
-       int ret = 0;
+       vm_fault_t ret = 0;
+       int err = 0;
 
        if (likely(!bo->moving))
                goto out_unlock;
@@ -78,9 +79,9 @@ static int ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
        /*
         * Ordinary wait.
         */
-       ret = dma_fence_wait(bo->moving, true);
-       if (unlikely(ret != 0)) {
-               ret = (ret != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
+       err = dma_fence_wait(bo->moving, true);
+       if (unlikely(err != 0)) {
+               ret = (err != -ERESTARTSYS) ? VM_FAULT_SIGBUS :
                        VM_FAULT_NOPAGE;
                goto out_unlock;
        }
@@ -105,7 +106,7 @@ static unsigned long ttm_bo_io_mem_pfn(struct ttm_buffer_object *bo,
                + page_offset;
 }
 
-static int ttm_bo_vm_fault(struct vm_fault *vmf)
+static vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)
 {
        struct vm_area_struct *vma = vmf->vma;
        struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
@@ -116,8 +117,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
        unsigned long pfn;
        struct ttm_tt *ttm = NULL;
        struct page *page;
-       int ret;
+       int err;
        int i;
+       vm_fault_t ret = VM_FAULT_NOPAGE;
        unsigned long address = vmf->address;
        struct ttm_mem_type_manager *man =
                &bdev->man[bo->mem.mem_type];
@@ -129,9 +131,9 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
         * for reserve, and if it fails, retry the fault after waiting
         * for the buffer to become unreserved.
         */
-       ret = ttm_bo_reserve(bo, true, true, NULL);
-       if (unlikely(ret != 0)) {
-               if (ret != -EBUSY)
+       err = ttm_bo_reserve(bo, true, true, NULL);
+       if (unlikely(err != 0)) {
+               if (err != -EBUSY)
                        return VM_FAULT_NOPAGE;
 
                if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -163,8 +165,8 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
        }
 
        if (bdev->driver->fault_reserve_notify) {
-               ret = bdev->driver->fault_reserve_notify(bo);
-               switch (ret) {
+               err = bdev->driver->fault_reserve_notify(bo);
+               switch (err) {
                case 0:
                        break;
                case -EBUSY:
@@ -192,13 +194,13 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                goto out_unlock;
        }
 
-       ret = ttm_mem_io_lock(man, true);
-       if (unlikely(ret != 0)) {
+       err = ttm_mem_io_lock(man, true);
+       if (unlikely(err != 0)) {
                ret = VM_FAULT_NOPAGE;
                goto out_unlock;
        }
-       ret = ttm_mem_io_reserve_vm(bo);
-       if (unlikely(ret != 0)) {
+       err = ttm_mem_io_reserve_vm(bo);
+       if (unlikely(err != 0)) {
                ret = VM_FAULT_SIGBUS;
                goto out_io_unlock;
        }
@@ -266,23 +268,20 @@ static int ttm_bo_vm_fault(struct vm_fault *vmf)
                }
 
                if (vma->vm_flags & VM_MIXEDMAP)
-                       ret = vm_insert_mixed(&cvma, address,
+                       ret = vmf_insert_mixed(&cvma, address,
                                        __pfn_to_pfn_t(pfn, PFN_DEV));
                else
-                       ret = vm_insert_pfn(&cvma, address, pfn);
+                       ret = vmf_insert_pfn(&cvma, address, pfn);
 
                /*
                 * Somebody beat us to this PTE or prefaulting to
                 * an already populated PTE, or prefaulting error.
                 */
 
-               if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+               if (unlikely((ret == VM_FAULT_NOPAGE && i > 0)))
                        break;
-               else if (unlikely(ret != 0)) {
-                       ret =
-                           (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
+               else if (unlikely(ret & VM_FAULT_ERROR))
                        goto out_io_unlock;
-               }
 
                address += PAGE_SIZE;
                if (unlikely(++page_offset >= page_last))