drm/i915: Start returning an error from i915_vma_move_to_active()
author    Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 6 Jul 2018 10:39:44 +0000 (11:39 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
          Fri, 6 Jul 2018 17:22:37 +0000 (18:22 +0100)
Handling such a late error in request construction is tricky, but to
accommodate future patches which may need to allocate here, we must be
able to fail. By the time the error occurs, global state has already
been adjusted to track the new request, so we must still finish and
submit the request. However, we do not want its payload to execute, as
not everything is being tracked by the request, so we opt to cancel
the commands inside the request instead.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180706103947.15919-3-chris@chris-wilson.co.uk
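
Each call site is converted to the same shape. The helper below is a
minimal sketch of that pattern, not part of the patch: the name
move_to_active_checked is hypothetical, while i915_vma_move_to_active(),
i915_request_skip() and i915_request_add() are the real i915 functions
used in the hunks that follow. Being kernel-internal i915 code, it is
not buildable standalone.

        /*
         * Hypothetical helper illustrating the error-handling pattern
         * this patch introduces at each caller.
         */
        static int move_to_active_checked(struct i915_vma *vma,
                                          struct i915_request *rq,
                                          unsigned int flags)
        {
                int err;

                err = i915_vma_move_to_active(vma, rq, flags);
                if (err) {
                        /*
                         * Too late to unwind: global state already
                         * tracks rq, so cancel the commands inside
                         * the request instead...
                         */
                        i915_request_skip(rq, err);
                }

                /* ...but still finish and submit the request itself. */
                i915_request_add(rq);
                return err;
        }
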
12 files changed:
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/selftests/huge_pages.c
drivers/gpu/drm/i915/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/selftests/i915_gem_context.c
drivers/gpu/drm/i915/selftests/i915_gem_object.c
drivers/gpu/drm/i915/selftests/i915_request.c
drivers/gpu/drm/i915/selftests/intel_hangcheck.c
drivers/gpu/drm/i915/selftests/intel_lrc.c
drivers/gpu/drm/i915/selftests/intel_workarounds.c

index 928818f218f7fb91091208fbb365c5a53846e395..b0e566956b8d5ce1609c0f19f31ffa45e610adee 100644 (file)
@@ -476,7 +476,11 @@ static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
                        i915_gem_obj_finish_shmem_access(bb->obj);
                        bb->accessing = false;
 
-                       i915_vma_move_to_active(bb->vma, workload->req, 0);
+                       ret = i915_vma_move_to_active(bb->vma,
+                                                     workload->req,
+                                                     0);
+                       if (ret)
+                               goto err;
                }
        }
        return 0;
index c1c637e47bf5d4d3338f812e96e4cd3ac8efbfd7..07846e63671d0c33d59533cddae9e4bd96a2126a 100644 (file)
@@ -3090,9 +3090,9 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 }
 
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
-void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct i915_request *rq,
-                            unsigned int flags);
+int __must_check i915_vma_move_to_active(struct i915_vma *vma,
+                                        struct i915_request *rq,
+                                        unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
                         struct drm_device *dev,
                         struct drm_mode_create_dumb *args);
index 91f20445147f1c4dbd32c80dec816670627aaa52..97136e4ce91db1f7abd6d49eba9619c964e095f4 100644 (file)
@@ -1165,12 +1165,16 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
                goto err_request;
 
        GEM_BUG_ON(!reservation_object_test_signaled_rcu(batch->resv, true));
-       i915_vma_move_to_active(batch, rq, 0);
-       i915_vma_unpin(batch);
+       err = i915_vma_move_to_active(batch, rq, 0);
+       if (err)
+               goto skip_request;
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto skip_request;
 
        rq->batch = batch;
+       i915_vma_unpin(batch);
 
        cache->rq = rq;
        cache->rq_cmd = cmd;
@@ -1179,6 +1183,8 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
        /* Return with batch mapping (cmd) still pinned */
        return 0;
 
+skip_request:
+       i915_request_skip(rq, err);
 err_request:
        i915_request_add(rq);
 err_unpin:
@@ -1818,7 +1824,11 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
                unsigned int flags = eb->flags[i];
                struct i915_vma *vma = eb->vma[i];
 
-               i915_vma_move_to_active(vma, eb->request, flags);
+               err = i915_vma_move_to_active(vma, eb->request, flags);
+               if (unlikely(err)) {
+                       i915_request_skip(eb->request, err);
+                       return err;
+               }
 
                __eb_unreserve_vma(vma, flags);
                vma->exec_flags = NULL;
@@ -1877,9 +1887,9 @@ static void export_fence(struct i915_vma *vma,
        reservation_object_unlock(resv);
 }
 
-void i915_vma_move_to_active(struct i915_vma *vma,
-                            struct i915_request *rq,
-                            unsigned int flags)
+int i915_vma_move_to_active(struct i915_vma *vma,
+                           struct i915_request *rq,
+                           unsigned int flags)
 {
        struct drm_i915_gem_object *obj = vma->obj;
        const unsigned int idx = rq->engine->id;
@@ -1916,6 +1926,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
                i915_gem_active_set(&vma->last_fence, rq);
 
        export_fence(vma, rq, flags);
+       return 0;
 }
 
 static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
index 3210cedfa46c374829fa014932899a5637c610e7..90baf9086d0a49f0fd2667a20ec337f520361051 100644 (file)
@@ -222,7 +222,7 @@ int i915_gem_render_state_emit(struct i915_request *rq)
                        goto err_unpin;
        }
 
-       i915_vma_move_to_active(so.vma, rq, 0);
+       err = i915_vma_move_to_active(so.vma, rq, 0);
 err_unpin:
        i915_vma_unpin(so.vma);
 err_vma:
index 84bed69f30cc2c6b43741dc7dae5413241e1d1f6..d9f439f6219f6036f563753aa11ee85b5521d192 100644 (file)
@@ -985,7 +985,10 @@ static int gpu_write(struct i915_vma *vma,
                goto err_request;
        }
 
-       i915_vma_move_to_active(batch, rq, 0);
+       err = i915_vma_move_to_active(batch, rq, 0);
+       if (err)
+               goto err_request;
+
        i915_gem_object_set_active_reference(batch->obj);
        i915_vma_unpin(batch);
        i915_vma_close(batch);
@@ -996,7 +999,9 @@ static int gpu_write(struct i915_vma *vma,
        if (err)
                goto err_request;
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               i915_request_skip(rq, err);
 
 err_request:
        i915_request_add(rq);
index 97a16311f083102a0f16ade83e992da17821720f..1de7c1402fd5a724893606ed9aeba38a5dcecf23 100644 (file)
@@ -222,12 +222,12 @@ static int gpu_set(struct drm_i915_gem_object *obj,
        }
        intel_ring_advance(rq, cs);
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unpin(vma);
 
        i915_request_add(rq);
 
-       return 0;
+       return err;
 }
 
 static bool always_valid(struct drm_i915_private *i915)
index c642ab97698ee8d31c65085edb47d95336dbe0e9..5fbe15f4effdbd7888a3c4b89c34fe90ee20e792 100644 (file)
@@ -170,18 +170,26 @@ static int gpu_fill(struct drm_i915_gem_object *obj,
        if (err)
                goto err_request;
 
-       i915_vma_move_to_active(batch, rq, 0);
+       err = i915_vma_move_to_active(batch, rq, 0);
+       if (err)
+               goto skip_request;
+
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto skip_request;
+
        i915_gem_object_set_active_reference(batch->obj);
        i915_vma_unpin(batch);
        i915_vma_close(batch);
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
        i915_vma_unpin(vma);
 
        i915_request_add(rq);
 
        return 0;
 
+skip_request:
+       i915_request_skip(rq, err);
 err_request:
        i915_request_add(rq);
 err_batch:
index b2ccbc5e2bbedce312ee21fa83bd934c69cd9d8d..25c2b2d433bdb0e89726bcce7087fa63f83154fa 100644 (file)
@@ -464,13 +464,14 @@ static int make_obj_busy(struct drm_i915_gem_object *obj)
                return PTR_ERR(rq);
        }
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
 
        i915_request_add(rq);
 
-       i915_gem_object_set_active_reference(obj);
+       __i915_gem_object_release_unless_active(obj);
        i915_vma_unpin(vma);
-       return 0;
+
+       return err;
 }
 
 static bool assert_mmap_offset(struct drm_i915_private *i915,
index cc27edc40356c6cd0a202eef2976854bdbfa4d25..43995fc3534d77e46ecc1b2874d502525d279924 100644 (file)
@@ -675,7 +675,9 @@ static int live_all_engines(void *arg)
                        i915_gem_object_set_active_reference(batch->obj);
                }
 
-               i915_vma_move_to_active(batch, request[id], 0);
+               err = i915_vma_move_to_active(batch, request[id], 0);
+               GEM_BUG_ON(err);
+
                i915_request_get(request[id]);
                i915_request_add(request[id]);
        }
@@ -785,7 +787,9 @@ static int live_sequential_engines(void *arg)
                GEM_BUG_ON(err);
                request[id]->batch = batch;
 
-               i915_vma_move_to_active(batch, request[id], 0);
+               err = i915_vma_move_to_active(batch, request[id], 0);
+               GEM_BUG_ON(err);
+
                i915_gem_object_set_active_reference(batch->obj);
                i915_vma_get(batch);
 
index c838f7d08cb97bb701f90999e8f431b5751405e3..73462a65a33035e8dddc33576dd25a4ccdb5c6a4 100644 (file)
@@ -130,13 +130,19 @@ static int emit_recurse_batch(struct hang *h,
        if (err)
                goto unpin_vma;
 
-       i915_vma_move_to_active(vma, rq, 0);
+       err = i915_vma_move_to_active(vma, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }
 
-       i915_vma_move_to_active(hws, rq, 0);
+       err = i915_vma_move_to_active(hws, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
@@ -205,6 +211,7 @@ static int emit_recurse_batch(struct hang *h,
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, flags);
 
+unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
index 730a02d3905865d1b0243f97cddb641d0b12d719..636cb68191e3b46737f5e4d43b7faa65051464f1 100644 (file)
@@ -104,13 +104,19 @@ static int emit_recurse_batch(struct spinner *spin,
        if (err)
                goto unpin_vma;
 
-       i915_vma_move_to_active(vma, rq, 0);
+       err = i915_vma_move_to_active(vma, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(vma->obj)) {
                i915_gem_object_get(vma->obj);
                i915_gem_object_set_active_reference(vma->obj);
        }
 
-       i915_vma_move_to_active(hws, rq, 0);
+       err = i915_vma_move_to_active(hws, rq, 0);
+       if (err)
+               goto unpin_hws;
+
        if (!i915_gem_object_has_active_reference(hws->obj)) {
                i915_gem_object_get(hws->obj);
                i915_gem_object_set_active_reference(hws->obj);
@@ -134,6 +140,7 @@ static int emit_recurse_batch(struct spinner *spin,
 
        err = rq->engine->emit_bb_start(rq, vma->node.start, PAGE_SIZE, 0);
 
+unpin_hws:
        i915_vma_unpin(hws);
 unpin_vma:
        i915_vma_unpin(vma);
index 4a9dc01a364a8a40728a77281b1df1991e4a818d..fafdec3fe83eb0616e3cda093fc37549fc67cb24 100644 (file)
@@ -49,6 +49,10 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
                goto err_pin;
        }
 
+       err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+       if (err)
+               goto err_req;
+
        srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        if (INTEL_GEN(ctx->i915) >= 8)
                srm++;
@@ -67,8 +71,6 @@ read_nonprivs(struct i915_gem_context *ctx, struct intel_engine_cs *engine)
        }
        intel_ring_advance(rq, cs);
 
-       i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
-
        i915_gem_object_get(result);
        i915_gem_object_set_active_reference(result);