Revert "drm/i915: Extend LRC pinning to cover GPU context writeback"
Author:     Daniel Vetter <daniel.vetter@ffwll.ch>
AuthorDate: Fri, 4 Dec 2015 16:27:15 +0000 (17:27 +0100)
Commit:     Daniel Vetter <daniel.vetter@ffwll.ch>
CommitDate: Fri, 4 Dec 2015 16:34:40 +0000 (17:34 +0100)
This reverts commit 6d65ba943a2d1e4292a07ca7ddb6c5138b9efa5d.

Mika Kuoppala tracked down a use-after-free crash on module unload to
this commit: ring->last_context keeps pointing at the context after
that context has been destroyed. Mika submitted a quick fix to patch
that up in the context destruction code, but that's too much of a hack.

The right fix is instead for the ring to hold a full reference on its
last context, like we do for legacy contexts.

Since this is causing a regression in BAT, revert it until we can
close this properly.
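
To make the intended fix concrete, here is a tiny standalone model of
that scheme (plain C, compilable on its own); every type and helper in
it (model_context, ctx_get/ctx_put, engine_switch_to) is invented for
the illustration and is not the i915 API.

/*
 * Standalone model (not i915 code) of the scheme described above: the
 * engine holds a full reference on its last context, so destroying the
 * context elsewhere cannot free memory the engine still points at.
 */
#include <assert.h>
#include <stdlib.h>

struct model_context {
	int refcount;
};

static struct model_context *ctx_get(struct model_context *ctx)
{
	ctx->refcount++;
	return ctx;
}

static void ctx_put(struct model_context *ctx)
{
	if (--ctx->refcount == 0)
		free(ctx);
}

struct model_engine {
	struct model_context *last_context;
};

/* Like the legacy context-switch path: reference the new context before
 * dropping the reference on the previous one. */
static void engine_switch_to(struct model_engine *engine,
			     struct model_context *to)
{
	ctx_get(to);
	if (engine->last_context)
		ctx_put(engine->last_context);
	engine->last_context = to;
}

int main(void)
{
	struct model_engine engine = { NULL };
	struct model_context *ctx = calloc(1, sizeof(*ctx));
	struct model_context *def = calloc(1, sizeof(*def));

	if (!ctx || !def)
		return 1;
	ctx->refcount = 1;	/* creator's reference */
	def->refcount = 1;

	engine_switch_to(&engine, ctx);
	ctx_put(ctx);		/* creator "destroys" the context */
	assert(engine.last_context->refcount == 1);	/* still alive */

	engine_switch_to(&engine, def);	/* only now is ctx really freed */
	ctx_put(engine.last_context);	/* drop the engine's ref on def */
	ctx_put(def);			/* drop the creator's ref on def */
	return 0;
}

The point of the model: as long as the engine owns a reference, the
final free can only happen after the engine has switched away to
another context, so last_context can never dangle.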

Cc: Nick Hoath <nicholas.hoath@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: David Gordon <david.s.gordon@intel.com>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Alex Dai <yu.dai@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=93248
Acked-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f95f3676764970cf86216fea0a6b048ea5ce2a0e..5e276f5d61da69d4085c297342c97e3dd693f190 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -884,7 +884,6 @@ struct intel_context {
        struct {
                struct drm_i915_gem_object *state;
                struct intel_ringbuffer *ringbuf;
-               bool dirty;
                int pin_count;
        } engine[I915_NUM_RINGS];
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef4dbe3d442de54af8a5466b1166bd0a7f6d8a3f..a531cb83295c13fd5e7852fc25a268774ec2c61b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1354,9 +1354,6 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
        trace_i915_gem_request_retire(request);
 
-       if (i915.enable_execlists)
-               intel_lr_context_complete_check(request);
-
        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
         * of tail of the request to update the last known position
@@ -2767,6 +2764,10 @@ static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
                                        struct drm_i915_gem_request,
                                        execlist_link);
                        list_del(&submit_req->execlist_link);
+
+                       if (submit_req->ctx != ring->default_context)
+                               intel_lr_context_unpin(submit_req);
+
                        i915_gem_request_unreference(submit_req);
                }
                spin_unlock_irq(&ring->execlist_lock);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index c3504a09340c301a047fa686eb50a3ea4129113e..4ebafab53f30e6492e99c0bcac6256f1f5b1c2b9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -571,6 +571,9 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
        struct drm_i915_gem_request *cursor;
        int num_elements = 0;
 
+       if (request->ctx != ring->default_context)
+               intel_lr_context_pin(request);
+
        i915_gem_request_reference(request);
 
        spin_lock_irq(&ring->execlist_lock);
@@ -734,13 +737,6 @@ intel_logical_ring_advance_and_submit(struct drm_i915_gem_request *request)
        if (intel_ring_stopped(ring))
                return;
 
-       if (request->ctx != ring->default_context) {
-               if (!request->ctx->engine[ring->id].dirty) {
-                       intel_lr_context_pin(request);
-                       request->ctx->engine[ring->id].dirty = true;
-               }
-       }
-
        if (dev_priv->guc.execbuf_client)
                i915_guc_submit(dev_priv->guc.execbuf_client, request);
        else
@@ -967,6 +963,12 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
        spin_unlock_irq(&ring->execlist_lock);
 
        list_for_each_entry_safe(req, tmp, &retired_list, execlist_link) {
+               struct intel_context *ctx = req->ctx;
+               struct drm_i915_gem_object *ctx_obj =
+                               ctx->engine[ring->id].state;
+
+               if (ctx_obj && (ctx != ring->default_context))
+                       intel_lr_context_unpin(req);
                list_del(&req->execlist_link);
                i915_gem_request_unreference(req);
        }
@@ -1061,39 +1063,21 @@ reset_pin_count:
        return ret;
 }
 
-static void __intel_lr_context_unpin(struct intel_engine_cs *ring,
-               struct intel_context *ctx)
+void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
 {
-       struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
-       struct intel_ringbuffer *ringbuf = ctx->engine[ring->id].ringbuf;
+       struct intel_engine_cs *ring = rq->ring;
+       struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state;
+       struct intel_ringbuffer *ringbuf = rq->ringbuf;
+
        if (ctx_obj) {
                WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-               if (--ctx->engine[ring->id].pin_count == 0) {
+               if (--rq->ctx->engine[ring->id].pin_count == 0) {
                        intel_unpin_ringbuffer_obj(ringbuf);
                        i915_gem_object_ggtt_unpin(ctx_obj);
                }
        }
 }
 
-void intel_lr_context_unpin(struct drm_i915_gem_request *rq)
-{
-       __intel_lr_context_unpin(rq->ring, rq->ctx);
-}
-
-void intel_lr_context_complete_check(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *ring = req->ring;
-
-       if (ring->last_context && ring->last_context != req->ctx &&
-                       ring->last_context->engine[ring->id].dirty) {
-               __intel_lr_context_unpin(
-                               ring,
-                               ring->last_context);
-               ring->last_context->engine[ring->id].dirty = false;
-       }
-       ring->last_context = req->ctx;
-}
-
 static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 {
        int ret, i;
@@ -2367,76 +2351,6 @@ populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_o
        return 0;
 }
 
-/**
- * intel_lr_context_clean_ring() - clean the ring specific parts of an LRC
- * @ctx: the LR context being freed.
- * @ring: the engine being cleaned
- * @ctx_obj: the hw context being unreferenced
- * @ringbuf: the ringbuf being freed
- *
- * Take care of cleaning up the per-engine backing
- * objects and the logical ringbuffer.
- */
-static void
-intel_lr_context_clean_ring(struct intel_context *ctx,
-                           struct intel_engine_cs *ring,
-                           struct drm_i915_gem_object *ctx_obj,
-                           struct intel_ringbuffer *ringbuf)
-{
-       int ret;
-
-       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
-
-       if (ctx == ring->default_context) {
-               intel_unpin_ringbuffer_obj(ringbuf);
-               i915_gem_object_ggtt_unpin(ctx_obj);
-       }
-
-       if (ctx->engine[ring->id].dirty) {
-               struct drm_i915_gem_request *req = NULL;
-
-               /**
-                * If there is already a request pending on
-                * this ring, wait for that to complete,
-                * otherwise create a switch to idle request
-                */
-               if (list_empty(&ring->request_list)) {
-                       int ret;
-
-                       ret = i915_gem_request_alloc(
-                                       ring,
-                                       ring->default_context,
-                                       &req);
-                       if (!ret)
-                               i915_add_request(req);
-                       else
-                               DRM_DEBUG("Failed to ensure context saved");
-               } else {
-                       req = list_first_entry(
-                                       &ring->request_list,
-                                       typeof(*req), list);
-               }
-               if (req) {
-                       ret = i915_wait_request(req);
-                       if (ret != 0) {
-                               /**
-                                * If we get here, there's probably been a ring
-                                * reset, so we just clean up the dirty flag.&
-                                * pin count.
-                                */
-                               ctx->engine[ring->id].dirty = false;
-                               __intel_lr_context_unpin(
-                                       ring,
-                                       ctx);
-                       }
-               }
-       }
-
-       WARN_ON(ctx->engine[ring->id].pin_count);
-       intel_ringbuffer_free(ringbuf);
-       drm_gem_object_unreference(&ctx_obj->base);
-}
-
 /**
  * intel_lr_context_free() - free the LRC specific bits of a context
  * @ctx: the LR context to free.
@@ -2449,7 +2363,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 {
        int i;
 
-       for (i = 0; i < I915_NUM_RINGS; ++i) {
+       for (i = 0; i < I915_NUM_RINGS; i++) {
                struct drm_i915_gem_object *ctx_obj = ctx->engine[i].state;
 
                if (ctx_obj) {
@@ -2457,10 +2371,13 @@ void intel_lr_context_free(struct intel_context *ctx)
                                        ctx->engine[i].ringbuf;
                        struct intel_engine_cs *ring = ringbuf->ring;
 
-                       intel_lr_context_clean_ring(ctx,
-                                                   ring,
-                                                   ctx_obj,
-                                                   ringbuf);
+                       if (ctx == ring->default_context) {
+                               intel_unpin_ringbuffer_obj(ringbuf);
+                               i915_gem_object_ggtt_unpin(ctx_obj);
+                       }
+                       WARN_ON(ctx->engine[ring->id].pin_count);
+                       intel_ringbuffer_free(ringbuf);
+                       drm_gem_object_unreference(&ctx_obj->base);
                }
        }
 }
@@ -2622,12 +2539,5 @@ void intel_lr_context_reset(struct drm_device *dev,
 
                ringbuf->head = 0;
                ringbuf->tail = 0;
-
-               if (ctx->engine[ring->id].dirty) {
-                       __intel_lr_context_unpin(
-                                       ring,
-                                       ctx);
-                       ctx->engine[ring->id].dirty = false;
-               }
        }
 }
diff --git a/drivers/gpu/drm/i915/intel_lrc.h b/drivers/gpu/drm/i915/intel_lrc.h
index fd1b6b43bacc75007c1bd7f94794c479e5e69973..0b821b91723a61a4766e761ef4975989e9eea61c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.h
+++ b/drivers/gpu/drm/i915/intel_lrc.h
@@ -91,7 +91,6 @@ void intel_lr_context_reset(struct drm_device *dev,
                        struct intel_context *ctx);
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
                                     struct intel_engine_cs *ring);
-void intel_lr_context_complete_check(struct drm_i915_gem_request *req);
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);