drm/i915: Tidy reporting busy status during i915_gem_retire_requests()
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 9e9aa6b725f75940a3fd8079657c90874ab70e78..5e55270bb2dee0aee1638554d6709e3e1abb4e9d 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
  *
  */
 
+#include <linux/prefetch.h>
+
 #include "i915_drv.h"
 
+static const char *i915_fence_get_driver_name(struct fence *fence)
+{
+       return "i915";
+}
+
+static const char *i915_fence_get_timeline_name(struct fence *fence)
+{
+       /* Timelines are bound by eviction to a VM. However, since
+        * we only have a global seqno at the moment, we only have
+        * a single timeline. Note that each timeline will have
+        * multiple execution contexts (fence contexts) as we allow
+        * engines within a single timeline to execute in parallel.
+        */
+       return "global";
+}
+
+static bool i915_fence_signaled(struct fence *fence)
+{
+       return i915_gem_request_completed(to_request(fence));
+}
+
+static bool i915_fence_enable_signaling(struct fence *fence)
+{
+       if (i915_fence_signaled(fence))
+               return false;
+
+       intel_engine_enable_signaling(to_request(fence));
+       return true;
+}
+
+static signed long i915_fence_wait(struct fence *fence,
+                                  bool interruptible,
+                                  signed long timeout_jiffies)
+{
+       s64 timeout_ns, *timeout;
+       int ret;
+
+       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT) {
+               timeout_ns = jiffies_to_nsecs(timeout_jiffies);
+               timeout = &timeout_ns;
+       } else {
+               timeout = NULL;
+       }
+
+       ret = i915_wait_request(to_request(fence),
+                               interruptible, timeout,
+                               NO_WAITBOOST);
+       if (ret == -ETIME)
+               return 0;
+
+       if (ret < 0)
+               return ret;
+
+       if (timeout_jiffies != MAX_SCHEDULE_TIMEOUT)
+               timeout_jiffies = nsecs_to_jiffies(timeout_ns);
+
+       return timeout_jiffies;
+}
+
+static void i915_fence_value_str(struct fence *fence, char *str, int size)
+{
+       snprintf(str, size, "%u", fence->seqno);
+}
+
+static void i915_fence_timeline_value_str(struct fence *fence, char *str,
+                                         int size)
+{
+       snprintf(str, size, "%u",
+                intel_engine_get_seqno(to_request(fence)->engine));
+}
+
+static void i915_fence_release(struct fence *fence)
+{
+       struct drm_i915_gem_request *req = to_request(fence);
+
+       kmem_cache_free(req->i915->requests, req);
+}
+
+const struct fence_ops i915_fence_ops = {
+       .get_driver_name = i915_fence_get_driver_name,
+       .get_timeline_name = i915_fence_get_timeline_name,
+       .enable_signaling = i915_fence_enable_signaling,
+       .signaled = i915_fence_signaled,
+       .wait = i915_fence_wait,
+       .release = i915_fence_release,
+       .fence_value_str = i915_fence_value_str,
+       .timeline_value_str = i915_fence_timeline_value_str,
+};
+
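With the request now backed by a struct fence, other code can wait on it through the generic fence interface alone. The sketch below is not part of the patch: fence_wait_timeout() dispatches to i915_fence_ops.wait, i.e. i915_fence_wait() above, and returns the remaining jiffies, 0 on timeout, or a negative error. The helper name and the 100ms budget are invented for the example.

static long example_wait_via_fence(struct drm_i915_gem_request *req)
{
        /* Dispatches to i915_fence_wait() and thus i915_wait_request() */
        return fence_wait_timeout(&req->fence, true,
                                  msecs_to_jiffies(100));
}
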
 int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
 {
@@ -46,8 +137,6 @@ int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
        list_add_tail(&req->client_list, &file_priv->mm.request_list);
        spin_unlock(&file_priv->mm.lock);
 
-       req->pid = get_pid(task_pid(current));
-
        return 0;
 }
 
@@ -63,15 +152,20 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
        list_del(&request->client_list);
        request->file_priv = NULL;
        spin_unlock(&file_priv->mm.lock);
+}
 
-       put_pid(request->pid);
-       request->pid = NULL;
+void i915_gem_retire_noop(struct i915_gem_active *active,
+                         struct drm_i915_gem_request *request)
+{
+       /* Space left intentionally blank */
 }
 
 static void i915_gem_request_retire(struct drm_i915_gem_request *request)
 {
+       struct i915_gem_active *active, *next;
+
        trace_i915_gem_request_retire(request);
-       list_del_init(&request->list);
+       list_del(&request->link);
 
        /* We know the GPU must have read the request to have
         * sent us the seqno + interrupt, so use the position
@@ -81,7 +175,35 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
         * Note this requires that we are always called in request
         * completion order.
         */
-       request->ringbuf->last_retired_head = request->postfix;
+       list_del(&request->ring_link);
+       request->ring->last_retired_head = request->postfix;
+
+       /* Walk through the active list, calling retire on each. This allows
+        * objects to track their GPU activity and mark themselves as idle
+        * when their *last* active request is completed (updating state
+        * tracking lists for eviction, active references for GEM, etc).
+        *
+        * As the ->retire() may free the node, we decouple it first and
+        * pass along the auxiliary information (to avoid dereferencing
+        * the node after the callback).
+        */
+       list_for_each_entry_safe(active, next, &request->active_list, link) {
+               /* In microbenchmarks, or when focusing upon time inside the kernel,
+                * we may spend an inordinate amount of time simply handling
+                * the retirement of requests and processing their callbacks.
+                * Of which, this loop itself is particularly hot due to the
+                * cache misses when jumping around the list of i915_gem_active.
+                * So we try to keep this loop as streamlined as possible and
+                * also prefetch the next i915_gem_active to try and hide
+                * the likely cache miss.
+                */
+               prefetchw(next);
+
+               INIT_LIST_HEAD(&active->link);
+               RCU_INIT_POINTER(active->request, NULL);
+
+               active->retire(active, request);
+       }
 
        i915_gem_request_remove_from_client(request);
 
@@ -91,8 +213,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
                                               request->engine);
        }
 
-       i915_gem_context_unreference(request->ctx);
-       i915_gem_request_unreference(request);
+       i915_gem_context_put(request->ctx);
+       i915_gem_request_put(request);
 }
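
The active-list walk above is the retirement half of the i915_gem_active contract. A rough sketch (not part of the patch) of the tracking half: an owner embeds an i915_gem_active, registers a retire callback, and points the tracker at a request, so that i915_gem_request_retire() invokes the callback once that request completes. The example_tracker type and function names are invented for illustration; init_request_active() and i915_gem_active_set() are assumed from the i915_gem_request.h helpers introduced by this series.

struct example_tracker {
        struct i915_gem_active active;
        bool busy;
};

static void example_tracker_retire(struct i915_gem_active *active,
                                   struct drm_i915_gem_request *request)
{
        struct example_tracker *t =
                container_of(active, struct example_tracker, active);

        /* Invoked from i915_gem_request_retire() with active->link
         * decoupled and active->request already cleared.
         */
        t->busy = false;
}

static void example_tracker_watch(struct example_tracker *t,
                                  struct drm_i915_gem_request *req)
{
        /* Registering the callback is normally done once at init time */
        init_request_active(&t->active, example_tracker_retire);
        i915_gem_active_set(&t->active, req);
        t->busy = true;
}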
 
 void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
@@ -101,18 +223,14 @@ void i915_gem_request_retire_upto(struct drm_i915_gem_request *req)
        struct drm_i915_gem_request *tmp;
 
        lockdep_assert_held(&req->i915->drm.struct_mutex);
-
-       if (list_empty(&req->list))
-               return;
+       GEM_BUG_ON(list_empty(&req->link));
 
        do {
                tmp = list_first_entry(&engine->request_list,
-                                      typeof(*tmp), list);
+                                      typeof(*tmp), link);
 
                i915_gem_request_retire(tmp);
        } while (tmp != req);
-
-       WARN_ON(i915_verify_lists(engine->dev));
 }
 
 static int i915_gem_check_wedge(unsigned int reset_counter, bool interruptible)
@@ -140,7 +258,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
        /* Carefully retire all requests without writing to the rings */
        for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine);
+               ret = intel_engine_idle(engine, true);
                if (ret)
                        return ret;
        }
@@ -155,7 +273,7 @@ static int i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
 
        /* Finally reset hw state */
        for_each_engine(engine, dev_priv)
-               intel_ring_init_seqno(engine, seqno);
+               intel_engine_init_seqno(engine, seqno);
 
        return 0;
 }
@@ -175,14 +293,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
        if (ret)
                return ret;
 
-       /* Carefully set the last_seqno value so that wrap
-        * detection still works
-        */
        dev_priv->next_seqno = seqno;
-       dev_priv->last_seqno = seqno - 1;
-       if (dev_priv->last_seqno == 0)
-               dev_priv->last_seqno--;
-
        return 0;
 }
 
@@ -199,46 +310,99 @@ static int i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
                dev_priv->next_seqno = 1;
        }
 
-       *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
+       *seqno = dev_priv->next_seqno++;
        return 0;
 }
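
For reference, the driver's seqno comparisons tolerate wraparound by using signed arithmetic; i915_seqno_passed() in i915_drv.h is equivalent to the minimal sketch below (illustration only, not part of the patch).

static inline bool example_seqno_passed(u32 seq1, u32 seq2)
{
        /* True if seq1 is at or after seq2, even across a u32 wrap,
         * e.g. seq1 = 3 still "passes" seq2 = 0xfffffffe.
         */
        return (s32)(seq1 - seq2) >= 0;
}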
 
-static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *engine,
-                        struct i915_gem_context *ctx,
-                        struct drm_i915_gem_request **req_out)
+/**
+ * i915_gem_request_alloc - allocate a request structure
+ *
+ * @engine: engine that we wish to issue the request on.
+ * @ctx: context that the request will be associated with.
+ *       This can be NULL if the request is not directly related to
+ *       any specific user context, in which case this function will
+ *       choose an appropriate context to use.
+ *
+ * Returns a pointer to the allocated request if successful,
+ * or an error code if not.
+ */
+struct drm_i915_gem_request *
+i915_gem_request_alloc(struct intel_engine_cs *engine,
+                      struct i915_gem_context *ctx)
 {
        struct drm_i915_private *dev_priv = engine->i915;
        unsigned int reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
+       u32 seqno;
        int ret;
 
-       if (!req_out)
-               return -EINVAL;
-
-       *req_out = NULL;
-
        /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
         * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
         * and restart.
         */
        ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
        if (ret)
-               return ret;
+               return ERR_PTR(ret);
 
-       req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+       /* Move the oldest request to the slab-cache (if not in use!) */
+       req = list_first_entry_or_null(&engine->request_list,
+                                      typeof(*req), link);
+       if (req && i915_gem_request_completed(req))
+               i915_gem_request_retire(req);
+
+       /* Beware: Dragons be flying overhead.
+        *
+        * We use RCU to look up requests in flight. The lookups may
+        * race with the request being allocated from the slab freelist.
+        * That is, the request we are writing to here may be in the process
+        * of being read by __i915_gem_active_get_rcu(). As such,
+        * we have to be very careful when overwriting the contents. During
+        * the RCU lookup, we chase the request->engine pointer,
+        * read the request->fence.seqno and increment the reference count.
+        *
+        * The reference count is incremented atomically. If it is zero,
+        * the lookup knows the request is unallocated and complete. Otherwise,
+        * it is either still in use, or has been reallocated and reset
+        * with fence_init(). This increment is safe for release as we check
+        * that the request we have a reference to matches the active
+        * request.
+        *
+        * Before we increment the refcount, we chase the request->engine
+        * pointer. We must not call kmem_cache_zalloc() or else we set
+        * that pointer to NULL and cause a crash during the lookup. If
+        * we see the request is completed (based on the value of the
+        * old engine and seqno), the lookup is complete and reports NULL.
+        * If we decide the request is not completed (new engine or seqno),
+        * then we grab a reference and double check that it is still the
+        * active request - which it won't be, so we restart the lookup.
+        *
+        * Do not use kmem_cache_zalloc() here!
+        */
+       req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
        if (!req)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
 
-       ret = i915_gem_get_seqno(dev_priv, &req->seqno);
+       ret = i915_gem_get_seqno(dev_priv, &seqno);
        if (ret)
                goto err;
 
-       kref_init(&req->ref);
+       spin_lock_init(&req->lock);
+       fence_init(&req->fence,
+                  &i915_fence_ops,
+                  &req->lock,
+                  engine->fence_context,
+                  seqno);
+
+       INIT_LIST_HEAD(&req->active_list);
        req->i915 = dev_priv;
        req->engine = engine;
-       req->ctx = ctx;
-       i915_gem_context_reference(ctx);
+       req->ctx = i915_gem_context_get(ctx);
+
+       /* No zalloc, must clear what we need by hand */
+       req->previous_context = NULL;
+       req->file_priv = NULL;
+       req->batch = NULL;
+       req->elsp_submitted = 0;
 
        /*
         * Reserve space in the ring buffer for all the commands required to
@@ -256,39 +420,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
        if (ret)
                goto err_ctx;
 
-       *req_out = req;
-       return 0;
+       /* Record the position of the start of the request so that
+        * should we detect the updated seqno part-way through the
+        * GPU processing the request, we never over-estimate the
+        * position of the head.
+        */
+       req->head = req->ring->tail;
+
+       return req;
 
 err_ctx:
-       i915_gem_context_unreference(ctx);
+       i915_gem_context_put(ctx);
 err:
        kmem_cache_free(dev_priv->requests, req);
-       return ret;
-}
-
-/**
- * i915_gem_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *       This can be NULL if the request is not directly related to
- *       any specific user context, in which case this function will
- *       choose an appropriate context to use.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct i915_gem_context *ctx)
-{
-       struct drm_i915_gem_request *req;
-       int err;
-
-       if (!ctx)
-               ctx = engine->i915->kernel_context;
-       err = __i915_gem_request_alloc(engine, ctx, &req);
-       return err ? ERR_PTR(err) : req;
+       return ERR_PTR(ret);
 }
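
A rough usage sketch (not part of the patch) for the reworked allocation path: allocate the request, emit commands into its ring, then seal and submit it with __i915_add_request(). The emission helpers (intel_ring_begin()/intel_ring_emit()) and the convention that an allocated request is always added are assumptions based on the surrounding driver code; the function itself is invented for illustration.

static int example_emit_nops(struct intel_engine_cs *engine,
                             struct i915_gem_context *ctx)
{
        struct drm_i915_gem_request *req;
        int ret;

        req = i915_gem_request_alloc(engine, ctx);
        if (IS_ERR(req))
                return PTR_ERR(req);

        ret = intel_ring_begin(req, 2);
        if (ret == 0) {
                intel_ring_emit(req->ring, MI_NOOP);
                intel_ring_emit(req->ring, MI_NOOP);
                intel_ring_advance(req->ring);
        }

        /* The request owns reserved breadcrumb space; add it even on
         * error so that it is tracked and eventually retired.
         */
        __i915_add_request(req, false);
        return ret;
}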
 
 static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
@@ -317,28 +462,20 @@ static void i915_gem_mark_busy(const struct intel_engine_cs *engine)
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_add_request(struct drm_i915_gem_request *request,
-                       struct drm_i915_gem_object *obj,
-                       bool flush_caches)
+void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
 {
-       struct intel_engine_cs *engine;
-       struct intel_ringbuffer *ringbuf;
+       struct intel_engine_cs *engine = request->engine;
+       struct intel_ring *ring = request->ring;
        u32 request_start;
        u32 reserved_tail;
        int ret;
 
-       if (WARN_ON(!request))
-               return;
-
-       engine = request->engine;
-       ringbuf = request->ringbuf;
-
        /*
         * To ensure that this call will not fail, space for its emissions
         * should already have been reserved in the ring buffer. Let the ring
         * know that it is time to use that space up.
         */
-       request_start = intel_ring_get_tail(ringbuf);
+       request_start = ring->tail;
        reserved_tail = request->reserved_space;
        request->reserved_space = 0;
 
@@ -350,26 +487,14 @@ void __i915_add_request(struct drm_i915_gem_request *request,
         * what.
         */
        if (flush_caches) {
-               if (i915.enable_execlists)
-                       ret = logical_ring_flush_all_caches(request);
-               else
-                       ret = intel_ring_flush_all_caches(request);
+               ret = engine->emit_flush(request, EMIT_FLUSH);
+
                /* Not allowed to fail! */
-               WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
+               WARN(ret, "engine->emit_flush() failed: %d!\n", ret);
        }
 
        trace_i915_gem_request_add(request);
 
-       request->head = request_start;
-
-       /* Whilst this request exists, batch_obj will be on the
-        * active_list, and so will hold the active reference. Only when this
-        * request is retired will the the batch_obj be moved onto the
-        * inactive_list and lose its active reference. Hence we do not need
-        * to explicitly hold another reference here.
-        */
-       request->batch_obj = obj;
-
        /* Seal the request and mark it as pending execution. Note that
         * we may inspect this state, without holding any locks, during
         * hangcheck. Hence we apply the barrier to ensure that we do not
@@ -377,35 +502,33 @@ void __i915_add_request(struct drm_i915_gem_request *request,
         */
        request->emitted_jiffies = jiffies;
        request->previous_seqno = engine->last_submitted_seqno;
-       smp_store_mb(engine->last_submitted_seqno, request->seqno);
-       list_add_tail(&request->list, &engine->request_list);
+       engine->last_submitted_seqno = request->fence.seqno;
+       i915_gem_active_set(&engine->last_request, request);
+       list_add_tail(&request->link, &engine->request_list);
+       list_add_tail(&request->ring_link, &ring->request_list);
 
-       /* Record the position of the start of the request so that
+       /* Record the position of the start of the breadcrumb so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
-        * position of the head.
+        * position of the ring's HEAD.
         */
-       request->postfix = intel_ring_get_tail(ringbuf);
+       request->postfix = ring->tail;
 
-       if (i915.enable_execlists) {
-               ret = engine->emit_request(request);
-       } else {
-               ret = engine->add_request(request);
-
-               request->tail = intel_ring_get_tail(ringbuf);
-       }
        /* Not allowed to fail! */
-       WARN(ret, "emit|add_request failed: %d!\n", ret);
+       ret = engine->emit_request(request);
+       WARN(ret, "(%s)->emit_request failed: %d!\n", engine->name, ret);
+
        /* Sanity check that the reserved size was large enough. */
-       ret = intel_ring_get_tail(ringbuf) - request_start;
+       ret = ring->tail - request_start;
        if (ret < 0)
-               ret += ringbuf->size;
+               ret += ring->size;
        WARN_ONCE(ret > reserved_tail,
                  "Not enough space reserved (%d bytes) "
                  "for adding the request (%d bytes)\n",
                  reserved_tail, ret);
 
        i915_gem_mark_busy(engine);
+       engine->submit_request(request);
 }
 
 static unsigned long local_clock_us(unsigned int *cpu)
@@ -473,7 +596,7 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
 }
 
 /**
- * __i915_wait_request - wait until execution of request has finished
+ * i915_wait_request - wait until execution of request has finished
  * @req: the request to wait upon
  * @interruptible: do an interruptible wait (normally yes)
  * @timeout: in - how long to wait (NULL forever); out - how much time remaining
@@ -489,10 +612,10 @@ bool __i915_spin_request(const struct drm_i915_gem_request *req,
  * Returns 0 if the request was found within the allotted time. Else returns the
  * errno with remaining time filled in timeout argument.
  */
-int __i915_wait_request(struct drm_i915_gem_request *req,
-                       bool interruptible,
-                       s64 *timeout,
-                       struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+                     bool interruptible,
+                     s64 *timeout,
+                     struct intel_rps_client *rps)
 {
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(reset);
@@ -502,9 +625,6 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
 
        might_sleep();
 
-       if (list_empty(&req->list))
-               return 0;
-
        if (i915_gem_request_completed(req))
                return 0;
 
@@ -538,17 +658,17 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
-       if (INTEL_GEN(req->i915) >= 6)
+       if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
                gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
 
-       /* Optimistic spin for the next ~jiffie before touching IRQs */
+       /* Optimistic short spin before touching IRQs */
        if (i915_spin_request(req, state, 5))
                goto complete;
 
        set_current_state(state);
        add_wait_queue(&req->i915->gpu_error.wait_queue, &reset);
 
-       intel_wait_init(&wait, req->seqno);
+       intel_wait_init(&wait, req->fence.seqno);
        if (intel_engine_add_wait(req->engine, &wait))
                /* In order to check that we haven't missed the interrupt
                 * as we enabled it, we need to kick ourselves to do a
@@ -609,7 +729,8 @@ complete:
                        *timeout = 0;
        }
 
-       if (rps && req->seqno == req->engine->last_submitted_seqno) {
+       if (IS_RPS_USER(rps) &&
+           req->fence.seqno == req->engine->last_submitted_seqno) {
                /* The GPU is now idle and this client has stalled.
                 * Since no other client has submitted a request in the
                 * meantime, assume that this client is the only one
@@ -628,31 +749,37 @@ complete:
        return ret;
 }
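
A rough sketch (not part of the patch) of a caller using the renamed i915_wait_request() for a bounded, interruptible wait; the nanosecond timeout is updated in place with the time remaining, mirroring how i915_fence_wait() above consumes it. The helper name and the 500ms budget are invented for the example.

static int example_bounded_wait(struct drm_i915_gem_request *req)
{
        s64 timeout_ns = 500 * NSEC_PER_MSEC;
        int ret;

        ret = i915_wait_request(req, true, &timeout_ns, NO_WAITBOOST);
        if (ret == -ETIME)
                return -EBUSY;  /* budget expired, request still busy */

        /* 0 on completion (timeout_ns holds the unused budget),
         * otherwise the error propagated from the wait.
         */
        return ret;
}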
 
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int i915_wait_request(struct drm_i915_gem_request *req)
+static bool engine_retire_requests(struct intel_engine_cs *engine)
 {
-       int ret;
+       struct drm_i915_gem_request *request, *next;
 
-       GEM_BUG_ON(!req);
-       lockdep_assert_held(&req->i915->drm.struct_mutex);
+       list_for_each_entry_safe(request, next, &engine->request_list, link) {
+               if (!i915_gem_request_completed(request))
+                       return false;
 
-       ret = __i915_wait_request(req, req->i915->mm.interruptible, NULL, NULL);
-       if (ret)
-               return ret;
-
-       /* If the GPU hung, we want to keep the requests to find the guilty. */
-       if (!i915_reset_in_progress(&req->i915->gpu_error))
-               i915_gem_request_retire_upto(req);
+               i915_gem_request_retire(request);
+       }
 
-       return 0;
+       return true;
 }
 
-void i915_gem_request_free(struct kref *req_ref)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_gem_request *req =
-               container_of(req_ref, typeof(*req), ref);
-       kmem_cache_free(req->i915->requests, req);
+       struct intel_engine_cs *engine;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       if (dev_priv->gt.active_engines == 0)
+               return;
+
+       GEM_BUG_ON(!dev_priv->gt.awake);
+
+       for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines)
+               if (engine_retire_requests(engine))
+                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+
+       if (dev_priv->gt.active_engines == 0)
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.idle_work,
+                                  msecs_to_jiffies(100));
 }
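
Finally, a minimal sketch (not part of the patch) of how a caller drives the reworked retirement entry point; the lockdep assertion above requires struct_mutex to be held, and only engines marked in dev_priv->gt.active_engines are walked. The wrapper name is invented for illustration.

static void example_retire_all(struct drm_i915_private *dev_priv)
{
        mutex_lock(&dev_priv->drm.struct_mutex);
        i915_gem_retire_requests(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
}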