timeout = NULL;
}
- ret = __i915_wait_request(to_request(fence),
- interruptible, timeout,
- NO_WAITBOOST);
+ ret = i915_wait_request(to_request(fence),
+ interruptible, timeout,
+ NO_WAITBOOST);
if (ret == -ETIME)
return 0;
list_add_tail(&req->client_list, &file_priv->mm.request_list);
spin_unlock(&file_priv->mm.lock);
- req->pid = get_pid(task_pid(current));
-
return 0;
}
list_del(&request->client_list);
request->file_priv = NULL;
spin_unlock(&file_priv->mm.lock);
-
- put_pid(request->pid);
- request->pid = NULL;
}
void i915_gem_retire_noop(struct i915_gem_active *active,
struct i915_gem_active *active, *next;
trace_i915_gem_request_retire(request);
- list_del_init(&request->link);
+ list_del(&request->link);
/* We know the GPU must have read the request to have
* sent us the seqno + interrupt, so use the position
* Note this requires that we are always called in request
* completion order.
*/
+ list_del(&request->ring_link);
request->ring->last_retired_head = request->postfix;
/* Walk through the active list, calling retire on each. This allows
prefetchw(next);
INIT_LIST_HEAD(&active->link);
- active->request = NULL;
+ RCU_INIT_POINTER(active->request, NULL);
active->retire(active, request);
}
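The switch to RCU_INIT_POINTER() above lets lockless readers observe a tracker going idle without holding struct_mutex. As a rough sketch of the activity-tracking pattern involved (the real type is struct i915_gem_active; everything prefixed example_ below is invented for illustration), each tracker pairs an RCU-protected request pointer with a per-user retire callback that the loop above invokes once the request completes:

/* Illustrative only: mirrors the shape of struct i915_gem_active. */
struct example_active {
	struct example_request __rcu *request;	/* last request to use this slot */
	struct list_head link;			/* node on that request's active list */
	void (*retire)(struct example_active *active,
		       struct example_request *request);
};

/* Trackers that need no work on completion install a no-op callback,
 * cf. i915_gem_retire_noop() above; others unbind buffers, drop
 * references, etc.
 */
static void example_init_active(struct example_active *active,
				void (*retire)(struct example_active *,
					       struct example_request *))
{
	INIT_LIST_HEAD(&active->link);
	RCU_INIT_POINTER(active->request, NULL);
	active->retire = retire;
}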
struct drm_i915_gem_request *tmp;
lockdep_assert_held(&req->i915->drm.struct_mutex);
-
- if (list_empty(&req->link))
- return;
+ GEM_BUG_ON(list_empty(&req->link));
do {
tmp = list_first_entry(&engine->request_list,
/* Carefully retire all requests without writing to the rings */
for_each_engine(engine, dev_priv) {
- ret = intel_engine_idle(engine);
+ ret = intel_engine_idle(engine, true);
if (ret)
return ret;
}
if (req && i915_gem_request_completed(req))
i915_gem_request_retire(req);
- req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
+ /* Beware: Dragons be flying overhead.
+ *
+ * We use RCU to look up requests in flight. The lookups may
+ * race with the request being allocated from the slab freelist.
+ * That is, the request we are writing to here may be in the process
+ * of being read by __i915_gem_active_get_rcu(). As such,
+ * we have to be very careful when overwriting the contents. During
+ * the RCU lookup, we chase the request->engine pointer,
+ * read the request->fence.seqno and increment the reference count.
+ *
+ * The reference count is incremented atomically. If it is zero,
+ * the lookup knows the request is unallocated and complete. Otherwise,
+ * it is either still in use, or has been reallocated and reset
+ * with fence_init(). This increment is safe for release as we then
+ * check that the request we have a reference to matches the active
+ * request.
+ *
+ * Before we increment the refcount, we chase the request->engine
+ * pointer. We must not call kmem_cache_zalloc() or else we set
+ * that pointer to NULL and cause a crash during the lookup. If
+ * we see the request is completed (based on the value of the
+ * old engine and seqno), the lookup is complete and reports NULL.
+ * If we decide the request is not completed (new engine or seqno),
+ * then we grab a reference and double check that it is still the
+ * active request - which it will not be if the slot was reallocated,
+ * in which case we drop the reference and restart the lookup.
+ *
+ * Do not use kmem_cache_zalloc() here!
+ */
+ req = kmem_cache_alloc(dev_priv->requests, GFP_KERNEL);
if (!req)
return ERR_PTR(-ENOMEM);
req->engine = engine;
req->ctx = i915_gem_context_get(ctx);
+ /* No zalloc, must clear what we need by hand */
+ req->previous_context = NULL;
+ req->file_priv = NULL;
+ req->batch = NULL;
+ req->elsp_submitted = 0;
+
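To make the race described in the comment concrete, here is a hedged sketch of the reader it guards against; the real lookup is __i915_gem_active_get_rcu(), while example_active_get_rcu(), the direct use of fence.refcount and the put helper are illustrative assumptions:

static struct drm_i915_gem_request *
example_active_get_rcu(struct drm_i915_gem_request __rcu **slot)
{
	struct drm_i915_gem_request *rq;

	rcu_read_lock();
	do {
		rq = rcu_dereference(*slot);
		if (!rq || i915_gem_request_completed(rq)) {
			/* Based on the (possibly stale) engine/seqno, the
			 * tracked work has completed, so report idle. */
			rq = NULL;
			break;
		}

		/* Succeeds only while the refcount is non-zero, i.e. the
		 * request has not been freed back to the slab. */
		if (!kref_get_unless_zero(&rq->fence.refcount))
			continue;

		/* If the slot still points at the request we pinned, the
		 * reference is good; otherwise the slot was recycled, so
		 * drop the reference and retry. */
		if (rq == rcu_access_pointer(*slot))
			break;

		i915_gem_request_put(rq);
	} while (1);
	rcu_read_unlock();

	return rq;
}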
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
if (ret)
goto err_ctx;
+ /* Record the position of the start of the request so that
+ * should we detect the updated seqno part-way through the
+ * GPU processing the request, we never over-estimate the
+ * position of the head.
+ */
+ req->head = req->ring->tail;
+
return req;
err_ctx:
* request is not being tracked for completion but the work itself is
* going to happen on the hardware. This would be a Bad Thing(tm).
*/
-void __i915_add_request(struct drm_i915_gem_request *request,
- struct drm_i915_gem_object *obj,
- bool flush_caches)
+void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
{
- struct intel_engine_cs *engine;
- struct intel_ring *ring;
+ struct intel_engine_cs *engine = request->engine;
+ struct intel_ring *ring = request->ring;
u32 request_start;
u32 reserved_tail;
int ret;
- if (WARN_ON(!request))
- return;
-
- engine = request->engine;
- ring = request->ring;
-
/*
* To ensure that this call will not fail, space for its emissions
* should already have been reserved in the ring buffer. Let the ring
trace_i915_gem_request_add(request);
- request->head = request_start;
-
- /* Whilst this request exists, batch_obj will be on the
- * active_list, and so will hold the active reference. Only when this
- * request is retired will the the batch_obj be moved onto the
- * inactive_list and lose its active reference. Hence we do not need
- * to explicitly hold another reference here.
- */
- request->batch_obj = obj;
-
/* Seal the request and mark it as pending execution. Note that
* we may inspect this state, without holding any locks, during
* hangcheck. Hence we apply the barrier to ensure that we do not
*/
request->emitted_jiffies = jiffies;
request->previous_seqno = engine->last_submitted_seqno;
- smp_store_mb(engine->last_submitted_seqno, request->fence.seqno);
+ engine->last_submitted_seqno = request->fence.seqno;
+ i915_gem_active_set(&engine->last_request, request);
list_add_tail(&request->link, &engine->request_list);
+ list_add_tail(&request->ring_link, &ring->request_list);
- /* Record the position of the start of the request so that
+ /* Record the position of the start of the breadcrumb so that
* should we detect the updated seqno part-way through the
* GPU processing the request, we never over-estimate the
- * position of the head.
+ * position of the ring's HEAD.
*/
request->postfix = ring->tail;
}
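Both recorded positions are consumed later: request->head marks where the request's commands begin so the driver never over-estimates the ring HEAD, while request->postfix, copied into ring->last_retired_head by the retire hunk near the top of this patch, lets the ring reclaim space. A hedged sketch of that reclaim step (field names follow the hunks above; the helper name and the CIRC_SPACE() arithmetic are illustrative, the driver's own version being intel_ring_update_space()):

#include <linux/circ_buf.h>

/* Illustrative only: once a request is retired, everything up to its
 * breadcrumb (request->postfix) is no longer needed, so advance the
 * software HEAD to that point and recompute the free space. */
static void example_ring_update_space(struct intel_ring *ring)
{
	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
	}

	ring->space = CIRC_SPACE(ring->tail, ring->head, ring->size);
}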
/**
- * __i915_wait_request - wait until execution of request has finished
+ * i915_wait_request - wait until execution of request has finished
* @req: the request to wait upon
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
* Returns 0 if the request completed within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
-int __i915_wait_request(struct drm_i915_gem_request *req,
- bool interruptible,
- s64 *timeout,
- struct intel_rps_client *rps)
+int i915_wait_request(struct drm_i915_gem_request *req,
+ bool interruptible,
+ s64 *timeout,
+ struct intel_rps_client *rps)
{
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
DEFINE_WAIT(reset);
if (IS_RPS_CLIENT(rps) && INTEL_GEN(req->i915) >= 6)
gen6_rps_boost(req->i915, rps, req->emitted_jiffies);
- /* Optimistic spin for the next ~jiffie before touching IRQs */
+ /* Optimistic short spin before touching IRQs */
if (i915_spin_request(req, state, 5))
goto complete;
return ret;
}
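For callers of the renamed i915_wait_request(), the kerneldoc above implies the following usage: the timeout is passed by pointer (NULL to wait forever), updated with the time remaining, and -ETIME signals expiry. A hedged sketch, assuming nanosecond units as used by the wait ioctl (the wrapper name and the two-second bound are invented):

static int example_wait_for_request(struct drm_i915_gem_request *rq)
{
	s64 timeout = 2 * NSEC_PER_SEC;	/* arbitrary bound for the example */
	int ret;

	ret = i915_wait_request(rq, true /* interruptible */,
				&timeout, NO_WAITBOOST);
	if (ret == -ETIME)
		DRM_DEBUG_DRIVER("request did not complete in time\n");

	return ret;
}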
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int i915_wait_request(struct drm_i915_gem_request *req)
+static bool engine_retire_requests(struct intel_engine_cs *engine)
{
- int ret;
+ struct drm_i915_gem_request *request, *next;
- lockdep_assert_held(&req->i915->drm.struct_mutex);
- GEM_BUG_ON(list_empty(&req->link));
+ list_for_each_entry_safe(request, next, &engine->request_list, link) {
+ if (!i915_gem_request_completed(request))
+ return false;
- ret = __i915_wait_request(req,
- req->i915->mm.interruptible,
- NULL,
- NULL);
- if (ret)
- return ret;
+ i915_gem_request_retire(request);
+ }
- /* If the GPU hung, we want to keep the requests to find the guilty. */
- if (!i915_reset_in_progress(&req->i915->gpu_error))
- i915_gem_request_retire_upto(req);
+ return true;
+}
- return 0;
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
+{
+ struct intel_engine_cs *engine;
+
+ lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+ if (dev_priv->gt.active_engines == 0)
+ return;
+
+ GEM_BUG_ON(!dev_priv->gt.awake);
+
+ for_each_engine_masked(engine, dev_priv, dev_priv->gt.active_engines)
+ if (engine_retire_requests(engine))
+ dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
+
+ if (dev_priv->gt.active_engines == 0)
+ queue_delayed_work(dev_priv->wq,
+ &dev_priv->gt.idle_work,
+ msecs_to_jiffies(100));
}
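i915_gem_retire_requests() now prunes gt.active_engines as engines drain and schedules the idle work itself once nothing remains busy. It asserts struct_mutex, so a caller looks roughly like the retire worker below (a hedged sketch; the gt.retire_work field and the handler name are assumptions based on the surrounding series):

static void example_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);

	/* i915_gem_retire_requests() asserts struct_mutex is held; do not
	 * block the shared workqueue if someone else owns it right now. */
	if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
}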