might_sleep();
GEM_BUG_ON(!test_bit(I915_RESET_BACKOFF, &error->flags));
- lock_map_acquire(&i915->gt.reset_lockmap);
+ mutex_lock(&error->wedge_mutex);
/* Clear any previous failed attempts at recovery. Time to try again. */
if (!__i915_gem_unset_wedged(i915))
finish:
reset_finish(i915);
unlock:
- lock_map_release(&i915->gt.reset_lockmap);
+ mutex_unlock(&error->wedge_mutex);
return;
taint:
/* Flush everyone using a resource about to be clobbered */
synchronize_srcu_expedited(&error->reset_backoff_srcu);
- mutex_lock(&error->wedge_mutex);
i915_reset(i915, engine_mask, reason);
- mutex_unlock(&error->wedge_mutex);
intel_finish_reset(i915);
}
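
With this change the wedge_mutex is taken inside i915_reset() itself, at the point where the old reset_lockmap annotation used to be acquired, so callers such as i915_handle_error() drop their own lock/unlock around the call. The following is a minimal, illustrative sketch of the resulting locking shape, not the driver's real code; the demo_* names and the trimmed-down struct are hypothetical:

#include <linux/kernel.h>
#include <linux/mutex.h>

/* Hypothetical stand-in for the relevant part of struct i915_gpu_error. */
struct demo_error {
	struct mutex wedge_mutex;
};

/* The reset entry point now owns wedge_mutex for its whole duration. */
static void demo_reset(struct demo_error *error)
{
	might_sleep();

	mutex_lock(&error->wedge_mutex);
	/* ... clear the wedged state, reset the hardware, reset_finish() ... */
	mutex_unlock(&error->wedge_mutex);
}

/* The error handler simply calls it; no lock/unlock around the call. */
static void demo_handle_error(struct demo_error *error)
{
	/* ... flush everyone using the hardware about to be clobbered ... */
	demo_reset(error);
}
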
ktime_t last_init_time;
struct i915_vma *scratch;
-
- /*
- * We must never wait on the GPU while holding a lock as we
- * may need to perform a GPU reset. So while we don't need to
- * serialise wait/reset with an explicit lock, we do want
- * lockdep to detect potential dependency cycles.
- */
- struct lockdep_map reset_lockmap;
} gt;
struct {
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
- static struct lock_class_key reset_key;
int err;
intel_gt_pm_init(dev_priv);
INIT_LIST_HEAD(&dev_priv->gt.active_rings);
INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
spin_lock_init(&dev_priv->gt.closed_lock);
- lockdep_init_map(&dev_priv->gt.reset_lockmap,
- "i915.reset", &reset_key, 0);
i915_gem_init__mm(dev_priv);
i915_gem_init__pm(dev_priv);
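
The hunks above remove the standalone lockdep map that previously carried this wait-vs-reset annotation: a static lock_class_key plus lockdep_init_map() registered a virtual lock that was never really taken, only acquired and released in lockdep's eyes. For reference, a hedged sketch of that removed pattern (demo_* names are hypothetical; the annotations compile away without CONFIG_LOCKDEP):

#include <linux/lockdep.h>

/* Hypothetical stand-in for the gt sub-struct that used to carry the map. */
struct demo_gt {
	struct lockdep_map reset_lockmap;	/* annotation only, never a real lock */
};

static void demo_init_early(struct demo_gt *gt)
{
	static struct lock_class_key reset_key;

	/* Register a virtual lock class known only to lockdep. */
	lockdep_init_map(&gt->reset_lockmap, "demo.reset", &reset_key, 0);
}

static void demo_wait(struct demo_gt *gt)
{
	lock_map_acquire(&gt->reset_lockmap);	/* the wait path "takes" it */
	/* ... wait for the GPU ... */
	lock_map_release(&gt->reset_lockmap);
}
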
return -ETIME;
trace_i915_request_wait_begin(rq, flags);
- lock_map_acquire(&rq->i915->gt.reset_lockmap);
+
+ /*
+ * We must never wait on the GPU while holding a lock as we
+ * may need to perform a GPU reset. So while we don't need to
+ * serialise wait/reset with an explicit lock, we do want
+ * lockdep to detect potential dependency cycles.
+ */
+ mutex_acquire(&rq->i915->gpu_error.wedge_mutex.dep_map,
+ 0, 0, _THIS_IP_);
/*
* Optimistic spin before touching IRQs.
dma_fence_remove_callback(&rq->fence, &wait.cb);
out:
- lock_map_release(&rq->i915->gt.reset_lockmap);
+ mutex_release(&rq->i915->gpu_error.wedge_mutex.dep_map, 0, _THIS_IP_);
trace_i915_request_wait_end(rq);
return timeout;
}
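
In i915_request_wait() the same wait-vs-reset check is now expressed against the wedge_mutex's own dep_map: the waiter never takes the mutex, it only tells lockdep that it conceptually holds it for the duration of the wait, so waiting on the GPU while holding any lock the reset path needs is reported as a dependency cycle. Below is a minimal sketch of that wait-side annotation, using the same mutex_acquire()/mutex_release() argument forms as the hunk above; the demo_* names are hypothetical and the annotations compile away without CONFIG_DEBUG_LOCK_ALLOC:

#include <linux/kernel.h>
#include <linux/lockdep.h>
#include <linux/mutex.h>

struct demo_error {
	struct mutex wedge_mutex;	/* taken for real by the reset path */
};

static long demo_request_wait(struct demo_error *error, long timeout)
{
	/* Pretend, for lockdep only, that the wait holds wedge_mutex. */
	mutex_acquire(&error->wedge_mutex.dep_map, 0, 0, _THIS_IP_);

	/* ... sleep until the request signals or the timeout expires ... */

	mutex_release(&error->wedge_mutex.dep_map, 0, _THIS_IP_);
	return timeout;
}
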
struct drm_i915_private *mock_gem_device(void)
{
- static struct lock_class_key reset_key;
struct drm_i915_private *i915;
struct pci_dev *pdev;
int err;
INIT_LIST_HEAD(&i915->gt.active_rings);
INIT_LIST_HEAD(&i915->gt.closed_vma);
spin_lock_init(&i915->gt.closed_lock);
- lockdep_init_map(&i915->gt.reset_lockmap, "i915.reset", &reset_key, 0);
mutex_lock(&i915->drm.struct_mutex);
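
With the dedicated map gone, the mock device needs no private lock_class_key either: the wedge_mutex picks up its lockdep class automatically when it is initialised. A small illustrative sketch, again with hypothetical demo_* names:

#include <linux/mutex.h>

struct demo_error {
	struct mutex wedge_mutex;
};

static void demo_error_init(struct demo_error *error)
{
	/*
	 * mutex_init() is a macro that supplies a static lock_class_key of
	 * its own, so the mutex's dep_map is ready for the wait/reset
	 * annotations without any explicit lockdep_init_map().
	 */
	mutex_init(&error->wedge_mutex);
}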