drm/i915: Mark up the calling context for intel_wakeref_put()
author     Chris Wilson <chris@chris-wilson.co.uk>
           Wed, 20 Nov 2019 12:54:33 +0000 (12:54 +0000)
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
           Mon, 25 Nov 2019 13:29:17 +0000 (15:29 +0200)
Previously, we assumed we could use mutex_trylock() within an atomic
context, falling back to a worker if contended. However, such trickery
is illegal inside interrupt context, and so we need to always use a
worker under such circumstances. As we normally are in process context,
we can typically use a plain mutex, and only defer to a worker when we
know we are being called from an interrupt path (see the sketch below).

Fixes: 51fbd8de87dc ("drm/i915/pmu: Atomically acquire the gt_pm wakeref")
References: a0855d24fc22d ("locking/mutex: Complain upon mutex API misuse in IRQ contexts")
References: https://bugs.freedesktop.org/show_bug.cgi?id=111626
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191120125433.3767149-1-chris@chris-wilson.co.uk
(cherry picked from commit 07779a76ee1f93f930cf697b22be73d16e14f50c)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
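
To make the locking rule concrete, here is a minimal, self-contained sketch of the pattern this change moves to. It is not the i915 code: all names (example_wakeref, example_put, and so on) are hypothetical, and the real synchronous path additionally uses mutex_trylock(); see the intel_wakeref.c hunk below for the actual change.

/*
 * Illustrative sketch only: a reference-counted wakeref whose final put
 * either takes the mutex inline (process context) or defers the release
 * to a worker (IRQ/atomic context).
 */
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct example_wakeref {
        atomic_t count;
        struct mutex mutex;
        struct work_struct work;        /* runs the release in process context */
};

static void example_release_locked(struct example_wakeref *wf)
{
        /* the actual power-down / parking work would go here */
}

static void example_put_work(struct work_struct *work)
{
        struct example_wakeref *wf = container_of(work, typeof(*wf), work);

        mutex_lock(&wf->mutex);
        if (atomic_dec_and_test(&wf->count))
                example_release_locked(wf);
        mutex_unlock(&wf->mutex);
}

static void example_wakeref_init(struct example_wakeref *wf)
{
        atomic_set(&wf->count, 0);
        mutex_init(&wf->mutex);
        INIT_WORK(&wf->work, example_put_work);
}

static void example_put(struct example_wakeref *wf, bool async)
{
        /* Fast path: not the last reference, no lock needed. */
        if (atomic_add_unless(&wf->count, -1, 1))
                return;

        if (async) {
                /* May be in IRQ context: never touch the mutex here. */
                schedule_work(&wf->work);
                return;
        }

        /* Process context: safe to sleep on the mutex. */
        mutex_lock(&wf->mutex);
        if (atomic_dec_and_test(&wf->count))
                example_release_locked(wf);
        mutex_unlock(&wf->mutex);
}

The asynchronous variant never touches the mutex, which is what makes it legal from hard or soft IRQ context; the cost is that the final release is deferred to the system workqueue.
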
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_engine_pm.h
drivers/gpu/drm/i915/gt/intel_gt_pm.c
drivers/gpu/drm/i915/gt/intel_gt_pm.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_reset.c
drivers/gpu/drm/i915/gt/selftest_engine_pm.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_pmu.c
drivers/gpu/drm/i915/intel_wakeref.c
drivers/gpu/drm/i915/intel_wakeref.h

index 3c0f490ff2c7625adb42fbd6a603b86d41f88749..7269c87c137267897a60e2a14c0ed1c392636eb3 100644 (file)
@@ -177,7 +177,8 @@ static int __engine_park(struct intel_wakeref *wf)
 
        engine->execlists.no_priolist = false;
 
-       intel_gt_pm_put(engine->gt);
+       /* While gt calls i915_vma_parked(), we have to break the lock cycle */
+       intel_gt_pm_put_async(engine->gt);
        return 0;
 }
 
index 739c50fefcefd031835c0caf27e0dfcad25c2b13..24e20344dc22919c530d499bcda67811c1b1df3e 100644 (file)
@@ -31,6 +31,16 @@ static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
        intel_wakeref_put(&engine->wakeref);
 }
 
+static inline void intel_engine_pm_put_async(struct intel_engine_cs *engine)
+{
+       intel_wakeref_put_async(&engine->wakeref);
+}
+
+static inline void intel_engine_pm_flush(struct intel_engine_cs *engine)
+{
+       intel_wakeref_unlock_wait(&engine->wakeref);
+}
+
 void intel_engine_init__pm(struct intel_engine_cs *engine);
 
 #endif /* INTEL_ENGINE_PM_H */
index 7917cc348375da625ef1aaaaa2027b0e71f3dd1f..a459a42ad5c22a3e8e5b9491fd5c8df8fec07045 100644 (file)
@@ -105,7 +105,6 @@ static int __gt_park(struct intel_wakeref *wf)
 static const struct intel_wakeref_ops wf_ops = {
        .get = __gt_unpark,
        .put = __gt_park,
-       .flags = INTEL_WAKEREF_PUT_ASYNC,
 };
 
 void intel_gt_pm_init_early(struct intel_gt *gt)
index b3e17399be9baf9e1ff2ec088e3974dc00c3a63b..990efc27a4e4b02a5cfd2c1e4f4f073661f70bb6 100644 (file)
@@ -32,6 +32,11 @@ static inline void intel_gt_pm_put(struct intel_gt *gt)
        intel_wakeref_put(&gt->wakeref);
 }
 
+static inline void intel_gt_pm_put_async(struct intel_gt *gt)
+{
+       intel_wakeref_put_async(&gt->wakeref);
+}
+
 static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
 {
        return intel_wakeref_wait_for_idle(&gt->wakeref);
index 0ac3b26674ad9e0b3a817f86a9795ee22a510d52..37fe72aa8e27ec7977cc41cd92907c88e65b8a49 100644 (file)
@@ -1117,7 +1117,7 @@ __execlists_schedule_out(struct i915_request *rq,
 
        intel_engine_context_out(engine);
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
-       intel_gt_pm_put(engine->gt);
+       intel_gt_pm_put_async(engine->gt);
 
        /*
         * If this is part of a virtual engine, its next request may
index f03e000051c1537ddc353e4919194c6bff686d79..c97423a766420cf23311e86d3ccdc848d4d41c3a 100644 (file)
@@ -1114,7 +1114,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
 out:
        intel_engine_cancel_stop_cs(engine);
        reset_finish_engine(engine);
-       intel_engine_pm_put(engine);
+       intel_engine_pm_put_async(engine);
        return ret;
 }
 
index 20b9c83f43adb3687677e30fa968b9c91b9d284a..cbf6b073527248f8e4b98d3485db60fc80b60487 100644 (file)
@@ -51,11 +51,12 @@ static int live_engine_pm(void *arg)
                                pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
                                       engine->name, p->name);
                        else
-                               intel_engine_pm_put(engine);
-                       intel_engine_pm_put(engine);
+                               intel_engine_pm_put_async(engine);
+                       intel_engine_pm_put_async(engine);
                        p->critical_section_end();
 
-                       /* engine wakeref is sync (instant) */
+                       intel_engine_pm_flush(engine);
+
                        if (intel_engine_pm_is_awake(engine)) {
                                pr_err("%s is still awake after flushing pm\n",
                                       engine->name);
index 5448f37c81024a8210ed25809ca9f1f395033ae4..dca15ace88f6f8e2828e01b2b5c73bd810bb571f 100644 (file)
@@ -672,12 +672,13 @@ void i915_active_acquire_barrier(struct i915_active *ref)
         * populated by i915_request_add_active_barriers() to point to the
         * request that will eventually release them.
         */
-       spin_lock_irqsave_nested(&ref->tree_lock, flags, SINGLE_DEPTH_NESTING);
        llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
                struct active_node *node = barrier_from_ll(pos);
                struct intel_engine_cs *engine = barrier_to_engine(node);
                struct rb_node **p, *parent;
 
+               spin_lock_irqsave_nested(&ref->tree_lock, flags,
+                                        SINGLE_DEPTH_NESTING);
                parent = NULL;
                p = &ref->tree.rb_node;
                while (*p) {
@@ -693,12 +694,12 @@ void i915_active_acquire_barrier(struct i915_active *ref)
                }
                rb_link_node(&node->node, parent, p);
                rb_insert_color(&node->node, &ref->tree);
+               spin_unlock_irqrestore(&ref->tree_lock, flags);
 
                GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
                llist_add(barrier_to_ll(node), &engine->barrier_tasks);
                intel_engine_pm_put(engine);
        }
-       spin_unlock_irqrestore(&ref->tree_lock, flags);
 }
 
 void i915_request_add_active_barriers(struct i915_request *rq)
index 0d40dccd1409452850a6591bb5b1aef30438f71c..2814218c5ba18773f053d05e2733f5e67b38de17 100644 (file)
@@ -190,7 +190,7 @@ static u64 get_rc6(struct intel_gt *gt)
        val = 0;
        if (intel_gt_pm_get_if_awake(gt)) {
                val = __get_rc6(gt);
-               intel_gt_pm_put(gt);
+               intel_gt_pm_put_async(gt);
        }
 
        spin_lock_irqsave(&pmu->lock, flags);
@@ -343,7 +343,7 @@ engines_sample(struct intel_gt *gt, unsigned int period_ns)
 
 skip:
                spin_unlock_irqrestore(&engine->uncore->lock, flags);
-               intel_engine_pm_put(engine);
+               intel_engine_pm_put_async(engine);
        }
 }
 
@@ -368,7 +368,7 @@ frequency_sample(struct intel_gt *gt, unsigned int period_ns)
                if (intel_gt_pm_get_if_awake(gt)) {
                        val = intel_uncore_read_notrace(uncore, GEN6_RPSTAT1);
                        val = intel_get_cagf(rps, val);
-                       intel_gt_pm_put(gt);
+                       intel_gt_pm_put_async(gt);
                }
 
                add_sample_mult(&pmu->sample[__I915_SAMPLE_FREQ_ACT],
index ad26d7f4ca3df1b004b24540d8ca161e3b463256..59aa1b6f18277dee735991d2bf35d599af855544 100644 (file)
@@ -54,7 +54,8 @@ int __intel_wakeref_get_first(struct intel_wakeref *wf)
 
 static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
 {
-       if (!atomic_dec_and_test(&wf->count))
+       INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
+       if (unlikely(!atomic_dec_and_test(&wf->count)))
                goto unlock;
 
        /* ops->put() must reschedule its own release on error/deferral */
@@ -67,13 +68,12 @@ unlock:
        mutex_unlock(&wf->mutex);
 }
 
-void __intel_wakeref_put_last(struct intel_wakeref *wf)
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags)
 {
        INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
 
        /* Assume we are not in process context and so cannot sleep. */
-       if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
-           !mutex_trylock(&wf->mutex)) {
+       if (flags & INTEL_WAKEREF_PUT_ASYNC || !mutex_trylock(&wf->mutex)) {
                schedule_work(&wf->work);
                return;
        }
index affe4de3746b56c496332b4963b1ef956191b5e6..da6e8fd506e66f4057d376c1dc82412dd5323147 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <linux/atomic.h>
 #include <linux/bits.h>
+#include <linux/lockdep.h>
 #include <linux/mutex.h>
 #include <linux/refcount.h>
 #include <linux/stackdepot.h>
@@ -29,9 +30,6 @@ typedef depot_stack_handle_t intel_wakeref_t;
 struct intel_wakeref_ops {
        int (*get)(struct intel_wakeref *wf);
        int (*put)(struct intel_wakeref *wf);
-
-       unsigned long flags;
-#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
 };
 
 struct intel_wakeref {
@@ -57,7 +55,7 @@ void __intel_wakeref_init(struct intel_wakeref *wf,
 } while (0)
 
 int __intel_wakeref_get_first(struct intel_wakeref *wf);
-void __intel_wakeref_put_last(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf, unsigned long flags);
 
 /**
  * intel_wakeref_get: Acquire the wakeref
@@ -100,10 +98,9 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
 }
 
 /**
- * intel_wakeref_put: Release the wakeref
- * @i915: the drm_i915_private device
+ * intel_wakeref_put_flags: Release the wakeref
  * @wf: the wakeref
- * @fn: callback for releasing the wakeref, called only on final release.
+ * @flags: control flags
  *
  * Release our hold on the wakeref. When there are no more users,
  * the runtime pm wakeref will be released after the @fn callback is called
@@ -116,11 +113,25 @@ intel_wakeref_get_if_active(struct intel_wakeref *wf)
  * code otherwise.
  */
 static inline void
-intel_wakeref_put(struct intel_wakeref *wf)
+__intel_wakeref_put(struct intel_wakeref *wf, unsigned long flags)
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
 {
        INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
        if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
-               __intel_wakeref_put_last(wf);
+               __intel_wakeref_put_last(wf, flags);
+}
+
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
+{
+       might_sleep();
+       __intel_wakeref_put(wf, 0);
+}
+
+static inline void
+intel_wakeref_put_async(struct intel_wakeref *wf)
+{
+       __intel_wakeref_put(wf, INTEL_WAKEREF_PUT_ASYNC);
 }
 
 /**
@@ -185,6 +196,7 @@ intel_wakeref_is_active(const struct intel_wakeref *wf)
 static inline void
 __intel_wakeref_defer_park(struct intel_wakeref *wf)
 {
+       lockdep_assert_held(&wf->mutex);
        INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count));
        atomic_set_release(&wf->count, 1);
 }
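
Continuing the hypothetical sketch above (again, not the driver's API): a sampling path that may run from interrupt context acquires the reference conditionally and pairs it with the asynchronous put, mirroring how the PMU hunks switch to intel_gt_pm_put_async() and intel_engine_pm_put_async().

static bool example_get_if_awake(struct example_wakeref *wf)
{
        /* Only take a reference if one is already held (device awake). */
        return atomic_inc_not_zero(&wf->count);
}

static void example_sample(struct example_wakeref *wf)
{
        if (!example_get_if_awake(wf))
                return;

        /* ... read hardware counters ... */

        /* May run from a timer/IRQ: never take the mutex here. */
        example_put(wf, true);
}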