git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
drm/i915/gt: Schedule request retirement when timeline idles
author: Chris Wilson <chris@chris-wilson.co.uk>
        Mon, 25 Nov 2019 10:58:58 +0000 (10:58 +0000)
committer: Chris Wilson <chris@chris-wilson.co.uk>
           Mon, 25 Nov 2019 13:17:18 +0000 (13:17 +0000)
The major drawback of commit 7e34f4e4aad3 ("drm/i915/gen8+: Add RC6 CTX
corruption WA") is that it disables RC6 while Skylake (and friends) is
active, and we do not consider the GPU idle until all outstanding
requests have been retired and the engine switched over to the kernel
context. If userspace is idle, this task falls onto our background idle
worker, which only runs roughly once a second, meaning that userspace has
to have been idle for a couple of seconds before we enable RC6 again.
Naturally, this causes us to consume considerably more energy than
before as powersaving is effectively disabled while a display server
(here's looking at you Xorg) is running.

As execlists will get a completion event as each context is completed,
we can use this interrupt to queue a retire worker bound to this engine
to cleanup idle timelines. We will then immediately notice the idle
engine (without userspace intervention or the aid of the background
retire worker) and start parking the GPU. Thus during light workloads,
we will do much more work to idle the GPU faster...  Hopefully with
commensurate power saving!

v2: Watch context completions and only look at those local to the engine
when retiring to reduce the amount of excess work we perform.

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=112315
References: 7e34f4e4aad3 ("drm/i915/gen8+: Add RC6 CTX corruption WA")
References: 2248a28384fe ("drm/i915/gen8+: Add RC6 CTX corruption WA")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191125105858.1718307-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_gt_requests.c
drivers/gpu/drm/i915/gt/intel_gt_requests.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_timeline.c
drivers/gpu/drm/i915/gt/intel_timeline_types.h

index b9613d044393fac2468fb75d31d976842461eea1..8f6e353caa667ee2fee4554db4e6a232e071f9c1 100644 (file)
 
 #include "i915_drv.h"
 
-#include "gt/intel_gt.h"
-
+#include "intel_context.h"
 #include "intel_engine.h"
 #include "intel_engine_pm.h"
 #include "intel_engine_pool.h"
 #include "intel_engine_user.h"
-#include "intel_context.h"
+#include "intel_gt.h"
+#include "intel_gt_requests.h"
 #include "intel_lrc.h"
 #include "intel_reset.h"
 #include "intel_ring.h"
@@ -617,6 +617,7 @@ static int intel_engine_setup_common(struct intel_engine_cs *engine)
        intel_engine_init_execlists(engine);
        intel_engine_init_cmd_parser(engine);
        intel_engine_init__pm(engine);
+       intel_engine_init_retire(engine);
 
        intel_engine_pool_init(&engine->pool);
 
@@ -839,6 +840,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
 
        cleanup_status_page(engine);
 
+       intel_engine_fini_retire(engine);
        intel_engine_pool_fini(&engine->pool);
        intel_engine_fini_breadcrumbs(engine);
        intel_engine_cleanup_cmd_parser(engine);
index 758f0e8ec6723dd8f0a89d987e9e0eb248f4e259..17f1f1441efc9dba383e51959aa936a4177b8bfa 100644 (file)
@@ -451,6 +451,14 @@ struct intel_engine_cs {
 
        struct intel_engine_execlists execlists;
 
+       /*
+        * Keep track of completed timelines on this engine for early
+        * retirement with the goal of quickly enabling powersaving as
+        * soon as the engine is idle.
+        */
+       struct intel_timeline *retire;
+       struct work_struct retire_work;
+
        /* status_notifier: list of callbacks for context-switch changes */
        struct atomic_notifier_head context_status_notifier;
 
index f02f781b84924777d079dbcf03c0b1ff39ff93ad..8cb5421e5f0ec30c69867272195157bca97df66e 100644 (file)
@@ -4,6 +4,8 @@
  * Copyright © 2019 Intel Corporation
  */
 
+#include <linux/workqueue.h>
+
 #include "i915_drv.h" /* for_each_engine() */
 #include "i915_request.h"
 #include "intel_gt.h"
@@ -29,6 +31,79 @@ static void flush_submission(struct intel_gt *gt)
                intel_engine_flush_submission(engine);
 }
 
+static void engine_retire(struct work_struct *work)
+{
+       struct intel_engine_cs *engine =
+               container_of(work, typeof(*engine), retire_work);
+       struct intel_timeline *tl = xchg(&engine->retire, NULL);
+
+       do {
+               struct intel_timeline *next = xchg(&tl->retire, NULL);
+
+               /*
+                * Our goal here is to retire _idle_ timelines as soon as
+                * possible (as they are idle, we do not expect userspace
+                * to be cleaning up anytime soon).
+                *
+                * If the timeline is currently locked, either it is being
+                * retired elsewhere or about to be!
+                */
+               if (mutex_trylock(&tl->mutex)) {
+                       retire_requests(tl);
+                       mutex_unlock(&tl->mutex);
+               }
+               intel_timeline_put(tl);
+
+               GEM_BUG_ON(!next);
+               tl = ptr_mask_bits(next, 1);
+       } while (tl);
+}
+
+static bool add_retire(struct intel_engine_cs *engine,
+                      struct intel_timeline *tl)
+{
+       struct intel_timeline *first;
+
+       /*
+        * We open-code a llist here to include the additional tag [BIT(0)]
+        * so that we know when the timeline is already on a
+        * retirement queue: either this engine or another.
+        *
+        * However, we rely on that a timeline can only be active on a single
+        * engine at any one time and that add_retire() is called before the
+        * engine releases the timeline and transferred to another to retire.
+        */
+
+       if (READ_ONCE(tl->retire)) /* already queued */
+               return false;
+
+       intel_timeline_get(tl);
+       first = READ_ONCE(engine->retire);
+       do
+               tl->retire = ptr_pack_bits(first, 1, 1);
+       while (!try_cmpxchg(&engine->retire, &first, tl));
+
+       return !first;
+}
+
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+                            struct intel_timeline *tl)
+{
+       if (add_retire(engine, tl))
+               schedule_work(&engine->retire_work);
+}
+
+void intel_engine_init_retire(struct intel_engine_cs *engine)
+{
+       INIT_WORK(&engine->retire_work, engine_retire);
+}
+
+void intel_engine_fini_retire(struct intel_engine_cs *engine)
+{
+       flush_work(&engine->retire_work);
+       GEM_BUG_ON(engine->retire);
+}
+
 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
 {
        struct intel_gt_timelines *timelines = &gt->timelines;
index fde546424c63a8f4fc0b2e564d6538ea9f21b4ce..dbac53baf1cb83c519502f9440a3d88c901e3376 100644 (file)
@@ -7,7 +7,9 @@
 #ifndef INTEL_GT_REQUESTS_H
 #define INTEL_GT_REQUESTS_H
 
+struct intel_engine_cs;
 struct intel_gt;
+struct intel_timeline;
 
 long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout);
 static inline void intel_gt_retire_requests(struct intel_gt *gt)
@@ -15,6 +17,11 @@ static inline void intel_gt_retire_requests(struct intel_gt *gt)
        intel_gt_retire_requests_timeout(gt, 0);
 }
 
+void intel_engine_init_retire(struct intel_engine_cs *engine);
+void intel_engine_add_retire(struct intel_engine_cs *engine,
+                            struct intel_timeline *tl);
+void intel_engine_fini_retire(struct intel_engine_cs *engine);
+
 int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
 
 void intel_gt_init_requests(struct intel_gt *gt);
index 6090357a00fa1f721eb7b1fe815e6015cc34844e..4cd0d46b5da65cbe65a272457d5103ad412a459d 100644 (file)
 #include "intel_engine_pm.h"
 #include "intel_gt.h"
 #include "intel_gt_pm.h"
+#include "intel_gt_requests.h"
 #include "intel_lrc_reg.h"
 #include "intel_mocs.h"
 #include "intel_reset.h"
@@ -1170,6 +1171,14 @@ __execlists_schedule_out(struct i915_request *rq,
         * refrain from doing non-trivial work here.
         */
 
+       /*
+        * If we have just completed this context, the engine may now be
+        * idle and we want to re-enter powersaving.
+        */
+       if (list_is_last(&rq->link, &ce->timeline->requests) &&
+           i915_request_completed(rq))
+               intel_engine_add_retire(engine, ce->timeline);
+
        intel_engine_context_out(engine);
        execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
        intel_gt_pm_put_async(engine->gt);
index b190a5d9ab02e8d2b76c109bdee377653cc3043e..c1d2419444f8edcb46f8cd6053783821812a8355 100644 (file)
@@ -277,6 +277,7 @@ void intel_timeline_fini(struct intel_timeline *timeline)
 {
        GEM_BUG_ON(atomic_read(&timeline->pin_count));
        GEM_BUG_ON(!list_empty(&timeline->requests));
+       GEM_BUG_ON(timeline->retire);
 
        if (timeline->hwsp_cacheline)
                cacheline_free(timeline->hwsp_cacheline);
index 5244615ed1cb4a13b5a81737bcd67e75e7472b09..aaf15cbe1ce18526c6f8794957785b758d33a149 100644 (file)
@@ -66,6 +66,9 @@ struct intel_timeline {
         */
        struct i915_active_fence last_request;
 
+       /** A chain of completed timelines ready for early retirement. */
+       struct intel_timeline *retire;
+
        /**
         * We track the most recent seqno that we wait on in every context so
         * that we only have to emit a new await and dependency on a more