}
} while (i915_retire_requests(gt->i915) && result);
- GEM_BUG_ON(gt->awake);
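+ /* GT parking is asynchronous, so wait for it to complete before reporting idle. */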
+ if (intel_gt_pm_wait_for_idle(gt))
+ result = false;
+
return result;
}
mutex_unlock(&i915->drm.struct_mutex);
- /*
- * Assert that we successfully flushed all the work and
- * reset the GPU back to its idle, low power state.
- */
- GEM_BUG_ON(i915->gt.awake);
- flush_work(&i915->gem.idle_work);
-
cancel_delayed_work_sync(&i915->gt.hangcheck.work);
i915_gem_drain_freed_objects(i915);
{
GEM_TRACE("\n");
- WARN_ON(i915->gt.awake);
-
mutex_lock(&i915->drm.struct_mutex);
intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "mock_engine.c"
+#include "selftest_engine.c"
#include "selftest_engine_cs.c"
#endif
return 0;
}
-void intel_engine_pm_get(struct intel_engine_cs *engine)
-{
- intel_wakeref_get(&engine->i915->runtime_pm, &engine->wakeref, __engine_unpark);
-}
-
-void intel_engine_park(struct intel_engine_cs *engine)
-{
- /*
- * We are committed now to parking this engine, make sure there
- * will be no more interrupts arriving later and the engine
- * is truly idle.
- */
- if (wait_for(intel_engine_is_idle(engine), 10)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- dev_err(engine->i915->drm.dev,
- "%s is not idle before parking\n",
- engine->name);
- intel_engine_dump(engine, &p, NULL);
- }
-}
-
static bool switch_to_kernel_context(struct intel_engine_cs *engine)
{
struct i915_request *rq;
return 0;
}
-void intel_engine_pm_put(struct intel_engine_cs *engine)
-{
- intel_wakeref_put(&engine->i915->runtime_pm, &engine->wakeref, __engine_park);
-}
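+/* Unpark the engine on the first get, park it again on the final put. */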
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __engine_unpark,
+ .put = __engine_park,
+};
void intel_engine_init__pm(struct intel_engine_cs *engine)
{
- intel_wakeref_init(&engine->wakeref);
+ struct intel_runtime_pm *rpm = &engine->i915->runtime_pm;
+
+ intel_wakeref_init(&engine->wakeref, rpm, &wf_ops);
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_engine_pm.c"
+#endif
#include "intel_engine_types.h"
#include "intel_wakeref.h"
-struct drm_i915_private;
-
-void intel_engine_pm_get(struct intel_engine_cs *engine);
-void intel_engine_pm_put(struct intel_engine_cs *engine);
-
static inline bool
intel_engine_pm_is_awake(const struct intel_engine_cs *engine)
{
return intel_wakeref_is_active(&engine->wakeref);
}
-static inline bool
-intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
+static inline void intel_engine_pm_get(struct intel_engine_cs *engine)
+{
+ intel_wakeref_get(&engine->wakeref);
+}
+
+static inline bool intel_engine_pm_get_if_awake(struct intel_engine_cs *engine)
{
return intel_wakeref_get_if_active(&engine->wakeref);
}
-void intel_engine_park(struct intel_engine_cs *engine);
+static inline void intel_engine_pm_put(struct intel_engine_cs *engine)
+{
+ intel_wakeref_put(&engine->wakeref);
+}
void intel_engine_init__pm(struct intel_engine_cs *engine);
blocking_notifier_call_chain(&i915->gt.pm_notifications, state, i915);
}
-static int intel_gt_unpark(struct intel_wakeref *wf)
+static int __gt_unpark(struct intel_wakeref *wf)
{
struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
struct drm_i915_private *i915 = gt->i915;
return 0;
}
-void intel_gt_pm_get(struct intel_gt *gt)
-{
- struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
-
- intel_wakeref_get(rpm, &gt->wakeref, intel_gt_unpark);
-}
-
-static int intel_gt_park(struct intel_wakeref *wf)
+static int __gt_park(struct intel_wakeref *wf)
{
struct drm_i915_private *i915 =
container_of(wf, typeof(*i915), gt.wakeref);
if (INTEL_GEN(i915) >= 6)
gen6_rps_idle(i915);
+ /* Everything switched off, flush any residual interrupt just in case */
+ intel_synchronize_irq(i915);
+
GEM_BUG_ON(!wakeref);
intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
return 0;
}
-void intel_gt_pm_put(struct intel_gt *gt)
-{
- struct intel_runtime_pm *rpm = &gt->i915->runtime_pm;
-
- intel_wakeref_put(rpm, &gt->wakeref, intel_gt_park);
-}
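+/*
+ * The final GT wakeref put may occur in atomic context (e.g. from the
+ * execlists submission tasklet), so parking is deferred to a worker
+ * with INTEL_WAKEREF_PUT_ASYNC.
+ */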
+static const struct intel_wakeref_ops wf_ops = {
+ .get = __gt_unpark,
+ .put = __gt_park,
+ .flags = INTEL_WAKEREF_PUT_ASYNC,
+};
void intel_gt_pm_init_early(struct intel_gt *gt)
{
- intel_wakeref_init(&gt->wakeref);
+ intel_wakeref_init(&gt->wakeref, &gt->i915->runtime_pm, &wf_ops);
+
BLOCKING_INIT_NOTIFIER_HEAD(&gt->pm_notifications);
}
INTEL_GT_PARK,
};
-void intel_gt_pm_get(struct intel_gt *gt);
-void intel_gt_pm_put(struct intel_gt *gt);
+static inline bool intel_gt_pm_is_awake(const struct intel_gt *gt)
+{
+ return intel_wakeref_is_active(&gt->wakeref);
+}
+
+static inline void intel_gt_pm_get(struct intel_gt *gt)
+{
+ intel_wakeref_get(&gt->wakeref);
+}
static inline bool intel_gt_pm_get_if_awake(struct intel_gt *gt)
{
return intel_wakeref_get_if_active(&gt->wakeref);
}
+static inline void intel_gt_pm_put(struct intel_gt *gt)
+{
+ intel_wakeref_put(&gt->wakeref);
+}
+
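+/* Wait for the deferred parking (and any other wakeref holders) to complete. */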
+static inline int intel_gt_pm_wait_for_idle(struct intel_gt *gt)
+{
+ return intel_wakeref_wait_for_idle(&gt->wakeref);
+}
+
void intel_gt_pm_init_early(struct intel_gt *gt);
void intel_gt_sanitize(struct intel_gt *gt, bool force);
#include "i915_vgpu.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
+#include "intel_gt_pm.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#include "intel_reset.h"
intel_context_get(ce);
ce->inflight = rq->engine;
+ intel_gt_pm_get(ce->inflight->gt);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
intel_engine_context_in(ce->inflight);
}
if (!intel_context_inflight_count(ce)) {
intel_engine_context_out(ce->inflight);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
+ intel_gt_pm_put(ce->inflight->gt);
/*
* If this is part of a virtual engine, its next request may
static void execlists_park(struct intel_engine_cs *engine)
{
del_timer_sync(&engine->execlists.timer);
- intel_engine_park(engine);
}
void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
--- /dev/null
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+
+int intel_engine_live_selftests(struct drm_i915_private *i915)
+{
+ static int (* const tests[])(struct intel_gt *) = {
+ live_engine_pm_selftests,
+ NULL,
+ };
+ struct intel_gt *gt = &i915->gt;
+ typeof(*tests) *fn;
+
+ for (fn = tests; *fn; fn++) {
+ int err;
+
+ err = (*fn)(gt);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
--- /dev/null
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef SELFTEST_ENGINE_H
+#define SELFTEST_ENGINE_H
+
+struct intel_gt;
+
+int live_engine_pm_selftests(struct intel_gt *gt);
+
+#endif
--- /dev/null
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright © 2018 Intel Corporation
+ */
+
+#include "i915_selftest.h"
+#include "selftest_engine.h"
+#include "selftests/igt_atomic.h"
+
+static int live_engine_pm(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Check we can call intel_engine_pm_put from any context. No
+ * failures are reported directly, but if we mess up lockdep should
+ * tell us.
+ */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("Unable to flush GT pm before test\n");
+ return -EBUSY;
+ }
+
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ for_each_engine(engine, gt->i915, id) {
+ const typeof(*igt_atomic_phases) *p;
+
+ for (p = igt_atomic_phases; p->name; p++) {
+ /*
+ * Acquisition is always synchronous, except if we
+ * know that the engine is already awake, in which
+ * case we should use intel_engine_pm_get_if_awake()
+ * to atomically grab the wakeref.
+ *
+ * In practice,
+ * intel_engine_pm_get();
+ * intel_engine_pm_put();
+ * occurs in one thread, while simultaneously
+ * intel_engine_pm_get_if_awake();
+ * intel_engine_pm_put();
+ * occurs from atomic context in another.
+ */
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+ intel_engine_pm_get(engine);
+
+ p->critical_section_begin();
+ if (!intel_engine_pm_get_if_awake(engine))
+ pr_err("intel_engine_pm_get_if_awake(%s) failed under %s\n",
+ engine->name, p->name);
+ else
+ intel_engine_pm_put(engine);
+ intel_engine_pm_put(engine);
+ p->critical_section_end();
+
+ /* engine wakeref is sync (instant) */
+ if (intel_engine_pm_is_awake(engine)) {
+ pr_err("%s is still awake after flushing pm\n",
+ engine->name);
+ return -EINVAL;
+ }
+
+ /* gt wakeref is async (deferred to workqueue) */
+ if (intel_gt_pm_wait_for_idle(gt)) {
+ pr_err("GT failed to idle\n");
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int live_engine_pm_selftests(struct intel_gt *gt)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_engine_pm),
+ };
+
+ return intel_gt_live_subtests(tests, gt);
+}
#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_lrc_reg.h"
#include "intel_guc_submission.h"
if (!rq->hw_context->inflight)
rq->hw_context->inflight = rq->engine;
intel_context_inflight_inc(rq->hw_context);
+ intel_gt_pm_get(rq->engine->gt);
return i915_request_get(rq);
}
if (!intel_context_inflight_count(rq->hw_context))
rq->hw_context->inflight = NULL;
+ intel_gt_pm_put(rq->engine->gt);
i915_request_put(rq);
}
static void guc_submission_park(struct intel_engine_cs *engine)
{
- intel_engine_park(engine);
intel_engine_unpin_breadcrumbs_irq(engine);
engine->flags &= ~I915_ENGINE_NEEDS_BREADCRUMB_TASKLET;
}
#include "display/intel_psr.h"
#include "gem/i915_gem_context.h"
+#include "gt/intel_gt_pm.h"
#include "gt/intel_reset.h"
#include "gt/uc/intel_guc_submission.h"
i915_retire_requests(i915);
mutex_unlock(&i915->drm.struct_mutex);
+
+ if (ret == 0 && val & DROP_IDLE)
+ ret = intel_gt_pm_wait_for_idle(&i915->gt);
}
if (val & DROP_RESET_ACTIVE && intel_gt_terminally_wedged(&i915->gt))
}
}
-static int wait_for_engines(struct intel_gt *gt)
-{
- if (wait_for(intel_engines_are_idle(gt), I915_IDLE_ENGINES_TIMEOUT)) {
- dev_err(gt->i915->drm.dev,
- "Failed to idle engines, declaring wedged!\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- return -EIO;
- }
-
- return 0;
-}
-
static long
wait_for_timelines(struct drm_i915_private *i915,
unsigned int flags, long timeout)
unsigned int flags, long timeout)
{
/* If the device is asleep, we have no requests outstanding */
- if (!READ_ONCE(i915->gt.awake))
+ if (!intel_gt_pm_is_awake(&i915->gt))
return 0;
- GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
+ GEM_TRACE("flags=%x (%s), timeout=%ld%s\n",
flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
- timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
- yesno(i915->gt.awake));
+ timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "");
timeout = wait_for_timelines(i915, flags, timeout);
if (timeout < 0)
return timeout;
if (flags & I915_WAIT_LOCKED) {
- int err;
-
lockdep_assert_held(&i915->drm.struct_mutex);
- err = wait_for_engines(&i915->gt);
- if (err)
- return err;
-
i915_retire_requests(i915);
}
* Copyright © 2019 Intel Corporation
*/
+#include <linux/wait_bit.h>
+
#include "intel_runtime_pm.h"
#include "intel_wakeref.h"
-static void rpm_get(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_get(struct intel_wakeref *wf)
{
- wf->wakeref = intel_runtime_pm_get(rpm);
+ wf->wakeref = intel_runtime_pm_get(wf->rpm);
}
-static void rpm_put(struct intel_runtime_pm *rpm, struct intel_wakeref *wf)
+static void rpm_put(struct intel_wakeref *wf)
{
intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref);
- intel_runtime_pm_put(rpm, wakeref);
+ intel_runtime_pm_put(wf->rpm, wakeref);
INTEL_WAKEREF_BUG_ON(!wakeref);
}
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+int __intel_wakeref_get_first(struct intel_wakeref *wf)
{
/*
* Treat get/put as different subclasses, as we may need to run
if (!atomic_read(&wf->count)) {
int err;
- rpm_get(rpm, wf);
+ rpm_get(wf);
- err = fn(wf);
+ err = wf->ops->get(wf);
if (unlikely(err)) {
- rpm_put(rpm, wf);
+ rpm_put(wf);
mutex_unlock(&wf->mutex);
return err;
}
return 0;
}
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static void ____intel_wakeref_put_last(struct intel_wakeref *wf)
{
- int err;
+ if (!atomic_dec_and_test(&wf->count))
+ goto unlock;
+
+ if (likely(!wf->ops->put(wf))) {
+ rpm_put(wf);
+ wake_up_var(&wf->wakeref);
+ } else {
+ /* ops->put() must schedule its own release on deferral */
+ atomic_set_release(&wf->count, 1);
+ }
- err = fn(wf);
- if (likely(!err))
- rpm_put(rpm, wf);
- else
- atomic_inc(&wf->count);
+unlock:
mutex_unlock(&wf->mutex);
+}
+
+void __intel_wakeref_put_last(struct intel_wakeref *wf)
+{
+ INTEL_WAKEREF_BUG_ON(work_pending(&wf->work));
- return err;
+ /* Assume we are not in process context and so cannot sleep. */
+ if (wf->ops->flags & INTEL_WAKEREF_PUT_ASYNC ||
+ !mutex_trylock(&wf->mutex)) {
+ schedule_work(&wf->work);
+ return;
+ }
+
+ ____intel_wakeref_put_last(wf);
}
-void __intel_wakeref_init(struct intel_wakeref *wf, struct lock_class_key *key)
+static void __intel_wakeref_put_work(struct work_struct *wrk)
{
+ struct intel_wakeref *wf = container_of(wrk, typeof(*wf), work);
+
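+ /* If a concurrent get raised the count, this is no longer the final put. */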
+ if (atomic_add_unless(&wf->count, -1, 1))
+ return;
+
+ mutex_lock(&wf->mutex);
+ ____intel_wakeref_put_last(wf);
+}
+
+void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
+ struct lock_class_key *key)
+{
+ wf->rpm = rpm;
+ wf->ops = ops;
+
__mutex_init(&wf->mutex, "wakeref", key);
atomic_set(&wf->count, 0);
wf->wakeref = 0;
+
+ INIT_WORK(&wf->work, __intel_wakeref_put_work);
+}
+
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf)
+{
+ return wait_var_event_killable(&wf->wakeref,
+ !intel_wakeref_is_active(wf));
}
static void wakeref_auto_timeout(struct timer_list *t)
#define INTEL_WAKEREF_H
#include <linux/atomic.h>
+#include <linux/bits.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/stackdepot.h>
#include <linux/timer.h>
+#include <linux/workqueue.h>
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define INTEL_WAKEREF_BUG_ON(expr) BUG_ON(expr)
#endif
struct intel_runtime_pm;
+struct intel_wakeref;
typedef depot_stack_handle_t intel_wakeref_t;
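+/*
+ * Callbacks invoked under the wakeref mutex: @get on the first reference
+ * (on failure the runtime-pm wakeref is dropped again), @put on the final
+ * release. A non-zero return from @put defers the release and the callback
+ * must then schedule its own completion.
+ */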
+struct intel_wakeref_ops {
+ int (*get)(struct intel_wakeref *wf);
+ int (*put)(struct intel_wakeref *wf);
+
+ unsigned long flags;
+#define INTEL_WAKEREF_PUT_ASYNC BIT(0)
+};
+
struct intel_wakeref {
atomic_t count;
struct mutex mutex;
+
intel_wakeref_t wakeref;
+
+ struct intel_runtime_pm *rpm;
+ const struct intel_wakeref_ops *ops;
+
+ struct work_struct work;
};
void __intel_wakeref_init(struct intel_wakeref *wf,
+ struct intel_runtime_pm *rpm,
+ const struct intel_wakeref_ops *ops,
struct lock_class_key *key);
-#define intel_wakeref_init(wf) do { \
+#define intel_wakeref_init(wf, rpm, ops) do { \
static struct lock_class_key __key; \
\
- __intel_wakeref_init((wf), &__key); \
+ __intel_wakeref_init((wf), (rpm), (ops), &__key); \
} while (0)
-int __intel_wakeref_get_first(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
-int __intel_wakeref_put_last(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf));
+int __intel_wakeref_get_first(struct intel_wakeref *wf);
+void __intel_wakeref_put_last(struct intel_wakeref *wf);
/**
* intel_wakeref_get: Acquire the wakeref
* code otherwise.
*/
static inline int
-intel_wakeref_get(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+intel_wakeref_get(struct intel_wakeref *wf)
{
if (unlikely(!atomic_inc_not_zero(&wf->count)))
- return __intel_wakeref_get_first(rpm, wf, fn);
+ return __intel_wakeref_get_first(wf);
return 0;
}
- * Returns: 0 if the wakeref was released successfully, or a negative error
- * code otherwise.
*/
-static inline int
-intel_wakeref_put(struct intel_runtime_pm *rpm,
- struct intel_wakeref *wf,
- int (*fn)(struct intel_wakeref *wf))
+static inline void
+intel_wakeref_put(struct intel_wakeref *wf)
{
INTEL_WAKEREF_BUG_ON(atomic_read(&wf->count) <= 0);
- if (atomic_dec_and_mutex_lock(&wf->count, &wf->mutex))
- return __intel_wakeref_put_last(rpm, wf, fn);
-
- return 0;
+ if (unlikely(!atomic_add_unless(&wf->count, -1, 1)))
+ __intel_wakeref_put_last(wf);
}
/**
return READ_ONCE(wf->wakeref);
}
+/**
+ * intel_wakeref_wait_for_idle: Wait until the wakeref is idle
+ * @wf: the wakeref
+ *
+ * Wait for the earlier asynchronous release of the wakeref. Note
+ * this will wait for any third party as well, so make sure you only wait
+ * when you have control over the wakeref and trust no one else is acquiring
+ * it.
+ *
+ * Return: 0 on success, error code if killed.
+ */
+int intel_wakeref_wait_for_idle(struct intel_wakeref *wf);
+
struct intel_wakeref_auto {
struct intel_runtime_pm *rpm;
struct timer_list timer;
selftest(sanitycheck, i915_live_sanitycheck) /* keep first (igt selfcheck) */
selftest(uncore, intel_uncore_live_selftests)
selftest(workarounds, intel_workarounds_live_selftests)
-selftest(timelines, intel_timeline_live_selftests)
+selftest(gt_engines, intel_engine_live_selftests)
+selftest(gt_timelines, intel_timeline_live_selftests)
+selftest(gt_contexts, intel_context_live_selftests)
selftest(requests, i915_request_live_selftests)
selftest(active, i915_active_live_selftests)
-selftest(gt_contexts, intel_context_live_selftests)
selftest(objects, i915_gem_object_live_selftests)
selftest(mman, i915_gem_mman_live_selftests)
selftest(dmabuf, i915_gem_dmabuf_live_selftests)