drm/i915: Introduce context->enter() and context->exit()
author     Chris Wilson <chris@chris-wilson.co.uk>
           Wed, 24 Apr 2019 20:07:15 +0000 (21:07 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Wed, 24 Apr 2019 21:25:32 +0000 (22:25 +0100)
We wish to start segregating the power management into different control
domains, both with respect to the hardware and the user interface. The
first step is that, at the lowest level of the request flow, we want to
process a context event (and not a global GEM operation). In this patch,
we introduce the context callbacks that future patches will redirect to
per-engine interfaces, leading to global operations as required.
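
The mechanism itself is small: each context keeps a count of its in-flight
requests, and only the 0 -> 1 and 1 -> 0 transitions reach the backend through
the new enter/exit hooks. A simplified, self-contained C model of that pattern
(the names here are illustrative only; the real helpers are the inline
functions added to intel_context.h below):

	#include <assert.h>

	struct ctx;

	/* Backend hooks, mirroring the new enter/exit members of intel_context_ops. */
	struct ctx_ops {
		void (*enter)(struct ctx *ce);
		void (*exit)(struct ctx *ce);
	};

	struct ctx {
		const struct ctx_ops *ops;
		unsigned int active_count; /* serialised by the caller */
	};

	/* Only the first request on an idle context wakes the backend... */
	static void ctx_enter(struct ctx *ce)
	{
		if (!ce->active_count++)
			ce->ops->enter(ce);
	}

	/* ...and only retiring the last request lets it idle again. */
	static void ctx_exit(struct ctx *ce)
	{
		assert(ce->active_count);
		if (!--ce->active_count)
			ce->ops->exit(ce);
	}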

The intent is that this will be guarded by the timeline->mutex, except
that retiring has not quite finished transitioning over from being
guarded by struct_mutex. So at the moment it is protected by
struct_mutex, with a reminder to switch over.
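
For the callers, these are paired with the timeline lock: request construction
takes ce->ring->timeline->mutex and calls intel_context_enter(), while request
retirement calls intel_context_exit() before unpinning the context. A hedged
sketch of that call pattern, abstracted from the i915_request.c hunks at the
end of this diff (request_alloc_sketch, request_retire_sketch and
build_request() are hypothetical placeholders; the intel_context_*() calls and
fields are the ones added or used by this patch):

	static struct i915_request *request_alloc_sketch(struct intel_context *ce)
	{
		struct i915_request *rq;

		mutex_lock(&ce->ring->timeline->mutex);
		intel_context_enter(ce);		/* 0 -> 1: ops->enter() */

		rq = build_request(ce);			/* placeholder for the real body */
		if (IS_ERR(rq)) {
			intel_context_exit(ce);		/* unwind on failure */
			mutex_unlock(&ce->ring->timeline->mutex);
		}
		/* on success the lock stays held while the request is completed */

		return rq;
	}

	static void request_retire_sketch(struct i915_request *rq)
	{
		/* still nominally under struct_mutex until retiring moves over */
		intel_context_exit(rq->hw_context);
		intel_context_unpin(rq->hw_context);
	}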

v2: Rename default handlers to intel_context_enter_engine.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190424200717.1686-3-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_context.h
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
drivers/gpu/drm/i915/gt/mock_engine.c
drivers/gpu/drm/i915/i915_request.c

diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index ebd1e5919a4aeaf3c0858c33c254ee37249c3e7e..4410e20e8e132cf8cd8cbc1ffd52d0710fa336e2 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -266,3 +266,20 @@ int __init i915_global_context_init(void)
        i915_global_register(&global.base);
        return 0;
 }
+
+void intel_context_enter_engine(struct intel_context *ce)
+{
+       struct drm_i915_private *i915 = ce->gem_context->i915;
+
+       if (!i915->gt.active_requests++)
+               i915_gem_unpark(i915);
+}
+
+void intel_context_exit_engine(struct intel_context *ce)
+{
+       struct drm_i915_private *i915 = ce->gem_context->i915;
+
+       GEM_BUG_ON(!i915->gt.active_requests);
+       if (!--i915->gt.active_requests)
+               i915_gem_park(i915);
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index ebc861b1a49e2bcf91a4c8b68ce7867b1cc3e42b..b732cf99efcb6e834cc1ae2c22f99102117c2cc8 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -73,6 +73,27 @@ static inline void __intel_context_pin(struct intel_context *ce)
 
 void intel_context_unpin(struct intel_context *ce);
 
+void intel_context_enter_engine(struct intel_context *ce);
+void intel_context_exit_engine(struct intel_context *ce);
+
+static inline void intel_context_enter(struct intel_context *ce)
+{
+       if (!ce->active_count++)
+               ce->ops->enter(ce);
+}
+
+static inline void intel_context_mark_active(struct intel_context *ce)
+{
+       ++ce->active_count;
+}
+
+static inline void intel_context_exit(struct intel_context *ce)
+{
+       GEM_BUG_ON(!ce->active_count);
+       if (!--ce->active_count)
+               ce->ops->exit(ce);
+}
+
 static inline struct intel_context *intel_context_get(struct intel_context *ce)
 {
        kref_get(&ce->ref);
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index 9ec4f787c9082594b25b0d11a606575487d9c965..f02d27734e3b6e50d4e26981135d0117462e47f3 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -25,6 +25,9 @@ struct intel_context_ops {
        int (*pin)(struct intel_context *ce);
        void (*unpin)(struct intel_context *ce);
 
+       void (*enter)(struct intel_context *ce);
+       void (*exit)(struct intel_context *ce);
+
        void (*reset)(struct intel_context *ce);
        void (*destroy)(struct kref *kref);
 };
@@ -46,6 +49,8 @@ struct intel_context {
        u32 *lrc_reg_state;
        u64 lrc_desc;
 
+       unsigned int active_count; /* notionally protected by timeline->mutex */
+
        atomic_t pin_count;
        struct mutex pin_mutex; /* guards pinning and associated on-gpuing */
 
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 5cadf8f6a23ded725870c902d4f791dc8eaa9849..edec7f1836881eb7201e43f2687a252f61efcbd5 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -1315,6 +1315,9 @@ static const struct intel_context_ops execlists_context_ops = {
        .pin = execlists_context_pin,
        .unpin = execlists_context_unpin,
 
+       .enter = intel_context_enter_engine,
+       .exit = intel_context_exit_engine,
+
        .reset = execlists_context_reset,
        .destroy = execlists_context_destroy,
 };
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index ac84a383748ed1df8e396243ae31e8c8e633b5ed..5404fe382691a27a46e07eb899f563eee79320d8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
@@ -1510,6 +1510,9 @@ static const struct intel_context_ops ring_context_ops = {
        .pin = ring_context_pin,
        .unpin = ring_context_unpin,
 
+       .enter = intel_context_enter_engine,
+       .exit = intel_context_exit_engine,
+
        .reset = ring_context_reset,
        .destroy = ring_context_destroy,
 };
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 414afd2f27fe5062913c02677b37c1f62e696979..bcfeb0c67997d79313ebcfccc43cb7beee14d41c 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -157,6 +157,9 @@ static const struct intel_context_ops mock_context_ops = {
        .pin = mock_context_pin,
        .unpin = mock_context_unpin,
 
+       .enter = intel_context_enter_engine,
+       .exit = intel_context_exit_engine,
+
        .destroy = mock_context_destroy,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 64ca8b3ea12f297b83d366e7fb596c9178155f8a..9a2665ee012a998a611f698279bc3a278bb2bded 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -131,19 +131,6 @@ i915_request_remove_from_client(struct i915_request *request)
        spin_unlock(&file_priv->mm.lock);
 }
 
-static void reserve_gt(struct drm_i915_private *i915)
-{
-       if (!i915->gt.active_requests++)
-               i915_gem_unpark(i915);
-}
-
-static void unreserve_gt(struct drm_i915_private *i915)
-{
-       GEM_BUG_ON(!i915->gt.active_requests);
-       if (!--i915->gt.active_requests)
-               i915_gem_park(i915);
-}
-
 static void advance_ring(struct i915_request *request)
 {
        struct intel_ring *ring = request->ring;
@@ -301,11 +288,10 @@ static void i915_request_retire(struct i915_request *request)
 
        i915_request_remove_from_client(request);
 
-       intel_context_unpin(request->hw_context);
-
        __retire_engine_upto(request->engine, request);
 
-       unreserve_gt(request->i915);
+       intel_context_exit(request->hw_context);
+       intel_context_unpin(request->hw_context);
 
        i915_sched_node_fini(&request->sched);
        i915_request_put(request);
@@ -659,8 +645,8 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
        if (IS_ERR(ce))
                return ERR_CAST(ce);
 
-       reserve_gt(i915);
        mutex_lock(&ce->ring->timeline->mutex);
+       intel_context_enter(ce);
 
        /* Move our oldest request to the slab-cache (if not in use!) */
        rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
@@ -791,8 +777,8 @@ err_unwind:
 err_free:
        kmem_cache_free(global.slab_requests, rq);
 err_unreserve:
+       intel_context_exit(ce);
        mutex_unlock(&ce->ring->timeline->mutex);
-       unreserve_gt(i915);
        intel_context_unpin(ce);
        return ERR_PTR(ret);
 }