drm/i915: Create a kmem_cache to allocate struct i915_priolist from
author		Chris Wilson <chris@chris-wilson.co.uk>
		Wed, 17 May 2017 12:10:04 +0000 (13:10 +0100)
committer	Chris Wilson <chris@chris-wilson.co.uk>
		Wed, 17 May 2017 12:38:12 +0000 (13:38 +0100)
i915_priolist objects are allocated in an atomic context on a path where
we wish to minimise latency. If we use a dedicated kmem_cache, we gain
the advantage of a local freelist from which to service new requests,
which should keep the latency impact of an allocation small. Although we
currently expect the majority of requests to be at default priority
(and so hit the preallocated priolist), once userspace starts using
priorities it is likely to use many fine-grained policies, improving
the utilisation of a private slab.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20170517121007.27224-9-chris@chris-wilson.co.uk
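
As background, here is a minimal standalone sketch of the dedicated slab-cache
pattern this commit adopts. It assumes the three-field i915_priolist layout
visible in the hunks below (rbtree node, request list, priority); the
init/alloc/free/fini helpers are illustrative names, not driver functions:

	/* Hypothetical sketch of the kmem_cache lifecycle, not the driver code. */
	#include <linux/slab.h>
	#include <linux/rbtree.h>
	#include <linux/list.h>

	struct i915_priolist {
		struct rb_node node;
		struct list_head requests;
		int priority;
	};

	static struct kmem_cache *priorities;

	static int priolist_cache_init(void)
	{
		/* One slab dedicated to i915_priolist objects, cache-line aligned. */
		priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
		return priorities ? 0 : -ENOMEM;
	}

	static struct i915_priolist *priolist_alloc(void)
	{
		/* GFP_ATOMIC: the submission path may hold locks and cannot sleep. */
		return kmem_cache_alloc(priorities, GFP_ATOMIC);
	}

	static void priolist_free(struct i915_priolist *p)
	{
		kmem_cache_free(priorities, p);
	}

	static void priolist_cache_fini(void)
	{
		kmem_cache_destroy(priorities);
	}

A freed node goes back onto the cache's local freelist and can service the next
allocation directly, which is what keeps the GFP_ATOMIC allocation cheap
compared with a generic kmalloc.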
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_guc_submission.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/selftests/mock_gem_device.c

index a6f20471b4cd7883cb00e56206f0ace5da846434..08ee5c8834fb24bfc4204dbf88bde19597f5fbe3 100644 (file)
@@ -2027,6 +2027,7 @@ struct drm_i915_private {
        struct kmem_cache *vmas;
        struct kmem_cache *requests;
        struct kmem_cache *dependencies;
+       struct kmem_cache *priorities;
 
        const struct intel_device_info info;
 
index 3d9161c8c1a1e5fbdca7da2b7af296c5f9e5a98b..0680bd2e635a0afeee4708fb29c1c70bc32ce0d9 100644 (file)
@@ -4866,12 +4866,16 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
        if (!dev_priv->dependencies)
                goto err_requests;
 
+       dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+       if (!dev_priv->priorities)
+               goto err_dependencies;
+
        mutex_lock(&dev_priv->drm.struct_mutex);
        INIT_LIST_HEAD(&dev_priv->gt.timelines);
        err = i915_gem_timeline_init__global(dev_priv);
        mutex_unlock(&dev_priv->drm.struct_mutex);
        if (err)
-               goto err_dependencies;
+               goto err_priorities;
 
        INIT_LIST_HEAD(&dev_priv->context_list);
        INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
@@ -4895,6 +4899,8 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 
        return 0;
 
+err_priorities:
+       kmem_cache_destroy(dev_priv->priorities);
 err_dependencies:
        kmem_cache_destroy(dev_priv->dependencies);
 err_requests:
@@ -4918,6 +4924,7 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
        WARN_ON(!list_empty(&dev_priv->gt.timelines));
        mutex_unlock(&dev_priv->drm.struct_mutex);
 
+       kmem_cache_destroy(dev_priv->priorities);
        kmem_cache_destroy(dev_priv->dependencies);
        kmem_cache_destroy(dev_priv->requests);
        kmem_cache_destroy(dev_priv->vmas);
index 3b9cdb0907c21ea19a7aa7610f4519b1cfacf786..b3da056ea8f115df40e3d81cbb9d3c40b1c3f7e4 100644 (file)
@@ -704,7 +704,7 @@ static bool i915_guc_dequeue(struct intel_engine_cs *engine)
                rb_erase(&p->node, &engine->execlist_queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
-                       kfree(p);
+                       kmem_cache_free(engine->i915->priorities, p);
        }
 done:
        engine->execlist_first = rb;
index 626db6185a21e913025d59a7f8a925c627d3b750..8529746dd7cc9a8c04cacf5100e3c2f109a1d1b5 100644 (file)
@@ -499,7 +499,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
                rb_erase(&p->node, &engine->execlist_queue);
                INIT_LIST_HEAD(&p->requests);
                if (p->priority != I915_PRIORITY_NORMAL)
-                       kfree(p);
+                       kmem_cache_free(engine->i915->priorities, p);
        }
 done:
        engine->execlist_first = rb;
@@ -661,7 +661,7 @@ find_priolist:
        if (prio == I915_PRIORITY_NORMAL) {
                p = &engine->default_priolist;
        } else {
-               p = kmalloc(sizeof(*p), GFP_ATOMIC);
+               p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
                /* Convert an allocation failure to a priority bump */
                if (unlikely(!p)) {
                        prio = I915_PRIORITY_NORMAL; /* recurses just once */
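
The hunk above relies on a fallback worth spelling out: if the GFP_ATOMIC
allocation fails, the priority is bumped to I915_PRIORITY_NORMAL so the lookup
retries and lands on the engine's preallocated default_priolist. A simplified
sketch of that shape (the function name and the omitted rbtree insertion are
illustrative, not the exact driver code):

	static struct i915_priolist *
	lookup_priolist(struct intel_engine_cs *engine, int prio)
	{
		struct i915_priolist *p;

	find_priolist:
		if (prio == I915_PRIORITY_NORMAL) {
			/* Embedded in the engine; never needs allocation. */
			p = &engine->default_priolist;
		} else {
			p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
			if (unlikely(!p)) {
				/* Convert the allocation failure into a priority
				 * bump and retry; NORMAL always succeeds, so this
				 * recurses just once. */
				prio = I915_PRIORITY_NORMAL;
				goto find_priolist;
			}
			p->priority = prio;
			INIT_LIST_HEAD(&p->requests);
			/* ... insert p into engine->execlist_queue here ... */
		}
		return p;
	}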
index f321bdfe0b5bcbca96ddb942258ae80e7423e862..3d0e313497d08f33126d792798655a289b95571e 100644 (file)
@@ -74,6 +74,7 @@ static void mock_device_release(struct drm_device *dev)
 
        destroy_workqueue(i915->wq);
 
+       kmem_cache_destroy(i915->priorities);
        kmem_cache_destroy(i915->dependencies);
        kmem_cache_destroy(i915->requests);
        kmem_cache_destroy(i915->vmas);
@@ -186,12 +187,16 @@ struct drm_i915_private *mock_gem_device(void)
        if (!i915->dependencies)
                goto err_requests;
 
+       i915->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
+       if (!i915->priorities)
+               goto err_dependencies;
+
        mutex_lock(&i915->drm.struct_mutex);
        INIT_LIST_HEAD(&i915->gt.timelines);
        err = i915_gem_timeline_init__global(i915);
        if (err) {
                mutex_unlock(&i915->drm.struct_mutex);
-               goto err_dependencies;
+               goto err_priorities;
        }
 
        mock_init_ggtt(i915);
@@ -211,6 +216,8 @@ struct drm_i915_private *mock_gem_device(void)
 err_engine:
        for_each_engine(engine, i915, id)
                mock_engine_free(engine);
+err_priorities:
+       kmem_cache_destroy(i915->priorities);
 err_dependencies:
        kmem_cache_destroy(i915->dependencies);
 err_requests: