slab: use memcg_kmem_cache_wq for slab destruction operations
author     Tejun Heo <tj@kernel.org>
           Wed, 22 Feb 2017 23:41:36 +0000 (15:41 -0800)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 23 Feb 2017 00:41:27 +0000 (16:41 -0800)
If there's contention on slab_mutex, queueing the per-cache destruction
work item on the system_wq can unnecessarily create and tie up a lot of
kworkers.

Rename memcg_kmem_cache_create_wq to memcg_kmem_cache_wq, make it global, and
use that workqueue for the destruction work items too.  While at it, convert
the workqueue from an unbound workqueue to a per-cpu one with concurrency
limited to 1.  Per-cpu workqueues are generally preferable, and a concurrency
limit of 1 is safe enough here.

This was suggested by Joonsoo Kim.
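
For readers less familiar with the workqueue API, here is a minimal sketch of
the two allocation styles involved (the demo_* names and module wrapper are
illustrative, not part of the patch):

    #include <linux/workqueue.h>

    /* Unbound and ordered: at most one work item executes at a time,
     * system-wide, in strict queueing order. */
    static struct workqueue_struct *ordered_wq;

    /* Per-cpu with max_active == 1: at most one work item in flight
     * per CPU, which bounds how many kworkers the queue can tie up. */
    static struct workqueue_struct *percpu_wq;

    static int __init wq_demo_init(void)
    {
            ordered_wq = alloc_ordered_workqueue("demo_ordered", 0);
            percpu_wq  = alloc_workqueue("demo_percpu", 0, 1);
            if (!ordered_wq || !percpu_wq)
                    return -ENOMEM;   /* demo only; real code would clean up */
            return 0;
    }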

Link: http://lkml.kernel.org/r/20170117235411.9408-11-tj@kernel.org
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Jay Vana <jsvana@fb.com>
Acked-by: Vladimir Davydov <vdavydov@tarantool.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/memcontrol.h
mm/memcontrol.c
mm/slab_common.c

diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 9fcece9be85da21a61907d5f872ed6072bc24f1b..5af37730388074ff65d4e42143c40c2880b5aff7 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -830,6 +830,7 @@ void memcg_kmem_uncharge(struct page *page, int order);
 
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
 extern struct static_key_false memcg_kmem_enabled_key;
+extern struct workqueue_struct *memcg_kmem_cache_wq;
 
 extern int memcg_nr_cache_ids;
 void memcg_get_cache_ids(void);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 834d641dfa8c9649da0914fbd3c6045652df3f10..1fd6affcdde7cfdff2c768f99ead14c62624371f 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -317,6 +317,8 @@ void memcg_put_cache_ids(void)
 DEFINE_STATIC_KEY_FALSE(memcg_kmem_enabled_key);
 EXPORT_SYMBOL(memcg_kmem_enabled_key);
 
+struct workqueue_struct *memcg_kmem_cache_wq;
+
 #endif /* !CONFIG_SLOB */
 
 /**
@@ -2143,8 +2145,6 @@ struct memcg_kmem_cache_create_work {
        struct work_struct work;
 };
 
-static struct workqueue_struct *memcg_kmem_cache_create_wq;
-
 static void memcg_kmem_cache_create_func(struct work_struct *w)
 {
        struct memcg_kmem_cache_create_work *cw =
@@ -2176,7 +2176,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
        cw->cachep = cachep;
        INIT_WORK(&cw->work, memcg_kmem_cache_create_func);
 
-       queue_work(memcg_kmem_cache_create_wq, &cw->work);
+       queue_work(memcg_kmem_cache_wq, &cw->work);
 }
 
 static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
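
The create path above uses the standard workqueue idiom: embed a work_struct
in a per-request structure, recover it with container_of() in the handler, and
free it there.  A self-contained sketch under illustrative demo_* names:

    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct demo_work {
            void *payload;            /* data carried to the worker */
            struct work_struct work;  /* embedded work item */
    };

    static void demo_func(struct work_struct *w)
    {
            /* Recover the enclosing structure from the embedded member. */
            struct demo_work *dw = container_of(w, struct demo_work, work);

            /* ... blocking work runs here, outside the caller's context ... */
            kfree(dw);
    }

    static void demo_schedule(struct workqueue_struct *wq, void *payload)
    {
            struct demo_work *dw = kzalloc(sizeof(*dw), GFP_NOWAIT);

            if (!dw)
                    return;
            dw->payload = payload;
            INIT_WORK(&dw->work, demo_func);
            queue_work(wq, &dw->work);
    }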
@@ -5778,12 +5778,12 @@ static int __init mem_cgroup_init(void)
 #ifndef CONFIG_SLOB
        /*
         * Kmem cache creation is mostly done with the slab_mutex held,
-        * so use a special workqueue to avoid stalling all worker
-        * threads in case lots of cgroups are created simultaneously.
+        * so use a workqueue with limited concurrency to avoid stalling
+        * all worker threads in case lots of cgroups are created and
+        * destroyed simultaneously.
         */
-       memcg_kmem_cache_create_wq =
-               alloc_ordered_workqueue("memcg_kmem_cache_create", 0);
-       BUG_ON(!memcg_kmem_cache_create_wq);
+       memcg_kmem_cache_wq = alloc_workqueue("memcg_kmem_cache", 0, 1);
+       BUG_ON(!memcg_kmem_cache_wq);
 #endif
 
        cpuhp_setup_state_nocalls(CPUHP_MM_MEMCQ_DEAD, "mm/memctrl:dead", NULL,
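
For reference, the replaced alloc_ordered_workqueue() is itself only a thin
wrapper around alloc_workqueue(); roughly, from include/linux/workqueue.h of
this era:

    #define alloc_ordered_workqueue(fmt, flags, args...)                    \
            alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)

So the hunk above drops the WQ_UNBOUND | __WQ_ORDERED flags to get a normal
per-cpu workqueue while keeping max_active at 1.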
diff --git a/mm/slab_common.c b/mm/slab_common.c
index c549296c79811c17b5eb7344cf335821c14a70bc..23ff74e618388508e359a5317d93ae284778f779 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -659,7 +659,7 @@ static void kmemcg_deactivate_rcufn(struct rcu_head *head)
          * initialized earlier.
         */
        INIT_WORK(&s->memcg_params.deact_work, kmemcg_deactivate_workfn);
-       schedule_work(&s->memcg_params.deact_work);
+       queue_work(memcg_kmem_cache_wq, &s->memcg_params.deact_work);
 }
 
 /**
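
The replaced schedule_work() call is worth unpacking: it is simply
queue_work() on the shared system workqueue, which is why contended
destruction items could previously fan out across many kworkers.  From
include/linux/workqueue.h:

    static inline bool schedule_work(struct work_struct *work)
    {
            return queue_work(system_wq, work);
    }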