git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blobdiff - mm/slub.c
slub: move synchronize_sched out of slab_mutex on shrink
[mirror_ubuntu-hirsute-kernel.git] / mm / slub.c
index 2b3e740609e92e29a7b52ddb6df6b8f0b0897136..4a861f265cd70df7e26194159b6562d4d60b07d6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3883,7 +3883,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
        int node;
        int i;
@@ -3895,21 +3895,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
        unsigned long flags;
        int ret = 0;
 
-       if (deactivate) {
-               /*
-                * Disable empty slabs caching. Used to avoid pinning offline
-                * memory cgroups by kmem pages that can be freed.
-                */
-               s->cpu_partial = 0;
-               s->min_partial = 0;
-
-               /*
-                * s->cpu_partial is checked locklessly (see put_cpu_partial),
-                * so we have to make sure the change is visible.
-                */
-               synchronize_sched();
-       }
-
        flush_all(s);
        for_each_kmem_cache_node(s, node, n) {
                INIT_LIST_HEAD(&discard);
@@ -3966,7 +3951,7 @@ static int slab_mem_going_offline_callback(void *arg)
 
        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list)
-               __kmem_cache_shrink(s, false);
+               __kmem_cache_shrink(s);
        mutex_unlock(&slab_mutex);
 
        return 0;