git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blobdiff - mm/slub.c
slub: move synchronize_sched out of slab_mutex on shrink
[mirror_ubuntu-hirsute-kernel.git] / mm / slub.c
index 9adae58462f8191b22659b1aa438ec637f6fc765..4a861f265cd70df7e26194159b6562d4d60b07d6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -194,10 +194,6 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
 #define __OBJECT_POISON                0x80000000UL /* Poison object */
 #define __CMPXCHG_DOUBLE       0x40000000UL /* Use cmpxchg_double */
 
-#ifdef CONFIG_SMP
-static struct notifier_block slab_notifier;
-#endif
-
 /*
  * Tracking user of a slab.
  */
@@ -2304,6 +2300,25 @@ static void flush_all(struct kmem_cache *s)
        on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
 }
 
+/*
+ * Use the cpu hotplug state machine to ensure that the cpu slabs
+ * are flushed when necessary.
+ */
+static int slub_cpu_dead(unsigned int cpu)
+{
+       struct kmem_cache *s;
+       unsigned long flags;
+
+       mutex_lock(&slab_mutex);
+       list_for_each_entry(s, &slab_caches, list) {
+               local_irq_save(flags);
+               __flush_cpu_slab(s, cpu);
+               local_irq_restore(flags);
+       }
+       mutex_unlock(&slab_mutex);
+       return 0;
+}
+
 /*
  * Check if the objects in a per cpu structure fit numa
  * locality expectations.
@@ -3868,7 +3883,7 @@ EXPORT_SYMBOL(kfree);
  * being allocated from last increasing the chance that the last objects
  * are freed in them.
  */
-int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
+int __kmem_cache_shrink(struct kmem_cache *s)
 {
        int node;
        int i;
@@ -3880,21 +3895,6 @@ int __kmem_cache_shrink(struct kmem_cache *s, bool deactivate)
        unsigned long flags;
        int ret = 0;
 
-       if (deactivate) {
-               /*
-                * Disable empty slabs caching. Used to avoid pinning offline
-                * memory cgroups by kmem pages that can be freed.
-                */
-               s->cpu_partial = 0;
-               s->min_partial = 0;
-
-               /*
-                * s->cpu_partial is checked locklessly (see put_cpu_partial),
-                * so we have to make sure the change is visible.
-                */
-               synchronize_sched();
-       }
-
        flush_all(s);
        for_each_kmem_cache_node(s, node, n) {
                INIT_LIST_HEAD(&discard);
@@ -3951,7 +3951,7 @@ static int slab_mem_going_offline_callback(void *arg)
 
        mutex_lock(&slab_mutex);
        list_for_each_entry(s, &slab_caches, list)
-               __kmem_cache_shrink(s, false);
+               __kmem_cache_shrink(s);
        mutex_unlock(&slab_mutex);
 
        return 0;
@@ -4144,9 +4144,8 @@ void __init kmem_cache_init(void)
        /* Setup random freelists for each cache */
        init_freelist_randomization();
 
-#ifdef CONFIG_SMP
-       register_cpu_notifier(&slab_notifier);
-#endif
+       cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
+                                 slub_cpu_dead);
 
        pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
                cache_line_size(),
@@ -4210,43 +4209,6 @@ int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
        return err;
 }
 
-#ifdef CONFIG_SMP
-/*
- * Use the cpu notifier to insure that the cpu slabs are flushed when
- * necessary.
- */
-static int slab_cpuup_callback(struct notifier_block *nfb,
-               unsigned long action, void *hcpu)
-{
-       long cpu = (long)hcpu;
-       struct kmem_cache *s;
-       unsigned long flags;
-
-       switch (action) {
-       case CPU_UP_CANCELED:
-       case CPU_UP_CANCELED_FROZEN:
-       case CPU_DEAD:
-       case CPU_DEAD_FROZEN:
-               mutex_lock(&slab_mutex);
-               list_for_each_entry(s, &slab_caches, list) {
-                       local_irq_save(flags);
-                       __flush_cpu_slab(s, cpu);
-                       local_irq_restore(flags);
-               }
-               mutex_unlock(&slab_mutex);
-               break;
-       default:
-               break;
-       }
-       return NOTIFY_OK;
-}
-
-static struct notifier_block slab_notifier = {
-       .notifier_call = slab_cpuup_callback
-};
-
-#endif
-
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
 {
        struct kmem_cache *s;