slub: remove kmalloc under list_lock from list_slab_objects() V2
Author:     Christopher Lameter <cl@linux.com>
AuthorDate: Tue, 2 Jun 2020 04:45:53 +0000 (21:45 -0700)
Commit:     Linus Torvalds <torvalds@linux-foundation.org>
CommitDate: Tue, 2 Jun 2020 17:59:06 +0000 (10:59 -0700)
list_slab_objects() is called when a slab is destroyed with objects
still left on it, to list the remaining objects in the syslog.  This is
a pretty rare event.

And there we take the list_lock and call kmalloc while holding that
lock.  Allocating under a spinlock is problematic: a sleeping allocation
is a bug in atomic context, and even GFP_ATOMIC can fail and dips into
the atomic reserves.
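
To make that concrete, here is a minimal sketch of the anti-pattern
(hypothetical names such as my_node, my_report_leaks() and my_shutdown(),
not the actual slub code):

    #include <linux/bitmap.h>
    #include <linux/gfp.h>
    #include <linux/spinlock.h>

    struct my_node {
            spinlock_t list_lock;
    };

    static void my_report_leaks(unsigned int nr_objs)
    {
            /*
             * BAD: we are called with list_lock held and IRQs off, yet
             * GFP_KERNEL may sleep.  Sleeping in atomic context is a bug,
             * and switching to GFP_ATOMIC would only trade that bug for
             * a likelier allocation failure from the atomic reserves.
             */
            unsigned long *map = bitmap_zalloc(nr_objs, GFP_KERNEL);

            if (!map)
                    return;
            /* ... mark free objects in map, print the leaked rest ... */
            bitmap_free(map);
    }

    static void my_shutdown(struct my_node *n, unsigned int nr_objs)
    {
            spin_lock_irq(&n->list_lock);
            my_report_leaks(nr_objs);       /* allocates under the lock */
            spin_unlock_irq(&n->list_lock);
    }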

Perform the allocation in free_partial() before the list_lock is taken.
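
Distilled to the same hypothetical sketch as above (with
my_report_leaks() now taking the preallocated map and returning early
when it is NULL), the patch below moves the allocation in front of the
critical section, so the code under list_lock no longer allocates at
all:

    static void my_shutdown(struct my_node *n, unsigned int nr_objs)
    {
            /* allocate before taking the spinlock, free after it */
            unsigned long *map = bitmap_alloc(nr_objs, GFP_KERNEL);

            spin_lock_irq(&n->list_lock);
            my_report_leaks(nr_objs, map);  /* callee bails out on NULL */
            spin_unlock_irq(&n->list_lock);

            bitmap_free(map);               /* kfree-backed, NULL is ok */
    }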

Fixes: bbd7d57bfe852d9788bae5fb171c7edb4021d8ac ("slub: Potential stack overflow")
Signed-off-by: Christopher Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
Cc: Yu Zhao <yuzhao@google.com>
Link: http://lkml.kernel.org/r/alpine.DEB.2.21.2002031721250.1668@www.lameter.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/mm/slub.c b/mm/slub.c
index 2ae5580433af73be7a3852cbe67843eaec6b5844..3a76de69a2682d12284fea435161dfd832fd9494 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3766,12 +3766,14 @@ error:
 }
 
 static void list_slab_objects(struct kmem_cache *s, struct page *page,
-                                                       const char *text)
+                             const char *text, unsigned long *map)
 {
 #ifdef CONFIG_SLUB_DEBUG
        void *addr = page_address(page);
        void *p;
-       unsigned long *map;
+
+       if (!map)
+               return;
 
        slab_err(s, page, text, s->name);
        slab_lock(page);
@@ -3784,8 +3786,6 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
                        print_tracking(s, p);
                }
        }
-       put_map(map);
-
        slab_unlock(page);
 #endif
 }
@@ -3799,6 +3799,11 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 {
        LIST_HEAD(discard);
        struct page *page, *h;
+       unsigned long *map = NULL;
+
+#ifdef CONFIG_SLUB_DEBUG
+       map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
+#endif
 
        BUG_ON(irqs_disabled());
        spin_lock_irq(&n->list_lock);
@@ -3808,11 +3813,16 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
                        list_add(&page->slab_list, &discard);
                } else {
                        list_slab_objects(s, page,
-                       "Objects remaining in %s on __kmem_cache_shutdown()");
+                         "Objects remaining in %s on __kmem_cache_shutdown()",
+                         map);
                }
        }
        spin_unlock_irq(&n->list_lock);
 
+#ifdef CONFIG_SLUB_DEBUG
+       bitmap_free(map);
+#endif
+
        list_for_each_entry_safe(page, h, &discard, slab_list)
                discard_slab(s, page);
 }