mm, debug_pagealloc: don't rely on static keys too early
Author:     Vlastimil Babka <vbabka@suse.cz>
AuthorDate: Tue, 14 Jan 2020 00:29:20 +0000 (16:29 -0800)
Commit:     Khalid Elmously <khalid.elmously@canonical.com>
CommitDate: Fri, 14 Feb 2020 06:00:53 +0000 (01:00 -0500)
BugLink: https://bugs.launchpad.net/bugs/1862429
commit 8e57f8acbbd121ecfb0c9dc13b8b030f86c6bd3b upstream.

Commit 96a2b03f281d ("mm, debug_pagelloc: use static keys to enable
debugging") introduced a static key to reduce overhead when
debug_pagealloc is compiled in but not enabled.  It relied on the
assumption that jump_label_init() is called before parse_early_param(),
as in start_kernel(), so when the "debug_pagealloc=on" option is
parsed, it is safe to enable the static key.
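
For context, the pattern that 96a2b03f281d introduced looked roughly
like this (abridged from the lines this patch removes in the
mm/page_alloc.c hunk below); the catch is that static_branch_enable()
patches kernel text and is only safe once jump_label_init() has run:

    /* mm/page_alloc.c before this patch (abridged) */
    DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);

    static int __init early_debug_pagealloc(char *buf)
    {
            bool enable = false;

            if (kstrtobool(buf, &enable))
                    return -EINVAL;

            if (enable)
                    /* unsafe if jump_label_init() has not run yet */
                    static_branch_enable(&_debug_pagealloc_enabled);

            return 0;
    }
    early_param("debug_pagealloc", early_debug_pagealloc);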

However, it turns out that multiple architectures call
parse_early_param() earlier, from their setup_arch().  x86 also calls
jump_label_init() even earlier, so no issue was found while testing
the commit, but the same is not true for e.g. ppc64 and s390, where
the kernel would not boot with debug_pagealloc=on, as found by our QA.
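
A simplified sketch of the ordering involved (call placement as
described above; not a literal listing of init/main.c):

    start_kernel()
            setup_arch(&command_line);   /* ppc64/s390 call
                                            parse_early_param() in here */
            ...
            jump_label_init();           /* static key patching is safe
                                            only from this point on */
            parse_early_param();         /* returns immediately if an
                                            earlier call already ran */

As noted above, x86 calls jump_label_init() even earlier than its
early parameter parsing, so the unsafe window never opens there.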

To fix this without tricky changes to the init code of multiple
architectures, this patch partially reverts the static key conversion
from 96a2b03f281d.  Init-time and non-fastpath calls (such as in arch
code) of debug_pagealloc_enabled() will again test a simple bool
variable.  Fastpath mm code is converted to a new
debug_pagealloc_enabled_static() variant that relies on the static
key, which is enabled at a well-defined point in mm_init() where it's
guaranteed that jump_label_init() has been called, regardless of
architecture.
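
After the patch, the split in include/linux/mm.h reads roughly as
follows (abridged; the tail of the second helper falls outside the
hunk context shown below):

    /* Safe at any time, including early arch code: a plain bool test. */
    static inline bool debug_pagealloc_enabled(void)
    {
            return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
                    _debug_pagealloc_enabled_early;
    }

    /* Fast-path variant: valid once mm_init() has called
     * init_debug_pagealloc() to enable the static key. */
    static inline bool debug_pagealloc_enabled_static(void)
    {
            if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                    return false;

            return static_branch_unlikely(&_debug_pagealloc_enabled);
    }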

[sfr@canb.auug.org.au: export _debug_pagealloc_enabled_early]
Link: http://lkml.kernel.org/r/20200106164944.063ac07b@canb.auug.org.au
Link: http://lkml.kernel.org/r/20191219130612.23171-1-vbabka@suse.cz
Fixes: 96a2b03f281d ("mm, debug_pagelloc: use static keys to enable debugging")
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Qian Cai <cai@lca.pw>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
include/linux/mm.h
init/main.c
mm/page_alloc.c
mm/slab.c
mm/slub.c
mm/vmalloc.c

index ae92cd661955cc360dfc411e8d7e46862b4e32f3..dce15585562c6b53478a25ec7d71e32098029902 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2712,13 +2712,25 @@ static inline bool want_init_on_free(void)
               !page_poisoning_enabled();
 }
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern void init_debug_pagealloc(void);
 #else
-DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+static inline void init_debug_pagealloc(void) {}
 #endif
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
 
 static inline bool debug_pagealloc_enabled(void)
+{
+       return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+               _debug_pagealloc_enabled_early;
+}
+
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or when a
+ * false negative result is not harmful when called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
 {
        if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
                return false;
index 1e76f7dd0a809dbf43a9adc72fda649243948f45..0a5d83b9da077ec9229543fb49e7185a276ed931 100644
--- a/init/main.c
+++ b/init/main.c
@@ -553,6 +553,7 @@ static void __init mm_init(void)
         * bigger than MAX_ORDER unless SPARSEMEM.
         */
        page_ext_init_flatmem();
+       init_debug_pagealloc();
        report_meminit();
        mem_init();
        kmem_cache_init();
index 702a1d02fc62c5eb689f975d69628b098c21fee1..151a874a730e514a4d0861deb9c2fa448b90a546 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -693,34 +693,27 @@ void prep_compound_page(struct page *page, unsigned int order)
 #ifdef CONFIG_DEBUG_PAGEALLOC
 unsigned int _debug_guardpage_minorder;
 
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DEFINE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
-#else
+bool _debug_pagealloc_enabled_early __read_mostly
+                       = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
+EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
 DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
-#endif
 EXPORT_SYMBOL(_debug_pagealloc_enabled);
 
 DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);
 
 static int __init early_debug_pagealloc(char *buf)
 {
-       bool enable = false;
-
-       if (kstrtobool(buf, &enable))
-               return -EINVAL;
-
-       if (enable)
-               static_branch_enable(&_debug_pagealloc_enabled);
-
-       return 0;
+       return kstrtobool(buf, &_debug_pagealloc_enabled_early);
 }
 early_param("debug_pagealloc", early_debug_pagealloc);
 
-static void init_debug_guardpage(void)
+void init_debug_pagealloc(void)
 {
        if (!debug_pagealloc_enabled())
                return;
 
+       static_branch_enable(&_debug_pagealloc_enabled);
+
        if (!debug_guardpage_minorder())
                return;
 
@@ -1185,7 +1178,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
         */
        arch_free_page(page, order);
 
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 0);
 
        kasan_free_nondeferred_pages(page, order);
@@ -1206,7 +1199,7 @@ static bool free_pcp_prepare(struct page *page)
 
 static bool bulkfree_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_check(page);
        else
                return false;
@@ -1220,7 +1213,7 @@ static bool bulkfree_pcp_prepare(struct page *page)
  */
 static bool free_pcp_prepare(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return free_pages_prepare(page, 0, true);
        else
                return free_pages_prepare(page, 0, false);
@@ -1972,10 +1965,6 @@ void __init page_alloc_init_late(void)
 
        for_each_populated_zone(zone)
                set_zone_contiguous(zone);
-
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       init_debug_guardpage();
-#endif
 }
 
 #ifdef CONFIG_CMA
@@ -2105,7 +2094,7 @@ static inline bool free_pages_prezeroed(void)
  */
 static inline bool check_pcp_refill(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2127,7 +2116,7 @@ static inline bool check_pcp_refill(struct page *page)
 }
 static inline bool check_new_pcp(struct page *page)
 {
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                return check_new_page(page);
        else
                return false;
@@ -2154,7 +2143,7 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
        set_page_refcounted(page);
 
        arch_alloc_page(page, order);
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, 1 << order, 1);
        kasan_alloc_pages(page, order);
        kernel_poison_pages(page, 1 << order, 1);
index 9df370558e5d25fbfb073e58c666b72bcaf8a165..e33faa97a48920448c17d3be673e4cd6a89d11d1 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1415,7 +1415,7 @@ static void kmem_rcu_free(struct rcu_head *head)
 #if DEBUG
 static bool is_debug_pagealloc_cache(struct kmem_cache *cachep)
 {
-       if (debug_pagealloc_enabled() && OFF_SLAB(cachep) &&
+       if (debug_pagealloc_enabled_static() && OFF_SLAB(cachep) &&
                (cachep->size % PAGE_SIZE) == 0)
                return true;
 
@@ -2007,7 +2007,7 @@ int __kmem_cache_create(struct kmem_cache *cachep, slab_flags_t flags)
         * to check size >= 256. It guarantees that all necessary small
         * sized slab is initialized in current slab initialization sequence.
         */
-       if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
+       if (debug_pagealloc_enabled_static() && (flags & SLAB_POISON) &&
                size >= 256 && cachep->object_size > cache_line_size()) {
                if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
                        size_t tmp_size = ALIGN(size, PAGE_SIZE);
index f24ea152cdbb38a50cdc34c07fd674551e7d5308..dacf471e6afc41f5d86bd04765b980a5107c9d2f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -290,7 +290,7 @@ static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
        unsigned long freepointer_addr;
        void *p;
 
-       if (!debug_pagealloc_enabled())
+       if (!debug_pagealloc_enabled_static())
                return get_freepointer(s, object);
 
        freepointer_addr = (unsigned long)object + s->offset;
index 8170add2abc8cff33fa399b527b5ee1822fa2871..19d1951856b57d1b4ba51dbdbe66b8368875ca2f 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1341,7 +1341,7 @@ static void free_unmap_vmap_area(struct vmap_area *va)
 {
        flush_cache_vunmap(va->va_start, va->va_end);
        unmap_vmap_area(va);
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                flush_tlb_kernel_range(va->va_start, va->va_end);
 
        free_vmap_area_noflush(va);
@@ -1639,7 +1639,7 @@ static void vb_free(const void *addr, unsigned long size)
 
        vunmap_page_range((unsigned long)addr, (unsigned long)addr + size);
 
-       if (debug_pagealloc_enabled())
+       if (debug_pagealloc_enabled_static())
                flush_tlb_kernel_range((unsigned long)addr,
                                        (unsigned long)addr + size);