git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/commitdiff
kmemleak: powerpc: skip scanning holes in the .bss section
authorCatalin Marinas <catalin.marinas@arm.com>
Sat, 6 Apr 2019 01:38:49 +0000 (18:38 -0700)
committerKleber Sacilotto de Souza <kleber.souza@canonical.com>
Wed, 14 Aug 2019 09:18:49 +0000 (11:18 +0200)
BugLink: https://bugs.launchpad.net/bugs/1838459
[ Upstream commit 298a32b132087550d3fa80641ca58323c5dfd4d9 ]

Commit 2d4f567103ff ("KVM: PPC: Introduce kvm_tmp framework") adds
kvm_tmp[] into the .bss section and then frees the rest of the unused
space back to the page allocator.

kernel_init
  kvm_guest_init
    kvm_free_tmp
      free_reserved_area
        free_unref_page
          free_unref_page_prepare

With DEBUG_PAGEALLOC=y, those pages are unmapped from the kernel.  As a
result, a kmemleak scan will trigger a panic when it scans the .bss
section with unmapped pages.

This patch creates dedicated kmemleak objects for the .data, .bss and
potentially .data..ro_after_init sections to allow partial freeing via
the kmemleak_free_part() in the powerpc kvm_free_tmp() function.

Link: http://lkml.kernel.org/r/20190321171917.62049-1-catalin.marinas@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Qian Cai <cai@lca.pw>
Acked-by: Michael Ellerman <mpe@ellerman.id.au> (powerpc)
Tested-by: Qian Cai <cai@lca.pw>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Avi Kivity <avi@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/powerpc/kernel/kvm.c
mm/kmemleak.c

index 9ad37f827a975f1f2a7f165447f951c9e2aa8fa7..7b59cc853abf74b19398716b67b260670075170d 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/kvm_host.h>
 #include <linux/init.h>
 #include <linux/export.h>
+#include <linux/kmemleak.h>
 #include <linux/kvm_para.h>
 #include <linux/slab.h>
 #include <linux/of.h>
@@ -712,6 +713,12 @@ static void kvm_use_magic_page(void)
 
 static __init void kvm_free_tmp(void)
 {
+       /*
+        * Inform kmemleak about the hole in the .bss section since the
+        * corresponding pages will be unmapped with DEBUG_PAGEALLOC=y.
+        */
+       kmemleak_free_part(&kvm_tmp[kvm_tmp_index],
+                          ARRAY_SIZE(kvm_tmp) - kvm_tmp_index);
        free_reserved_area(&kvm_tmp[kvm_tmp_index],
                           &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
 }
index 235ef08ac37a3d47dd8082e6d43840a2d77fbfbb..478103d0b721876b69a1f2479c8fe1190ca18ff6 100644 (file)
@@ -1492,11 +1492,6 @@ static void kmemleak_scan(void)
        }
        rcu_read_unlock();
 
-       /* data/bss scanning */
-       scan_large_block(_sdata, _edata);
-       scan_large_block(__bss_start, __bss_stop);
-       scan_large_block(__start_ro_after_init, __end_ro_after_init);
-
 #ifdef CONFIG_SMP
        /* per-cpu sections scanning */
        for_each_possible_cpu(i)
@@ -2027,6 +2022,17 @@ void __init kmemleak_init(void)
        }
        local_irq_restore(flags);
 
+       /* register the data/bss sections */
+       create_object((unsigned long)_sdata, _edata - _sdata,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+                     KMEMLEAK_GREY, GFP_ATOMIC);
+       /* only register .data..ro_after_init if not within .data */
+       if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+               create_object((unsigned long)__start_ro_after_init,
+                             __end_ro_after_init - __start_ro_after_init,
+                             KMEMLEAK_GREY, GFP_ATOMIC);
+
        /*
         * This is the point where tracking allocations is safe. Automatic
         * scanning is started during the late initcall. Add the early logged