kmemcheck: stop using GFP_NOTRACK and SLAB_NOTRACK
author Levin, Alexander (Sasha Levin) <alexander.levin@verizon.com>
Thu, 16 Nov 2017 01:35:54 +0000 (17:35 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 16 Nov 2017 02:21:04 +0000 (18:21 -0800)
Convert all allocations that used a NOTRACK flag to stop using it.

Link: http://lkml.kernel.org/r/20171007030159.22241-3-alexander.levin@verizon.com
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Tim Hansen <devtimhansen@gmail.com>
Cc: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
23 files changed:
arch/arm/include/asm/pgalloc.h
arch/arm64/include/asm/pgalloc.h
arch/powerpc/include/asm/pgalloc.h
arch/sh/kernel/dwarf.c
arch/sh/kernel/process.c
arch/sparc/mm/init_64.c
arch/unicore32/include/asm/pgalloc.h
arch/x86/kernel/espfix_64.c
arch/x86/mm/init.c
arch/x86/mm/init_64.c
arch/x86/mm/pageattr.c
arch/x86/mm/pgtable.c
arch/x86/platform/efi/efi_64.c
crypto/xor.c
include/linux/thread_info.h
init/do_mounts.c
kernel/fork.c
kernel/signal.c
mm/kmemcheck.c
mm/slab.c
mm/slab.h
mm/slab_common.c
mm/slub.c

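The change itself is purely mechanical: each call site drops the NOTRACK bit and keeps everything else. A minimal before/after sketch of the two patterns touched below (the cache name and struct are hypothetical, not taken from any file in this commit):

	/* before: allocations annotated so kmemcheck would not track them */
	buf = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
	cachep = kmem_cache_create("example_cache", sizeof(struct example),
				   0, SLAB_PANIC | SLAB_NOTRACK, NULL);

	/* after: the NOTRACK flags are gone; sizes, alignment and the
	 * remaining flags are untouched */
	buf = (void *)__get_free_pages(GFP_KERNEL, 0);
	cachep = kmem_cache_create("example_cache", sizeof(struct example),
				   0, SLAB_PANIC, NULL);
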
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index b2902a5cd780fdbeaba81b7e4e7f9b4a610350be..2d7344f0e2085b2a72da34f04fc3264e88834b3a 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -57,7 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
 extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_ZERO)
 
 static inline void clean_pte_table(pte_t *pte)
 {
diff --git a/arch/arm64/include/asm/pgalloc.h b/arch/arm64/include/asm/pgalloc.h
index d25f4f137c2aa1c758fa614af40361bfada50aef..5ca6a573a701249d87f080a24ab790b87058bee8 100644
--- a/arch/arm64/include/asm/pgalloc.h
+++ b/arch/arm64/include/asm/pgalloc.h
@@ -26,7 +26,7 @@
 
 #define check_pgt_cache()              do { } while (0)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_ZERO)
 #define PGD_SIZE       (PTRS_PER_PGD * sizeof(pgd_t))
 
 #if CONFIG_PGTABLE_LEVELS > 2
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h
index a14203c005f1a47608ec5f842795d3bb3fbd42bd..e11f03007b5753dfd75753651c22f27d56559957 100644
--- a/arch/powerpc/include/asm/pgalloc.h
+++ b/arch/powerpc/include/asm/pgalloc.h
@@ -18,7 +18,7 @@ static inline gfp_t pgtable_gfp_flags(struct mm_struct *mm, gfp_t gfp)
 }
 #endif /* MODULE */
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
 
 #ifdef CONFIG_PPC_BOOK3S
 #include <asm/book3s/pgalloc.h>
diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c
index e1d751ae2498af3a2dd00b9f4fe8c5f4935065fe..1a2526676a8729b08b87c3955ed7ba007c5e531c 100644
--- a/arch/sh/kernel/dwarf.c
+++ b/arch/sh/kernel/dwarf.c
@@ -1172,11 +1172,11 @@ static int __init dwarf_unwinder_init(void)
 
        dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
                        sizeof(struct dwarf_frame), 0,
-                       SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+                       SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
 
        dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
                        sizeof(struct dwarf_reg), 0,
-                       SLAB_PANIC | SLAB_HWCACHE_ALIGN | SLAB_NOTRACK, NULL);
+                       SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
 
        dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
                                                    dwarf_frame_cachep);
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index b2d9963d5978ba3fb75ba3c8884a11f38d0b65f3..68b1a67533cea1b4c54965af7991cd0cef224c17 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -59,7 +59,7 @@ void arch_task_cache_init(void)
 
        task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
                                               __alignof__(union thread_xstate),
-                                              SLAB_PANIC | SLAB_NOTRACK, NULL);
+                                              SLAB_PANIC, NULL);
 }
 
 #ifdef CONFIG_SH_FPU_EMU
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 61bdc1270d195e551d190977b03e1e9fb604ec75..2de22d703076e22a16f12e33cbc672f2c64beb48 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2927,7 +2927,7 @@ void __flush_tlb_all(void)
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                            unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        pte_t *pte = NULL;
 
        if (page)
@@ -2939,7 +2939,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
 pgtable_t pte_alloc_one(struct mm_struct *mm,
                        unsigned long address)
 {
-       struct page *page = alloc_page(GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
+       struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);
        if (!page)
                return NULL;
        if (!pgtable_page_ctor(page)) {
diff --git a/arch/unicore32/include/asm/pgalloc.h b/arch/unicore32/include/asm/pgalloc.h
index 26775793c204e77d09f6ed1e62a3edab97aa76a5..f0fdb268f8f2efd07de226976bc26d231b2145fa 100644
--- a/arch/unicore32/include/asm/pgalloc.h
+++ b/arch/unicore32/include/asm/pgalloc.h
@@ -28,7 +28,7 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
 #define pgd_alloc(mm)                  get_pgd_slow(mm)
 #define pgd_free(mm, pgd)              free_pgd_slow(mm, pgd)
 
-#define PGALLOC_GFP    (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP    (GFP_KERNEL | __GFP_ZERO)
 
 /*
  * Allocate one PTE table.
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 7d7715dde901539c3358e90bd0f449b04fe20700..e5ec3cafa72ea1b8aaccc2d4b5e2a3fc6f1cc557 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -57,7 +57,7 @@
 # error "Need more virtual address space for the ESPFIX hack"
 #endif
 
-#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
 
 /* This contains the *bottom* address of the espfix stack */
 DEFINE_PER_CPU_READ_MOSTLY(unsigned long, espfix_stack);
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a22c2b95e5133919e839d3c7a7a33b82b17e629c..ef94620ceb8a32baeb6db02e28bd5247419e9158 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -92,8 +92,7 @@ __ref void *alloc_low_pages(unsigned int num)
                unsigned int order;
 
                order = get_order((unsigned long)num << PAGE_SHIFT);
-               return (void *)__get_free_pages(GFP_ATOMIC | __GFP_NOTRACK |
-                                               __GFP_ZERO, order);
+               return (void *)__get_free_pages(GFP_ATOMIC | __GFP_ZERO, order);
        }
 
        if ((pgt_buf_end + num) > pgt_buf_top || !can_use_brk_pgt) {
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index adcea90a2046e91aee1d4693e9c505dbcab1771c..5fa3a58b5d7865e50329b8c1e93573a6f7fc1db0 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -184,7 +184,7 @@ static __ref void *spp_getpage(void)
        void *ptr;
 
        if (after_bootmem)
-               ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
+               ptr = (void *) get_zeroed_page(GFP_ATOMIC);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 3fe68483463ca55fb2445dcdfad20c291884eebf..85cf12219dea4a679158c7e6a8ea72be8e93ff2a 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -753,7 +753,7 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
 
        if (!debug_pagealloc_enabled())
                spin_unlock(&cpa_lock);
-       base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
+       base = alloc_pages(GFP_KERNEL, 0);
        if (!debug_pagealloc_enabled())
                spin_lock(&cpa_lock);
        if (!base)
@@ -904,7 +904,7 @@ static void unmap_pud_range(p4d_t *p4d, unsigned long start, unsigned long end)
 
 static int alloc_pte_page(pmd_t *pmd)
 {
-       pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+       pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL);
        if (!pte)
                return -1;
 
@@ -914,7 +914,7 @@ static int alloc_pte_page(pmd_t *pmd)
 
 static int alloc_pmd_page(pud_t *pud)
 {
-       pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+       pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
        if (!pmd)
                return -1;
 
@@ -1120,7 +1120,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
        pgd_entry = cpa->pgd + pgd_index(addr);
 
        if (pgd_none(*pgd_entry)) {
-               p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+               p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
                if (!p4d)
                        return -1;
 
@@ -1132,7 +1132,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
         */
        p4d = p4d_offset(pgd_entry, addr);
        if (p4d_none(*p4d)) {
-               pud = (pud_t *)get_zeroed_page(GFP_KERNEL | __GFP_NOTRACK);
+               pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
                if (!pud)
                        return -1;
 
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 17ebc5a978ccd92b001e99adf8e9a59e63a7ff06..96d456a94b0342eb967f918e4233498ac24e4349 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -7,7 +7,7 @@
 #include <asm/fixmap.h>
 #include <asm/mtrr.h>
 
-#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | __GFP_ZERO)
+#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 
 #ifdef CONFIG_HIGHPTE
 #define PGALLOC_USER_GFP __GFP_HIGHMEM
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 9e4ee5b04b2d40067196c21b03ef7de84884d653..6a151ce70e865caadde95c859855c4b63283ad4b 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -207,7 +207,7 @@ int __init efi_alloc_page_tables(void)
        if (efi_enabled(EFI_OLD_MEMMAP))
                return 0;
 
-       gfp_mask = GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO;
+       gfp_mask = GFP_KERNEL | __GFP_ZERO;
        efi_pgd = (pgd_t *)__get_free_page(gfp_mask);
        if (!efi_pgd)
                return -ENOMEM;
diff --git a/crypto/xor.c b/crypto/xor.c
index 263af9fb45ea281c94c41d5698d4d264d489bba9..bce9fe7af40ad44a68056d35d7d2e377bd3966f1 100644
--- a/crypto/xor.c
+++ b/crypto/xor.c
@@ -122,12 +122,7 @@ calibrate_xor_blocks(void)
                goto out;
        }
 
-       /*
-        * Note: Since the memory is not actually used for _anything_ but to
-        * test the XOR speed, we don't really want kmemcheck to warn about
-        * reading uninitialized bytes here.
-        */
-       b1 = (void *) __get_free_pages(GFP_KERNEL | __GFP_NOTRACK, 2);
+       b1 = (void *) __get_free_pages(GFP_KERNEL, 2);
        if (!b1) {
                printk(KERN_WARNING "xor: Yikes!  No memory available.\n");
                return -ENOMEM;
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 4bcdf00c110fb177c63fcb75c6c9f4923b681a01..34f053a150a969bf03805cd56ee7f5487e041642 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -44,10 +44,9 @@ enum {
 #endif
 
 #if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK)
-# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK | \
-                                __GFP_ZERO)
+# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
 #else
-# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT | __GFP_NOTRACK)
+# define THREADINFO_GFP                (GFP_KERNEL_ACCOUNT)
 #endif
 
 /*
diff --git a/init/do_mounts.c b/init/do_mounts.c
index f6d4dd764a52483f0fed18593f7c38e4d1cfc30a..7cf4f6dafd5f32031db0a1d28072fc93c0474f5a 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -380,8 +380,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
 
 void __init mount_block_root(char *name, int flags)
 {
-       struct page *page = alloc_page(GFP_KERNEL |
-                                       __GFP_NOTRACK_FALSE_POSITIVE);
+       struct page *page = alloc_page(GFP_KERNEL);
        char *fs_names = page_address(page);
        char *p;
 #ifdef CONFIG_BLOCK
diff --git a/kernel/fork.c b/kernel/fork.c
index 006dc5899a1a3e276131a3b3b745eaeed9682601..4e55eedba8d689cfc949ea92caa4111cdca22686 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -469,7 +469,7 @@ void __init fork_init(void)
        /* create a slab on which task_structs can be allocated */
        task_struct_cachep = kmem_cache_create("task_struct",
                        arch_task_struct_size, align,
-                       SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT, NULL);
+                       SLAB_PANIC|SLAB_ACCOUNT, NULL);
 #endif
 
        /* do the arch specific task caches init */
@@ -2205,18 +2205,18 @@ void __init proc_caches_init(void)
        sighand_cachep = kmem_cache_create("sighand_cache",
                        sizeof(struct sighand_struct), 0,
                        SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU|
-                       SLAB_NOTRACK|SLAB_ACCOUNT, sighand_ctor);
+                       SLAB_ACCOUNT, sighand_ctor);
        signal_cachep = kmem_cache_create("signal_cache",
                        sizeof(struct signal_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
        files_cachep = kmem_cache_create("files_cache",
                        sizeof(struct files_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
        fs_cachep = kmem_cache_create("fs_cache",
                        sizeof(struct fs_struct), 0,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
        /*
         * FIXME! The "sizeof(struct mm_struct)" currently includes the
@@ -2227,7 +2227,7 @@ void __init proc_caches_init(void)
         */
        mm_cachep = kmem_cache_create("mm_struct",
                        sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK|SLAB_ACCOUNT,
+                       SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT,
                        NULL);
        vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT);
        mmap_init();
diff --git a/kernel/signal.c b/kernel/signal.c
index 8dcd8825b2dedf3f16c108808192f8751aa19c96..aa1fb9f905dbc5d66194218df339c467f04b2f9a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1036,8 +1036,7 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
        else
                override_rlimit = 0;
 
-       q = __sigqueue_alloc(sig, t, GFP_ATOMIC | __GFP_NOTRACK_FALSE_POSITIVE,
-               override_rlimit);
+       q = __sigqueue_alloc(sig, t, GFP_ATOMIC, override_rlimit);
        if (q) {
                list_add_tail(&q->list, &pending->list);
                switch ((unsigned long) info) {
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
index 800d64b854ea6695eccf83dee42502bfca74cf45..b3a4d61d341c6a5b6d5187207774a11d2a040c63 100644
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -18,7 +18,7 @@ void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
         * With kmemcheck enabled, we need to allocate a memory area for the
         * shadow bits as well.
         */
-       shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
+       shadow = alloc_pages_node(node, flags, order);
        if (!shadow) {
                if (printk_ratelimit())
                        pr_err("kmemcheck: failed to allocate shadow bitmap\n");
diff --git a/mm/slab.c b/mm/slab.c
index c84365e9a591063402fef3a933eee6e32fba4ce5..183e996dde5ff37a8881e9c223a348de947bf890 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1410,7 +1410,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
 
        flags |= cachep->allocflags;
 
-       page = __alloc_pages_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
+       page = __alloc_pages_node(nodeid, flags, cachep->gfporder);
        if (!page) {
                slab_out_of_memory(cachep, flags, nodeid);
                return NULL;
diff --git a/mm/slab.h b/mm/slab.h
index e60a3d1d8f6fe76e765fece3983931e4549f276e..ad657ffa44e5db782c08ba53a71a6ccbc2089800 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -141,10 +141,10 @@ static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
 #if defined(CONFIG_SLAB)
 #define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
-                         SLAB_NOTRACK | SLAB_ACCOUNT)
+                         SLAB_ACCOUNT)
 #elif defined(CONFIG_SLUB)
 #define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
-                         SLAB_TEMPORARY | SLAB_NOTRACK | SLAB_ACCOUNT)
+                         SLAB_TEMPORARY | SLAB_ACCOUNT)
 #else
 #define SLAB_CACHE_FLAGS (0)
 #endif
@@ -163,7 +163,6 @@ static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
-                             SLAB_NOTRACK | \
                              SLAB_ACCOUNT)
 
 int __kmem_cache_shutdown(struct kmem_cache *);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 175e86637afdce92328fdc20c817e5c747989a44..c8cb36774ba1802c925c0b65609a15d7241d5aee 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -44,7 +44,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
                SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-                        SLAB_NOTRACK | SLAB_ACCOUNT)
+                        SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/slub.c b/mm/slub.c
index ac3b50b9abecec4c739c4758cc991324e5e4339a..91aa99b4b8368b6f75985a48f410b7599224ebee 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1436,8 +1436,6 @@ static inline struct page *alloc_slab_page(struct kmem_cache *s,
        struct page *page;
        int order = oo_order(oo);
 
-       flags |= __GFP_NOTRACK;
-
        if (node == NUMA_NO_NODE)
                page = alloc_pages(flags, order);
        else
@@ -3774,7 +3772,7 @@ static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
        struct page *page;
        void *ptr = NULL;
 
-       flags |= __GFP_COMP | __GFP_NOTRACK;
+       flags |= __GFP_COMP;
        page = alloc_pages_node(node, flags, get_order(size));
        if (page)
                ptr = page_address(page);