Merge branch 'kmemleak' of git://linux-arm.org/linux-2.6
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 18 Dec 2009 00:00:19 +0000 (16:00 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 18 Dec 2009 00:00:19 +0000 (16:00 -0800)
* 'kmemleak' of git://linux-arm.org/linux-2.6:
  kmemleak: fix kconfig for crc32 build error
  kmemleak: Reduce the false positives by checking for modified objects
  kmemleak: Show the age of an unreferenced object
  kmemleak: Release the object lock before calling put_object()
  kmemleak: Scan the _ftrace_events section in modules
  kmemleak: Simplify the kmemleak_scan_area() function prototype
  kmemleak: Do not use off-slab management with SLAB_NOLEAKTRACE

include/linux/kmemleak.h
kernel/module.c
lib/Kconfig.debug
mm/kmemleak.c
mm/slab.c

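The interface change running through most of these files is the simplified kmemleak_scan_area() prototype: callers now pass the address and size of the area to scan instead of an object pointer plus an offset/length pair into it. A minimal sketch of a caller before and after the change (struct foo and its fields are made up for illustration; the mm/slab.c hunk below shows the real equivalent for struct slab):

	#include <linux/slab.h>
	#include <linux/kmemleak.h>
	#include <linux/list.h>

	/* illustrative object: only 'list' holds pointers kmemleak must follow */
	struct foo {
		unsigned long cookie;
		struct list_head list;
	};

	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);

	/* old interface: base pointer plus offset/length into the object */
	kmemleak_scan_area(p, offsetof(struct foo, list),
			   sizeof(struct list_head), GFP_KERNEL);

	/* new interface: address and size of the scan area itself */
	kmemleak_scan_area(&p->list, sizeof(p->list), GFP_KERNEL);
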
index 3c7497d46ee9655fa86f7bb42f1637a023d6fd25..99d9a6766f7ec055feca4a25d7d5e4c89fd9b058 100644 (file)
--- a/include/linux/kmemleak.h
+++ b/include/linux/kmemleak.h
@@ -32,8 +32,7 @@ extern void kmemleak_padding(const void *ptr, unsigned long offset,
                             size_t size) __ref;
 extern void kmemleak_not_leak(const void *ptr) __ref;
 extern void kmemleak_ignore(const void *ptr) __ref;
-extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
-                              size_t length, gfp_t gfp) __ref;
+extern void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp) __ref;
 extern void kmemleak_no_scan(const void *ptr) __ref;
 
 static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
@@ -84,8 +83,7 @@ static inline void kmemleak_not_leak(const void *ptr)
 static inline void kmemleak_ignore(const void *ptr)
 {
 }
-static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
-                                     size_t length, gfp_t gfp)
+static inline void kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
 }
 static inline void kmemleak_erase(void **ptr)
index a65dc787a27bf6f28e4df980523820dacd1d14b3..e96b8ed1cb6aff09033114dd219157b84e8b23a3 100644 (file)
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1910,9 +1910,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
        unsigned int i;
 
        /* only scan the sections containing data */
-       kmemleak_scan_area(mod->module_core, (unsigned long)mod -
-                          (unsigned long)mod->module_core,
-                          sizeof(struct module), GFP_KERNEL);
+       kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
 
        for (i = 1; i < hdr->e_shnum; i++) {
                if (!(sechdrs[i].sh_flags & SHF_ALLOC))
@@ -1921,8 +1919,7 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
                    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
                        continue;
 
-               kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
-                                  (unsigned long)mod->module_core,
+               kmemleak_scan_area((void *)sechdrs[i].sh_addr,
                                   sechdrs[i].sh_size, GFP_KERNEL);
        }
 }
@@ -2250,6 +2247,12 @@ static noinline struct module *load_module(void __user *umod,
                                         "_ftrace_events",
                                         sizeof(*mod->trace_events),
                                         &mod->num_trace_events);
+       /*
+        * This section contains pointers to allocated objects in the trace
+        * code and not scanning it leads to false positives.
+        */
+       kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
+                          mod->num_trace_events, GFP_KERNEL);
 #endif
 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
        /* sechdrs[0].sh_size is always zero */
index 8cf9938dd147840d1dbc67166fa30fc22208bf78..25c3ed594c547966c1c291750d8adeb3b9e5674b 100644 (file)
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -360,6 +360,7 @@ config DEBUG_KMEMLEAK
        select DEBUG_FS if SYSFS
        select STACKTRACE if STACKTRACE_SUPPORT
        select KALLSYMS
+       select CRC32
        help
          Say Y here if you want to enable the memory leak
          detector. The memory allocation/freeing is traced in a way
index 13f33b3081ec2794afdae74d9071a88ec6aa9df3..5b069e4f5e485ac8f5906c4a0cad7dbdeef75f7e 100644 (file)
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -93,6 +93,7 @@
 #include <linux/nodemask.h>
 #include <linux/mm.h>
 #include <linux/workqueue.h>
+#include <linux/crc32.h>
 
 #include <asm/sections.h>
 #include <asm/processor.h>
 #define MSECS_MIN_AGE          5000    /* minimum object age for reporting */
 #define SECS_FIRST_SCAN                60      /* delay before the first scan */
 #define SECS_SCAN_WAIT         600     /* subsequent auto scanning delay */
-#define GRAY_LIST_PASSES       25      /* maximum number of gray list scans */
 #define MAX_SCAN_SIZE          4096    /* maximum size of a scanned block */
 
 #define BYTES_PER_POINTER      sizeof(void *)
 /* scanning area inside a memory block */
 struct kmemleak_scan_area {
        struct hlist_node node;
-       unsigned long offset;
-       size_t length;
+       unsigned long start;
+       size_t size;
 };
 
 #define KMEMLEAK_GREY  0
@@ -149,6 +149,8 @@ struct kmemleak_object {
        int min_count;
        /* the total number of pointers found pointing to this object */
        int count;
+       /* checksum for detecting modified objects */
+       u32 checksum;
        /* memory ranges to be scanned inside an object (empty for all) */
        struct hlist_head area_list;
        unsigned long trace[MAX_TRACE];
@@ -164,8 +166,6 @@ struct kmemleak_object {
 #define OBJECT_REPORTED                (1 << 1)
 /* flag set to not scan the object */
 #define OBJECT_NO_SCAN         (1 << 2)
-/* flag set on newly allocated objects */
-#define OBJECT_NEW             (1 << 3)
 
 /* number of bytes to print per line; must be 16 or 32 */
 #define HEX_ROW_SIZE           16
@@ -241,8 +241,6 @@ struct early_log {
        const void *ptr;                /* allocated/freed memory block */
        size_t size;                    /* memory block size */
        int min_count;                  /* minimum reference count */
-       unsigned long offset;           /* scan area offset */
-       size_t length;                  /* scan area length */
        unsigned long trace[MAX_TRACE]; /* stack trace */
        unsigned int trace_len;         /* stack trace length */
 };
@@ -323,11 +321,6 @@ static bool color_gray(const struct kmemleak_object *object)
                object->count >= object->min_count;
 }
 
-static bool color_black(const struct kmemleak_object *object)
-{
-       return object->min_count == KMEMLEAK_BLACK;
-}
-
 /*
  * Objects are considered unreferenced only if their color is white, they have
  * not be deleted and have a minimum age to avoid false positives caused by
@@ -335,7 +328,7 @@ static bool color_black(const struct kmemleak_object *object)
  */
 static bool unreferenced_object(struct kmemleak_object *object)
 {
-       return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
+       return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
                time_before_eq(object->jiffies + jiffies_min_age,
                               jiffies_last_scan);
 }
@@ -348,11 +341,13 @@ static void print_unreferenced(struct seq_file *seq,
                               struct kmemleak_object *object)
 {
        int i;
+       unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 
        seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
                   object->pointer, object->size);
-       seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
-                  object->comm, object->pid, object->jiffies);
+       seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
+                  object->comm, object->pid, object->jiffies,
+                  msecs_age / 1000, msecs_age % 1000);
        hex_dump_object(seq, object);
        seq_printf(seq, "  backtrace:\n");
 
@@ -381,6 +376,7 @@ static void dump_object_info(struct kmemleak_object *object)
        pr_notice("  min_count = %d\n", object->min_count);
        pr_notice("  count = %d\n", object->count);
        pr_notice("  flags = 0x%lx\n", object->flags);
+       pr_notice("  checksum = %d\n", object->checksum);
        pr_notice("  backtrace:\n");
        print_stack_trace(&trace, 4);
 }
@@ -522,12 +518,13 @@ static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
        INIT_HLIST_HEAD(&object->area_list);
        spin_lock_init(&object->lock);
        atomic_set(&object->use_count, 1);
-       object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
+       object->flags = OBJECT_ALLOCATED;
        object->pointer = ptr;
        object->size = size;
        object->min_count = min_count;
-       object->count = -1;                     /* no color initially */
+       object->count = 0;                      /* white color initially */
        object->jiffies = jiffies;
+       object->checksum = 0;
 
        /* task information */
        if (in_irq()) {
@@ -720,14 +717,13 @@ static void make_black_object(unsigned long ptr)
  * Add a scanning area to the object. If at least one such area is added,
  * kmemleak will only scan these ranges rather than the whole memory block.
  */
-static void add_scan_area(unsigned long ptr, unsigned long offset,
-                         size_t length, gfp_t gfp)
+static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 {
        unsigned long flags;
        struct kmemleak_object *object;
        struct kmemleak_scan_area *area;
 
-       object = find_and_get_object(ptr, 0);
+       object = find_and_get_object(ptr, 1);
        if (!object) {
                kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
                              ptr);
@@ -741,7 +737,7 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
        }
 
        spin_lock_irqsave(&object->lock, flags);
-       if (offset + length > object->size) {
+       if (ptr + size > object->pointer + object->size) {
                kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
                dump_object_info(object);
                kmem_cache_free(scan_area_cache, area);
@@ -749,8 +745,8 @@ static void add_scan_area(unsigned long ptr, unsigned long offset,
        }
 
        INIT_HLIST_NODE(&area->node);
-       area->offset = offset;
-       area->length = length;
+       area->start = ptr;
+       area->size = size;
 
        hlist_add_head(&area->node, &object->area_list);
 out_unlock:
@@ -786,7 +782,7 @@ static void object_no_scan(unsigned long ptr)
  * processed later once kmemleak is fully initialized.
  */
 static void __init log_early(int op_type, const void *ptr, size_t size,
-                            int min_count, unsigned long offset, size_t length)
+                            int min_count)
 {
        unsigned long flags;
        struct early_log *log;
@@ -808,8 +804,6 @@ static void __init log_early(int op_type, const void *ptr, size_t size,
        log->ptr = ptr;
        log->size = size;
        log->min_count = min_count;
-       log->offset = offset;
-       log->length = length;
        if (op_type == KMEMLEAK_ALLOC)
                log->trace_len = __save_stack_trace(log->trace);
        crt_early_log++;
@@ -858,7 +852,7 @@ void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                create_object((unsigned long)ptr, size, min_count, gfp);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
+               log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 }
 EXPORT_SYMBOL_GPL(kmemleak_alloc);
 
@@ -873,7 +867,7 @@ void __ref kmemleak_free(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                delete_object_full((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_FREE, ptr, 0, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free);
 
@@ -888,7 +882,7 @@ void __ref kmemleak_free_part(const void *ptr, size_t size)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                delete_object_part((unsigned long)ptr, size);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
+               log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 }
 EXPORT_SYMBOL_GPL(kmemleak_free_part);
 
@@ -903,7 +897,7 @@ void __ref kmemleak_not_leak(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                make_gray_object((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_not_leak);
 
@@ -919,22 +913,21 @@ void __ref kmemleak_ignore(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                make_black_object((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_ignore);
 
 /*
  * Limit the range to be scanned in an allocated memory block.
  */
-void __ref kmemleak_scan_area(const void *ptr, unsigned long offset,
-                             size_t length, gfp_t gfp)
+void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
-               add_scan_area((unsigned long)ptr, offset, length, gfp);
+               add_scan_area((unsigned long)ptr, size, gfp);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
+               log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 }
 EXPORT_SYMBOL(kmemleak_scan_area);
 
@@ -948,10 +941,24 @@ void __ref kmemleak_no_scan(const void *ptr)
        if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
                object_no_scan((unsigned long)ptr);
        else if (atomic_read(&kmemleak_early_log))
-               log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
+               log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 }
 EXPORT_SYMBOL(kmemleak_no_scan);
 
+/*
+ * Update an object's checksum and return true if it was modified.
+ */
+static bool update_checksum(struct kmemleak_object *object)
+{
+       u32 old_csum = object->checksum;
+
+       if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
+               return false;
+
+       object->checksum = crc32(0, (void *)object->pointer, object->size);
+       return object->checksum != old_csum;
+}
+
 /*
  * Memory scanning is a long process and it needs to be interruptable. This
  * function checks whether such interrupt condition occured.
@@ -1031,11 +1038,14 @@ static void scan_block(void *_start, void *_end,
                 * added to the gray_list.
                 */
                object->count++;
-               if (color_gray(object))
+               if (color_gray(object)) {
                        list_add_tail(&object->gray_list, &gray_list);
-               else
-                       put_object(object);
+                       spin_unlock_irqrestore(&object->lock, flags);
+                       continue;
+               }
+
                spin_unlock_irqrestore(&object->lock, flags);
+               put_object(object);
        }
 }
 
@@ -1075,13 +1085,46 @@ static void scan_object(struct kmemleak_object *object)
                }
        } else
                hlist_for_each_entry(area, elem, &object->area_list, node)
-                       scan_block((void *)(object->pointer + area->offset),
-                                  (void *)(object->pointer + area->offset
-                                           + area->length), object, 0);
+                       scan_block((void *)area->start,
+                                  (void *)(area->start + area->size),
+                                  object, 0);
 out:
        spin_unlock_irqrestore(&object->lock, flags);
 }
 
+/*
+ * Scan the objects already referenced (gray objects). More objects will be
+ * referenced and, if there are no memory leaks, all the objects are scanned.
+ */
+static void scan_gray_list(void)
+{
+       struct kmemleak_object *object, *tmp;
+
+       /*
+        * The list traversal is safe for both tail additions and removals
+        * from inside the loop. The kmemleak objects cannot be freed from
+        * outside the loop because their use_count was incremented.
+        */
+       object = list_entry(gray_list.next, typeof(*object), gray_list);
+       while (&object->gray_list != &gray_list) {
+               cond_resched();
+
+               /* may add new objects to the list */
+               if (!scan_should_stop())
+                       scan_object(object);
+
+               tmp = list_entry(object->gray_list.next, typeof(*object),
+                                gray_list);
+
+               /* remove the object from the list and release it */
+               list_del(&object->gray_list);
+               put_object(object);
+
+               object = tmp;
+       }
+       WARN_ON(!list_empty(&gray_list));
+}
+
 /*
  * Scan data sections and all the referenced memory blocks allocated via the
  * kernel's standard allocators. This function must be called with the
@@ -1090,10 +1133,9 @@ out:
 static void kmemleak_scan(void)
 {
        unsigned long flags;
-       struct kmemleak_object *object, *tmp;
+       struct kmemleak_object *object;
        int i;
        int new_leaks = 0;
-       int gray_list_pass = 0;
 
        jiffies_last_scan = jiffies;
 
@@ -1114,7 +1156,6 @@ static void kmemleak_scan(void)
 #endif
                /* reset the reference count (whiten the object) */
                object->count = 0;
-               object->flags &= ~OBJECT_NEW;
                if (color_gray(object) && get_object(object))
                        list_add_tail(&object->gray_list, &gray_list);
 
@@ -1172,62 +1213,36 @@ static void kmemleak_scan(void)
 
        /*
         * Scan the objects already referenced from the sections scanned
-        * above. More objects will be referenced and, if there are no memory
-        * leaks, all the objects will be scanned. The list traversal is safe
-        * for both tail additions and removals from inside the loop. The
-        * kmemleak objects cannot be freed from outside the loop because their
-        * use_count was increased.
+        * above.
         */
-repeat:
-       object = list_entry(gray_list.next, typeof(*object), gray_list);
-       while (&object->gray_list != &gray_list) {
-               cond_resched();
-
-               /* may add new objects to the list */
-               if (!scan_should_stop())
-                       scan_object(object);
-
-               tmp = list_entry(object->gray_list.next, typeof(*object),
-                                gray_list);
-
-               /* remove the object from the list and release it */
-               list_del(&object->gray_list);
-               put_object(object);
-
-               object = tmp;
-       }
-
-       if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
-               goto scan_end;
+       scan_gray_list();
 
        /*
-        * Check for new objects allocated during this scanning and add them
-        * to the gray list.
+        * Check for new or unreferenced objects modified since the previous
+        * scan and color them gray until the next scan.
         */
        rcu_read_lock();
        list_for_each_entry_rcu(object, &object_list, object_list) {
                spin_lock_irqsave(&object->lock, flags);
-               if ((object->flags & OBJECT_NEW) && !color_black(object) &&
-                   get_object(object)) {
-                       object->flags &= ~OBJECT_NEW;
+               if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
+                   && update_checksum(object) && get_object(object)) {
+                       /* color it gray temporarily */
+                       object->count = object->min_count;
                        list_add_tail(&object->gray_list, &gray_list);
                }
                spin_unlock_irqrestore(&object->lock, flags);
        }
        rcu_read_unlock();
 
-       if (!list_empty(&gray_list))
-               goto repeat;
-
-scan_end:
-       WARN_ON(!list_empty(&gray_list));
+       /*
+        * Re-scan the gray list for modified unreferenced objects.
+        */
+       scan_gray_list();
 
        /*
-        * If scanning was stopped or new objects were being allocated at a
-        * higher rate than gray list scanning, do not report any new
-        * unreferenced objects.
+        * If scanning was stopped do not report any new unreferenced objects.
         */
-       if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
+       if (scan_should_stop())
                return;
 
        /*
@@ -1642,8 +1657,7 @@ void __init kmemleak_init(void)
                        kmemleak_ignore(log->ptr);
                        break;
                case KMEMLEAK_SCAN_AREA:
-                       kmemleak_scan_area(log->ptr, log->offset, log->length,
-                                          GFP_KERNEL);
+                       kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
                        break;
                case KMEMLEAK_NO_SCAN:
                        kmemleak_no_scan(log->ptr);
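The false-positive heuristic added to mm/kmemleak.c above only reports an object that stayed white (unreferenced) through a scan if its contents did not change since the previous scan; a modified object is colored gray for one more cycle instead. A toy userspace sketch of that decision follows, with zlib's crc32() standing in for the kernel's crc32() (not kernel code, names are illustrative; build with -lz):

	#include <stdbool.h>
	#include <stdio.h>
	#include <zlib.h>

	struct object {
		const unsigned char *ptr;
		size_t size;
		unsigned long checksum;	/* CRC recorded by the previous scan */
	};

	/* Recompute the checksum; return true if the object was modified. */
	static bool update_checksum(struct object *obj)
	{
		unsigned long old = obj->checksum;

		obj->checksum = crc32(0L, obj->ptr, obj->size);
		return obj->checksum != old;
	}

	int main(void)
	{
		unsigned char buf[16] = { 0 };
		struct object obj = { .ptr = buf, .size = sizeof(buf), .checksum = 0 };

		update_checksum(&obj);	/* first scan: record the CRC */
		buf[0] = 0xff;		/* object modified between scans */

		if (update_checksum(&obj))
			printf("modified since last scan: defer reporting\n");
		else
			printf("stable and unreferenced: report as a leak\n");
		return 0;
	}
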
index 3f4822938f4605f4e6e1cf55f996f812332cd4a6..e17cc2c337b8b6d2794a1a92e0117a8dec16bf0a 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,9 +2275,11 @@ kmem_cache_create (const char *name, size_t size, size_t align,
        /*
         * Determine if the slab management is 'on' or 'off' slab.
         * (bootstrapping cannot cope with offslab caches so don't do
-        * it too early on.)
+        * it too early on. Always use on-slab management when
+        * SLAB_NOLEAKTRACE to avoid recursive calls into kmemleak)
         */
-       if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
+       if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
+           !(flags & SLAB_NOLEAKTRACE))
                /*
                 * Size is large, assume best to place the slab management obj
                 * off-slab (should allow better packing of objs).
@@ -2596,8 +2598,8 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
                 * kmemleak does not treat the ->s_mem pointer as a reference
                 * to the object. Otherwise we will not report the leak.
                 */
-               kmemleak_scan_area(slabp, offsetof(struct slab, list),
-                                  sizeof(struct list_head), local_flags);
+               kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
+                                  local_flags);
                if (!slabp)
                        return NULL;
        } else {