// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a memory scan when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer.
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * object.
 */
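/*
 * Illustrative sketch (editorial note, not part of the original header):
 * the rules above imply the canonical look-up pattern implemented by
 * find_and_get_object() below:
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))
 *		object = NULL;		(use_count had already reached 0)
 *	rcu_read_unlock();
 *	...use the metadata...
 *	put_object(object);		(may schedule the RCU freeing)
 */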
#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>
/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
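/*
 * Editorial example (assumption, not in the original source): the mask
 * above clamps a caller's gfp to its GFP_KERNEL/GFP_ATOMIC bits and adds
 * the no-retry/no-warn flags, e.g.
 *
 *	gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * so kmemleak's own metadata allocations never retry aggressively and
 * never warn on failure.
 */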
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1
/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};
/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2
/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);
/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)
static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}
/*
 * Printing of the objects hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}
/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
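/*
 * Worked example (editorial, not in the original source): a kmalloc'ed
 * block is registered with min_count == 1, so it starts white and only
 * turns gray once a scan finds at least one pointer to it.
 * kmemleak_not_leak() repaints it with min_count == 0, making it always
 * gray (scanned, never reported); kmemleak_ignore() paints it with
 * min_count == KMEMLEAK_BLACK, so neither predicate above matches and the
 * block is neither scanned nor reported.
 */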
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}
/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}
/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}
/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}
/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	write_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}
/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	write_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);
}
/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}
/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}
/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}
/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}
/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}
/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}
/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}
/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}
static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}
/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}
/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references to
 * it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}
/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
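/*
 * Usage sketch (editorial; the real call sites live in the slab and other
 * allocators, not in this file): a tracked allocation is typically paired as
 *
 *	ptr = kmalloc(size, gfp);	->	kmemleak_alloc(ptr, size, 1, gfp);
 *	kfree(ptr);			->	kmemleak_free(ptr);
 *
 * with min_count == 1 meaning "report this block if fewer than one
 * reference to it is found while scanning".
 */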
/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);
/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);
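/*
 * Illustrative example (editorial; 'base', 'keep' and 'total' are made-up
 * names): freeing the tail of a registered boot-time block with
 *
 *	kmemleak_free_part(base + keep, total - keep);
 *
 * makes delete_object_part() drop the old metadata and re-create it for
 * the surviving [base, base + keep) range only.
 */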
/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);
/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);
/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
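/*
 * Illustrative example (editorial; 'obj' and 'ptr_table' are hypothetical
 * names): an object that keeps pointers only in one member can restrict
 * scanning to that member, so stray non-pointer data in the rest of the
 * block cannot keep leaked objects alive:
 *
 *	kmemleak_scan_area(&obj->ptr_table, sizeof(obj->ptr_table), GFP_KERNEL);
 */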
/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);
/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address if the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);
/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);
/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);
/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}
/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}
/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}
/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			spin_unlock(&object->lock);
		}
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}
/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}
/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}
/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this node */
			if (page_to_nid(page) != i)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		read_lock(&tasklist_lock);
		do_each_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		} while_each_thread(g, p);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}
/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}
/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}
/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}
/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented. The function triggers
 * a memory scanning when the pos argument points to the first position.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}
/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}
/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}
/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}
static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}
static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}
/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);
/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  stop the automatic scanning)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = kstrtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
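/*
 * Typical user-space interaction with the commands parsed above (these
 * commands are documented in Documentation/dev-tools/kmemleak.rst):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 *	# echo clear > /sys/kernel/debug/kmemleak
 */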
static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
}
/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no previous scan thread (otherwise, kmemleak may still have some useful
 * information on memory leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}
/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}
/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);