// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from the
 *   kmemleak_free() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

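/*
 * Illustrative nesting (a sketch of the order above, not an exhaustive list
 * of code paths): kmemleak_scan() runs under scan_mutex, scan_object() takes
 * object->lock, and scan_block() then takes kmemleak_lock before locking any
 * other object it finds pointers to, using SINGLE_DEPTH_NESTING to keep
 * lockdep happy.
 */
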
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

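/*
 * A note on the mask above: kmemleak's internal allocations keep only the
 * GFP_KERNEL/GFP_ATOMIC bits of the caller's flags and add __GFP_NORETRY and
 * __GFP_NOMEMALLOC so that tracing an allocation cannot recurse into
 * aggressive reclaim, plus __GFP_NOWARN since failures are handled
 * explicitly (e.g. by falling back to the memory pool below).
 */
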
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* protecting the access to object_list and object_tree_root */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on, will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* If there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to
 * be printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}

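/*
 * Worked example of the encoding above: a kmalloc'ed block starts with
 * min_count == 1 and count == 0, i.e. white (a leak candidate). Once a scan
 * finds one pointer to it, count == 1 >= min_count and the object turns
 * gray. An object passed to kmemleak_ignore() gets min_count ==
 * KMEMLEAK_BLACK and is neither scanned nor reported.
 */
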
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	stack_trace_print(object->trace, object->trace_len, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count has reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

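/*
 * The static mem_pool above covers metadata allocations made before
 * object_cache is created in kmemleak_init() and acts as an emergency
 * reserve when a slab allocation fails. Entries returned to the pool are
 * recycled via mem_pool_free_list rather than handed back to the slab
 * allocator.
 */
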
/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

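/*
 * Typical lookup pattern (a sketch mirroring the callers below, e.g.
 * paint_ptr() and object_no_scan()): pair find_and_get_object() with
 * put_object() so the metadata cannot be freed while in use:
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (object) {
 *		raw_spin_lock_irqsave(&object->lock, flags);
 *		... inspect or modify the metadata ...
 *		raw_spin_unlock_irqrestore(&object->lock, flags);
 *		put_object(object);
 *	}
 */
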
/*
 * Remove an object from the object_tree_root and object_list. Must be called
 * with the kmemleak_lock held _if_ kmemleak is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	return stack_trace_save(trace, MAX_TRACE, 2);
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	min_addr = min(min_addr, untagged_ptr);
	max_addr = max(max_addr, untagged_ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}

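/*
 * Example for the split above: partially freeing [ptr, ptr + size) from the
 * middle of an object covering [start, end) deletes the original object and
 * re-creates two objects, [start, ptr) and [ptr + size, end), both
 * inheriting the original min_count.
 */
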
static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

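/*
 * Illustration of the excess_ref mechanism: after kmemleak_vmalloc(), a
 * pointer found to the vm_struct also counts as a reference to area->addr,
 * so code that only keeps the vm_struct around (as free_thread_stack() does
 * for cached stacks) does not turn the vmalloc'ed block into a false
 * positive.
 */
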
/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such object will not be scanned by kmemleak but references to it
 * are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

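/*
 * Hooking sketch (illustrative; the real call sites live in the slab and
 * bootmem code): an allocator registers a freshly allocated block with
 *
 *	kmemleak_alloc(ptr, size, 1, gfp);
 *
 * where min_count == 1 means a single surviving pointer is enough to keep
 * the object from being reported as a leak.
 */
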
/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);

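/*
 * Usage sketch (hypothetical caller; the "ptrs" member is made up for the
 * example): a structure mixing pointers with a large pointer-free buffer can
 * restrict scanning to the region known to hold references:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_scan_area(&obj->ptrs, sizeof(obj->ptrs), GFP_KERNEL);
 */
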
/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object.
 *		See kmemleak_alloc()
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

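/*
 * How the checksum is used: kmemleak_scan() calls update_checksum() on white
 * objects after a full pass; an object whose contents changed since the
 * previous scan is temporarily colored gray and re-scanned before it can be
 * reported, filtering out objects that are still being initialized.
 */
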
/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object;
	int i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_online_node(i) {
		unsigned long start_pfn = node_start_pfn(i);
		unsigned long end_pfn = node_end_pfn(i);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this node */
			if (page_to_nid(page) != i)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

1585/*
1586 * Start the automatic memory scanning thread. This function must be called
4698c1f2 1587 * with the scan_mutex held.
3c7b4e6b 1588 */
7eb0d5e5 1589static void start_scan_thread(void)
3c7b4e6b
CM
1590{
1591 if (scan_thread)
1592 return;
1593 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1594 if (IS_ERR(scan_thread)) {
598d8091 1595 pr_warn("Failed to create the scan thread\n");
3c7b4e6b
CM
1596 scan_thread = NULL;
1597 }
1598}
1599
1600/*
914b6dff 1601 * Stop the automatic memory scanning thread.
3c7b4e6b 1602 */
7eb0d5e5 1603static void stop_scan_thread(void)
3c7b4e6b
CM
1604{
1605 if (scan_thread) {
1606 kthread_stop(scan_thread);
1607 scan_thread = NULL;
1608 }
1609}
1610
1611/*
1612 * Iterate over the object_list and return the first valid object at or after
 1613 * the required position with its use_count incremented. The scan_mutex is
 1614 * acquired here and held until kmemleak_seq_stop() releases it.
1615 */
1616static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1617{
1618 struct kmemleak_object *object;
1619 loff_t n = *pos;
b87324d0
CM
1620 int err;
1621
1622 err = mutex_lock_interruptible(&scan_mutex);
1623 if (err < 0)
1624 return ERR_PTR(err);
3c7b4e6b 1625
3c7b4e6b
CM
1626 rcu_read_lock();
1627 list_for_each_entry_rcu(object, &object_list, object_list) {
1628 if (n-- > 0)
1629 continue;
1630 if (get_object(object))
1631 goto out;
1632 }
1633 object = NULL;
1634out:
3c7b4e6b
CM
1635 return object;
1636}
1637
1638/*
1639 * Return the next object in the object_list. The function decrements the
1640 * use_count of the previous object and increases that of the next one.
1641 */
1642static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1643{
1644 struct kmemleak_object *prev_obj = v;
1645 struct kmemleak_object *next_obj = NULL;
58fac095 1646 struct kmemleak_object *obj = prev_obj;
3c7b4e6b
CM
1647
1648 ++(*pos);
3c7b4e6b 1649
58fac095 1650 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
52c3ce4e
CM
1651 if (get_object(obj)) {
1652 next_obj = obj;
3c7b4e6b 1653 break;
52c3ce4e 1654 }
3c7b4e6b 1655 }
288c857d 1656
3c7b4e6b
CM
1657 put_object(prev_obj);
1658 return next_obj;
1659}
1660
1661/*
1662 * Decrement the use_count of the last object required, if any.
1663 */
1664static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1665{
b87324d0
CM
1666 if (!IS_ERR(v)) {
1667 /*
1668 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1669 * waiting was interrupted, so only release it if !IS_ERR.
1670 */
f5886c7f 1671 rcu_read_unlock();
b87324d0
CM
1672 mutex_unlock(&scan_mutex);
1673 if (v)
1674 put_object(v);
1675 }
3c7b4e6b
CM
1676}
1677
1678/*
1679 * Print the information for an unreferenced object to the seq file.
1680 */
1681static int kmemleak_seq_show(struct seq_file *seq, void *v)
1682{
1683 struct kmemleak_object *object = v;
1684 unsigned long flags;
1685
8c96f1bc 1686 raw_spin_lock_irqsave(&object->lock, flags);
288c857d 1687 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
17bb9e0d 1688 print_unreferenced(seq, object);
8c96f1bc 1689 raw_spin_unlock_irqrestore(&object->lock, flags);
3c7b4e6b
CM
1690 return 0;
1691}
1692
1693static const struct seq_operations kmemleak_seq_ops = {
1694 .start = kmemleak_seq_start,
1695 .next = kmemleak_seq_next,
1696 .stop = kmemleak_seq_stop,
1697 .show = kmemleak_seq_show,
1698};
1699
1700static int kmemleak_open(struct inode *inode, struct file *file)
1701{
b87324d0 1702 return seq_open(file, &kmemleak_seq_ops);
3c7b4e6b
CM
1703}
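
As a usage illustration (not part of this file): reading the debugfs file walks the seq operations above - kmemleak_seq_start() takes scan_mutex, kmemleak_seq_next() steps through object_list and kmemleak_seq_show() prints each object already reported as unreferenced. A minimal userspace reader sketch, assuming debugfs is mounted at /sys/kernel/debug and the caller is allowed to open the file:

    #include <stdio.h>

    int main(void)
    {
        char line[256];
        FILE *f = fopen("/sys/kernel/debug/kmemleak", "r");

        if (!f) {
            perror("fopen /sys/kernel/debug/kmemleak");
            return 1;
        }
        /* Each hunk printed here comes from print_unreferenced() via
         * kmemleak_seq_show(). */
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }
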
1704
189d84ed
CM
1705static int dump_str_object_info(const char *str)
1706{
1707 unsigned long flags;
1708 struct kmemleak_object *object;
1709 unsigned long addr;
1710
dc053733
AP
1711 if (kstrtoul(str, 0, &addr))
1712 return -EINVAL;
189d84ed
CM
1713 object = find_and_get_object(addr, 0);
1714 if (!object) {
1715 pr_info("Unknown object at 0x%08lx\n", addr);
1716 return -EINVAL;
1717 }
1718
8c96f1bc 1719 raw_spin_lock_irqsave(&object->lock, flags);
189d84ed 1720 dump_object_info(object);
8c96f1bc 1721 raw_spin_unlock_irqrestore(&object->lock, flags);
189d84ed
CM
1722
1723 put_object(object);
1724 return 0;
1725}
1726
30b37101
LR
1727/*
 1728 * We use grey instead of black to ensure we can do future scans on the same
 1729 * objects. If these objects were painted black they would no longer be
 1730 * scanned, so references they hold to objects allocated later would never be
 1731 * found and we'd end up with false positives.
1732 */
1733static void kmemleak_clear(void)
1734{
1735 struct kmemleak_object *object;
1736 unsigned long flags;
1737
1738 rcu_read_lock();
1739 list_for_each_entry_rcu(object, &object_list, object_list) {
8c96f1bc 1740 raw_spin_lock_irqsave(&object->lock, flags);
30b37101
LR
1741 if ((object->flags & OBJECT_REPORTED) &&
1742 unreferenced_object(object))
a1084c87 1743 __paint_it(object, KMEMLEAK_GREY);
8c96f1bc 1744 raw_spin_unlock_irqrestore(&object->lock, flags);
30b37101
LR
1745 }
1746 rcu_read_unlock();
dc9b3f42
LZ
1747
1748 kmemleak_found_leaks = false;
30b37101
LR
1749}
1750
c89da70c
LZ
1751static void __kmemleak_do_cleanup(void);
1752
3c7b4e6b
CM
1753/*
1754 * File write operation to configure kmemleak at run-time. The following
1755 * commands can be written to the /sys/kernel/debug/kmemleak file:
1756 * off - disable kmemleak (irreversible)
1757 * stack=on - enable the task stacks scanning
 1758 * stack=off - disable the task stacks scanning
1759 * scan=on - start the automatic memory scanning thread
1760 * scan=off - stop the automatic memory scanning thread
1761 * scan=... - set the automatic memory scanning period in seconds (0 to
1762 * disable it)
4698c1f2 1763 * scan - trigger a memory scan
30b37101 1764 * clear - mark all currently reported unreferenced kmemleak objects as
c89da70c
LZ
 1765 * grey so they are no longer printed, or free all kmemleak objects
1766 * if kmemleak has been disabled.
189d84ed 1767 * dump=... - dump information about the object found at the given address
3c7b4e6b
CM
1768 */
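
Illustrative only (not part of kmemleak itself): a minimal userspace sketch of issuing one of the commands listed above by writing it to the debugfs file. It assumes debugfs is mounted at /sys/kernel/debug and sufficient privileges; the helper name kmemleak_command() is made up for the example.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* cmd is one of the strings documented above, e.g. "scan", "scan=600",
     * "stack=off", "clear" or "dump=<address>". */
    static int kmemleak_command(const char *cmd)
    {
        int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
        ssize_t n;

        if (fd < 0) {
            perror("open /sys/kernel/debug/kmemleak");
            return -1;
        }
        n = write(fd, cmd, strlen(cmd));
        close(fd);
        return n < 0 ? -1 : 0;
    }

    int main(void)
    {
        /* Trigger an immediate scan; the number of new suspected leaks is
         * also reported to the kernel log by kmemleak_scan(). */
        return kmemleak_command("scan");
    }
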
1769static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1770 size_t size, loff_t *ppos)
1771{
1772 char buf[64];
1773 int buf_size;
b87324d0 1774 int ret;
3c7b4e6b
CM
1775
1776 buf_size = min(size, (sizeof(buf) - 1));
1777 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1778 return -EFAULT;
1779 buf[buf_size] = 0;
1780
b87324d0
CM
1781 ret = mutex_lock_interruptible(&scan_mutex);
1782 if (ret < 0)
1783 return ret;
1784
c89da70c 1785 if (strncmp(buf, "clear", 5) == 0) {
8910ae89 1786 if (kmemleak_enabled)
c89da70c
LZ
1787 kmemleak_clear();
1788 else
1789 __kmemleak_do_cleanup();
1790 goto out;
1791 }
1792
8910ae89 1793 if (!kmemleak_enabled) {
4e4dfce2 1794 ret = -EPERM;
c89da70c
LZ
1795 goto out;
1796 }
1797
3c7b4e6b
CM
1798 if (strncmp(buf, "off", 3) == 0)
1799 kmemleak_disable();
1800 else if (strncmp(buf, "stack=on", 8) == 0)
1801 kmemleak_stack_scan = 1;
1802 else if (strncmp(buf, "stack=off", 9) == 0)
1803 kmemleak_stack_scan = 0;
1804 else if (strncmp(buf, "scan=on", 7) == 0)
1805 start_scan_thread();
1806 else if (strncmp(buf, "scan=off", 8) == 0)
1807 stop_scan_thread();
1808 else if (strncmp(buf, "scan=", 5) == 0) {
1809 unsigned long secs;
3c7b4e6b 1810
3dbb95f7 1811 ret = kstrtoul(buf + 5, 0, &secs);
b87324d0
CM
1812 if (ret < 0)
1813 goto out;
3c7b4e6b
CM
1814 stop_scan_thread();
1815 if (secs) {
1816 jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1817 start_scan_thread();
1818 }
4698c1f2
CM
1819 } else if (strncmp(buf, "scan", 4) == 0)
1820 kmemleak_scan();
189d84ed
CM
1821 else if (strncmp(buf, "dump=", 5) == 0)
1822 ret = dump_str_object_info(buf + 5);
4698c1f2 1823 else
b87324d0
CM
1824 ret = -EINVAL;
1825
1826out:
1827 mutex_unlock(&scan_mutex);
1828 if (ret < 0)
1829 return ret;
3c7b4e6b
CM
1830
1831 /* ignore the rest of the buffer, only one command at a time */
1832 *ppos += size;
1833 return size;
1834}
1835
1836static const struct file_operations kmemleak_fops = {
1837 .owner = THIS_MODULE,
1838 .open = kmemleak_open,
1839 .read = seq_read,
1840 .write = kmemleak_write,
1841 .llseek = seq_lseek,
5f3bf19a 1842 .release = seq_release,
3c7b4e6b
CM
1843};
1844
c89da70c
LZ
1845static void __kmemleak_do_cleanup(void)
1846{
2abd839a 1847 struct kmemleak_object *object, *tmp;
c89da70c 1848
2abd839a
CM
1849 /*
 1850 * Kmemleak has already been disabled, so there is no need for RCU list
 1851 * traversal or for holding kmemleak_lock.
1852 */
1853 list_for_each_entry_safe(object, tmp, &object_list, object_list) {
1854 __remove_object(object);
1855 __delete_object(object);
1856 }
c89da70c
LZ
1857}
1858
3c7b4e6b 1859/*
74341703
CM
 1860 * Stop the memory scanning thread and free the kmemleak internal objects if
 1861 * no memory leaks were found (otherwise, kmemleak may still have some useful
 1862 * information on memory leaks).
3c7b4e6b 1863 */
179a8100 1864static void kmemleak_do_cleanup(struct work_struct *work)
3c7b4e6b 1865{
3c7b4e6b 1866 stop_scan_thread();
3c7b4e6b 1867
914b6dff 1868 mutex_lock(&scan_mutex);
c5f3b1a5 1869 /*
914b6dff
VM
 1870 * Once the scan thread has been stopped, it is safe to no longer
 1871 * track object freeing. Ordering of the scan thread stopping and
 1872 * the memory accesses below is guaranteed by the kthread_stop()
 1873 * function.
c5f3b1a5
CM
1874 */
1875 kmemleak_free_enabled = 0;
914b6dff 1876 mutex_unlock(&scan_mutex);
c5f3b1a5 1877
c89da70c
LZ
1878 if (!kmemleak_found_leaks)
1879 __kmemleak_do_cleanup();
1880 else
756a025f 1881 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
3c7b4e6b
CM
1882}
1883
179a8100 1884static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
3c7b4e6b
CM
1885
1886/*
1887 * Disable kmemleak. No memory allocation/freeing will be traced once this
1888 * function is called. Disabling kmemleak is an irreversible operation.
1889 */
1890static void kmemleak_disable(void)
1891{
1892 /* atomically check whether it was already invoked */
8910ae89 1893 if (cmpxchg(&kmemleak_error, 0, 1))
3c7b4e6b
CM
1894 return;
1895
1896 /* stop any memory operation tracing */
8910ae89 1897 kmemleak_enabled = 0;
3c7b4e6b
CM
1898
1899 /* check whether it is too early for a kernel thread */
8910ae89 1900 if (kmemleak_initialized)
179a8100 1901 schedule_work(&cleanup_work);
c5f3b1a5
CM
1902 else
1903 kmemleak_free_enabled = 0;
3c7b4e6b
CM
1904
1905 pr_info("Kernel memory leak detector disabled\n");
1906}
1907
1908/*
1909 * Allow boot-time kmemleak disabling (enabled by default).
1910 */
8bd30c10 1911static int __init kmemleak_boot_config(char *str)
3c7b4e6b
CM
1912{
1913 if (!str)
1914 return -EINVAL;
1915 if (strcmp(str, "off") == 0)
1916 kmemleak_disable();
ab0155a2
JB
1917 else if (strcmp(str, "on") == 0)
1918 kmemleak_skip_disable = 1;
1919 else
3c7b4e6b
CM
1920 return -EINVAL;
1921 return 0;
1922}
1923early_param("kmemleak", kmemleak_boot_config);
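
For illustration, the parameter parsed above is passed on the kernel command line: booting with "kmemleak=off" calls kmemleak_disable(), so kmemleak_init() returns early because kmemleak_error is set, while "kmemleak=on" sets kmemleak_skip_disable so that a kernel built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF still comes up with kmemleak enabled. A hypothetical /etc/default/grub entry carrying it might look like:

    GRUB_CMDLINE_LINUX="kmemleak=on"
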
1924
1925/*
2030117d 1926 * Kmemleak initialization.
3c7b4e6b
CM
1927 */
1928void __init kmemleak_init(void)
1929{
ab0155a2
JB
1930#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1931 if (!kmemleak_skip_disable) {
1932 kmemleak_disable();
1933 return;
1934 }
1935#endif
1936
c5665868
CM
1937 if (kmemleak_error)
1938 return;
1939
3c7b4e6b
CM
1940 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1941 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1942
1943 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1944 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
3c7b4e6b 1945
298a32b1
CM
1946 /* register the data/bss sections */
1947 create_object((unsigned long)_sdata, _edata - _sdata,
1948 KMEMLEAK_GREY, GFP_ATOMIC);
1949 create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
1950 KMEMLEAK_GREY, GFP_ATOMIC);
1951 /* only register .data..ro_after_init if not within .data */
b0d14fc4 1952 if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
298a32b1
CM
1953 create_object((unsigned long)__start_ro_after_init,
1954 __end_ro_after_init - __start_ro_after_init,
1955 KMEMLEAK_GREY, GFP_ATOMIC);
3c7b4e6b
CM
1956}
1957
1958/*
1959 * Late initialization function.
1960 */
1961static int __init kmemleak_late_init(void)
1962{
8910ae89 1963 kmemleak_initialized = 1;
3c7b4e6b 1964
282401df 1965 debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
b353756b 1966
8910ae89 1967 if (kmemleak_error) {
3c7b4e6b 1968 /*
25985edc 1969 * Some error occurred and kmemleak was disabled. There is a
3c7b4e6b
CM
1970 * small chance that kmemleak_disable() was called immediately
1971 * after setting kmemleak_initialized and we may end up with
 1972 * two clean-up threads, but they are serialized by scan_mutex.
1973 */
179a8100 1974 schedule_work(&cleanup_work);
3c7b4e6b
CM
1975 return -ENOMEM;
1976 }
1977
d53ce042
SK
1978 if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
1979 mutex_lock(&scan_mutex);
1980 start_scan_thread();
1981 mutex_unlock(&scan_mutex);
1982 }
3c7b4e6b 1983
0e965a6b
QC
1984 pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
1985 mem_pool_free_count);
3c7b4e6b
CM
1986
1987 return 0;
1988}
1989late_initcall(kmemleak_late_init);