1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * mm/kmemleak.c
4 *
5 * Copyright (C) 2008 ARM Limited
6 * Written by Catalin Marinas <catalin.marinas@arm.com>
7 *
8 * For more information on the algorithm and kmemleak usage, please see
9 * Documentation/dev-tools/kmemleak.rst.
10 *
11 * Notes on locking
12 * ----------------
13 *
14 * The following locks and mutexes are used by kmemleak:
15 *
16 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
17 * accesses to the object_tree_root. The object_list is the main list
18 * holding the metadata (struct kmemleak_object) for the allocated memory
19 * blocks. The object_tree_root is a red black tree used to look-up
20 * metadata based on a pointer to the corresponding memory block. The
21 * kmemleak_object structures are added to the object_list and
22 * object_tree_root in the create_object() function called from the
23 * kmemleak_alloc() callback and removed in delete_object() called from the
24 * kmemleak_free() callback
25 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
26 * Accesses to the metadata (e.g. count) are protected by this lock. Note
27 * that some members of this structure may be protected by other means
28 * (atomic or kmemleak_lock). This lock is also held when scanning the
29 * corresponding memory block to avoid the kernel freeing it via the
30 * kmemleak_free() callback. This is less heavyweight than holding a global
31 * lock like kmemleak_lock during scanning.
32 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
33 * unreferenced objects at a time. The gray_list contains the objects which
34 * are already referenced or marked as false positives and need to be
35 * scanned. This list is only modified during a scanning episode when the
36 * scan_mutex is held. At the end of a scan, the gray_list is always empty.
37 * Note that the kmemleak_object.use_count is incremented when an object is
38 * added to the gray_list and therefore cannot be freed. This mutex also
39 * prevents concurrent users of the "kmemleak" debugfs file and serializes
40 * changes to the memory scanning parameters, including the scan_thread
41 * pointer.
42 *
43 * Locks and mutexes are acquired/nested in the following order:
44 *
45 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
46 *
47 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
48 * regions.
49 *
50 * The kmemleak_object structures have a use_count incremented or decremented
51 * using the get_object()/put_object() functions. When the use_count becomes
52 * 0, this count can no longer be incremented and put_object() schedules the
53 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
54 * function must be protected by rcu_read_lock() to avoid accessing a freed
55 * structure.
56 */
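/*
 * Illustrative sketch (not part of the original source) of the canonical
 * lookup pattern implied by the rules above; this is what
 * find_and_get_object() below implements:
 *
 *	rcu_read_lock();
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))	-- use_count already 0
 *		object = NULL;
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);		-- may schedule the RCU freeing
 */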
57
58 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
59
60 #include <linux/init.h>
61 #include <linux/kernel.h>
62 #include <linux/list.h>
63 #include <linux/sched/signal.h>
64 #include <linux/sched/task.h>
65 #include <linux/sched/task_stack.h>
66 #include <linux/jiffies.h>
67 #include <linux/delay.h>
68 #include <linux/export.h>
69 #include <linux/kthread.h>
70 #include <linux/rbtree.h>
71 #include <linux/fs.h>
72 #include <linux/debugfs.h>
73 #include <linux/seq_file.h>
74 #include <linux/cpumask.h>
75 #include <linux/spinlock.h>
76 #include <linux/module.h>
77 #include <linux/mutex.h>
78 #include <linux/rcupdate.h>
79 #include <linux/stacktrace.h>
80 #include <linux/cache.h>
81 #include <linux/percpu.h>
82 #include <linux/memblock.h>
83 #include <linux/pfn.h>
84 #include <linux/mmzone.h>
85 #include <linux/slab.h>
86 #include <linux/thread_info.h>
87 #include <linux/err.h>
88 #include <linux/uaccess.h>
89 #include <linux/string.h>
90 #include <linux/nodemask.h>
91 #include <linux/mm.h>
92 #include <linux/workqueue.h>
93 #include <linux/crc32.h>
94
95 #include <asm/sections.h>
96 #include <asm/processor.h>
97 #include <linux/atomic.h>
98
99 #include <linux/kasan.h>
100 #include <linux/kfence.h>
101 #include <linux/kmemleak.h>
102 #include <linux/memory_hotplug.h>
103
104 /*
105 * Kmemleak configuration and common defines.
106 */
107 #define MAX_TRACE 16 /* stack trace length */
108 #define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
109 #define SECS_FIRST_SCAN 60 /* delay before the first scan */
110 #define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
111 #define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
112
113 #define BYTES_PER_POINTER sizeof(void *)
114
115 /* GFP bitmask for kmemleak internal allocations */
116 #define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
117 __GFP_NORETRY | __GFP_NOMEMALLOC | \
118 __GFP_NOWARN)
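/*
 * For example, metadata for an object allocated with GFP_NOFS | __GFP_ZERO
 * is itself allocated with only the GFP_KERNEL/GFP_ATOMIC bits of the
 * caller's flags preserved (__GFP_ZERO is dropped) plus the no-retry,
 * no-memalloc and no-warn flags above:
 *
 *	object = kmem_cache_alloc(object_cache,
 *				  gfp_kmemleak_mask(GFP_NOFS | __GFP_ZERO));
 */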
119
120 /* scanning area inside a memory block */
121 struct kmemleak_scan_area {
122 struct hlist_node node;
123 unsigned long start;
124 size_t size;
125 };
126
127 #define KMEMLEAK_GREY 0
128 #define KMEMLEAK_BLACK -1
129
130 /*
131 * Structure holding the metadata for each allocated memory block.
132 * Modifications to such objects should be made while holding the
133 * object->lock. Insertions or deletions from object_list, gray_list or
134 * rb_node are already protected by the corresponding locks or mutex (see
135 * the notes on locking above). These objects are reference-counted
136 * (use_count) and freed using the RCU mechanism.
137 */
138 struct kmemleak_object {
139 raw_spinlock_t lock;
140 unsigned int flags; /* object status flags */
141 struct list_head object_list;
142 struct list_head gray_list;
143 struct rb_node rb_node;
144 struct rcu_head rcu; /* object_list lockless traversal */
145 /* object usage count; object freed when use_count == 0 */
146 atomic_t use_count;
147 unsigned long pointer;
148 size_t size;
149 /* pass surplus references to this pointer */
150 unsigned long excess_ref;
151 /* minimum number of pointers found before it is considered a leak */
152 int min_count;
153 /* the total number of pointers found pointing to this object */
154 int count;
155 /* checksum for detecting modified objects */
156 u32 checksum;
157 /* memory ranges to be scanned inside an object (empty for all) */
158 struct hlist_head area_list;
159 unsigned long trace[MAX_TRACE];
160 unsigned int trace_len;
161 unsigned long jiffies; /* creation timestamp */
162 pid_t pid; /* pid of the current task */
163 char comm[TASK_COMM_LEN]; /* executable name */
164 };
165
166 /* flag representing the memory block allocation status */
167 #define OBJECT_ALLOCATED (1 << 0)
169 /* flag set after the first reporting of an unreferenced object */
169 #define OBJECT_REPORTED (1 << 1)
170 /* flag set to not scan the object */
171 #define OBJECT_NO_SCAN (1 << 2)
172 /* flag set to fully scan the object when scan_area allocation failed */
173 #define OBJECT_FULL_SCAN (1 << 3)
174
175 #define HEX_PREFIX " "
176 /* number of bytes to print per line; must be 16 or 32 */
177 #define HEX_ROW_SIZE 16
178 /* number of bytes to print at a time (1, 2, 4, 8) */
179 #define HEX_GROUP_SIZE 1
180 /* include ASCII after the hex output */
181 #define HEX_ASCII 1
182 /* max number of lines to be printed */
183 #define HEX_MAX_LINES 2
184
185 /* the list of all allocated objects */
186 static LIST_HEAD(object_list);
187 /* the list of gray-colored objects (see color_gray comment below) */
188 static LIST_HEAD(gray_list);
189 /* memory pool allocation */
190 static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
191 static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
192 static LIST_HEAD(mem_pool_free_list);
193 /* search tree for object boundaries */
194 static struct rb_root object_tree_root = RB_ROOT;
195 /* protecting the access to object_list and object_tree_root */
196 static DEFINE_RAW_SPINLOCK(kmemleak_lock);
197
198 /* allocation caches for kmemleak internal data */
199 static struct kmem_cache *object_cache;
200 static struct kmem_cache *scan_area_cache;
201
202 /* set if tracing memory operations is enabled */
203 static int kmemleak_enabled = 1;
204 /* same as above but only for the kmemleak_free() callback */
205 static int kmemleak_free_enabled = 1;
206 /* set in the late_initcall if there were no errors */
207 static int kmemleak_initialized;
208 /* set if a kmemleak warning was issued */
209 static int kmemleak_warning;
210 /* set if a fatal kmemleak error has occurred */
211 static int kmemleak_error;
212
213 /* minimum and maximum address that may be valid pointers */
214 static unsigned long min_addr = ULONG_MAX;
215 static unsigned long max_addr;
216
217 static struct task_struct *scan_thread;
218 /* used to avoid reporting of recently allocated objects */
219 static unsigned long jiffies_min_age;
220 static unsigned long jiffies_last_scan;
221 /* delay between automatic memory scannings */
222 static signed long jiffies_scan_wait;
223 /* enables or disables the task stacks scanning */
224 static int kmemleak_stack_scan = 1;
225 /* protects the memory scanning, parameters and debug/kmemleak file access */
226 static DEFINE_MUTEX(scan_mutex);
227 /* setting kmemleak=on will set this var, skipping the disable */
228 static int kmemleak_skip_disable;
229 /* If there are leaks that can be reported */
230 static bool kmemleak_found_leaks;
231
232 static bool kmemleak_verbose;
233 module_param_named(verbose, kmemleak_verbose, bool, 0600);
234
235 static void kmemleak_disable(void);
236
237 /*
238 * Print a warning and dump the stack trace.
239 */
240 #define kmemleak_warn(x...) do { \
241 pr_warn(x); \
242 dump_stack(); \
243 kmemleak_warning = 1; \
244 } while (0)
245
246 /*
247 * Macro invoked when a serious kmemleak condition has occurred and cannot be
248 * recovered from. Kmemleak will be disabled and further allocation/freeing
249 * tracing will no longer be available.
250 */
251 #define kmemleak_stop(x...) do { \
252 kmemleak_warn(x); \
253 kmemleak_disable(); \
254 } while (0)
255
256 #define warn_or_seq_printf(seq, fmt, ...) do { \
257 if (seq) \
258 seq_printf(seq, fmt, ##__VA_ARGS__); \
259 else \
260 pr_warn(fmt, ##__VA_ARGS__); \
261 } while (0)
262
263 static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
264 int rowsize, int groupsize, const void *buf,
265 size_t len, bool ascii)
266 {
267 if (seq)
268 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
269 buf, len, ascii);
270 else
271 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
272 rowsize, groupsize, buf, len, ascii);
273 }
274
275 /*
276 * Print the object's hex dump to the seq file. The number of lines to be
277 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
278 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
279 * with the object->lock held.
280 */
281 static void hex_dump_object(struct seq_file *seq,
282 struct kmemleak_object *object)
283 {
284 const u8 *ptr = (const u8 *)object->pointer;
285 size_t len;
286
287 /* limit the number of lines to HEX_MAX_LINES */
288 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
289
290 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
291 kasan_disable_current();
292 warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
293 HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
294 kasan_enable_current();
295 }
296
297 /*
298 * Object colors, encoded with count and min_count:
299 * - white - orphan object, not enough references to it (count < min_count)
300 * - gray - not orphan, either marked as a false positive (min_count == 0) or
301 * with sufficient references to it (count >= min_count)
302 * - black - ignore, it doesn't contain references (e.g. text section)
303 * (min_count == -1). No function defined for this color.
304 * Newly created objects start with object->count == 0 (see create_object())
305 * and are classified during the next memory scan.
306 */
307 static bool color_white(const struct kmemleak_object *object)
308 {
309 return object->count != KMEMLEAK_BLACK &&
310 object->count < object->min_count;
311 }
312
313 static bool color_gray(const struct kmemleak_object *object)
314 {
315 return object->min_count != KMEMLEAK_BLACK &&
316 object->count >= object->min_count;
317 }
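/*
 * A few illustrative (min_count, count) combinations and the resulting
 * color, following the two helpers above:
 *
 *	min_count == 1,  count == 0		white - potential leak
 *	min_count == 0,  count == 0		gray  - marked false positive
 *	min_count == 1,  count == 3		gray  - sufficiently referenced
 *	min_count == -1 (KMEMLEAK_BLACK)	black - ignored, not scanned
 */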
318
319 /*
320 * Objects are considered unreferenced only if their color is white, they have
321 * not been deleted and have a minimum age to avoid false positives caused by
322 * pointers temporarily stored in CPU registers.
323 */
324 static bool unreferenced_object(struct kmemleak_object *object)
325 {
326 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
327 time_before_eq(object->jiffies + jiffies_min_age,
328 jiffies_last_scan);
329 }
330
331 /*
332 * Print the unreferenced object's information to the seq file. The
333 * print_unreferenced function must be called with the object->lock held.
334 */
335 static void print_unreferenced(struct seq_file *seq,
336 struct kmemleak_object *object)
337 {
338 int i;
339 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
340
341 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
342 object->pointer, object->size);
343 warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
344 object->comm, object->pid, object->jiffies,
345 msecs_age / 1000, msecs_age % 1000);
346 hex_dump_object(seq, object);
347 warn_or_seq_printf(seq, " backtrace:\n");
348
349 for (i = 0; i < object->trace_len; i++) {
350 void *ptr = (void *)object->trace[i];
351 warn_or_seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
352 }
353 }
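/*
 * Illustrative shape of the resulting report; the address, hex dump and
 * backtrace below are made up:
 *
 * unreferenced object 0xffff888012345600 (size 64):
 *   comm "insmod", pid 123, jiffies 4294953600 (age 13.200s)
 *   hex dump (first 32 bytes):
 *     00 00 00 00 00 00 00 00 00 56 34 12 80 88 ff ff  .........V4.....
 *     ...
 *   backtrace:
 *     [<00000000deadbeef>] kmem_cache_alloc_trace+0x13e/0x2b0
 *     ...
 */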
354
355 /*
356 * Print the kmemleak_object information. This function is used mainly for
357 * debugging special cases of kmemleak operations. It must be called with
358 * the object->lock held.
359 */
360 static void dump_object_info(struct kmemleak_object *object)
361 {
362 pr_notice("Object 0x%08lx (size %zu):\n",
363 object->pointer, object->size);
364 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
365 object->comm, object->pid, object->jiffies);
366 pr_notice(" min_count = %d\n", object->min_count);
367 pr_notice(" count = %d\n", object->count);
368 pr_notice(" flags = 0x%x\n", object->flags);
369 pr_notice(" checksum = %u\n", object->checksum);
370 pr_notice(" backtrace:\n");
371 stack_trace_print(object->trace, object->trace_len, 4);
372 }
373
374 /*
375 * Look-up a memory block metadata (kmemleak_object) in the object search
376 * tree based on a pointer value. If alias is 0, only values pointing to the
377 * beginning of the memory block are allowed. The kmemleak_lock must be held
378 * when calling this function.
379 */
380 static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
381 {
382 struct rb_node *rb = object_tree_root.rb_node;
383
384 while (rb) {
385 struct kmemleak_object *object =
386 rb_entry(rb, struct kmemleak_object, rb_node);
387 if (ptr < object->pointer)
388 rb = object->rb_node.rb_left;
389 else if (object->pointer + object->size <= ptr)
390 rb = object->rb_node.rb_right;
391 else if (object->pointer == ptr || alias)
392 return object;
393 else {
394 kmemleak_warn("Found object by alias at 0x%08lx\n",
395 ptr);
396 dump_object_info(object);
397 break;
398 }
399 }
400 return NULL;
401 }
402
403 /*
404 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
405 * that once an object's use_count reaches 0, the RCU freeing has already been
406 * scheduled and the object must no longer be used. This function must be
407 * called under the protection of rcu_read_lock().
408 */
409 static int get_object(struct kmemleak_object *object)
410 {
411 return atomic_inc_not_zero(&object->use_count);
412 }
413
414 /*
415 * Memory pool allocation and freeing. kmemleak_lock must not be held.
416 */
417 static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
418 {
419 unsigned long flags;
420 struct kmemleak_object *object;
421
422 /* try the slab allocator first */
423 if (object_cache) {
424 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
425 if (object)
426 return object;
427 }
428
429 /* slab allocation failed, try the memory pool */
430 raw_spin_lock_irqsave(&kmemleak_lock, flags);
431 object = list_first_entry_or_null(&mem_pool_free_list,
432 typeof(*object), object_list);
433 if (object)
434 list_del(&object->object_list);
435 else if (mem_pool_free_count)
436 object = &mem_pool[--mem_pool_free_count];
437 else
438 pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
439 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
440
441 return object;
442 }
443
444 /*
445 * Return the object to either the slab allocator or the memory pool.
446 */
447 static void mem_pool_free(struct kmemleak_object *object)
448 {
449 unsigned long flags;
450
451 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
452 kmem_cache_free(object_cache, object);
453 return;
454 }
455
456 /* add the object to the memory pool free list */
457 raw_spin_lock_irqsave(&kmemleak_lock, flags);
458 list_add(&object->object_list, &mem_pool_free_list);
459 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
460 }
461
462 /*
463 * RCU callback to free a kmemleak_object.
464 */
465 static void free_object_rcu(struct rcu_head *rcu)
466 {
467 struct hlist_node *tmp;
468 struct kmemleak_scan_area *area;
469 struct kmemleak_object *object =
470 container_of(rcu, struct kmemleak_object, rcu);
471
472 /*
473 * Once use_count is 0 (guaranteed by put_object), there is no other
474 * code accessing this object, hence no need for locking.
475 */
476 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
477 hlist_del(&area->node);
478 kmem_cache_free(scan_area_cache, area);
479 }
480 mem_pool_free(object);
481 }
482
483 /*
484 * Decrement the object use_count. Once the count is 0, free the object using
485 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
486 * delete_object() path, the delayed RCU freeing ensures that there is no
487 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
488 * is also possible.
489 */
490 static void put_object(struct kmemleak_object *object)
491 {
492 if (!atomic_dec_and_test(&object->use_count))
493 return;
494
495 /* should only get here after delete_object was called */
496 WARN_ON(object->flags & OBJECT_ALLOCATED);
497
498 /*
499 * It may be too early for the RCU callbacks, however, there is no
500 * concurrent object_list traversal when !object_cache and all objects
501 * came from the memory pool. Free the object directly.
502 */
503 if (object_cache)
504 call_rcu(&object->rcu, free_object_rcu);
505 else
506 free_object_rcu(&object->rcu);
507 }
508
509 /*
510 * Look up an object in the object search tree and increase its use_count.
511 */
512 static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
513 {
514 unsigned long flags;
515 struct kmemleak_object *object;
516
517 rcu_read_lock();
518 raw_spin_lock_irqsave(&kmemleak_lock, flags);
519 object = lookup_object(ptr, alias);
520 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
521
522 /* check whether the object is still available */
523 if (object && !get_object(object))
524 object = NULL;
525 rcu_read_unlock();
526
527 return object;
528 }
529
530 /*
531 * Remove an object from the object_tree_root and object_list. Must be called
532 * with the kmemleak_lock held _if_ kmemleak is still enabled.
533 */
534 static void __remove_object(struct kmemleak_object *object)
535 {
536 rb_erase(&object->rb_node, &object_tree_root);
537 list_del_rcu(&object->object_list);
538 }
539
540 /*
541 * Look up an object in the object search tree and remove it from both
542 * object_tree_root and object_list. The returned object's use_count should be
543 * at least 1, as initially set by create_object().
544 */
545 static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
546 {
547 unsigned long flags;
548 struct kmemleak_object *object;
549
550 raw_spin_lock_irqsave(&kmemleak_lock, flags);
551 object = lookup_object(ptr, alias);
552 if (object)
553 __remove_object(object);
554 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
555
556 return object;
557 }
558
559 /*
560 * Save stack trace to the given array of MAX_TRACE size.
561 */
562 static int __save_stack_trace(unsigned long *trace)
563 {
564 return stack_trace_save(trace, MAX_TRACE, 2);
565 }
566
567 /*
568 * Create the metadata (struct kmemleak_object) corresponding to an allocated
569 * memory block and add it to the object_list and object_tree_root.
570 */
571 static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
572 int min_count, gfp_t gfp)
573 {
574 unsigned long flags;
575 struct kmemleak_object *object, *parent;
576 struct rb_node **link, *rb_parent;
577 unsigned long untagged_ptr;
578
579 object = mem_pool_alloc(gfp);
580 if (!object) {
581 pr_warn("Cannot allocate a kmemleak_object structure\n");
582 kmemleak_disable();
583 return NULL;
584 }
585
586 INIT_LIST_HEAD(&object->object_list);
587 INIT_LIST_HEAD(&object->gray_list);
588 INIT_HLIST_HEAD(&object->area_list);
589 raw_spin_lock_init(&object->lock);
590 atomic_set(&object->use_count, 1);
591 object->flags = OBJECT_ALLOCATED;
592 object->pointer = ptr;
593 object->size = kfence_ksize((void *)ptr) ?: size;
594 object->excess_ref = 0;
595 object->min_count = min_count;
596 object->count = 0; /* white color initially */
597 object->jiffies = jiffies;
598 object->checksum = 0;
599
600 /* task information */
601 if (in_irq()) {
602 object->pid = 0;
603 strncpy(object->comm, "hardirq", sizeof(object->comm));
604 } else if (in_serving_softirq()) {
605 object->pid = 0;
606 strncpy(object->comm, "softirq", sizeof(object->comm));
607 } else {
608 object->pid = current->pid;
609 /*
610 * There is a small chance of a race with set_task_comm(),
611 * however using get_task_comm() here may cause locking
612 * dependency issues with current->alloc_lock. In the worst
613 * case, the command line is not correct.
614 */
615 strncpy(object->comm, current->comm, sizeof(object->comm));
616 }
617
618 /* kernel backtrace */
619 object->trace_len = __save_stack_trace(object->trace);
620
621 raw_spin_lock_irqsave(&kmemleak_lock, flags);
622
623 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
624 min_addr = min(min_addr, untagged_ptr);
625 max_addr = max(max_addr, untagged_ptr + size);
626 link = &object_tree_root.rb_node;
627 rb_parent = NULL;
628 while (*link) {
629 rb_parent = *link;
630 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
631 if (ptr + size <= parent->pointer)
632 link = &parent->rb_node.rb_left;
633 else if (parent->pointer + parent->size <= ptr)
634 link = &parent->rb_node.rb_right;
635 else {
636 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
637 ptr);
638 /*
639 * No need for parent->lock here since "parent" cannot
640 * be freed while the kmemleak_lock is held.
641 */
642 dump_object_info(parent);
643 mem_pool_free(object);
644 object = NULL;
645 goto out;
646 }
647 }
648 rb_link_node(&object->rb_node, rb_parent, link);
649 rb_insert_color(&object->rb_node, &object_tree_root);
650
651 list_add_tail_rcu(&object->object_list, &object_list);
652 out:
653 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
654 return object;
655 }
656
657 /*
658 * Mark the object as not allocated and schedule RCU freeing via put_object().
659 */
660 static void __delete_object(struct kmemleak_object *object)
661 {
662 unsigned long flags;
663
664 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
665 WARN_ON(atomic_read(&object->use_count) < 1);
666
667 /*
668 * Locking here also ensures that the corresponding memory block
669 * cannot be freed when it is being scanned.
670 */
671 raw_spin_lock_irqsave(&object->lock, flags);
672 object->flags &= ~OBJECT_ALLOCATED;
673 raw_spin_unlock_irqrestore(&object->lock, flags);
674 put_object(object);
675 }
676
677 /*
678 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
679 * delete it.
680 */
681 static void delete_object_full(unsigned long ptr)
682 {
683 struct kmemleak_object *object;
684
685 object = find_and_remove_object(ptr, 0);
686 if (!object) {
687 #ifdef DEBUG
688 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
689 ptr);
690 #endif
691 return;
692 }
693 __delete_object(object);
694 }
695
696 /*
697 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
698 * delete it. If the memory block is partially freed, the function may create
699 * additional metadata for the remaining parts of the block.
700 */
701 static void delete_object_part(unsigned long ptr, size_t size)
702 {
703 struct kmemleak_object *object;
704 unsigned long start, end;
705
706 object = find_and_remove_object(ptr, 1);
707 if (!object) {
708 #ifdef DEBUG
709 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
710 ptr, size);
711 #endif
712 return;
713 }
714
715 /*
716 * Create one or two objects that may result from the memory block
717 * split. Note that partial freeing is only done by free_bootmem() and
718 * this happens before kmemleak_init() is called.
719 */
720 start = object->pointer;
721 end = object->pointer + object->size;
722 if (ptr > start)
723 create_object(start, ptr - start, object->min_count,
724 GFP_KERNEL);
725 if (ptr + size < end)
726 create_object(ptr + size, end - ptr - size, object->min_count,
727 GFP_KERNEL);
728
729 __delete_object(object);
730 }
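/*
 * Worked example with hypothetical addresses: for an object covering
 * [0x1000, 0x1100), partially freeing [0x1040, 0x1080) removes the
 * original metadata and re-creates objects for the surviving ranges:
 *
 *	delete_object_part(0x1040, 0x40);
 *		create_object(0x1000, 0x40, ...);	for [0x1000, 0x1040)
 *		create_object(0x1080, 0x80, ...);	for [0x1080, 0x1100)
 */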
731
732 static void __paint_it(struct kmemleak_object *object, int color)
733 {
734 object->min_count = color;
735 if (color == KMEMLEAK_BLACK)
736 object->flags |= OBJECT_NO_SCAN;
737 }
738
739 static void paint_it(struct kmemleak_object *object, int color)
740 {
741 unsigned long flags;
742
743 raw_spin_lock_irqsave(&object->lock, flags);
744 __paint_it(object, color);
745 raw_spin_unlock_irqrestore(&object->lock, flags);
746 }
747
748 static void paint_ptr(unsigned long ptr, int color)
749 {
750 struct kmemleak_object *object;
751
752 object = find_and_get_object(ptr, 0);
753 if (!object) {
754 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
755 ptr,
756 (color == KMEMLEAK_GREY) ? "Grey" :
757 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
758 return;
759 }
760 paint_it(object, color);
761 put_object(object);
762 }
763
764 /*
765 * Mark an object permanently as gray-colored so that it can no longer be
766 * reported as a leak. This is used in general to mark a false positive.
767 */
768 static void make_gray_object(unsigned long ptr)
769 {
770 paint_ptr(ptr, KMEMLEAK_GREY);
771 }
772
773 /*
774 * Mark the object as black-colored so that it is excluded from scans and
775 * reporting.
776 */
777 static void make_black_object(unsigned long ptr)
778 {
779 paint_ptr(ptr, KMEMLEAK_BLACK);
780 }
781
782 /*
783 * Add a scanning area to the object. If at least one such area is added,
784 * kmemleak will only scan these ranges rather than the whole memory block.
785 */
786 static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
787 {
788 unsigned long flags;
789 struct kmemleak_object *object;
790 struct kmemleak_scan_area *area = NULL;
791
792 object = find_and_get_object(ptr, 1);
793 if (!object) {
794 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
795 ptr);
796 return;
797 }
798
799 if (scan_area_cache)
800 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
801
802 raw_spin_lock_irqsave(&object->lock, flags);
803 if (!area) {
804 pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
805 /* mark the object for full scan to avoid false positives */
806 object->flags |= OBJECT_FULL_SCAN;
807 goto out_unlock;
808 }
809 if (size == SIZE_MAX) {
810 size = object->pointer + object->size - ptr;
811 } else if (ptr + size > object->pointer + object->size) {
812 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
813 dump_object_info(object);
814 kmem_cache_free(scan_area_cache, area);
815 goto out_unlock;
816 }
817
818 INIT_HLIST_NODE(&area->node);
819 area->start = ptr;
820 area->size = size;
821
822 hlist_add_head(&area->node, &object->area_list);
823 out_unlock:
824 raw_spin_unlock_irqrestore(&object->lock, flags);
825 put_object(object);
826 }
827
828 /*
829 * Any surplus references (object already gray) to 'ptr' are passed to
830 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
831 * vm_struct may be used as an alternative reference to the vmalloc'ed object
832 * (see free_thread_stack()).
833 */
834 static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
835 {
836 unsigned long flags;
837 struct kmemleak_object *object;
838
839 object = find_and_get_object(ptr, 0);
840 if (!object) {
841 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
842 ptr);
843 return;
844 }
845
846 raw_spin_lock_irqsave(&object->lock, flags);
847 object->excess_ref = excess_ref;
848 raw_spin_unlock_irqrestore(&object->lock, flags);
849 put_object(object);
850 }
851
852 /*
853 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
854 * pointer. Such an object will not be scanned by kmemleak but references to it
855 * are searched.
856 */
857 static void object_no_scan(unsigned long ptr)
858 {
859 unsigned long flags;
860 struct kmemleak_object *object;
861
862 object = find_and_get_object(ptr, 0);
863 if (!object) {
864 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
865 return;
866 }
867
868 raw_spin_lock_irqsave(&object->lock, flags);
869 object->flags |= OBJECT_NO_SCAN;
870 raw_spin_unlock_irqrestore(&object->lock, flags);
871 put_object(object);
872 }
873
874 /**
875 * kmemleak_alloc - register a newly allocated object
876 * @ptr: pointer to beginning of the object
877 * @size: size of the object
878 * @min_count: minimum number of references to this object. If during memory
879 * scanning a number of references less than @min_count is found,
880 * the object is reported as a memory leak. If @min_count is 0,
881 * the object is never reported as a leak. If @min_count is -1,
882 * the object is ignored (not scanned and not reported as a leak)
883 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
884 *
885 * This function is called from the kernel allocators when a new object
886 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
887 */
888 void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
889 gfp_t gfp)
890 {
891 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
892
893 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
894 create_object((unsigned long)ptr, size, min_count, gfp);
895 }
896 EXPORT_SYMBOL_GPL(kmemleak_alloc);
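/*
 * Sketch (hypothetical wrapper; the real hooks live in the slab and other
 * allocators) of how an allocator pairs up with the callback above.
 * min_count is 1 because the returned pointer is the only reference
 * kmemleak should expect to find for the block:
 *
 *	void *my_alloc(size_t size, gfp_t gfp)
 *	{
 *		void *ptr = my_backend_alloc(size, gfp);
 *
 *		kmemleak_alloc(ptr, size, 1, gfp);	NULL/IS_ERR checked inside
 *		return ptr;
 *	}
 */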
897
898 /**
899 * kmemleak_alloc_percpu - register a newly allocated __percpu object
900 * @ptr: __percpu pointer to beginning of the object
901 * @size: size of the object
902 * @gfp: flags used for kmemleak internal memory allocations
903 *
904 * This function is called from the kernel percpu allocator when a new object
905 * (memory block) is allocated (alloc_percpu).
906 */
907 void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
908 gfp_t gfp)
909 {
910 unsigned int cpu;
911
912 pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
913
914 /*
915 * Percpu allocations are only scanned and not reported as leaks
916 * (min_count is set to 0).
917 */
918 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
919 for_each_possible_cpu(cpu)
920 create_object((unsigned long)per_cpu_ptr(ptr, cpu),
921 size, 0, gfp);
922 }
923 EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
924
925 /**
926 * kmemleak_vmalloc - register a newly vmalloc'ed object
927 * @area: pointer to vm_struct
928 * @size: size of the object
929 * @gfp: __vmalloc() flags used for kmemleak internal memory allocations
930 *
931 * This function is called from the vmalloc() kernel allocator when a new
932 * object (memory block) is allocated.
933 */
934 void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
935 {
936 pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
937
938 /*
939 * A min_count = 2 is needed because vm_struct contains a reference to
940 * the virtual address of the vmalloc'ed block.
941 */
942 if (kmemleak_enabled) {
943 create_object((unsigned long)area->addr, size, 2, gfp);
944 object_set_excess_ref((unsigned long)area,
945 (unsigned long)area->addr);
946 }
947 }
948 EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
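/*
 * Note on the numbers above: once __vmalloc() returns there are two
 * expected references to the block, the pointer held by the caller and
 * area->addr stored inside the vm_struct, hence min_count = 2. The
 * object_set_excess_ref() call makes a surplus reference to the vm_struct
 * itself count as a reference to area->addr, so code that only keeps the
 * vm_struct pointer (e.g. cached kernel thread stacks, see
 * free_thread_stack()) is not reported as a leak.
 */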
949
950 /**
951 * kmemleak_free - unregister a previously registered object
952 * @ptr: pointer to beginning of the object
953 *
954 * This function is called from the kernel allocators when an object (memory
955 * block) is freed (kmem_cache_free, kfree, vfree etc.).
956 */
957 void __ref kmemleak_free(const void *ptr)
958 {
959 pr_debug("%s(0x%p)\n", __func__, ptr);
960
961 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
962 delete_object_full((unsigned long)ptr);
963 }
964 EXPORT_SYMBOL_GPL(kmemleak_free);
965
966 /**
967 * kmemleak_free_part - partially unregister a previously registered object
968 * @ptr: pointer to the beginning or inside the object. This also
969 * represents the start of the range to be freed
970 * @size: size to be unregistered
971 *
972 * This function is called when only a part of a memory block is freed
973 * (usually from the bootmem allocator).
974 */
975 void __ref kmemleak_free_part(const void *ptr, size_t size)
976 {
977 pr_debug("%s(0x%p)\n", __func__, ptr);
978
979 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
980 delete_object_part((unsigned long)ptr, size);
981 }
982 EXPORT_SYMBOL_GPL(kmemleak_free_part);
983
984 /**
985 * kmemleak_free_percpu - unregister a previously registered __percpu object
986 * @ptr: __percpu pointer to beginning of the object
987 *
988 * This function is called from the kernel percpu allocator when an object
989 * (memory block) is freed (free_percpu).
990 */
991 void __ref kmemleak_free_percpu(const void __percpu *ptr)
992 {
993 unsigned int cpu;
994
995 pr_debug("%s(0x%p)\n", __func__, ptr);
996
997 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
998 for_each_possible_cpu(cpu)
999 delete_object_full((unsigned long)per_cpu_ptr(ptr,
1000 cpu));
1001 }
1002 EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1003
1004 /**
1005 * kmemleak_update_trace - update object allocation stack trace
1006 * @ptr: pointer to beginning of the object
1007 *
1008 * Override the object allocation stack trace for cases where the actual
1009 * allocation place is not always useful.
1010 */
1011 void __ref kmemleak_update_trace(const void *ptr)
1012 {
1013 struct kmemleak_object *object;
1014 unsigned long flags;
1015
1016 pr_debug("%s(0x%p)\n", __func__, ptr);
1017
1018 if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1019 return;
1020
1021 object = find_and_get_object((unsigned long)ptr, 1);
1022 if (!object) {
1023 #ifdef DEBUG
1024 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1025 ptr);
1026 #endif
1027 return;
1028 }
1029
1030 raw_spin_lock_irqsave(&object->lock, flags);
1031 object->trace_len = __save_stack_trace(object->trace);
1032 raw_spin_unlock_irqrestore(&object->lock, flags);
1033
1034 put_object(object);
1035 }
1036 EXPORT_SYMBOL(kmemleak_update_trace);
1037
1038 /**
1039 * kmemleak_not_leak - mark an allocated object as a false positive
1040 * @ptr: pointer to beginning of the object
1041 *
1042 * Calling this function on an object will cause the memory block to no longer
1043 * be reported as a leak and always be scanned.
1044 */
1045 void __ref kmemleak_not_leak(const void *ptr)
1046 {
1047 pr_debug("%s(0x%p)\n", __func__, ptr);
1048
1049 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1050 make_gray_object((unsigned long)ptr);
1051 }
1052 EXPORT_SYMBOL(kmemleak_not_leak);
1053
1054 /**
1055 * kmemleak_ignore - ignore an allocated object
1056 * @ptr: pointer to beginning of the object
1057 *
1058 * Calling this function on an object will cause the memory block to be
1059 * ignored (not scanned and not reported as a leak). This is usually done when
1060 * it is known that the corresponding block is not a leak and does not contain
1061 * any references to other allocated memory blocks.
1062 */
1063 void __ref kmemleak_ignore(const void *ptr)
1064 {
1065 pr_debug("%s(0x%p)\n", __func__, ptr);
1066
1067 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1068 make_black_object((unsigned long)ptr);
1069 }
1070 EXPORT_SYMBOL(kmemleak_ignore);
1071
1072 /**
1073 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1074 * @ptr: pointer to beginning or inside the object. This also
1075 * represents the start of the scan area
1076 * @size: size of the scan area
1077 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1078 *
1079 * This function is used when it is known that only certain parts of an object
1080 * contain references to other objects. Kmemleak will only scan these areas,
1081 * reducing the number of false negatives.
1082 */
1083 void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1084 {
1085 pr_debug("%s(0x%p)\n", __func__, ptr);
1086
1087 if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1088 add_scan_area((unsigned long)ptr, size, gfp);
1089 }
1090 EXPORT_SYMBOL(kmemleak_scan_area);
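/*
 * Hypothetical usage sketch: a large object in which only one field may
 * legitimately hold pointers. Restricting the scan avoids random payload
 * bytes being misread as references (which would hide real leaks):
 *
 *	struct my_obj {
 *		char payload[4096];	opaque data, never holds pointers
 *		struct list_head link;	the only reference-bearing member
 *	};
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_scan_area(&obj->link, sizeof(obj->link), GFP_KERNEL);
 */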
1091
1092 /**
1093 * kmemleak_no_scan - do not scan an allocated object
1094 * @ptr: pointer to beginning of the object
1095 *
1096 * This function notifies kmemleak not to scan the given memory block. Useful
1097 * in situations where it is known that the given object does not contain any
1098 * references to other objects. Kmemleak will not scan such objects, reducing
1099 * the number of false negatives.
1100 */
1101 void __ref kmemleak_no_scan(const void *ptr)
1102 {
1103 pr_debug("%s(0x%p)\n", __func__, ptr);
1104
1105 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1106 object_no_scan((unsigned long)ptr);
1107 }
1108 EXPORT_SYMBOL(kmemleak_no_scan);
1109
1110 /**
1111 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1112 * address argument
1113 * @phys: physical address of the object
1114 * @size: size of the object
1115 * @min_count: minimum number of references to this object.
1116 * See kmemleak_alloc()
1117 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1118 */
1119 void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1120 gfp_t gfp)
1121 {
1122 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1123 kmemleak_alloc(__va(phys), size, min_count, gfp);
1124 }
1125 EXPORT_SYMBOL(kmemleak_alloc_phys);
1126
1127 /**
1128 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1129 * physical address argument
1130 * @phys: physical address of the beginning or inside an object. This
1131 * also represents the start of the range to be freed
1132 * @size: size to be unregistered
1133 */
1134 void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1135 {
1136 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1137 kmemleak_free_part(__va(phys), size);
1138 }
1139 EXPORT_SYMBOL(kmemleak_free_part_phys);
1140
1141 /**
1142 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
1143 * address argument
1144 * @phys: physical address of the object
1145 */
1146 void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1147 {
1148 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1149 kmemleak_not_leak(__va(phys));
1150 }
1151 EXPORT_SYMBOL(kmemleak_not_leak_phys);
1152
1153 /**
1154 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1155 * address argument
1156 * @phys: physical address of the object
1157 */
1158 void __ref kmemleak_ignore_phys(phys_addr_t phys)
1159 {
1160 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1161 kmemleak_ignore(__va(phys));
1162 }
1163 EXPORT_SYMBOL(kmemleak_ignore_phys);
1164
1165 /*
1166 * Update an object's checksum and return true if it was modified.
1167 */
1168 static bool update_checksum(struct kmemleak_object *object)
1169 {
1170 u32 old_csum = object->checksum;
1171
1172 kasan_disable_current();
1173 kcsan_disable_current();
1174 object->checksum = crc32(0, (void *)object->pointer, object->size);
1175 kasan_enable_current();
1176 kcsan_enable_current();
1177
1178 return object->checksum != old_csum;
1179 }
1180
1181 /*
1182 * Update an object's references. object->lock must be held by the caller.
1183 */
1184 static void update_refs(struct kmemleak_object *object)
1185 {
1186 if (!color_white(object)) {
1187 /* non-orphan, ignored or new */
1188 return;
1189 }
1190
1191 /*
1192 * Increase the object's reference count (number of pointers to the
1193 * memory block). If this count reaches the required minimum, the
1194 * object's color will become gray and it will be added to the
1195 * gray_list.
1196 */
1197 object->count++;
1198 if (color_gray(object)) {
1199 /* put_object() called when removing from gray_list */
1200 WARN_ON(!get_object(object));
1201 list_add_tail(&object->gray_list, &gray_list);
1202 }
1203 }
1204
1205 /*
1206 * Memory scanning is a long process and it needs to be interruptible. This
1207 * function checks whether such an interrupt condition has occurred.
1208 */
1209 static int scan_should_stop(void)
1210 {
1211 if (!kmemleak_enabled)
1212 return 1;
1213
1214 /*
1215 * This function may be called from either process or kthread context,
1216 * hence the need to check for both stop conditions.
1217 */
1218 if (current->mm)
1219 return signal_pending(current);
1220 else
1221 return kthread_should_stop();
1222
1223 return 0;
1224 }
1225
1226 /*
1227 * Scan a memory block (exclusive range) for valid pointers and add those
1228 * found to the gray list.
1229 */
1230 static void scan_block(void *_start, void *_end,
1231 struct kmemleak_object *scanned)
1232 {
1233 unsigned long *ptr;
1234 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1235 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1236 unsigned long flags;
1237 unsigned long untagged_ptr;
1238
1239 raw_spin_lock_irqsave(&kmemleak_lock, flags);
1240 for (ptr = start; ptr < end; ptr++) {
1241 struct kmemleak_object *object;
1242 unsigned long pointer;
1243 unsigned long excess_ref;
1244
1245 if (scan_should_stop())
1246 break;
1247
1248 kasan_disable_current();
1249 pointer = *ptr;
1250 kasan_enable_current();
1251
1252 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1253 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1254 continue;
1255
1256 /*
1257 * No need for get_object() here since we hold kmemleak_lock.
1258 * object->use_count cannot be dropped to 0 while the object
1259 * is still present in object_tree_root and object_list
1260 * (with updates protected by kmemleak_lock).
1261 */
1262 object = lookup_object(pointer, 1);
1263 if (!object)
1264 continue;
1265 if (object == scanned)
1266 /* self referenced, ignore */
1267 continue;
1268
1269 /*
1270 * Avoid the lockdep recursive warning on object->lock being
1271 * previously acquired in scan_object(). These locks are
1272 * enclosed by scan_mutex.
1273 */
1274 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1275 /* only pass surplus references (object already gray) */
1276 if (color_gray(object)) {
1277 excess_ref = object->excess_ref;
1278 /* no need for update_refs() if object already gray */
1279 } else {
1280 excess_ref = 0;
1281 update_refs(object);
1282 }
1283 raw_spin_unlock(&object->lock);
1284
1285 if (excess_ref) {
1286 object = lookup_object(excess_ref, 0);
1287 if (!object)
1288 continue;
1289 if (object == scanned)
1290 /* circular reference, ignore */
1291 continue;
1292 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1293 update_refs(object);
1294 raw_spin_unlock(&object->lock);
1295 }
1296 }
1297 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1298 }
1299
1300 /*
1301 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1302 */
1303 #ifdef CONFIG_SMP
1304 static void scan_large_block(void *start, void *end)
1305 {
1306 void *next;
1307
1308 while (start < end) {
1309 next = min(start + MAX_SCAN_SIZE, end);
1310 scan_block(start, next, NULL);
1311 start = next;
1312 cond_resched();
1313 }
1314 }
1315 #endif
1316
1317 /*
1318 * Scan a memory block corresponding to a kmemleak_object. A precondition is
1319 * that object->use_count >= 1.
1320 */
1321 static void scan_object(struct kmemleak_object *object)
1322 {
1323 struct kmemleak_scan_area *area;
1324 unsigned long flags;
1325
1326 /*
1327 * Once the object->lock is acquired, the corresponding memory block
1328 * cannot be freed (the same lock is acquired in delete_object).
1329 */
1330 raw_spin_lock_irqsave(&object->lock, flags);
1331 if (object->flags & OBJECT_NO_SCAN)
1332 goto out;
1333 if (!(object->flags & OBJECT_ALLOCATED))
1334 /* already freed object */
1335 goto out;
1336 if (hlist_empty(&object->area_list) ||
1337 object->flags & OBJECT_FULL_SCAN) {
1338 void *start = (void *)object->pointer;
1339 void *end = (void *)(object->pointer + object->size);
1340 void *next;
1341
1342 do {
1343 next = min(start + MAX_SCAN_SIZE, end);
1344 scan_block(start, next, object);
1345
1346 start = next;
1347 if (start >= end)
1348 break;
1349
1350 raw_spin_unlock_irqrestore(&object->lock, flags);
1351 cond_resched();
1352 raw_spin_lock_irqsave(&object->lock, flags);
1353 } while (object->flags & OBJECT_ALLOCATED);
1354 } else
1355 hlist_for_each_entry(area, &object->area_list, node)
1356 scan_block((void *)area->start,
1357 (void *)(area->start + area->size),
1358 object);
1359 out:
1360 raw_spin_unlock_irqrestore(&object->lock, flags);
1361 }
1362
1363 /*
1364 * Scan the objects already referenced (gray objects). More objects will be
1365 * referenced and, if there are no memory leaks, all the objects are scanned.
1366 */
1367 static void scan_gray_list(void)
1368 {
1369 struct kmemleak_object *object, *tmp;
1370
1371 /*
1372 * The list traversal is safe for both tail additions and removals
1373 * from inside the loop. The kmemleak objects cannot be freed from
1374 * outside the loop because their use_count was incremented.
1375 */
1376 object = list_entry(gray_list.next, typeof(*object), gray_list);
1377 while (&object->gray_list != &gray_list) {
1378 cond_resched();
1379
1380 /* may add new objects to the list */
1381 if (!scan_should_stop())
1382 scan_object(object);
1383
1384 tmp = list_entry(object->gray_list.next, typeof(*object),
1385 gray_list);
1386
1387 /* remove the object from the list and release it */
1388 list_del(&object->gray_list);
1389 put_object(object);
1390
1391 object = tmp;
1392 }
1393 WARN_ON(!list_empty(&gray_list));
1394 }
1395
1396 /*
1397 * Scan data sections and all the referenced memory blocks allocated via the
1398 * kernel's standard allocators. This function must be called with the
1399 * scan_mutex held.
1400 */
1401 static void kmemleak_scan(void)
1402 {
1403 unsigned long flags;
1404 struct kmemleak_object *object;
1405 int i;
1406 int new_leaks = 0;
1407
1408 jiffies_last_scan = jiffies;
1409
1410 /* prepare the kmemleak_object's */
1411 rcu_read_lock();
1412 list_for_each_entry_rcu(object, &object_list, object_list) {
1413 raw_spin_lock_irqsave(&object->lock, flags);
1414 #ifdef DEBUG
1415 /*
1416 * With a few exceptions there should be a maximum of
1417 * 1 reference to any object at this point.
1418 */
1419 if (atomic_read(&object->use_count) > 1) {
1420 pr_debug("object->use_count = %d\n",
1421 atomic_read(&object->use_count));
1422 dump_object_info(object);
1423 }
1424 #endif
1425 /* reset the reference count (whiten the object) */
1426 object->count = 0;
1427 if (color_gray(object) && get_object(object))
1428 list_add_tail(&object->gray_list, &gray_list);
1429
1430 raw_spin_unlock_irqrestore(&object->lock, flags);
1431 }
1432 rcu_read_unlock();
1433
1434 #ifdef CONFIG_SMP
1435 /* per-cpu sections scanning */
1436 for_each_possible_cpu(i)
1437 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1438 __per_cpu_end + per_cpu_offset(i));
1439 #endif
1440
1441 /*
1442 * Struct page scanning for each node.
1443 */
1444 get_online_mems();
1445 for_each_online_node(i) {
1446 unsigned long start_pfn = node_start_pfn(i);
1447 unsigned long end_pfn = node_end_pfn(i);
1448 unsigned long pfn;
1449
1450 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1451 struct page *page = pfn_to_online_page(pfn);
1452
1453 if (!page)
1454 continue;
1455
1456 /* only scan pages belonging to this node */
1457 if (page_to_nid(page) != i)
1458 continue;
1459 /* only scan if page is in use */
1460 if (page_count(page) == 0)
1461 continue;
1462 scan_block(page, page + 1, NULL);
1463 if (!(pfn & 63))
1464 cond_resched();
1465 }
1466 }
1467 put_online_mems();
1468
1469 /*
1470 * Scanning the task stacks (may introduce false negatives).
1471 */
1472 if (kmemleak_stack_scan) {
1473 struct task_struct *p, *g;
1474
1475 rcu_read_lock();
1476 for_each_process_thread(g, p) {
1477 void *stack = try_get_task_stack(p);
1478 if (stack) {
1479 scan_block(stack, stack + THREAD_SIZE, NULL);
1480 put_task_stack(p);
1481 }
1482 }
1483 rcu_read_unlock();
1484 }
1485
1486 /*
1487 * Scan the objects already referenced from the sections scanned
1488 * above.
1489 */
1490 scan_gray_list();
1491
1492 /*
1493 * Check for new or unreferenced objects modified since the previous
1494 * scan and color them gray until the next scan.
1495 */
1496 rcu_read_lock();
1497 list_for_each_entry_rcu(object, &object_list, object_list) {
1498 raw_spin_lock_irqsave(&object->lock, flags);
1499 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1500 && update_checksum(object) && get_object(object)) {
1501 /* color it gray temporarily */
1502 object->count = object->min_count;
1503 list_add_tail(&object->gray_list, &gray_list);
1504 }
1505 raw_spin_unlock_irqrestore(&object->lock, flags);
1506 }
1507 rcu_read_unlock();
1508
1509 /*
1510 * Re-scan the gray list for modified unreferenced objects.
1511 */
1512 scan_gray_list();
1513
1514 /*
1515 * If scanning was stopped do not report any new unreferenced objects.
1516 */
1517 if (scan_should_stop())
1518 return;
1519
1520 /*
1521 * Scanning result reporting.
1522 */
1523 rcu_read_lock();
1524 list_for_each_entry_rcu(object, &object_list, object_list) {
1525 raw_spin_lock_irqsave(&object->lock, flags);
1526 if (unreferenced_object(object) &&
1527 !(object->flags & OBJECT_REPORTED)) {
1528 object->flags |= OBJECT_REPORTED;
1529
1530 if (kmemleak_verbose)
1531 print_unreferenced(NULL, object);
1532
1533 new_leaks++;
1534 }
1535 raw_spin_unlock_irqrestore(&object->lock, flags);
1536 }
1537 rcu_read_unlock();
1538
1539 if (new_leaks) {
1540 kmemleak_found_leaks = true;
1541
1542 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1543 new_leaks);
1544 }
1545
1546 }
1547
1548 /*
1549 * Thread function performing automatic memory scanning. Unreferenced objects
1550 * at the end of a memory scan are reported but only the first time.
1551 */
1552 static int kmemleak_scan_thread(void *arg)
1553 {
1554 static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1555
1556 pr_info("Automatic memory scanning thread started\n");
1557 set_user_nice(current, 10);
1558
1559 /*
1560 * Wait before the first scan to allow the system to fully initialize.
1561 */
1562 if (first_run) {
1563 signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1564 first_run = 0;
1565 while (timeout && !kthread_should_stop())
1566 timeout = schedule_timeout_interruptible(timeout);
1567 }
1568
1569 while (!kthread_should_stop()) {
1570 signed long timeout = jiffies_scan_wait;
1571
1572 mutex_lock(&scan_mutex);
1573 kmemleak_scan();
1574 mutex_unlock(&scan_mutex);
1575
1576 /* wait before the next scan */
1577 while (timeout && !kthread_should_stop())
1578 timeout = schedule_timeout_interruptible(timeout);
1579 }
1580
1581 pr_info("Automatic memory scanning thread ended\n");
1582
1583 return 0;
1584 }
1585
1586 /*
1587 * Start the automatic memory scanning thread. This function must be called
1588 * with the scan_mutex held.
1589 */
1590 static void start_scan_thread(void)
1591 {
1592 if (scan_thread)
1593 return;
1594 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1595 if (IS_ERR(scan_thread)) {
1596 pr_warn("Failed to create the scan thread\n");
1597 scan_thread = NULL;
1598 }
1599 }
1600
1601 /*
1602 * Stop the automatic memory scanning thread.
1603 */
1604 static void stop_scan_thread(void)
1605 {
1606 if (scan_thread) {
1607 kthread_stop(scan_thread);
1608 scan_thread = NULL;
1609 }
1610 }
1611
1612 /*
1613 * Iterate over the object_list and return the first valid object at or after
1614 * the required position with its use_count incremented. Note that reading the
1615 * file no longer triggers a memory scan; use the "scan" command instead.
1616 */
1617 static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1618 {
1619 struct kmemleak_object *object;
1620 loff_t n = *pos;
1621 int err;
1622
1623 err = mutex_lock_interruptible(&scan_mutex);
1624 if (err < 0)
1625 return ERR_PTR(err);
1626
1627 rcu_read_lock();
1628 list_for_each_entry_rcu(object, &object_list, object_list) {
1629 if (n-- > 0)
1630 continue;
1631 if (get_object(object))
1632 goto out;
1633 }
1634 object = NULL;
1635 out:
1636 return object;
1637 }
1638
1639 /*
1640 * Return the next object in the object_list. The function decrements the
1641 * use_count of the previous object and increases that of the next one.
1642 */
1643 static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1644 {
1645 struct kmemleak_object *prev_obj = v;
1646 struct kmemleak_object *next_obj = NULL;
1647 struct kmemleak_object *obj = prev_obj;
1648
1649 ++(*pos);
1650
1651 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1652 if (get_object(obj)) {
1653 next_obj = obj;
1654 break;
1655 }
1656 }
1657
1658 put_object(prev_obj);
1659 return next_obj;
1660 }
1661
1662 /*
1663 * Decrement the use_count of the last object returned, if any.
1664 */
1665 static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1666 {
1667 if (!IS_ERR(v)) {
1668 /*
1669 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1670 * waiting was interrupted, so only release it if !IS_ERR.
1671 */
1672 rcu_read_unlock();
1673 mutex_unlock(&scan_mutex);
1674 if (v)
1675 put_object(v);
1676 }
1677 }
1678
1679 /*
1680 * Print the information for an unreferenced object to the seq file.
1681 */
1682 static int kmemleak_seq_show(struct seq_file *seq, void *v)
1683 {
1684 struct kmemleak_object *object = v;
1685 unsigned long flags;
1686
1687 raw_spin_lock_irqsave(&object->lock, flags);
1688 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1689 print_unreferenced(seq, object);
1690 raw_spin_unlock_irqrestore(&object->lock, flags);
1691 return 0;
1692 }
1693
1694 static const struct seq_operations kmemleak_seq_ops = {
1695 .start = kmemleak_seq_start,
1696 .next = kmemleak_seq_next,
1697 .stop = kmemleak_seq_stop,
1698 .show = kmemleak_seq_show,
1699 };
1700
1701 static int kmemleak_open(struct inode *inode, struct file *file)
1702 {
1703 return seq_open(file, &kmemleak_seq_ops);
1704 }
1705
1706 static int dump_str_object_info(const char *str)
1707 {
1708 unsigned long flags;
1709 struct kmemleak_object *object;
1710 unsigned long addr;
1711
1712 if (kstrtoul(str, 0, &addr))
1713 return -EINVAL;
1714 object = find_and_get_object(addr, 0);
1715 if (!object) {
1716 pr_info("Unknown object at 0x%08lx\n", addr);
1717 return -EINVAL;
1718 }
1719
1720 raw_spin_lock_irqsave(&object->lock, flags);
1721 dump_object_info(object);
1722 raw_spin_unlock_irqrestore(&object->lock, flags);
1723
1724 put_object(object);
1725 return 0;
1726 }
1727
1728 /*
1729 * We use grey instead of black to ensure we can do future scans on the same
1730 * objects. If we did not do future scans these black objects could
1731 * potentially contain references to newly allocated objects in the future and
1732 * we'd end up with false positives.
1733 */
1734 static void kmemleak_clear(void)
1735 {
1736 struct kmemleak_object *object;
1737 unsigned long flags;
1738
1739 rcu_read_lock();
1740 list_for_each_entry_rcu(object, &object_list, object_list) {
1741 raw_spin_lock_irqsave(&object->lock, flags);
1742 if ((object->flags & OBJECT_REPORTED) &&
1743 unreferenced_object(object))
1744 __paint_it(object, KMEMLEAK_GREY);
1745 raw_spin_unlock_irqrestore(&object->lock, flags);
1746 }
1747 rcu_read_unlock();
1748
1749 kmemleak_found_leaks = false;
1750 }
1751
1752 static void __kmemleak_do_cleanup(void);
1753
1754 /*
1755 * File write operation to configure kmemleak at run-time. The following
1756 * commands can be written to the /sys/kernel/debug/kmemleak file:
1757 * off - disable kmemleak (irreversible)
1758 * stack=on - enable the task stacks scanning
1759 * stack=off - disable the tasks stacks scanning
1760 * scan=on - start the automatic memory scanning thread
1761 * scan=off - stop the automatic memory scanning thread
1762 * scan=... - set the automatic memory scanning period in seconds (0 to
1763 * disable it)
1764 * scan - trigger a memory scan
1765 * clear - mark all currently reported unreferenced kmemleak objects as
1766 * grey to ignore printing them, or free all kmemleak objects
1767 * if kmemleak has been disabled.
1768 * dump=... - dump information about the object found at the given address
1769 */
1770 static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1771 size_t size, loff_t *ppos)
1772 {
1773 char buf[64];
1774 int buf_size;
1775 int ret;
1776
1777 buf_size = min(size, (sizeof(buf) - 1));
1778 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1779 return -EFAULT;
1780 buf[buf_size] = 0;
1781
1782 ret = mutex_lock_interruptible(&scan_mutex);
1783 if (ret < 0)
1784 return ret;
1785
1786 if (strncmp(buf, "clear", 5) == 0) {
1787 if (kmemleak_enabled)
1788 kmemleak_clear();
1789 else
1790 __kmemleak_do_cleanup();
1791 goto out;
1792 }
1793
1794 if (!kmemleak_enabled) {
1795 ret = -EPERM;
1796 goto out;
1797 }
1798
1799 if (strncmp(buf, "off", 3) == 0)
1800 kmemleak_disable();
1801 else if (strncmp(buf, "stack=on", 8) == 0)
1802 kmemleak_stack_scan = 1;
1803 else if (strncmp(buf, "stack=off", 9) == 0)
1804 kmemleak_stack_scan = 0;
1805 else if (strncmp(buf, "scan=on", 7) == 0)
1806 start_scan_thread();
1807 else if (strncmp(buf, "scan=off", 8) == 0)
1808 stop_scan_thread();
1809 else if (strncmp(buf, "scan=", 5) == 0) {
1810 unsigned long secs;
1811
1812 ret = kstrtoul(buf + 5, 0, &secs);
1813 if (ret < 0)
1814 goto out;
1815 stop_scan_thread();
1816 if (secs) {
1817 jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1818 start_scan_thread();
1819 }
1820 } else if (strncmp(buf, "scan", 4) == 0)
1821 kmemleak_scan();
1822 else if (strncmp(buf, "dump=", 5) == 0)
1823 ret = dump_str_object_info(buf + 5);
1824 else
1825 ret = -EINVAL;
1826
1827 out:
1828 mutex_unlock(&scan_mutex);
1829 if (ret < 0)
1830 return ret;
1831
1832 /* ignore the rest of the buffer, only one command at a time */
1833 *ppos += size;
1834 return size;
1835 }
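/*
 * Minimal userspace sketch driving the interface above (assumes debugfs
 * mounted at /sys/kernel/debug); equivalent to
 * "echo scan > /sys/kernel/debug/kmemleak":
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/kmemleak", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		if (write(fd, "scan", 4) != 4)	one command per write
 *			perror("kmemleak");
 *		close(fd);
 *		return 0;
 *	}
 */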
1836
1837 static const struct file_operations kmemleak_fops = {
1838 .owner = THIS_MODULE,
1839 .open = kmemleak_open,
1840 .read = seq_read,
1841 .write = kmemleak_write,
1842 .llseek = seq_lseek,
1843 .release = seq_release,
1844 };
1845
1846 static void __kmemleak_do_cleanup(void)
1847 {
1848 struct kmemleak_object *object, *tmp;
1849
1850 /*
1851 * Kmemleak has already been disabled, no need for RCU list traversal
1852 * or kmemleak_lock held.
1853 */
1854 list_for_each_entry_safe(object, tmp, &object_list, object_list) {
1855 __remove_object(object);
1856 __delete_object(object);
1857 }
1858 }
1859
1860 /*
1861 * Stop the memory scanning thread and free the kmemleak internal objects if
1862 * no leaks were found (otherwise, kmemleak may still have some useful
1863 * information on memory leaks).
1864 */
1865 static void kmemleak_do_cleanup(struct work_struct *work)
1866 {
1867 stop_scan_thread();
1868
1869 mutex_lock(&scan_mutex);
1870 /*
1871 * Once kmemleak_scan() is guaranteed to have stopped, it is safe to no
1872 * longer track object freeing. Ordering of the scan thread stopping and
1873 * the memory accesses below is guaranteed by the kthread_stop()
1874 * function.
1875 */
1876 kmemleak_free_enabled = 0;
1877 mutex_unlock(&scan_mutex);
1878
1879 if (!kmemleak_found_leaks)
1880 __kmemleak_do_cleanup();
1881 else
1882 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
1883 }
1884
1885 static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1886
1887 /*
1888 * Disable kmemleak. No memory allocation/freeing will be traced once this
1889 * function is called. Disabling kmemleak is an irreversible operation.
1890 */
1891 static void kmemleak_disable(void)
1892 {
1893 /* atomically check whether it was already invoked */
1894 if (cmpxchg(&kmemleak_error, 0, 1))
1895 return;
1896
1897 /* stop any memory operation tracing */
1898 kmemleak_enabled = 0;
1899
1900 /* check whether it is too early for a kernel thread */
1901 if (kmemleak_initialized)
1902 schedule_work(&cleanup_work);
1903 else
1904 kmemleak_free_enabled = 0;
1905
1906 pr_info("Kernel memory leak detector disabled\n");
1907 }
1908
1909 /*
1910 * Allow boot-time kmemleak disabling (enabled by default).
1911 */
1912 static int __init kmemleak_boot_config(char *str)
1913 {
1914 if (!str)
1915 return -EINVAL;
1916 if (strcmp(str, "off") == 0)
1917 kmemleak_disable();
1918 else if (strcmp(str, "on") == 0)
1919 kmemleak_skip_disable = 1;
1920 else
1921 return -EINVAL;
1922 return 0;
1923 }
1924 early_param("kmemleak", kmemleak_boot_config);
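/*
 * Example kernel command line usage (illustrative):
 *
 *	kmemleak=off	disable kmemleak at boot (irreversible)
 *	kmemleak=on	keep kmemleak enabled when
 *			CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set
 */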
1925
1926 /*
1927 * Kmemleak initialization.
1928 */
1929 void __init kmemleak_init(void)
1930 {
1931 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1932 if (!kmemleak_skip_disable) {
1933 kmemleak_disable();
1934 return;
1935 }
1936 #endif
1937
1938 if (kmemleak_error)
1939 return;
1940
1941 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1942 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1943
1944 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1945 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1946
1947 /* register the data/bss sections */
1948 create_object((unsigned long)_sdata, _edata - _sdata,
1949 KMEMLEAK_GREY, GFP_ATOMIC);
1950 create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
1951 KMEMLEAK_GREY, GFP_ATOMIC);
1952 /* only register .data..ro_after_init if not within .data */
1953 if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
1954 create_object((unsigned long)__start_ro_after_init,
1955 __end_ro_after_init - __start_ro_after_init,
1956 KMEMLEAK_GREY, GFP_ATOMIC);
1957 }
1958
1959 /*
1960 * Late initialization function.
1961 */
1962 static int __init kmemleak_late_init(void)
1963 {
1964 kmemleak_initialized = 1;
1965
1966 debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
1967
1968 if (kmemleak_error) {
1969 /*
1970 * Some error occurred and kmemleak was disabled. There is a
1971 * small chance that kmemleak_disable() was called immediately
1972 * after setting kmemleak_initialized and we may end up with
1973 * two clean-up threads but serialized by scan_mutex.
1974 */
1975 schedule_work(&cleanup_work);
1976 return -ENOMEM;
1977 }
1978
1979 if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
1980 mutex_lock(&scan_mutex);
1981 start_scan_thread();
1982 mutex_unlock(&scan_mutex);
1983 }
1984
1985 pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
1986 mem_pool_free_count);
1987
1988 return 0;
1989 }
1990 late_initcall(kmemleak_late_init);