/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a red black tree used to look-up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object() called from
 *   the kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses
 *   to the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory
 *   for unreferenced objects at a time. The gray_list contains the objects
 *   which are already referenced or marked as false positives and need to
 *   be scanned. This list is only modified during a scanning episode when
 *   the scan_mutex is held. At the end of a scan, the gray_list is always
 *   empty. Note that the kmemleak_object.use_count is incremented when an
 *   object is added to the gray_list and therefore cannot be freed. This
 *   mutex also prevents multiple users of the "kmemleak" debugfs file
 *   together with modifications to the memory scanning parameters
 *   including the scan_thread pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
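
/*
 * Illustrative sketch of the reference counting rules above (it simply
 * mirrors the find_and_get_object()/put_object() pattern defined later in
 * this file and adds no new functionality):
 *
 *	rcu_read_lock();
 *	read_lock_irqsave(&kmemleak_lock, flags);
 *	object = lookup_object(ptr, alias);
 *	read_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))
 *		object = NULL;			(RCU freeing already queued)
 *	rcu_read_unlock();
 *	...
 *	if (object)
 *		put_object(object);		(may schedule the RCU free)
 */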

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)
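
/*
 * Worked example of the mask above (straight macro expansion, shown for
 * clarity only):
 *
 *	gfp_kmemleak_mask(GFP_KERNEL)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * i.e. the caller's sleeping/atomic context is preserved while the extra
 * flags stop the internal metadata allocation from retrying hard, dipping
 * into emergency reserves or warning on failure.
 */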

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)

/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* rw_lock protecting the access to object_list and object_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* enables or disables early logging of the memory operations */
static int kmemleak_early_log = 1;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;
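
/*
 * User space drives scanning through the "kmemleak" debugfs file guarded
 * by scan_mutex above; a couple of the commands documented in
 * Documentation/dev-tools/kmemleak.rst:
 *
 *	echo scan > /sys/kernel/debug/kmemleak		(trigger a scan)
 *	cat /sys/kernel/debug/kmemleak			(list suspected leaks)
 *	echo clear > /sys/kernel/debug/kmemleak		(drop current reports)
 */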

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_ALLOC_PERCPU,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_FREE_PERCPU,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long trace[MAX_TRACE];	/* stack trace */
	unsigned int trace_len;		/* stack trace length */
};

/* early logging buffer and current position */
static struct early_log
	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
static int crt_early_log __initdata;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warn(x);			\
	dump_stack();			\
	kmemleak_warning = 1;		\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Print the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	seq_hex_dump(seq, "    ", DUMP_PREFIX_NONE, HEX_ROW_SIZE,
		     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
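
/*
 * A few concrete (count, min_count) combinations and the resulting color,
 * following the helpers above (illustrative only):
 *
 *	count = 0, min_count = 1	-> white (no references found yet)
 *	count = 2, min_count = 1	-> gray  (sufficiently referenced)
 *	count = 0, min_count = 0	-> gray  (marked as false positive)
 *	min_count = KMEMLEAK_BLACK	-> black (never scanned or reported)
 */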

/*
 * Objects are considered unreferenced only if their color is white, they
 * have not been deleted and have a minimum age to avoid false positives
 * caused by pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
		   object->comm, object->pid, object->jiffies,
		   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct rb_node *rb = object_tree_root.rb_node;

	while (rb) {
		struct kmemleak_object *object =
			rb_entry(rb, struct kmemleak_object, rb_node);
		if (ptr < object->pointer)
			rb = object->rb_node.rb_left;
		else if (object->pointer + object->size <= ptr)
			rb = object->rb_node.rb_right;
		else if (object->pointer == ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}
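
/*
 * Lookup semantics sketch (hypothetical addresses, for illustration): with
 * an object registered at [0x1000, 0x1100), lookup_object(0x1000, 0) and
 * lookup_object(0x1080, 1) both return it, while lookup_object(0x1080, 0)
 * warns about an alias and returns NULL. The tree stores non-overlapping
 * [pointer, pointer + size) intervals, so the walk above is a plain
 * interval search.
 */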

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise.
 * Note that once an object's use_count has reached 0, the RCU freeing has
 * already been registered and the object should no longer be used. This
 * function must be called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root and object_list. The returned object's use_count should be
 * at least 1, as initially set by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object;

	write_lock_irqsave(&kmemleak_lock, flags);
	object = lookup_object(ptr, alias);
	if (object) {
		rb_erase(&object->rb_node, &object_tree_root);
		list_del_rcu(&object->object_list);
	}
	write_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Save stack trace to the given array of MAX_TRACE size.
 */
static int __save_stack_trace(unsigned long *trace)
{
	struct stack_trace stack_trace;

	stack_trace.max_entries = MAX_TRACE;
	stack_trace.nr_entries = 0;
	stack_trace.entries = trace;
	stack_trace.skip = 2;
	save_stack_trace(&stack_trace);

	return stack_trace.nr_entries;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
					     int min_count, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;

	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return NULL;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_len = __save_stack_trace(object->trace);

	write_lock_irqsave(&kmemleak_lock, flags);

	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	link = &object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		if (ptr + size <= parent->pointer)
			link = &parent->rb_node.rb_left;
		else if (parent->pointer + parent->size <= ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			object = NULL;
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, &object_tree_root);

	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
	return object;
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	__delete_object(object);
}
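
/*
 * Split example for delete_object_part() (hypothetical addresses): freeing
 * [0x2100, 0x2200) out of a tracked block [0x2000, 0x2300) re-creates the
 * two survivors, [0x2000, 0x2100) from the "ptr > start" branch and
 * [0x2200, 0x2300) from the "ptr + size < end" branch, before deleting
 * the original metadata.
 */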

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_BLACK);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
	if (!area) {
		pr_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (size == SIZE_MAX) {
		size = object->pointer + object->size - ptr;
	} else if (ptr + size > object->pointer + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void __init log_early(int op_type, const void *ptr, size_t size,
			     int min_count)
{
	unsigned long flags;
	struct early_log *log;

	if (kmemleak_error) {
		/* kmemleak stopped recording, just count the requests */
		crt_early_log++;
		return;
	}

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		crt_early_log++;
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->trace_len = __save_stack_trace(log->trace);
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Log an early allocated block and populate the stack trace.
 */
static void early_alloc(struct early_log *log)
{
	struct kmemleak_object *object;
	unsigned long flags;
	int i;

	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
		return;

	/*
	 * RCU locking needed to ensure object is not freed via put_object().
	 */
	rcu_read_lock();
	object = create_object((unsigned long)log->ptr, log->size,
			       log->min_count, GFP_ATOMIC);
	if (!object)
		goto out;
	spin_lock_irqsave(&object->lock, flags);
	for (i = 0; i < log->trace_len; i++)
		object->trace[i] = log->trace[i];
	object->trace_len = log->trace_len;
	spin_unlock_irqrestore(&object->lock, flags);
out:
	rcu_read_unlock();
}

/*
 * Log an early allocated __percpu block, registering each per-CPU area
 * and populating its stack trace.
 */
static void early_alloc_percpu(struct early_log *log)
{
	unsigned int cpu;
	const void __percpu *ptr = log->ptr;

	for_each_possible_cpu(cpu) {
		log->ptr = per_cpu_ptr(ptr, cpu);
		early_alloc(log);
	}
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
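
/*
 * Sketch of the pairing as an allocator performs it (illustrative only;
 * the real hooks live in the slab/vmalloc code and "buf"/"size" are
 * hypothetical):
 *
 *	kmemleak_alloc(buf, size, 1, GFP_KERNEL);	(start tracking)
 *	...
 *	kmemleak_free(buf);				(stop tracking)
 */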

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->trace_len = __save_stack_trace(object->trace);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as a leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);
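
/*
 * Typical annotation pattern (illustrative; "obj" is hypothetical): when
 * the only reference to an object is kept somewhere the scanner cannot
 * see, e.g. a device register, the allocation site marks it gray to
 * silence the false positive:
 *
 *	obj = kmalloc(sizeof(*obj), GFP_KERNEL);
 *	kmemleak_not_leak(obj);		(still scanned, never reported)
 */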

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
}
EXPORT_SYMBOL(kmemleak_scan_area);
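
/*
 * Annotation sketch (illustrative; the structure and names are
 * hypothetical): if only one member of a large object can hold pointers,
 * restrict the scan to it:
 *
 *	struct big {
 *		char raw[4096];			(may contain fake pointers)
 *		void *deps[8];			(the only real references)
 *	} *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	kmemleak_scan_area(&b->deps, sizeof(b->deps), GFP_KERNEL);
 */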

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (kmemleak_early_log)
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
			       gfp_t gfp)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_alloc(__va(phys), size, min_count, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);
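
/*
 * Illustrative use of the _phys variant ("phys" and "size" hypothetical):
 *
 *	kmemleak_alloc_phys(phys, size, 1, GFP_KERNEL);
 *
 * behaves like kmemleak_alloc(__va(phys), size, 1, GFP_KERNEL) for lowmem
 * and is silently skipped for highmem pages with no linear mapping.
 */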

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_free_part(__va(phys), size);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
 *			    address argument
 */
void __ref kmemleak_not_leak_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_not_leak(__va(phys));
}
EXPORT_SYMBOL(kmemleak_not_leak_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
		kmemleak_ignore(__va(phys));
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
		return false;

	kasan_disable_current();
	object->checksum = crc32(0, (void *)object->pointer, object->size);
	kasan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	read_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;

		if (scan_should_stop())
			break;

		/* don't scan uninitialized memory */
		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
						  BYTES_PER_POINTER))
			continue;

		kasan_disable_current();
		pointer = *ptr;
		kasan_enable_current();

		if (pointer < min_addr || pointer >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock(&object->lock);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object)) {
			/* put_object() called when removing from gray_list */
			WARN_ON(!get_object(object));
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock(&object->lock);
	}
	read_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
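
/*
 * Worked example of the chunking above: a 10000-byte block is scanned as
 * 4096 + 4096 + 1808 bytes, with a cond_resched() between chunks so that
 * scanning latency stays bounded even for very large regions.
 */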

/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}
1377 | ||
1378 | /* | |
1379 | * Scan data sections and all the referenced memory blocks allocated via the | |
1380 | * kernel's standard allocators. This function must be called with the | |
1381 | * scan_mutex held. | |
1382 | */ | |
1383 | static void kmemleak_scan(void) | |
1384 | { | |
1385 | unsigned long flags; | |
1386 | struct kmemleak_object *object; | |
1387 | int i; | |
1388 | int new_leaks = 0; | |
1389 | ||
1390 | jiffies_last_scan = jiffies; | |
1391 | ||
1392 | /* prepare the kmemleak_object's */ | |
1393 | rcu_read_lock(); | |
1394 | list_for_each_entry_rcu(object, &object_list, object_list) { | |
1395 | spin_lock_irqsave(&object->lock, flags); | |
1396 | #ifdef DEBUG | |
1397 | /* | |
1398 | * With a few exceptions there should be a maximum of | |
1399 | * 1 reference to any object at this point. | |
1400 | */ | |
1401 | if (atomic_read(&object->use_count) > 1) { | |
1402 | pr_debug("object->use_count = %d\n", | |
1403 | atomic_read(&object->use_count)); | |
1404 | dump_object_info(object); | |
1405 | } | |
1406 | #endif | |
1407 | /* reset the reference count (whiten the object) */ | |
1408 | object->count = 0; | |
1409 | if (color_gray(object) && get_object(object)) | |
1410 | list_add_tail(&object->gray_list, &gray_list); | |
1411 | ||
1412 | spin_unlock_irqrestore(&object->lock, flags); | |
1413 | } | |
1414 | rcu_read_unlock(); | |
1415 | ||
1416 | /* data/bss scanning */ | |
1417 | scan_large_block(_sdata, _edata); | |
1418 | scan_large_block(__bss_start, __bss_stop); | |
1419 | scan_large_block(__start_ro_after_init, __end_ro_after_init); | |
1420 | ||
1421 | #ifdef CONFIG_SMP | |
1422 | /* per-cpu sections scanning */ | |
1423 | for_each_possible_cpu(i) | |
1424 | scan_large_block(__per_cpu_start + per_cpu_offset(i), | |
1425 | __per_cpu_end + per_cpu_offset(i)); | |
1426 | #endif | |
1427 | ||
1428 | /* | |
1429 | * Struct page scanning for each node. | |
1430 | */ | |
1431 | get_online_mems(); | |
1432 | for_each_online_node(i) { | |
1433 | unsigned long start_pfn = node_start_pfn(i); | |
1434 | unsigned long end_pfn = node_end_pfn(i); | |
1435 | unsigned long pfn; | |
1436 | ||
1437 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { | |
1438 | struct page *page; | |
1439 | ||
1440 | if (!pfn_valid(pfn)) | |
1441 | continue; | |
1442 | page = pfn_to_page(pfn); | |
1443 | /* only scan if page is in use */ | |
1444 | if (page_count(page) == 0) | |
1445 | continue; | |
1446 | scan_block(page, page + 1, NULL); | |
1447 | } | |
1448 | } | |
1449 | put_online_mems(); | |
1450 | ||
1451 | /* | |
1452 | * Scanning the task stacks (may introduce false negatives). | |
1453 | */ | |
1454 | if (kmemleak_stack_scan) { | |
1455 | struct task_struct *p, *g; | |
1456 | ||
1457 | read_lock(&tasklist_lock); | |
1458 | do_each_thread(g, p) { | |
1459 | void *stack = try_get_task_stack(p); | |
1460 | if (stack) { | |
1461 | scan_block(stack, stack + THREAD_SIZE, NULL); | |
1462 | put_task_stack(p); | |
1463 | } | |
1464 | } while_each_thread(g, p); | |
1465 | read_unlock(&tasklist_lock); | |
1466 | } | |
1467 | ||
1468 | /* | |
1469 | * Scan the objects already referenced from the sections scanned | |
1470 | * above. | |
1471 | */ | |
1472 | scan_gray_list(); | |
1473 | ||
1474 | /* | |
1475 | * Check for new or unreferenced objects modified since the previous | |
1476 | * scan (detected via a checksum change) and color them gray until the next scan. | |
1477 | */ | |
1478 | rcu_read_lock(); | |
1479 | list_for_each_entry_rcu(object, &object_list, object_list) { | |
1480 | spin_lock_irqsave(&object->lock, flags); | |
1481 | if (color_white(object) && (object->flags & OBJECT_ALLOCATED) | |
1482 | && update_checksum(object) && get_object(object)) { | |
1483 | /* color it gray temporarily */ | |
1484 | object->count = object->min_count; | |
1485 | list_add_tail(&object->gray_list, &gray_list); | |
1486 | } | |
1487 | spin_unlock_irqrestore(&object->lock, flags); | |
1488 | } | |
1489 | rcu_read_unlock(); | |
1490 | ||
1491 | /* | |
1492 | * Re-scan the gray list for modified unreferenced objects. | |
1493 | */ | |
1494 | scan_gray_list(); | |
1495 | ||
1496 | /* | |
1497 | * If scanning was stopped, do not report any new unreferenced objects. | |
1498 | */ | |
1499 | if (scan_should_stop()) | |
1500 | return; | |
1501 | ||
1502 | /* | |
1503 | * Scanning result reporting. | |
1504 | */ | |
1505 | rcu_read_lock(); | |
1506 | list_for_each_entry_rcu(object, &object_list, object_list) { | |
1507 | spin_lock_irqsave(&object->lock, flags); | |
1508 | if (unreferenced_object(object) && | |
1509 | !(object->flags & OBJECT_REPORTED)) { | |
1510 | object->flags |= OBJECT_REPORTED; | |
1511 | new_leaks++; | |
1512 | } | |
1513 | spin_unlock_irqrestore(&object->lock, flags); | |
1514 | } | |
1515 | rcu_read_unlock(); | |
1516 | ||
1517 | if (new_leaks) { | |
1518 | kmemleak_found_leaks = true; | |
1519 | ||
1520 | pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n", | |
1521 | new_leaks); | |
1522 | } | |
1524 | } | |
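/*
 * A minimal caller sketch for the scan_mutex rule documented above; this is
 * the same pattern used by kmemleak_scan_thread() and kmemleak_write() below:
 *
 *	mutex_lock(&scan_mutex);
 *	kmemleak_scan();
 *	mutex_unlock(&scan_mutex);
 */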
1525 | ||
1526 | /* | |
1527 | * Thread function performing automatic memory scanning. Unreferenced objects | |
1528 | * found at the end of a memory scan are reported, but only the first time. | |
1529 | */ | |
1530 | static int kmemleak_scan_thread(void *arg) | |
1531 | { | |
1532 | static int first_run = 1; | |
1533 | ||
1534 | pr_info("Automatic memory scanning thread started\n"); | |
1535 | set_user_nice(current, 10); | |
1536 | ||
1537 | /* | |
1538 | * Wait before the first scan to allow the system to fully initialize. | |
1539 | */ | |
1540 | if (first_run) { | |
1541 | signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000); | |
1542 | first_run = 0; | |
1543 | while (timeout && !kthread_should_stop()) | |
1544 | timeout = schedule_timeout_interruptible(timeout); | |
1545 | } | |
1546 | ||
1547 | while (!kthread_should_stop()) { | |
1548 | signed long timeout = jiffies_scan_wait; | |
1549 | ||
1550 | mutex_lock(&scan_mutex); | |
1551 | kmemleak_scan(); | |
1552 | mutex_unlock(&scan_mutex); | |
1553 | ||
1554 | /* wait before the next scan */ | |
1555 | while (timeout && !kthread_should_stop()) | |
1556 | timeout = schedule_timeout_interruptible(timeout); | |
1557 | } | |
1558 | ||
1559 | pr_info("Automatic memory scanning thread ended\n"); | |
1560 | ||
1561 | return 0; | |
1562 | } | |
1563 | ||
1564 | /* | |
1565 | * Start the automatic memory scanning thread. This function must be called | |
1566 | * with the scan_mutex held. | |
1567 | */ | |
1568 | static void start_scan_thread(void) | |
1569 | { | |
1570 | if (scan_thread) | |
1571 | return; | |
1572 | scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak"); | |
1573 | if (IS_ERR(scan_thread)) { | |
1574 | pr_warn("Failed to create the scan thread\n"); | |
1575 | scan_thread = NULL; | |
1576 | } | |
1577 | } | |
1578 | ||
1579 | /* | |
1580 | * Stop the automatic memory scanning thread. This function must be called | |
1581 | * with the scan_mutex held. | |
1582 | */ | |
1583 | static void stop_scan_thread(void) | |
1584 | { | |
1585 | if (scan_thread) { | |
1586 | kthread_stop(scan_thread); | |
1587 | scan_thread = NULL; | |
1588 | } | |
1589 | } | |
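/*
 * Note that kthread_stop() does not return until kmemleak_scan_thread() has
 * exited, so no scan can be running once stop_scan_thread() returns;
 * kmemleak_do_cleanup() below relies on this ordering guarantee.
 */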
1590 | ||
1591 | /* | |
1592 | * Iterate over the object_list and return the first valid object at or after | |
1593 | * the required position with its use_count incremented. Reading the seq | |
1594 | * file does not trigger a memory scan; it reports the results of the last one. | |
1595 | */ | |
1596 | static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) | |
1597 | { | |
1598 | struct kmemleak_object *object; | |
1599 | loff_t n = *pos; | |
1600 | int err; | |
1601 | ||
1602 | err = mutex_lock_interruptible(&scan_mutex); | |
1603 | if (err < 0) | |
1604 | return ERR_PTR(err); | |
1605 | ||
1606 | rcu_read_lock(); | |
1607 | list_for_each_entry_rcu(object, &object_list, object_list) { | |
1608 | if (n-- > 0) | |
1609 | continue; | |
1610 | if (get_object(object)) | |
1611 | goto out; | |
1612 | } | |
1613 | object = NULL; | |
1614 | out: | |
1615 | return object; | |
1616 | } | |
1617 | ||
1618 | /* | |
1619 | * Return the next object in the object_list. The function decrements the | |
1620 | * use_count of the previous object and increases that of the next one. | |
1621 | */ | |
1622 | static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) | |
1623 | { | |
1624 | struct kmemleak_object *prev_obj = v; | |
1625 | struct kmemleak_object *next_obj = NULL; | |
1626 | struct kmemleak_object *obj = prev_obj; | |
1627 | ||
1628 | ++(*pos); | |
1629 | ||
1630 | list_for_each_entry_continue_rcu(obj, &object_list, object_list) { | |
1631 | if (get_object(obj)) { | |
1632 | next_obj = obj; | |
1633 | break; | |
1634 | } | |
1635 | } | |
1636 | ||
1637 | put_object(prev_obj); | |
1638 | return next_obj; | |
1639 | } | |
1640 | ||
1641 | /* | |
1642 | * Decrement the use_count of the last object returned, if any. | |
1643 | */ | |
1644 | static void kmemleak_seq_stop(struct seq_file *seq, void *v) | |
1645 | { | |
1646 | if (!IS_ERR(v)) { | |
1647 | /* | |
1648 | * kmemleak_seq_start may return ERR_PTR if the scan_mutex | |
1649 | * waiting was interrupted, so only release it if !IS_ERR. | |
1650 | */ | |
1651 | rcu_read_unlock(); | |
1652 | mutex_unlock(&scan_mutex); | |
1653 | if (v) | |
1654 | put_object(v); | |
1655 | } | |
1656 | } | |
1657 | ||
1658 | /* | |
1659 | * Print the information for an unreferenced object to the seq file. | |
1660 | */ | |
1661 | static int kmemleak_seq_show(struct seq_file *seq, void *v) | |
1662 | { | |
1663 | struct kmemleak_object *object = v; | |
1664 | unsigned long flags; | |
1665 | ||
1666 | spin_lock_irqsave(&object->lock, flags); | |
1667 | if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object)) | |
1668 | print_unreferenced(seq, object); | |
1669 | spin_unlock_irqrestore(&object->lock, flags); | |
1670 | return 0; | |
1671 | } | |
1672 | ||
1673 | static const struct seq_operations kmemleak_seq_ops = { | |
1674 | .start = kmemleak_seq_start, | |
1675 | .next = kmemleak_seq_next, | |
1676 | .stop = kmemleak_seq_stop, | |
1677 | .show = kmemleak_seq_show, | |
1678 | }; | |
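/*
 * For reference, the seq_file core drives the operations above roughly as in
 * the following sketch (simplified; the real loop lives in fs/seq_file.c, and
 * stop() is called even when start() returns NULL or an ERR_PTR value):
 *
 *	v = kmemleak_seq_start(seq, &pos);
 *	while (v && !IS_ERR(v)) {
 *		kmemleak_seq_show(seq, v);
 *		v = kmemleak_seq_next(seq, v, &pos);
 *	}
 *	kmemleak_seq_stop(seq, v);
 *
 * so the scan_mutex and the RCU read lock taken in kmemleak_seq_start() are
 * held across the whole traversal and only dropped in kmemleak_seq_stop().
 */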
1679 | ||
1680 | static int kmemleak_open(struct inode *inode, struct file *file) | |
1681 | { | |
1682 | return seq_open(file, &kmemleak_seq_ops); | |
1683 | } | |
1684 | ||
1685 | static int dump_str_object_info(const char *str) | |
1686 | { | |
1687 | unsigned long flags; | |
1688 | struct kmemleak_object *object; | |
1689 | unsigned long addr; | |
1690 | ||
1691 | if (kstrtoul(str, 0, &addr)) | |
1692 | return -EINVAL; | |
1693 | object = find_and_get_object(addr, 0); | |
1694 | if (!object) { | |
1695 | pr_info("Unknown object at 0x%08lx\n", addr); | |
1696 | return -EINVAL; | |
1697 | } | |
1698 | ||
1699 | spin_lock_irqsave(&object->lock, flags); | |
1700 | dump_object_info(object); | |
1701 | spin_unlock_irqrestore(&object->lock, flags); | |
1702 | ||
1703 | put_object(object); | |
1704 | return 0; | |
1705 | } | |
1706 | ||
1707 | /* | |
1708 | * We use grey instead of black to ensure we can do future scans on the same | |
1709 | * objects. Black objects are never scanned again, so references they later | |
1710 | * hold to newly allocated objects would be missed and those allocations | |
1711 | * would show up as false positives. | |
1712 | */ | |
1713 | static void kmemleak_clear(void) | |
1714 | { | |
1715 | struct kmemleak_object *object; | |
1716 | unsigned long flags; | |
1717 | ||
1718 | rcu_read_lock(); | |
1719 | list_for_each_entry_rcu(object, &object_list, object_list) { | |
1720 | spin_lock_irqsave(&object->lock, flags); | |
1721 | if ((object->flags & OBJECT_REPORTED) && | |
1722 | unreferenced_object(object)) | |
1723 | __paint_it(object, KMEMLEAK_GREY); | |
1724 | spin_unlock_irqrestore(&object->lock, flags); | |
1725 | } | |
1726 | rcu_read_unlock(); | |
1727 | ||
1728 | kmemleak_found_leaks = false; | |
1729 | } | |
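/*
 * For reference, the colour model used above (see color_white()/color_gray()
 * earlier in this file): an object is white while count < min_count (a leak
 * candidate), grey once count >= min_count (referenced, or painted with
 * KMEMLEAK_GREY, which sets min_count to 0), and black when min_count is
 * KMEMLEAK_BLACK, meaning the object is neither scanned nor reported.
 */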
1730 | ||
1731 | static void __kmemleak_do_cleanup(void); | |
1732 | ||
1733 | /* | |
1734 | * File write operation to configure kmemleak at run-time. The following | |
1735 | * commands can be written to the /sys/kernel/debug/kmemleak file: | |
1736 | * off - disable kmemleak (irreversible) | |
1737 | * stack=on - enable the task stacks scanning | |
1738 | * stack=off - disable the task stacks scanning | |
1739 | * scan=on - start the automatic memory scanning thread | |
1740 | * scan=off - stop the automatic memory scanning thread | |
1741 | * scan=... - set the automatic memory scanning period in seconds (0 to | |
1742 | * disable it) | |
1743 | * scan - trigger a memory scan | |
1744 | * clear - mark all currently reported unreferenced kmemleak objects | |
1745 | * as grey so they are no longer printed, or free all kmemleak | |
1746 | * objects if kmemleak has been disabled. | |
1747 | * dump=... - dump information about the object found at the given address | |
1748 | */ | |
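/*
 * Illustrative usage from user space, assuming debugfs is mounted at
 * /sys/kernel/debug (the mount point may differ):
 *
 *	echo scan > /sys/kernel/debug/kmemleak        # trigger an immediate scan
 *	echo scan=600 > /sys/kernel/debug/kmemleak    # re-scan every 600 seconds
 *	echo stack=off > /sys/kernel/debug/kmemleak   # skip task stack scanning
 *	echo clear > /sys/kernel/debug/kmemleak       # grey out reported objects
 *	echo dump=<addr> > /sys/kernel/debug/kmemleak # dump the object at <addr>
 *	cat /sys/kernel/debug/kmemleak                # list suspected leaks
 */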
1749 | static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, | |
1750 | size_t size, loff_t *ppos) | |
1751 | { | |
1752 | char buf[64]; | |
1753 | int buf_size; | |
1754 | int ret; | |
1755 | ||
1756 | buf_size = min(size, (sizeof(buf) - 1)); | |
1757 | if (strncpy_from_user(buf, user_buf, buf_size) < 0) | |
1758 | return -EFAULT; | |
1759 | buf[buf_size] = 0; | |
1760 | ||
1761 | ret = mutex_lock_interruptible(&scan_mutex); | |
1762 | if (ret < 0) | |
1763 | return ret; | |
1764 | ||
1765 | if (strncmp(buf, "clear", 5) == 0) { | |
1766 | if (kmemleak_enabled) | |
1767 | kmemleak_clear(); | |
1768 | else | |
1769 | __kmemleak_do_cleanup(); | |
1770 | goto out; | |
1771 | } | |
1772 | ||
1773 | if (!kmemleak_enabled) { | |
1774 | ret = -EBUSY; | |
1775 | goto out; | |
1776 | } | |
1777 | ||
1778 | if (strncmp(buf, "off", 3) == 0) | |
1779 | kmemleak_disable(); | |
1780 | else if (strncmp(buf, "stack=on", 8) == 0) | |
1781 | kmemleak_stack_scan = 1; | |
1782 | else if (strncmp(buf, "stack=off", 9) == 0) | |
1783 | kmemleak_stack_scan = 0; | |
1784 | else if (strncmp(buf, "scan=on", 7) == 0) | |
1785 | start_scan_thread(); | |
1786 | else if (strncmp(buf, "scan=off", 8) == 0) | |
1787 | stop_scan_thread(); | |
1788 | else if (strncmp(buf, "scan=", 5) == 0) { | |
1789 | unsigned long secs; | |
1790 | ||
1791 | ret = kstrtoul(buf + 5, 0, &secs); | |
1792 | if (ret < 0) | |
1793 | goto out; | |
1794 | stop_scan_thread(); | |
1795 | if (secs) { | |
1796 | jiffies_scan_wait = msecs_to_jiffies(secs * 1000); | |
1797 | start_scan_thread(); | |
1798 | } | |
1799 | } else if (strncmp(buf, "scan", 4) == 0) | |
1800 | kmemleak_scan(); | |
1801 | else if (strncmp(buf, "dump=", 5) == 0) | |
1802 | ret = dump_str_object_info(buf + 5); | |
1803 | else | |
1804 | ret = -EINVAL; | |
1805 | ||
1806 | out: | |
1807 | mutex_unlock(&scan_mutex); | |
1808 | if (ret < 0) | |
1809 | return ret; | |
1810 | ||
1811 | /* ignore the rest of the buffer, only one command at a time */ | |
1812 | *ppos += size; | |
1813 | return size; | |
1814 | } | |
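/*
 * Note the strncmp() ordering above: the longer "scan=on"/"scan=off"/"scan="
 * prefixes are tested before the bare "scan" command, so e.g. "scan=5" sets
 * the scanning period instead of triggering an immediate scan.
 */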
1815 | ||
1816 | static const struct file_operations kmemleak_fops = { | |
1817 | .owner = THIS_MODULE, | |
1818 | .open = kmemleak_open, | |
1819 | .read = seq_read, | |
1820 | .write = kmemleak_write, | |
1821 | .llseek = seq_lseek, | |
1822 | .release = seq_release, | |
1823 | }; | |
1824 | ||
1825 | static void __kmemleak_do_cleanup(void) | |
1826 | { | |
1827 | struct kmemleak_object *object; | |
1828 | ||
1829 | rcu_read_lock(); | |
1830 | list_for_each_entry_rcu(object, &object_list, object_list) | |
1831 | delete_object_full(object->pointer); | |
1832 | rcu_read_unlock(); | |
1833 | } | |
1834 | ||
1835 | /* | |
1836 | * Stop the memory scanning thread and free the kmemleak internal objects if | |
1837 | * no leaks have been found (otherwise, kmemleak may still have some useful | |
1838 | * information on memory leaks). | |
1839 | */ | |
1840 | static void kmemleak_do_cleanup(struct work_struct *work) | |
1841 | { | |
1842 | stop_scan_thread(); | |
1843 | ||
1844 | /* | |
1845 | * Once the scan thread has stopped, it is safe to no longer track | |
1846 | * object freeing. Ordering of the scan thread stopping and the memory | |
1847 | * accesses below is guaranteed by the kthread_stop() function. | |
1848 | */ | |
1849 | kmemleak_free_enabled = 0; | |
1850 | ||
1851 | if (!kmemleak_found_leaks) | |
1852 | __kmemleak_do_cleanup(); | |
1853 | else | |
1854 | pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n"); | |
1855 | } | |
1856 | ||
1857 | static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup); | |
1858 | ||
1859 | /* | |
1860 | * Disable kmemleak. No memory allocation/freeing will be traced once this | |
1861 | * function is called. Disabling kmemleak is an irreversible operation. | |
1862 | */ | |
1863 | static void kmemleak_disable(void) | |
1864 | { | |
1865 | /* atomically check whether it was already invoked */ | |
1866 | if (cmpxchg(&kmemleak_error, 0, 1)) | |
1867 | return; | |
1868 | ||
1869 | /* stop any memory operation tracing */ | |
1870 | kmemleak_enabled = 0; | |
1871 | ||
1872 | /* check whether it is too early for a kernel thread */ | |
1873 | if (kmemleak_initialized) | |
1874 | schedule_work(&cleanup_work); | |
1875 | else | |
1876 | kmemleak_free_enabled = 0; | |
1877 | ||
1878 | pr_info("Kernel memory leak detector disabled\n"); | |
1879 | } | |
1880 | ||
1881 | /* | |
1882 | * Allow boot-time kmemleak disabling (enabled by default). | |
1883 | */ | |
1884 | static int kmemleak_boot_config(char *str) | |
1885 | { | |
1886 | if (!str) | |
1887 | return -EINVAL; | |
1888 | if (strcmp(str, "off") == 0) | |
1889 | kmemleak_disable(); | |
1890 | else if (strcmp(str, "on") == 0) | |
1891 | kmemleak_skip_disable = 1; | |
1892 | else | |
1893 | return -EINVAL; | |
1894 | return 0; | |
1895 | } | |
1896 | early_param("kmemleak", kmemleak_boot_config); | |
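/*
 * Example kernel command line usage (any value other than "off" or "on" is
 * rejected with -EINVAL):
 *
 *	kmemleak=off	disable kmemleak at boot (irreversible)
 *	kmemleak=on	keep kmemleak enabled when the kernel is built with
 *			CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 */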
1897 | ||
1898 | static void __init print_log_trace(struct early_log *log) | |
1899 | { | |
1900 | struct stack_trace trace; | |
1901 | ||
1902 | trace.nr_entries = log->trace_len; | |
1903 | trace.entries = log->trace; | |
1904 | ||
1905 | pr_notice("Early log backtrace:\n"); | |
1906 | print_stack_trace(&trace, 2); | |
1907 | } | |
1908 | ||
1909 | /* | |
1910 | * Kmemleak initialization. | |
1911 | */ | |
1912 | void __init kmemleak_init(void) | |
1913 | { | |
1914 | int i; | |
1915 | unsigned long flags; | |
1916 | ||
1917 | #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF | |
1918 | if (!kmemleak_skip_disable) { | |
1919 | kmemleak_early_log = 0; | |
1920 | kmemleak_disable(); | |
1921 | return; | |
1922 | } | |
1923 | #endif | |
1924 | ||
1925 | jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); | |
1926 | jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); | |
1927 | ||
1928 | object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); | |
1929 | scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); | |
1930 | ||
1931 | if (crt_early_log > ARRAY_SIZE(early_log)) | |
1932 | pr_warn("Early log buffer exceeded (%d), please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", | |
1933 | crt_early_log); | |
1934 | ||
1935 | /* the kernel is still in UP mode, so disabling the IRQs is enough */ | |
1936 | local_irq_save(flags); | |
1937 | kmemleak_early_log = 0; | |
1938 | if (kmemleak_error) { | |
1939 | local_irq_restore(flags); | |
1940 | return; | |
1941 | } else { | |
1942 | kmemleak_enabled = 1; | |
1943 | kmemleak_free_enabled = 1; | |
1944 | } | |
1945 | local_irq_restore(flags); | |
1946 | ||
1947 | /* | |
1948 | * This is the point where tracking allocations is safe. Automatic | |
1949 | * scanning is started during the late initcall. Add the early logged | |
1950 | * callbacks to the kmemleak infrastructure. | |
1951 | */ | |
1952 | for (i = 0; i < crt_early_log; i++) { | |
1953 | struct early_log *log = &early_log[i]; | |
1954 | ||
1955 | switch (log->op_type) { | |
1956 | case KMEMLEAK_ALLOC: | |
1957 | early_alloc(log); | |
1958 | break; | |
1959 | case KMEMLEAK_ALLOC_PERCPU: | |
1960 | early_alloc_percpu(log); | |
1961 | break; | |
1962 | case KMEMLEAK_FREE: | |
1963 | kmemleak_free(log->ptr); | |
1964 | break; | |
1965 | case KMEMLEAK_FREE_PART: | |
1966 | kmemleak_free_part(log->ptr, log->size); | |
1967 | break; | |
1968 | case KMEMLEAK_FREE_PERCPU: | |
1969 | kmemleak_free_percpu(log->ptr); | |
1970 | break; | |
1971 | case KMEMLEAK_NOT_LEAK: | |
1972 | kmemleak_not_leak(log->ptr); | |
1973 | break; | |
1974 | case KMEMLEAK_IGNORE: | |
1975 | kmemleak_ignore(log->ptr); | |
1976 | break; | |
1977 | case KMEMLEAK_SCAN_AREA: | |
1978 | kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL); | |
1979 | break; | |
1980 | case KMEMLEAK_NO_SCAN: | |
1981 | kmemleak_no_scan(log->ptr); | |
1982 | break; | |
1983 | default: | |
1984 | kmemleak_warn("Unknown early log operation: %d\n", | |
1985 | log->op_type); | |
1986 | } | |
1987 | ||
1988 | if (kmemleak_warning) { | |
1989 | print_log_trace(log); | |
1990 | kmemleak_warning = 0; | |
1991 | } | |
1992 | } | |
1993 | } | |
1994 | ||
1995 | /* | |
1996 | * Late initialization function. | |
1997 | */ | |
1998 | static int __init kmemleak_late_init(void) | |
1999 | { | |
2000 | struct dentry *dentry; | |
2001 | ||
2002 | kmemleak_initialized = 1; | |
2003 | ||
2004 | if (kmemleak_error) { | |
2005 | /* | |
2006 | * Some error occurred and kmemleak was disabled. There is a | |
2007 | * small chance that kmemleak_disable() was called immediately | |
2008 | * after setting kmemleak_initialized and we may end up with | |
2009 | * two clean-up threads, but they are serialized by scan_mutex. | |
2010 | */ | |
2011 | schedule_work(&cleanup_work); | |
2012 | return -ENOMEM; | |
2013 | } | |
2014 | ||
2015 | dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL, | |
2016 | &kmemleak_fops); | |
2017 | if (!dentry) | |
2018 | pr_warn("Failed to create the debugfs kmemleak file\n"); | |
2019 | mutex_lock(&scan_mutex); | |
2020 | start_scan_thread(); | |
2021 | mutex_unlock(&scan_mutex); | |
2022 | ||
2023 | pr_info("Kernel memory leak detector initialized\n"); | |
2024 | ||
2025 | return 0; | |
2026 | } | |
2027 | late_initcall(kmemleak_late_init); |