/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/kmemleak.txt.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (rwlock): protects the object_list modifications and
 *   accesses to the object_tree_root. The object_list is the main list
 *   holding the metadata (struct kmemleak_object) for the allocated memory
 *   blocks. The object_tree_root is a priority search tree used to look up
 *   metadata based on a pointer to the corresponding memory block. The
 *   kmemleak_object structures are added to the object_list and
 *   object_tree_root in the create_object() function called from the
 *   kmemleak_alloc() callback and removed in delete_object_full() called
 *   from the kmemleak_free() callback
 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
 *   the metadata (e.g. count) are protected by this lock. Note that some
 *   members of this structure may be protected by other means (atomic or
 *   kmemleak_lock). This lock is also held when scanning the corresponding
 *   memory block to avoid the kernel freeing it via the kmemleak_free()
 *   callback. This is less heavyweight than holding a global lock like
 *   kmemleak_lock during scanning
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */

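/*
 * Illustrative sketch (not part of the original file) of the reference
 * counting pattern described above, as implemented by find_and_get_object()
 * further down. Any user of get_object() must hold rcu_read_lock() so that
 * an object whose use_count already dropped to 0 cannot be freed under it:
 *
 *	rcu_read_lock();
 *	object = lookup_object(ptr, alias);	(under kmemleak_lock)
 *	if (object && !get_object(object))	(use_count was already 0)
 *		object = NULL;
 *	rcu_read_unlock();
 */
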
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/prio_tree.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/atomic.h>

#include <linux/kmemleak.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define GRAY_LIST_PASSES	25	/* maximum number of gray list scans */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define GFP_KMEMLEAK_MASK	(GFP_KERNEL | GFP_ATOMIC)

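/*
 * Example (illustrative, based on common gfp flag combinations): when
 * create_object() is reached from a vmalloc() allocation done with
 * GFP_KERNEL | __GFP_HIGHMEM, masking the caller's flags with
 * GFP_KMEMLEAK_MASK drops __GFP_HIGHMEM, which would not be valid for the
 * slab allocation of the metadata, while preserving the sleeping/atomic
 * behaviour of the original caller.
 */
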
/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long offset;
	size_t length;
};

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * tree_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	spinlock_t lock;
	unsigned long flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct prio_tree_node tree_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* minimum number of pointers needed before it is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	unsigned long trace[MAX_TRACE];
	unsigned int trace_len;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set on newly allocated objects */
#define OBJECT_NEW		(1 << 3)

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* prio search tree for object boundaries */
static struct prio_tree_root object_tree_root;
/* rw_lock protecting the access to object_list and prio_tree_root */
static DEFINE_RWLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
/* set in the late_initcall if there were no errors */
static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
/* enables or disables early logging of the memory operations */
static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
/* set if a fatal kmemleak error has occurred */
static atomic_t kmemleak_error = ATOMIC_INIT(0);

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static signed long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);

/*
 * Early object allocation/freeing logging. Kmemleak is initialized after the
 * kernel allocator. However, both the kernel allocator and kmemleak may
 * allocate memory blocks which need to be tracked. Kmemleak defines an
 * arbitrary buffer to hold the allocation/freeing information before it is
 * fully initialized.
 */

/* kmemleak operation type for early logging */
enum {
	KMEMLEAK_ALLOC,
	KMEMLEAK_FREE,
	KMEMLEAK_FREE_PART,
	KMEMLEAK_NOT_LEAK,
	KMEMLEAK_IGNORE,
	KMEMLEAK_SCAN_AREA,
	KMEMLEAK_NO_SCAN
};

/*
 * Structure holding the information passed to kmemleak callbacks during the
 * early logging.
 */
struct early_log {
	int op_type;			/* kmemleak operation type */
	const void *ptr;		/* allocated/freed memory block */
	size_t size;			/* memory block size */
	int min_count;			/* minimum reference count */
	unsigned long offset;		/* scan area offset */
	size_t length;			/* scan area length */
};

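/*
 * Example of the early logging flow (illustrative only): an allocation
 * issued before kmemleak_init() has run is recorded by log_early() as
 * { KMEMLEAK_ALLOC, ptr, size, min_count } in the buffer below and
 * replayed by kmemleak_init() once object_cache and scan_area_cache exist.
 */
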
/* early logging buffer and current position */
static struct early_log early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE];
static int crt_early_log;

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {	\
	pr_warning(x);			\
	dump_stack();			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition occurred and cannot be
 * recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing is no longer available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *           sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *           (min_count == -1)
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static int color_white(const struct kmemleak_object *object)
{
	return object->count != -1 && object->count < object->min_count;
}

static int color_gray(const struct kmemleak_object *object)
{
	return object->min_count != -1 && object->count >= object->min_count;
}

static int color_black(const struct kmemleak_object *object)
{
	return object->min_count == -1;
}

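/*
 * Worked example of the coloring rules above: an object created via
 * kmalloc() gets min_count == 1. If a scan finds no pointers to it, count
 * stays at 0 < min_count, color_white() is true and the object becomes a
 * leak candidate. A later kmemleak_not_leak() call sets min_count to 0,
 * making color_gray() permanently true, while kmemleak_ignore() sets
 * min_count to -1 and the object turns black.
 */
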
/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static int unreferenced_object(struct kmemleak_object *object)
{
	return (object->flags & OBJECT_ALLOCATED) && color_white(object) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced objects information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;

	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
		   object->pointer, object->size);
	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
		   object->comm, object->pid, object->jiffies);
	seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < object->trace_len; i++) {
		void *ptr = (void *)object->trace[i];
		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases of kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	struct stack_trace trace;

	trace.nr_entries = object->trace_len;
	trace.entries = object->trace;

	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->tree_node.start, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%lx\n", object->flags);
	pr_notice("  backtrace:\n");
	print_stack_trace(&trace, 4);
}

/*
 * Look up the metadata (kmemleak_object) for a memory block in the priority
 * search tree based on a pointer value. If alias is 0, only values pointing
 * to the beginning of the memory block are allowed. The kmemleak_lock must
 * be held when calling this function.
 */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	struct prio_tree_node *node;
	struct prio_tree_iter iter;
	struct kmemleak_object *object;

	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
	node = prio_tree_next(&iter);
	if (node) {
		object = prio_tree_entry(node, struct kmemleak_object,
					 tree_node);
		if (!alias && object->pointer != ptr) {
			kmemleak_warn("Found object by alias\n");
			object = NULL;
		}
	} else
		object = NULL;

	return object;
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reached 0, the RCU freeing was already
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *elem, *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
		hlist_del(elem);
		kmem_cache_free(scan_area_cache, area);
	}
	kmem_cache_free(object_cache, object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free()
 * -> delete_object_full() path, the delayed RCU freeing ensures that there is
 * no recursive call to the kernel allocator. Lock-less RCU object_list
 * traversal is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	call_rcu(&object->rcu, free_object_rcu);
}

/*
 * Look up an object in the prio search tree and increase its use_count.
 */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	unsigned long flags;
	struct kmemleak_object *object = NULL;

	rcu_read_lock();
	read_lock_irqsave(&kmemleak_lock, flags);
	if (ptr >= min_addr && ptr < max_addr)
		object = lookup_object(ptr, alias);
	read_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root.
 */
static void create_object(unsigned long ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct prio_tree_node *node;
	struct stack_trace trace;

	object = kmem_cache_alloc(object_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!object) {
		kmemleak_stop("Cannot allocate a kmemleak_object structure\n");
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | OBJECT_NEW;
	object->pointer = ptr;
	object->size = size;
	object->min_count = min_count;
	object->count = -1;			/* no color initially */
	object->jiffies = jiffies;

	/* task information */
	if (in_irq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	trace.max_entries = MAX_TRACE;
	trace.nr_entries = 0;
	trace.entries = object->trace;
	trace.skip = 1;
	save_stack_trace(&trace);
	object->trace_len = trace.nr_entries;

	INIT_PRIO_TREE_NODE(&object->tree_node);
	object->tree_node.start = ptr;
	object->tree_node.last = ptr + size - 1;

	write_lock_irqsave(&kmemleak_lock, flags);
	min_addr = min(min_addr, ptr);
	max_addr = max(max_addr, ptr + size);
	node = prio_tree_insert(&object_tree_root, &object->tree_node);
	/*
	 * The code calling the kernel does not yet have the pointer to the
	 * memory block to be able to free it. However, we still hold the
	 * kmemleak_lock here in case parts of the kernel started freeing
	 * random memory blocks.
	 */
	if (node != &object->tree_node) {
		unsigned long flags;

		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
			      "(already existing)\n", ptr);
		object = lookup_object(ptr, 1);
		spin_lock_irqsave(&object->lock, flags);
		dump_object_info(object);
		spin_unlock_irqrestore(&object->lock, flags);

		goto out;
	}
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	write_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Remove the metadata (struct kmemleak_object) for a memory block from the
 * object_list and object_tree_root and decrement its use_count.
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	write_lock_irqsave(&kmemleak_lock, flags);
	prio_tree_remove(&object_tree_root, &object->tree_node);
	list_del_rcu(&object->object_list);
	write_unlock_irqrestore(&kmemleak_lock, flags);

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 2);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_get_object(ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
			      "(size %zu)\n", ptr, size);
#endif
		return;
	}
	__delete_object(object);

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called. The path below is
	 * only executed during early log recording in kmemleak_init(), so
	 * GFP_KERNEL is enough.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		create_object(start, ptr - start, object->min_count,
			      GFP_KERNEL);
	if (ptr + size < end)
		create_object(ptr + size, end - ptr - size, object->min_count,
			      GFP_KERNEL);

	put_object(object);
}
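
/*
 * Worked example of the block split above (illustrative): for an object at
 * 0x1000 with size 0x100, kmemleak_free_part(0x1040, 0x20) deletes the
 * original object and creates two new ones, [0x1000, 0x1040) of size 0x40
 * and [0x1060, 0x1100) of size 0xa0.
 */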

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Graying unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = 0;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Blacking unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->min_count = -1;
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, unsigned long offset,
			  size_t length, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	area = kmem_cache_alloc(scan_area_cache, gfp & GFP_KMEMLEAK_MASK);
	if (!area) {
		kmemleak_warn("Cannot allocate a scan area\n");
		goto out;
	}

	spin_lock_irqsave(&object->lock, flags);
	if (offset + length > object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->offset = offset;
	area->length = length;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	spin_unlock_irqrestore(&object->lock, flags);
out:
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak but references
 * to it are searched.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 * processed later once kmemleak is fully initialized.
 */
static void log_early(int op_type, const void *ptr, size_t size,
		      int min_count, unsigned long offset, size_t length)
{
	unsigned long flags;
	struct early_log *log;

	if (crt_early_log >= ARRAY_SIZE(early_log)) {
		pr_warning("Early log buffer exceeded\n");
		kmemleak_disable();
		return;
	}

	/*
	 * There is no need for locking since the kernel is still in UP mode
	 * at this stage. Disabling the IRQs is enough.
	 */
	local_irq_save(flags);
	log = &early_log[crt_early_log];
	log->op_type = op_type;
	log->ptr = ptr;
	log->size = size;
	log->min_count = min_count;
	log->offset = offset;
	log->length = length;
	crt_early_log++;
	local_irq_restore(flags);
}

/*
 * Memory allocation function callback. This function is called from the
 * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc,
 * vmalloc etc.).
 */
void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);

/*
 * Memory freeing function callback. This function is called from the kernel
 * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.).
 */
void kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/*
 * Partial memory freeing function callback. This function is usually called
 * from the bootmem allocator when (part of) a memory block is freed.
 */
void kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_FREE_PART, ptr, size, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/*
 * Mark an already allocated memory block as a false positive. This will cause
 * the block to no longer be reported as a leak and always be scanned.
 */
void kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_not_leak);

/*
 * Ignore a memory block. This is usually done when it is known that the
 * corresponding block is not a leak and does not contain any references to
 * other allocated memory blocks.
 */
void kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_ignore);

/*
 * Limit the range to be scanned in an allocated memory block.
 */
void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length,
			gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, offset, length, gfp);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length);
}
EXPORT_SYMBOL(kmemleak_scan_area);

/*
 * Inform kmemleak not to scan the given memory block.
 */
void kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
	else if (atomic_read(&kmemleak_early_log))
		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0);
}
EXPORT_SYMBOL(kmemleak_no_scan);

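/*
 * Illustrative use of the callbacks above by a hypothetical driver (sketch
 * only; the names are made up):
 *
 *	priv = kmalloc(sizeof(*priv), GFP_KERNEL);   traced automatically
 *	kmemleak_not_leak(priv);      pointer only kept in a device register,
 *	                              never report it as a leak
 *	kmemleak_no_scan(priv->buf);  buffer contains no kernel pointers
 *	...
 *	kfree(priv);                  kmemleak_free() removes the metadata
 */
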
/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!atomic_read(&kmemleak_enabled))
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned, int allow_resched)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);

	for (ptr = start; ptr < end; ptr++) {
		unsigned long flags;
		unsigned long pointer = *ptr;
		struct kmemleak_object *object;

		if (allow_resched)
			cond_resched();
		if (scan_should_stop())
			break;

		object = find_and_get_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned) {
			/* self referenced, ignore */
			put_object(object);
			continue;
		}

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		spin_lock_irqsave_nested(&object->lock, flags,
					 SINGLE_DEPTH_NESTING);
		if (!color_white(object)) {
			/* non-orphan, ignored or new */
			spin_unlock_irqrestore(&object->lock, flags);
			put_object(object);
			continue;
		}

		/*
		 * Increase the object's reference count (number of pointers
		 * to the memory block). If this count reaches the required
		 * minimum, the object's color will become gray and it will be
		 * added to the gray_list.
		 */
		object->count++;
		if (color_gray(object))
			list_add_tail(&object->gray_list, &gray_list);
		else
			put_object(object);
		spin_unlock_irqrestore(&object->lock, flags);
	}
}

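/*
 * Worked example of the pointer alignment above (assuming a 64-bit system,
 * so BYTES_PER_POINTER == 8): scanning the range [0x1003, 0x1023) aligns
 * start up to 0x1008 and clamps end to 0x101c, so the words read begin at
 * 0x1008, 0x1010 and 0x1018, i.e. only fully contained, naturally aligned
 * words are interpreted as candidate pointers.
 */
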
/*
 * Scan a memory block corresponding to a kmemleak_object. A condition is
 * that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	struct hlist_node *elem;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in __delete_object).
	 */
	spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;
	if (hlist_empty(&object->area_list)) {
		void *start = (void *)object->pointer;
		void *end = (void *)(object->pointer + object->size);

		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
		       !(object->flags & OBJECT_NO_SCAN)) {
			scan_block(start, min(start + MAX_SCAN_SIZE, end),
				   object, 0);
			start += MAX_SCAN_SIZE;

			spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			spin_lock_irqsave(&object->lock, flags);
		}
	} else
		hlist_for_each_entry(area, elem, &object->area_list, node)
			scan_block((void *)(object->pointer + area->offset),
				   (void *)(object->pointer + area->offset
					    + area->length), object, 0);
out:
	spin_unlock_irqrestore(&object->lock, flags);
}

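/*
 * Example of the chunked scanning above: a 10000-byte object is scanned as
 * three blocks of 4096, 4096 and 1808 bytes, with object->lock dropped and
 * cond_resched() called between chunks so that large objects do not cause
 * excessive scheduling latencies.
 */
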
/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	unsigned long flags;
	struct kmemleak_object *object, *tmp;
	struct task_struct *task;
	int i;
	int new_leaks = 0;
	int gray_list_pass = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif
		/* reset the reference count (whiten the object) */
		object->count = 0;
		object->flags &= ~OBJECT_NEW;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	/* data/bss scanning */
	scan_block(_sdata, _edata, NULL, 1);
	scan_block(__bss_start, __bss_stop, NULL, 1);

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_block(__per_cpu_start + per_cpu_offset(i),
			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
#endif

	/*
	 * Struct page scanning for each node. The code below is not yet safe
	 * with MEMORY_HOTPLUG.
	 */
	for_each_online_node(i) {
		pg_data_t *pgdat = NODE_DATA(i);
		unsigned long start_pfn = pgdat->node_start_pfn;
		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page;

			if (!pfn_valid(pfn))
				continue;
			page = pfn_to_page(pfn);
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL, 1);
		}
	}

	/*
	 * Scanning the task stacks may introduce false negatives and it is
	 * not enabled by default.
	 */
	if (kmemleak_stack_scan) {
		read_lock(&tasklist_lock);
		for_each_process(task)
			scan_block(task_stack_page(task),
				   task_stack_page(task) + THREAD_SIZE,
				   NULL, 0);
		read_unlock(&tasklist_lock);
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above. More objects will be referenced and, if there are no memory
	 * leaks, all the objects will be scanned. The list traversal is safe
	 * for both tail additions and removals from inside the loop. The
	 * kmemleak objects cannot be freed from outside the loop because their
	 * use_count was increased.
	 */
repeat:
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}

	if (scan_should_stop() || ++gray_list_pass >= GRAY_LIST_PASSES)
		goto scan_end;

	/*
	 * Check for new objects allocated during this scanning and add them
	 * to the gray list.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if ((object->flags & OBJECT_NEW) && !color_black(object) &&
		    get_object(object)) {
			object->flags &= ~OBJECT_NEW;
			list_add_tail(&object->gray_list, &gray_list);
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (!list_empty(&gray_list))
		goto repeat;

scan_end:
	WARN_ON(!list_empty(&gray_list));

	/*
	 * If scanning was stopped or new objects were being allocated at a
	 * higher rate than gray list scanning, do not report any new
	 * unreferenced objects.
	 */
	if (scan_should_stop() || gray_list_pass >= GRAY_LIST_PASSES)
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		spin_lock_irqsave(&object->lock, flags);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;
			new_leaks++;
		}
		spin_unlock_irqrestore(&object->lock, flags);
	}
	rcu_read_unlock();

	if (new_leaks)
		pr_info("%d new suspected memory leaks (see "
			"/sys/kernel/debug/kmemleak)\n", new_leaks);
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = 1;

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		first_run = 0;
		ssleep(SECS_FIRST_SCAN);
	}

	while (!kthread_should_stop()) {
		signed long timeout = jiffies_scan_wait;

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warning("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct list_head *n = &prev_obj->object_list;

	++(*pos);

	list_for_each_continue_rcu(n, &object_list) {
		next_obj = list_entry(n, struct kmemleak_object, object_list);
		if (get_object(next_obj))
			break;
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	if (!atomic_read(&kmemleak_enabled))
		return -EBUSY;

	return seq_open(file, &kmemleak_seq_ops);
}

static int kmemleak_release(struct inode *inode, struct file *file)
{
	return seq_release(inode, file);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	addr = simple_strtoul(str, NULL, 0);
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned long secs;

		ret = strict_strtoul(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;
		stop_scan_thread();
		if (secs) {
			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

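/*
 * Example session for the commands above (the mount step is described in
 * Documentation/kmemleak.txt):
 *
 *	# mount -t debugfs nodev /sys/kernel/debug/
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 */
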
static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= kmemleak_release,
};

/*
 * Perform the freeing of the kmemleak internal objects after waiting for any
 * current memory scan to complete.
 */
static int kmemleak_cleanup_thread(void *arg)
{
	struct kmemleak_object *object;

	mutex_lock(&scan_mutex);
	stop_scan_thread();

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list)
		delete_object_full(object->pointer);
	rcu_read_unlock();
	mutex_unlock(&scan_mutex);

	return 0;
}

/*
 * Start the clean-up thread.
 */
static void kmemleak_cleanup(void)
{
	struct task_struct *cleanup_thread;

	cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL,
				     "kmemleak-clean");
	if (IS_ERR(cleanup_thread))
		pr_warning("Failed to create the clean-up thread\n");
}

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	atomic_set(&kmemleak_early_log, 0);
	atomic_set(&kmemleak_enabled, 0);

	/* check whether it is too early for a kernel thread */
	if (atomic_read(&kmemleak_initialized))
		kmemleak_cleanup();

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") != 0)
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
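
/*
 * Example: booting with "kmemleak=off" on the kernel command line turns the
 * detector off before any tracking starts.
 */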

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
	int i;
	unsigned long flags;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
	INIT_PRIO_TREE_ROOT(&object_tree_root);

	/* the kernel is still in UP mode, so disabling the IRQs is enough */
	local_irq_save(flags);
	if (!atomic_read(&kmemleak_error)) {
		atomic_set(&kmemleak_enabled, 1);
		atomic_set(&kmemleak_early_log, 0);
	}
	local_irq_restore(flags);

	/*
	 * This is the point where tracking allocations is safe. Automatic
	 * scanning is started during the late initcall. Add the early logged
	 * callbacks to the kmemleak infrastructure.
	 */
	for (i = 0; i < crt_early_log; i++) {
		struct early_log *log = &early_log[i];

		switch (log->op_type) {
		case KMEMLEAK_ALLOC:
			kmemleak_alloc(log->ptr, log->size, log->min_count,
				       GFP_KERNEL);
			break;
		case KMEMLEAK_FREE:
			kmemleak_free(log->ptr);
			break;
		case KMEMLEAK_FREE_PART:
			kmemleak_free_part(log->ptr, log->size);
			break;
		case KMEMLEAK_NOT_LEAK:
			kmemleak_not_leak(log->ptr);
			break;
		case KMEMLEAK_IGNORE:
			kmemleak_ignore(log->ptr);
			break;
		case KMEMLEAK_SCAN_AREA:
			kmemleak_scan_area(log->ptr, log->offset, log->length,
					   GFP_KERNEL);
			break;
		case KMEMLEAK_NO_SCAN:
			kmemleak_no_scan(log->ptr);
			break;
		default:
			WARN_ON(1);
		}
	}
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	struct dentry *dentry;

	atomic_set(&kmemleak_initialized, 1);

	if (atomic_read(&kmemleak_error)) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		kmemleak_cleanup();
		return -ENOMEM;
	}

	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
				     &kmemleak_fops);
	if (!dentry)
		pr_warning("Failed to create the debugfs kmemleak file\n");
	mutex_lock(&scan_mutex);
	start_scan_thread();
	mutex_unlock(&scan_mutex);

	pr_info("Kernel memory leak detector initialized\n");

	return 0;
}
late_initcall(kmemleak_late_init);