/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licencing details see kernel-base/COPYING
 */
11 #define pr_fmt(fmt) "ODEBUG: " fmt
13 #include <linux/debugobjects.h>
14 #include <linux/interrupt.h>
15 #include <linux/sched.h>
16 #include <linux/seq_file.h>
17 #include <linux/debugfs.h>
18 #include <linux/slab.h>
19 #include <linux/hash.h>
21 #define ODEBUG_HASH_BITS 14
22 #define ODEBUG_HASH_SIZE (1 << ODEBUG_HASH_BITS)
24 #define ODEBUG_POOL_SIZE 1024
25 #define ODEBUG_POOL_MIN_LEVEL 256
27 #define ODEBUG_CHUNK_SHIFT PAGE_SHIFT
28 #define ODEBUG_CHUNK_SIZE (1 << ODEBUG_CHUNK_SHIFT)
29 #define ODEBUG_CHUNK_MASK (~(ODEBUG_CHUNK_SIZE - 1))
32 struct hlist_head list
;
36 static struct debug_bucket obj_hash
[ODEBUG_HASH_SIZE
];
38 static struct debug_obj obj_static_pool
[ODEBUG_POOL_SIZE
] __initdata
;
40 static DEFINE_RAW_SPINLOCK(pool_lock
);
42 static HLIST_HEAD(obj_pool
);
44 static int obj_pool_min_free
= ODEBUG_POOL_SIZE
;
45 static int obj_pool_free
= ODEBUG_POOL_SIZE
;
46 static int obj_pool_used
;
47 static int obj_pool_max_used
;
48 static struct kmem_cache
*obj_cache
;
50 static int debug_objects_maxchain __read_mostly
;
51 static int debug_objects_fixups __read_mostly
;
52 static int debug_objects_warnings __read_mostly
;
53 static int debug_objects_enabled __read_mostly
54 = CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT
;
55 static int debug_objects_pool_size __read_mostly
57 static int debug_objects_pool_min_level __read_mostly
58 = ODEBUG_POOL_MIN_LEVEL
;
59 static struct debug_obj_descr
*descr_test __read_mostly
;
62 * Track numbers of kmem_cache_alloc()/free() calls done.
64 static int debug_objects_allocated
;
65 static int debug_objects_freed
;
67 static void free_obj_work(struct work_struct
*work
);
68 static DECLARE_WORK(debug_obj_work
, free_obj_work
);
70 static int __init
enable_object_debug(char *str
)
72 debug_objects_enabled
= 1;
76 static int __init
disable_object_debug(char *str
)
78 debug_objects_enabled
= 0;
82 early_param("debug_objects", enable_object_debug
);
83 early_param("no_debug_objects", disable_object_debug
);
85 static const char *obj_states
[ODEBUG_STATE_MAX
] = {
86 [ODEBUG_STATE_NONE
] = "none",
87 [ODEBUG_STATE_INIT
] = "initialized",
88 [ODEBUG_STATE_INACTIVE
] = "inactive",
89 [ODEBUG_STATE_ACTIVE
] = "active",
90 [ODEBUG_STATE_DESTROYED
] = "destroyed",
91 [ODEBUG_STATE_NOTAVAILABLE
] = "not available",
94 static void fill_pool(void)
96 gfp_t gfp
= GFP_ATOMIC
| __GFP_NORETRY
| __GFP_NOWARN
;
97 struct debug_obj
*new;
100 if (likely(obj_pool_free
>= debug_objects_pool_min_level
))
103 if (unlikely(!obj_cache
))
106 while (obj_pool_free
< debug_objects_pool_min_level
) {
108 new = kmem_cache_zalloc(obj_cache
, gfp
);
112 raw_spin_lock_irqsave(&pool_lock
, flags
);
113 hlist_add_head(&new->node
, &obj_pool
);
114 debug_objects_allocated
++;
116 raw_spin_unlock_irqrestore(&pool_lock
, flags
);
121 * Lookup an object in the hash bucket.
123 static struct debug_obj
*lookup_object(void *addr
, struct debug_bucket
*b
)
125 struct debug_obj
*obj
;
128 hlist_for_each_entry(obj
, &b
->list
, node
) {
130 if (obj
->object
== addr
)
133 if (cnt
> debug_objects_maxchain
)
134 debug_objects_maxchain
= cnt
;
140 * Allocate a new object. If the pool is empty, switch off the debugger.
141 * Must be called with interrupts disabled.
143 static struct debug_obj
*
144 alloc_object(void *addr
, struct debug_bucket
*b
, struct debug_obj_descr
*descr
)
146 struct debug_obj
*obj
= NULL
;
148 raw_spin_lock(&pool_lock
);
149 if (obj_pool
.first
) {
150 obj
= hlist_entry(obj_pool
.first
, typeof(*obj
), node
);
154 obj
->state
= ODEBUG_STATE_NONE
;
156 hlist_del(&obj
->node
);
158 hlist_add_head(&obj
->node
, &b
->list
);
161 if (obj_pool_used
> obj_pool_max_used
)
162 obj_pool_max_used
= obj_pool_used
;
165 if (obj_pool_free
< obj_pool_min_free
)
166 obj_pool_min_free
= obj_pool_free
;
168 raw_spin_unlock(&pool_lock
);
174 * workqueue function to free objects.
176 * To reduce contention on the global pool_lock, the actual freeing of
177 * debug objects will be delayed if the pool_lock is busy. We also free
178 * the objects in a batch of 4 for each lock/unlock cycle.
180 #define ODEBUG_FREE_BATCH 4
182 static void free_obj_work(struct work_struct
*work
)
184 struct debug_obj
*objs
[ODEBUG_FREE_BATCH
];
188 if (!raw_spin_trylock_irqsave(&pool_lock
, flags
))
190 while (obj_pool_free
>= debug_objects_pool_size
+ ODEBUG_FREE_BATCH
) {
191 for (i
= 0; i
< ODEBUG_FREE_BATCH
; i
++) {
192 objs
[i
] = hlist_entry(obj_pool
.first
,
193 typeof(*objs
[0]), node
);
194 hlist_del(&objs
[i
]->node
);
197 obj_pool_free
-= ODEBUG_FREE_BATCH
;
198 debug_objects_freed
+= ODEBUG_FREE_BATCH
;
200 * We release pool_lock across kmem_cache_free() to
201 * avoid contention on pool_lock.
203 raw_spin_unlock_irqrestore(&pool_lock
, flags
);
204 for (i
= 0; i
< ODEBUG_FREE_BATCH
; i
++)
205 kmem_cache_free(obj_cache
, objs
[i
]);
206 if (!raw_spin_trylock_irqsave(&pool_lock
, flags
))
209 raw_spin_unlock_irqrestore(&pool_lock
, flags
);
213 * Put the object back into the pool and schedule work to free objects
216 static void free_object(struct debug_obj
*obj
)
221 raw_spin_lock_irqsave(&pool_lock
, flags
);
223 * schedule work when the pool is filled and the cache is
226 if (obj_pool_free
> debug_objects_pool_size
&& obj_cache
)
228 hlist_add_head(&obj
->node
, &obj_pool
);
231 raw_spin_unlock_irqrestore(&pool_lock
, flags
);
233 schedule_work(&debug_obj_work
);
237 * We run out of memory. That means we probably have tons of objects
240 static void debug_objects_oom(void)
242 struct debug_bucket
*db
= obj_hash
;
243 struct hlist_node
*tmp
;
244 HLIST_HEAD(freelist
);
245 struct debug_obj
*obj
;
249 pr_warn("Out of memory. ODEBUG disabled\n");
251 for (i
= 0; i
< ODEBUG_HASH_SIZE
; i
++, db
++) {
252 raw_spin_lock_irqsave(&db
->lock
, flags
);
253 hlist_move_list(&db
->list
, &freelist
);
254 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
257 hlist_for_each_entry_safe(obj
, tmp
, &freelist
, node
) {
258 hlist_del(&obj
->node
);
265 * We use the pfn of the address for the hash. That way we can check
266 * for freed objects simply by checking the affected bucket.
268 static struct debug_bucket
*get_bucket(unsigned long addr
)
272 hash
= hash_long((addr
>> ODEBUG_CHUNK_SHIFT
), ODEBUG_HASH_BITS
);
273 return &obj_hash
[hash
];
276 static void debug_print_object(struct debug_obj
*obj
, char *msg
)
278 struct debug_obj_descr
*descr
= obj
->descr
;
281 if (limit
< 5 && descr
!= descr_test
) {
282 void *hint
= descr
->debug_hint
?
283 descr
->debug_hint(obj
->object
) : NULL
;
285 WARN(1, KERN_ERR
"ODEBUG: %s %s (active state %u) "
286 "object type: %s hint: %pS\n",
287 msg
, obj_states
[obj
->state
], obj
->astate
,
290 debug_objects_warnings
++;
294 * Try to repair the damage, so we have a better chance to get useful
298 debug_object_fixup(bool (*fixup
)(void *addr
, enum debug_obj_state state
),
299 void * addr
, enum debug_obj_state state
)
301 if (fixup
&& fixup(addr
, state
)) {
302 debug_objects_fixups
++;
/* Warn (rate-limited) when stack annotation disagrees with reality. */
static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object is on stack, but not annotated\n");
	else
		pr_warn("object is not on stack, but annotated\n");
	WARN_ON(1);
}
329 __debug_object_init(void *addr
, struct debug_obj_descr
*descr
, int onstack
)
331 enum debug_obj_state state
;
332 struct debug_bucket
*db
;
333 struct debug_obj
*obj
;
338 db
= get_bucket((unsigned long) addr
);
340 raw_spin_lock_irqsave(&db
->lock
, flags
);
342 obj
= lookup_object(addr
, db
);
344 obj
= alloc_object(addr
, db
, descr
);
346 debug_objects_enabled
= 0;
347 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
351 debug_object_is_on_stack(addr
, onstack
);
354 switch (obj
->state
) {
355 case ODEBUG_STATE_NONE
:
356 case ODEBUG_STATE_INIT
:
357 case ODEBUG_STATE_INACTIVE
:
358 obj
->state
= ODEBUG_STATE_INIT
;
361 case ODEBUG_STATE_ACTIVE
:
362 debug_print_object(obj
, "init");
364 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
365 debug_object_fixup(descr
->fixup_init
, addr
, state
);
368 case ODEBUG_STATE_DESTROYED
:
369 debug_print_object(obj
, "init");
375 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
379 * debug_object_init - debug checks when an object is initialized
380 * @addr: address of the object
381 * @descr: pointer to an object specific debug description structure
383 void debug_object_init(void *addr
, struct debug_obj_descr
*descr
)
385 if (!debug_objects_enabled
)
388 __debug_object_init(addr
, descr
, 0);
390 EXPORT_SYMBOL_GPL(debug_object_init
);
393 * debug_object_init_on_stack - debug checks when an object on stack is
395 * @addr: address of the object
396 * @descr: pointer to an object specific debug description structure
398 void debug_object_init_on_stack(void *addr
, struct debug_obj_descr
*descr
)
400 if (!debug_objects_enabled
)
403 __debug_object_init(addr
, descr
, 1);
405 EXPORT_SYMBOL_GPL(debug_object_init_on_stack
);
408 * debug_object_activate - debug checks when an object is activated
409 * @addr: address of the object
410 * @descr: pointer to an object specific debug description structure
411 * Returns 0 for success, -EINVAL for check failed.
413 int debug_object_activate(void *addr
, struct debug_obj_descr
*descr
)
415 enum debug_obj_state state
;
416 struct debug_bucket
*db
;
417 struct debug_obj
*obj
;
420 struct debug_obj o
= { .object
= addr
,
421 .state
= ODEBUG_STATE_NOTAVAILABLE
,
424 if (!debug_objects_enabled
)
427 db
= get_bucket((unsigned long) addr
);
429 raw_spin_lock_irqsave(&db
->lock
, flags
);
431 obj
= lookup_object(addr
, db
);
433 switch (obj
->state
) {
434 case ODEBUG_STATE_INIT
:
435 case ODEBUG_STATE_INACTIVE
:
436 obj
->state
= ODEBUG_STATE_ACTIVE
;
440 case ODEBUG_STATE_ACTIVE
:
441 debug_print_object(obj
, "activate");
443 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
444 ret
= debug_object_fixup(descr
->fixup_activate
, addr
, state
);
445 return ret
? 0 : -EINVAL
;
447 case ODEBUG_STATE_DESTROYED
:
448 debug_print_object(obj
, "activate");
455 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
459 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
461 * We are here when a static object is activated. We
462 * let the type specific code confirm whether this is
463 * true or not. if true, we just make sure that the
464 * static object is tracked in the object tracker. If
465 * not, this must be a bug, so we try to fix it up.
467 if (descr
->is_static_object
&& descr
->is_static_object(addr
)) {
468 /* track this static object */
469 debug_object_init(addr
, descr
);
470 debug_object_activate(addr
, descr
);
472 debug_print_object(&o
, "activate");
473 ret
= debug_object_fixup(descr
->fixup_activate
, addr
,
474 ODEBUG_STATE_NOTAVAILABLE
);
475 return ret
? 0 : -EINVAL
;
479 EXPORT_SYMBOL_GPL(debug_object_activate
);
482 * debug_object_deactivate - debug checks when an object is deactivated
483 * @addr: address of the object
484 * @descr: pointer to an object specific debug description structure
486 void debug_object_deactivate(void *addr
, struct debug_obj_descr
*descr
)
488 struct debug_bucket
*db
;
489 struct debug_obj
*obj
;
492 if (!debug_objects_enabled
)
495 db
= get_bucket((unsigned long) addr
);
497 raw_spin_lock_irqsave(&db
->lock
, flags
);
499 obj
= lookup_object(addr
, db
);
501 switch (obj
->state
) {
502 case ODEBUG_STATE_INIT
:
503 case ODEBUG_STATE_INACTIVE
:
504 case ODEBUG_STATE_ACTIVE
:
506 obj
->state
= ODEBUG_STATE_INACTIVE
;
508 debug_print_object(obj
, "deactivate");
511 case ODEBUG_STATE_DESTROYED
:
512 debug_print_object(obj
, "deactivate");
518 struct debug_obj o
= { .object
= addr
,
519 .state
= ODEBUG_STATE_NOTAVAILABLE
,
522 debug_print_object(&o
, "deactivate");
525 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
527 EXPORT_SYMBOL_GPL(debug_object_deactivate
);
530 * debug_object_destroy - debug checks when an object is destroyed
531 * @addr: address of the object
532 * @descr: pointer to an object specific debug description structure
534 void debug_object_destroy(void *addr
, struct debug_obj_descr
*descr
)
536 enum debug_obj_state state
;
537 struct debug_bucket
*db
;
538 struct debug_obj
*obj
;
541 if (!debug_objects_enabled
)
544 db
= get_bucket((unsigned long) addr
);
546 raw_spin_lock_irqsave(&db
->lock
, flags
);
548 obj
= lookup_object(addr
, db
);
552 switch (obj
->state
) {
553 case ODEBUG_STATE_NONE
:
554 case ODEBUG_STATE_INIT
:
555 case ODEBUG_STATE_INACTIVE
:
556 obj
->state
= ODEBUG_STATE_DESTROYED
;
558 case ODEBUG_STATE_ACTIVE
:
559 debug_print_object(obj
, "destroy");
561 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
562 debug_object_fixup(descr
->fixup_destroy
, addr
, state
);
565 case ODEBUG_STATE_DESTROYED
:
566 debug_print_object(obj
, "destroy");
572 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
574 EXPORT_SYMBOL_GPL(debug_object_destroy
);
577 * debug_object_free - debug checks when an object is freed
578 * @addr: address of the object
579 * @descr: pointer to an object specific debug description structure
581 void debug_object_free(void *addr
, struct debug_obj_descr
*descr
)
583 enum debug_obj_state state
;
584 struct debug_bucket
*db
;
585 struct debug_obj
*obj
;
588 if (!debug_objects_enabled
)
591 db
= get_bucket((unsigned long) addr
);
593 raw_spin_lock_irqsave(&db
->lock
, flags
);
595 obj
= lookup_object(addr
, db
);
599 switch (obj
->state
) {
600 case ODEBUG_STATE_ACTIVE
:
601 debug_print_object(obj
, "free");
603 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
604 debug_object_fixup(descr
->fixup_free
, addr
, state
);
607 hlist_del(&obj
->node
);
608 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
613 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
615 EXPORT_SYMBOL_GPL(debug_object_free
);
618 * debug_object_assert_init - debug checks when object should be init-ed
619 * @addr: address of the object
620 * @descr: pointer to an object specific debug description structure
622 void debug_object_assert_init(void *addr
, struct debug_obj_descr
*descr
)
624 struct debug_bucket
*db
;
625 struct debug_obj
*obj
;
628 if (!debug_objects_enabled
)
631 db
= get_bucket((unsigned long) addr
);
633 raw_spin_lock_irqsave(&db
->lock
, flags
);
635 obj
= lookup_object(addr
, db
);
637 struct debug_obj o
= { .object
= addr
,
638 .state
= ODEBUG_STATE_NOTAVAILABLE
,
641 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
643 * Maybe the object is static, and we let the type specific
644 * code confirm. Track this static object if true, else invoke
647 if (descr
->is_static_object
&& descr
->is_static_object(addr
)) {
648 /* Track this static object */
649 debug_object_init(addr
, descr
);
651 debug_print_object(&o
, "assert_init");
652 debug_object_fixup(descr
->fixup_assert_init
, addr
,
653 ODEBUG_STATE_NOTAVAILABLE
);
658 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
660 EXPORT_SYMBOL_GPL(debug_object_assert_init
);
663 * debug_object_active_state - debug checks object usage state machine
664 * @addr: address of the object
665 * @descr: pointer to an object specific debug description structure
666 * @expect: expected state
667 * @next: state to move to if expected state is found
670 debug_object_active_state(void *addr
, struct debug_obj_descr
*descr
,
671 unsigned int expect
, unsigned int next
)
673 struct debug_bucket
*db
;
674 struct debug_obj
*obj
;
677 if (!debug_objects_enabled
)
680 db
= get_bucket((unsigned long) addr
);
682 raw_spin_lock_irqsave(&db
->lock
, flags
);
684 obj
= lookup_object(addr
, db
);
686 switch (obj
->state
) {
687 case ODEBUG_STATE_ACTIVE
:
688 if (obj
->astate
== expect
)
691 debug_print_object(obj
, "active_state");
695 debug_print_object(obj
, "active_state");
699 struct debug_obj o
= { .object
= addr
,
700 .state
= ODEBUG_STATE_NOTAVAILABLE
,
703 debug_print_object(&o
, "active_state");
706 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
708 EXPORT_SYMBOL_GPL(debug_object_active_state
);
710 #ifdef CONFIG_DEBUG_OBJECTS_FREE
711 static void __debug_check_no_obj_freed(const void *address
, unsigned long size
)
713 unsigned long flags
, oaddr
, saddr
, eaddr
, paddr
, chunks
;
714 struct hlist_node
*tmp
;
715 HLIST_HEAD(freelist
);
716 struct debug_obj_descr
*descr
;
717 enum debug_obj_state state
;
718 struct debug_bucket
*db
;
719 struct debug_obj
*obj
;
722 saddr
= (unsigned long) address
;
723 eaddr
= saddr
+ size
;
724 paddr
= saddr
& ODEBUG_CHUNK_MASK
;
725 chunks
= ((eaddr
- paddr
) + (ODEBUG_CHUNK_SIZE
- 1));
726 chunks
>>= ODEBUG_CHUNK_SHIFT
;
728 for (;chunks
> 0; chunks
--, paddr
+= ODEBUG_CHUNK_SIZE
) {
729 db
= get_bucket(paddr
);
733 raw_spin_lock_irqsave(&db
->lock
, flags
);
734 hlist_for_each_entry_safe(obj
, tmp
, &db
->list
, node
) {
736 oaddr
= (unsigned long) obj
->object
;
737 if (oaddr
< saddr
|| oaddr
>= eaddr
)
740 switch (obj
->state
) {
741 case ODEBUG_STATE_ACTIVE
:
742 debug_print_object(obj
, "free");
745 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
746 debug_object_fixup(descr
->fixup_free
,
747 (void *) oaddr
, state
);
750 hlist_del(&obj
->node
);
751 hlist_add_head(&obj
->node
, &freelist
);
755 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
758 hlist_for_each_entry_safe(obj
, tmp
, &freelist
, node
) {
759 hlist_del(&obj
->node
);
763 if (cnt
> debug_objects_maxchain
)
764 debug_objects_maxchain
= cnt
;
768 void debug_check_no_obj_freed(const void *address
, unsigned long size
)
770 if (debug_objects_enabled
)
771 __debug_check_no_obj_freed(address
, size
);
775 #ifdef CONFIG_DEBUG_FS
777 static int debug_stats_show(struct seq_file
*m
, void *v
)
779 seq_printf(m
, "max_chain :%d\n", debug_objects_maxchain
);
780 seq_printf(m
, "warnings :%d\n", debug_objects_warnings
);
781 seq_printf(m
, "fixups :%d\n", debug_objects_fixups
);
782 seq_printf(m
, "pool_free :%d\n", obj_pool_free
);
783 seq_printf(m
, "pool_min_free :%d\n", obj_pool_min_free
);
784 seq_printf(m
, "pool_used :%d\n", obj_pool_used
);
785 seq_printf(m
, "pool_max_used :%d\n", obj_pool_max_used
);
786 seq_printf(m
, "objs_allocated:%d\n", debug_objects_allocated
);
787 seq_printf(m
, "objs_freed :%d\n", debug_objects_freed
);
791 static int debug_stats_open(struct inode
*inode
, struct file
*filp
)
793 return single_open(filp
, debug_stats_show
, NULL
);
796 static const struct file_operations debug_stats_fops
= {
797 .open
= debug_stats_open
,
800 .release
= single_release
,
803 static int __init
debug_objects_init_debugfs(void)
805 struct dentry
*dbgdir
, *dbgstats
;
807 if (!debug_objects_enabled
)
810 dbgdir
= debugfs_create_dir("debug_objects", NULL
);
814 dbgstats
= debugfs_create_file("stats", 0444, dbgdir
, NULL
,
822 debugfs_remove(dbgdir
);
826 __initcall(debug_objects_init_debugfs
);
829 static inline void debug_objects_init_debugfs(void) { }
832 #ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
834 /* Random data structure for the self test */
836 unsigned long dummy1
[6];
838 unsigned long dummy2
[3];
841 static __initdata
struct debug_obj_descr descr_type_test
;
843 static bool __init
is_static_object(void *addr
)
845 struct self_test
*obj
= addr
;
847 return obj
->static_init
;
851 * fixup_init is called when:
852 * - an active object is initialized
854 static bool __init
fixup_init(void *addr
, enum debug_obj_state state
)
856 struct self_test
*obj
= addr
;
859 case ODEBUG_STATE_ACTIVE
:
860 debug_object_deactivate(obj
, &descr_type_test
);
861 debug_object_init(obj
, &descr_type_test
);
869 * fixup_activate is called when:
870 * - an active object is activated
871 * - an unknown non-static object is activated
873 static bool __init
fixup_activate(void *addr
, enum debug_obj_state state
)
875 struct self_test
*obj
= addr
;
878 case ODEBUG_STATE_NOTAVAILABLE
:
880 case ODEBUG_STATE_ACTIVE
:
881 debug_object_deactivate(obj
, &descr_type_test
);
882 debug_object_activate(obj
, &descr_type_test
);
891 * fixup_destroy is called when:
892 * - an active object is destroyed
894 static bool __init
fixup_destroy(void *addr
, enum debug_obj_state state
)
896 struct self_test
*obj
= addr
;
899 case ODEBUG_STATE_ACTIVE
:
900 debug_object_deactivate(obj
, &descr_type_test
);
901 debug_object_destroy(obj
, &descr_type_test
);
909 * fixup_free is called when:
910 * - an active object is freed
912 static bool __init
fixup_free(void *addr
, enum debug_obj_state state
)
914 struct self_test
*obj
= addr
;
917 case ODEBUG_STATE_ACTIVE
:
918 debug_object_deactivate(obj
, &descr_type_test
);
919 debug_object_free(obj
, &descr_type_test
);
927 check_results(void *addr
, enum debug_obj_state state
, int fixups
, int warnings
)
929 struct debug_bucket
*db
;
930 struct debug_obj
*obj
;
934 db
= get_bucket((unsigned long) addr
);
936 raw_spin_lock_irqsave(&db
->lock
, flags
);
938 obj
= lookup_object(addr
, db
);
939 if (!obj
&& state
!= ODEBUG_STATE_NONE
) {
940 WARN(1, KERN_ERR
"ODEBUG: selftest object not found\n");
943 if (obj
&& obj
->state
!= state
) {
944 WARN(1, KERN_ERR
"ODEBUG: selftest wrong state: %d != %d\n",
948 if (fixups
!= debug_objects_fixups
) {
949 WARN(1, KERN_ERR
"ODEBUG: selftest fixups failed %d != %d\n",
950 fixups
, debug_objects_fixups
);
953 if (warnings
!= debug_objects_warnings
) {
954 WARN(1, KERN_ERR
"ODEBUG: selftest warnings failed %d != %d\n",
955 warnings
, debug_objects_warnings
);
960 raw_spin_unlock_irqrestore(&db
->lock
, flags
);
962 debug_objects_enabled
= 0;
966 static __initdata
struct debug_obj_descr descr_type_test
= {
968 .is_static_object
= is_static_object
,
969 .fixup_init
= fixup_init
,
970 .fixup_activate
= fixup_activate
,
971 .fixup_destroy
= fixup_destroy
,
972 .fixup_free
= fixup_free
,
975 static __initdata
struct self_test obj
= { .static_init
= 0 };
977 static void __init
debug_objects_selftest(void)
979 int fixups
, oldfixups
, warnings
, oldwarnings
;
982 local_irq_save(flags
);
984 fixups
= oldfixups
= debug_objects_fixups
;
985 warnings
= oldwarnings
= debug_objects_warnings
;
986 descr_test
= &descr_type_test
;
988 debug_object_init(&obj
, &descr_type_test
);
989 if (check_results(&obj
, ODEBUG_STATE_INIT
, fixups
, warnings
))
991 debug_object_activate(&obj
, &descr_type_test
);
992 if (check_results(&obj
, ODEBUG_STATE_ACTIVE
, fixups
, warnings
))
994 debug_object_activate(&obj
, &descr_type_test
);
995 if (check_results(&obj
, ODEBUG_STATE_ACTIVE
, ++fixups
, ++warnings
))
997 debug_object_deactivate(&obj
, &descr_type_test
);
998 if (check_results(&obj
, ODEBUG_STATE_INACTIVE
, fixups
, warnings
))
1000 debug_object_destroy(&obj
, &descr_type_test
);
1001 if (check_results(&obj
, ODEBUG_STATE_DESTROYED
, fixups
, warnings
))
1003 debug_object_init(&obj
, &descr_type_test
);
1004 if (check_results(&obj
, ODEBUG_STATE_DESTROYED
, fixups
, ++warnings
))
1006 debug_object_activate(&obj
, &descr_type_test
);
1007 if (check_results(&obj
, ODEBUG_STATE_DESTROYED
, fixups
, ++warnings
))
1009 debug_object_deactivate(&obj
, &descr_type_test
);
1010 if (check_results(&obj
, ODEBUG_STATE_DESTROYED
, fixups
, ++warnings
))
1012 debug_object_free(&obj
, &descr_type_test
);
1013 if (check_results(&obj
, ODEBUG_STATE_NONE
, fixups
, warnings
))
1016 obj
.static_init
= 1;
1017 debug_object_activate(&obj
, &descr_type_test
);
1018 if (check_results(&obj
, ODEBUG_STATE_ACTIVE
, fixups
, warnings
))
1020 debug_object_init(&obj
, &descr_type_test
);
1021 if (check_results(&obj
, ODEBUG_STATE_INIT
, ++fixups
, ++warnings
))
1023 debug_object_free(&obj
, &descr_type_test
);
1024 if (check_results(&obj
, ODEBUG_STATE_NONE
, fixups
, warnings
))
1027 #ifdef CONFIG_DEBUG_OBJECTS_FREE
1028 debug_object_init(&obj
, &descr_type_test
);
1029 if (check_results(&obj
, ODEBUG_STATE_INIT
, fixups
, warnings
))
1031 debug_object_activate(&obj
, &descr_type_test
);
1032 if (check_results(&obj
, ODEBUG_STATE_ACTIVE
, fixups
, warnings
))
1034 __debug_check_no_obj_freed(&obj
, sizeof(obj
));
1035 if (check_results(&obj
, ODEBUG_STATE_NONE
, ++fixups
, ++warnings
))
1038 pr_info("selftest passed\n");
1041 debug_objects_fixups
= oldfixups
;
1042 debug_objects_warnings
= oldwarnings
;
1045 local_irq_restore(flags
);
1048 static inline void debug_objects_selftest(void) { }
1052 * Called during early boot to initialize the hash buckets and link
1053 * the static object pool objects into the poll list. After this call
1054 * the object tracker is fully operational.
1056 void __init
debug_objects_early_init(void)
1060 for (i
= 0; i
< ODEBUG_HASH_SIZE
; i
++)
1061 raw_spin_lock_init(&obj_hash
[i
].lock
);
1063 for (i
= 0; i
< ODEBUG_POOL_SIZE
; i
++)
1064 hlist_add_head(&obj_static_pool
[i
].node
, &obj_pool
);
1068 * Convert the statically allocated objects to dynamic ones:
1070 static int __init
debug_objects_replace_static_objects(void)
1072 struct debug_bucket
*db
= obj_hash
;
1073 struct hlist_node
*tmp
;
1074 struct debug_obj
*obj
, *new;
1075 HLIST_HEAD(objects
);
1078 for (i
= 0; i
< ODEBUG_POOL_SIZE
; i
++) {
1079 obj
= kmem_cache_zalloc(obj_cache
, GFP_KERNEL
);
1082 hlist_add_head(&obj
->node
, &objects
);
1086 * When debug_objects_mem_init() is called we know that only
1087 * one CPU is up, so disabling interrupts is enough
1088 * protection. This avoids the lockdep hell of lock ordering.
1090 local_irq_disable();
1092 /* Remove the statically allocated objects from the pool */
1093 hlist_for_each_entry_safe(obj
, tmp
, &obj_pool
, node
)
1094 hlist_del(&obj
->node
);
1095 /* Move the allocated objects to the pool */
1096 hlist_move_list(&objects
, &obj_pool
);
1098 /* Replace the active object references */
1099 for (i
= 0; i
< ODEBUG_HASH_SIZE
; i
++, db
++) {
1100 hlist_move_list(&db
->list
, &objects
);
1102 hlist_for_each_entry(obj
, &objects
, node
) {
1103 new = hlist_entry(obj_pool
.first
, typeof(*obj
), node
);
1104 hlist_del(&new->node
);
1105 /* copy object data */
1107 hlist_add_head(&new->node
, &db
->list
);
1113 pr_debug("%d of %d active objects replaced\n",
1114 cnt
, obj_pool_used
);
1117 hlist_for_each_entry_safe(obj
, tmp
, &objects
, node
) {
1118 hlist_del(&obj
->node
);
1119 kmem_cache_free(obj_cache
, obj
);
1125 * Called after the kmem_caches are functional to setup a dedicated
1126 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1127 * prevents that the debug code is called on kmem_cache_free() for the
1128 * debug tracker objects to avoid recursive calls.
1130 void __init
debug_objects_mem_init(void)
1132 if (!debug_objects_enabled
)
1135 obj_cache
= kmem_cache_create("debug_objects_cache",
1136 sizeof (struct debug_obj
), 0,
1137 SLAB_DEBUG_OBJECTS
, NULL
);
1139 if (!obj_cache
|| debug_objects_replace_static_objects()) {
1140 debug_objects_enabled
= 0;
1142 kmem_cache_destroy(obj_cache
);
1143 pr_warn("out of memory.\n");
1145 debug_objects_selftest();
1148 * Increase the thresholds for allocating and freeing objects
1149 * according to the number of possible CPUs available in the system.
1151 debug_objects_pool_size
+= num_possible_cpus() * 32;
1152 debug_objects_pool_min_level
+= num_possible_cpus() * 4;