/*
 * Generic infrastructure for lifetime debugging of objects.
 *
 * Started by Thomas Gleixner
 *
 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
 *
 * For licensing details see kernel-base/COPYING
 */

#define pr_fmt(fmt) "ODEBUG: " fmt

#include <linux/debugobjects.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/kmemleak.h>
#include <linux/cpu.h>

#define ODEBUG_HASH_BITS	14
#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)

#define ODEBUG_POOL_SIZE	1024
#define ODEBUG_POOL_MIN_LEVEL	256
#define ODEBUG_POOL_PERCPU_SIZE	64
#define ODEBUG_BATCH_SIZE	16

#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))

/*
 * We limit the freeing of debug objects via workqueue at a maximum
 * frequency of 10Hz and about 1024 objects for each freeing operation.
 * So it frees at most ~10k debug objects per second.
 */
#define ODEBUG_FREE_WORK_MAX	1024
#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)

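/*
 * Worked example (illustrative): with HZ = 250, say,
 * ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies, i.e. one
 * batch of at most ODEBUG_FREE_WORK_MAX (1024) objects every ~100ms,
 * which is where the ~10k objects per second bound above comes from.
 */
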
struct debug_bucket {
	struct hlist_head	list;
	raw_spinlock_t		lock;
};

/*
 * Debug object percpu free list
 * Access is protected by disabling irq
 */
struct debug_percpu_free {
	struct hlist_head	free_objs;
	int			obj_free;
};

static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);

static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];

static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;

static DEFINE_RAW_SPINLOCK(pool_lock);

static HLIST_HEAD(obj_pool);
static HLIST_HEAD(obj_to_free);

/*
 * Because of the presence of percpu free pools, obj_pool_free will
 * under-count those in the percpu free pools. Similarly, obj_pool_used
 * will over-count those in the percpu free pools. Adjustments will be
 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
 * can be off.
 */
static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
static int			obj_pool_free = ODEBUG_POOL_SIZE;
static int			obj_pool_used;
static int			obj_pool_max_used;
static bool			obj_freeing;
/* The number of objs on the global free list */
static int			obj_nr_tofree;

static int			debug_objects_maxchain __read_mostly;
static int __maybe_unused	debug_objects_maxchecked __read_mostly;
static int			debug_objects_fixups __read_mostly;
static int			debug_objects_warnings __read_mostly;
static int			debug_objects_enabled __read_mostly
				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
static int			debug_objects_pool_size __read_mostly
				= ODEBUG_POOL_SIZE;
static int			debug_objects_pool_min_level __read_mostly
				= ODEBUG_POOL_MIN_LEVEL;
static const struct debug_obj_descr *descr_test  __read_mostly;
static struct kmem_cache	*obj_cache __read_mostly;

/*
 * Track the number of kmem_cache_alloc()/free() calls done.
 */
static int			debug_objects_allocated;
static int			debug_objects_freed;

static void free_obj_work(struct work_struct *work);
static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);

static int __init enable_object_debug(char *str)
{
	debug_objects_enabled = 1;
	return 0;
}

static int __init disable_object_debug(char *str)
{
	debug_objects_enabled = 0;
	return 0;
}

early_param("debug_objects", enable_object_debug);
early_param("no_debug_objects", disable_object_debug);

static const char *obj_states[ODEBUG_STATE_MAX] = {
	[ODEBUG_STATE_NONE]		= "none",
	[ODEBUG_STATE_INIT]		= "initialized",
	[ODEBUG_STATE_INACTIVE]		= "inactive",
	[ODEBUG_STATE_ACTIVE]		= "active",
	[ODEBUG_STATE_DESTROYED]	= "destroyed",
	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
};

static void fill_pool(void)
{
	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
	struct debug_obj *obj;
	unsigned long flags;

	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
		return;

	/*
	 * Reuse objs from the global free list; they will be reinitialized
	 * when allocating.
	 *
	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
	 * sections.
	 */
	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
		raw_spin_lock_irqsave(&pool_lock, flags);
		/*
		 * Recheck with the lock held as the worker thread might have
		 * won the race and freed the global free list already.
		 */
		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
			hlist_del(&obj->node);
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
			hlist_add_head(&obj->node, &obj_pool);
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}

	if (unlikely(!obj_cache))
		return;

	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
		struct debug_obj *new[ODEBUG_BATCH_SIZE];
		int cnt;

		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
			if (!new[cnt])
				break;
		}
		if (!cnt)
			return;

		raw_spin_lock_irqsave(&pool_lock, flags);
		while (cnt) {
			hlist_add_head(&new[--cnt]->node, &obj_pool);
			debug_objects_allocated++;
			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		}
		raw_spin_unlock_irqrestore(&pool_lock, flags);
	}
}

/*
 * Lookup an object in the hash bucket.
 */
static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
{
	struct debug_obj *obj;
	int cnt = 0;

	hlist_for_each_entry(obj, &b->list, node) {
		cnt++;
		if (obj->object == addr)
			return obj;
	}
	if (cnt > debug_objects_maxchain)
		debug_objects_maxchain = cnt;

	return NULL;
}

/*
 * Allocate a new object from the hlist
 */
static struct debug_obj *__alloc_object(struct hlist_head *list)
{
	struct debug_obj *obj = NULL;

	if (list->first) {
		obj = hlist_entry(list->first, typeof(*obj), node);
		hlist_del(&obj->node);
	}

	return obj;
}

/*
 * Allocate a new object. If the pool is empty, switch off the debugger.
 * Must be called with interrupts disabled.
 */
static struct debug_obj *
alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
{
	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	struct debug_obj *obj;

	if (likely(obj_cache)) {
		obj = __alloc_object(&percpu_pool->free_objs);
		if (obj) {
			percpu_pool->obj_free--;
			goto init_obj;
		}
	}

	raw_spin_lock(&pool_lock);
	obj = __alloc_object(&obj_pool);
	if (obj) {
		obj_pool_used++;
		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);

		/*
		 * Looking ahead, allocate one batch of debug objects and
		 * put them into the percpu free pool.
		 */
		if (likely(obj_cache)) {
			int i;

			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				struct debug_obj *obj2;

				obj2 = __alloc_object(&obj_pool);
				if (!obj2)
					break;
				hlist_add_head(&obj2->node,
					       &percpu_pool->free_objs);
				percpu_pool->obj_free++;
				obj_pool_used++;
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
			}
		}

		if (obj_pool_used > obj_pool_max_used)
			obj_pool_max_used = obj_pool_used;

		if (obj_pool_free < obj_pool_min_free)
			obj_pool_min_free = obj_pool_free;
	}
	raw_spin_unlock(&pool_lock);

init_obj:
	if (obj) {
		obj->object = addr;
		obj->descr  = descr;
		obj->state  = ODEBUG_STATE_NONE;
		obj->astate = 0;
		hlist_add_head(&obj->node, &b->list);
	}
	return obj;
}

/*
 * Workqueue function to free objects.
 *
 * To reduce contention on the global pool_lock, the actual freeing of
 * debug objects will be delayed if the pool_lock is busy.
 */
static void free_obj_work(struct work_struct *work)
{
	struct hlist_node *tmp;
	struct debug_obj *obj;
	unsigned long flags;
	HLIST_HEAD(tofree);

	WRITE_ONCE(obj_freeing, false);
	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
		return;

	if (obj_pool_free >= debug_objects_pool_size)
		goto free_objs;

	/*
	 * The objs on the pool list might be allocated before the work is
	 * run, so recheck whether the pool list is full. If not, fill the
	 * pool list from the global free list. As it is likely that a
	 * workload may be gearing up to use more and more objects, don't
	 * free any of them until the next round.
	 */
	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
		hlist_del(&obj->node);
		hlist_add_head(&obj->node, &obj_pool);
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);
	return;

free_objs:
	/*
	 * The pool list is already full and there are still objs on the free
	 * list. Move the remaining free objs to a temporary list to free the
	 * memory outside the pool_lock held region.
	 */
	if (obj_nr_tofree) {
		hlist_move_list(&obj_to_free, &tofree);
		debug_objects_freed += obj_nr_tofree;
		WRITE_ONCE(obj_nr_tofree, 0);
	}
	raw_spin_unlock_irqrestore(&pool_lock, flags);

	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
}

static void __free_object(struct debug_obj *obj)
{
	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
	struct debug_percpu_free *percpu_pool;
	int lookahead_count = 0;
	unsigned long flags;
	bool work;

	local_irq_save(flags);
	if (!obj_cache)
		goto free_to_obj_pool;

	/*
	 * Try to free it into the percpu pool first.
	 */
	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
		hlist_add_head(&obj->node, &percpu_pool->free_objs);
		percpu_pool->obj_free++;
		local_irq_restore(flags);
		return;
	}

	/*
	 * As the percpu pool is full, look ahead and pull out a batch
	 * of objects from the percpu pool and free them as well.
	 */
	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
		if (!objs[lookahead_count])
			break;
		percpu_pool->obj_free--;
	}

free_to_obj_pool:
	raw_spin_lock(&pool_lock);
	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
	obj_pool_used--;

	if (work) {
		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
		hlist_add_head(&obj->node, &obj_to_free);
		if (lookahead_count) {
			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_to_free);
			}
		}

		if ((obj_pool_free > debug_objects_pool_size) &&
		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
			int i;

			/*
			 * Free one more batch of objects from obj_pool.
			 */
			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
				obj = __alloc_object(&obj_pool);
				hlist_add_head(&obj->node, &obj_to_free);
				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
			}
		}
	} else {
		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
		hlist_add_head(&obj->node, &obj_pool);
		if (lookahead_count) {
			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
			obj_pool_used -= lookahead_count;
			while (lookahead_count) {
				hlist_add_head(&objs[--lookahead_count]->node,
					       &obj_pool);
			}
		}
	}
	raw_spin_unlock(&pool_lock);
	local_irq_restore(flags);
}

/*
 * Put the object back into the pool and schedule work to free objects
 * if necessary.
 */
static void free_object(struct debug_obj *obj)
{
	__free_object(obj);
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

#ifdef CONFIG_HOTPLUG_CPU
static int object_cpu_offline(unsigned int cpu)
{
	struct debug_percpu_free *percpu_pool;
	struct hlist_node *tmp;
	struct debug_obj *obj;

	/* Remote access is safe as the CPU is dead already */
	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	percpu_pool->obj_free = 0;

	return 0;
}
#endif

/*
 * We ran out of memory. That means we probably have tons of objects
 * allocated.
 */
static void debug_objects_oom(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	HLIST_HEAD(freelist);
	struct debug_obj *obj;
	unsigned long flags;
	int i;

	pr_warn("Out of memory. ODEBUG disabled\n");

	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_move_list(&db->list, &freelist);
		raw_spin_unlock_irqrestore(&db->lock, flags);

		/* Now free them */
		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
			hlist_del(&obj->node);
			free_object(obj);
		}
	}
}

/*
 * We use the pfn of the address for the hash. That way we can check
 * for freed objects simply by checking the affected bucket.
 */
static struct debug_bucket *get_bucket(unsigned long addr)
{
	unsigned long hash;

	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
	return &obj_hash[hash];
}
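
/*
 * Illustrative note: since the hash key is addr >> ODEBUG_CHUNK_SHIFT,
 * all addresses within one chunk (one page, with the default
 * ODEBUG_CHUNK_SHIFT == PAGE_SHIFT) map to the same bucket. With 4k
 * pages, get_bucket(0x1000) == get_bucket(0x1ff8), while 0x1000 and
 * 0x2000 will usually (modulo hash collisions) map to different
 * buckets. This is what lets __debug_check_no_obj_freed() below scan a
 * freed memory range chunk by chunk.
 */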

static void debug_print_object(struct debug_obj *obj, char *msg)
{
	const struct debug_obj_descr *descr = obj->descr;
	static int limit;

	if (limit < 5 && descr != descr_test) {
		void *hint = descr->debug_hint ?
			descr->debug_hint(obj->object) : NULL;
		limit++;
		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
				 "object type: %s hint: %pS\n",
			msg, obj_states[obj->state], obj->astate,
			descr->name, hint);
	}
	debug_objects_warnings++;
}

/*
 * Try to repair the damage, so we have a better chance to get useful
 * debug output.
 */
static bool
debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
		   void *addr, enum debug_obj_state state)
{
	if (fixup && fixup(addr, state)) {
		debug_objects_fixups++;
		return true;
	}
	return false;
}

static void debug_object_is_on_stack(void *addr, int onstack)
{
	int is_on_stack;
	static int limit;

	if (limit > 4)
		return;

	is_on_stack = object_is_on_stack(addr);
	if (is_on_stack == onstack)
		return;

	limit++;
	if (is_on_stack)
		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
			task_stack_page(current));
	else
		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
			task_stack_page(current));

	WARN_ON(1);
}

static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
	enum debug_obj_state state;
	bool check_stack = false;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	/*
	 * On RT enabled kernels the pool refill must happen in preemptible
	 * context:
	 */
	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
		fill_pool();

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		obj = alloc_object(addr, db, descr);
		if (!obj) {
			debug_objects_enabled = 0;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_objects_oom();
			return;
		}
		check_stack = true;
	}

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_INIT;
		break;

	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		debug_object_fixup(descr->fixup_init, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "init");
		return;
	default:
		break;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (check_stack)
		debug_object_is_on_stack(addr, onstack);
}

/**
 * debug_object_init - debug checks when an object is initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 0);
}
EXPORT_SYMBOL_GPL(debug_object_init);
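
/*
 * Example (illustrative sketch, not part of the kernel API docs): a
 * typical user wraps the call in its own setup function. The names
 * my_timer, my_timer_descr and my_timer_setup are hypothetical.
 *
 *	static const struct debug_obj_descr my_timer_descr = {
 *		.name = "my_timer",
 *	};
 *
 *	static void my_timer_setup(struct my_timer *t)
 *	{
 *		debug_object_init(t, &my_timer_descr);
 *	}
 */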

/**
 * debug_object_init_on_stack - debug checks when an object on stack is
 *				initialized
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
{
	if (!debug_objects_enabled)
		return;

	__debug_object_init(addr, descr, 1);
}
EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
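
/*
 * Example (illustrative sketch): objects living on the stack must use
 * the _on_stack variant, otherwise debug_object_is_on_stack() warns.
 * Hypothetical names as above; the object must be cleaned up before
 * the function returns.
 *
 *	void my_function(void)
 *	{
 *		struct my_timer t;
 *
 *		debug_object_init_on_stack(&t, &my_timer_descr);
 *		...
 *		debug_object_free(&t, &my_timer_descr);
 *	}
 */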

/**
 * debug_object_activate - debug checks when an object is activated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * Returns 0 for success, -EINVAL if the check failed.
 */
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int ret;
	struct debug_obj o = { .object = addr,
			       .state = ODEBUG_STATE_NOTAVAILABLE,
			       .descr = descr };

	if (!debug_objects_enabled)
		return 0;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		bool print_object = false;

		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
			obj->state = ODEBUG_STATE_ACTIVE;
			ret = 0;
			break;

		case ODEBUG_STATE_ACTIVE:
			state = obj->state;
			raw_spin_unlock_irqrestore(&db->lock, flags);
			debug_print_object(obj, "activate");
			ret = debug_object_fixup(descr->fixup_activate, addr, state);
			return ret ? 0 : -EINVAL;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			ret = -EINVAL;
			break;
		default:
			ret = 0;
			break;
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);
		if (print_object)
			debug_print_object(obj, "activate");
		return ret;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);

	/*
	 * We are here when a static object is activated. We let the type
	 * specific code confirm whether this is true or not. If true, we
	 * just make sure that the static object is tracked in the object
	 * tracker. If not, this must be a bug, so we try to fix it up.
	 */
	if (descr->is_static_object && descr->is_static_object(addr)) {
		/* track this static object */
		debug_object_init(addr, descr);
		debug_object_activate(addr, descr);
	} else {
		debug_print_object(&o, "activate");
		ret = debug_object_fixup(descr->fixup_activate, addr,
					ODEBUG_STATE_NOTAVAILABLE);
		return ret ? 0 : -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
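
/*
 * Example (illustrative sketch): callers that care about the outcome
 * can propagate the return value; hypothetical names again.
 *
 *	static int my_timer_start(struct my_timer *t)
 *	{
 *		if (debug_object_activate(t, &my_timer_descr))
 *			return -EINVAL;	// activation check failed
 *		...
 *		return 0;
 *	}
 */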

/**
 * debug_object_deactivate - debug checks when an object is deactivated
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_INIT:
		case ODEBUG_STATE_INACTIVE:
		case ODEBUG_STATE_ACTIVE:
			if (!obj->astate)
				obj->state = ODEBUG_STATE_INACTIVE;
			else
				print_object = true;
			break;

		case ODEBUG_STATE_DESTROYED:
			print_object = true;
			break;
		default:
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "deactivate");
	} else if (print_object) {
		debug_print_object(obj, "deactivate");
	}
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);

/**
 * debug_object_destroy - debug checks when an object is destroyed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_NONE:
	case ODEBUG_STATE_INIT:
	case ODEBUG_STATE_INACTIVE:
		obj->state = ODEBUG_STATE_DESTROYED;
		break;
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "destroy");
		debug_object_fixup(descr->fixup_destroy, addr, state);
		return;

	case ODEBUG_STATE_DESTROYED:
		print_object = true;
		break;
	default:
		break;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (print_object)
		debug_print_object(obj, "destroy");
}
EXPORT_SYMBOL_GPL(debug_object_destroy);

/**
 * debug_object_free - debug checks when an object is freed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj)
		goto out_unlock;

	switch (obj->state) {
	case ODEBUG_STATE_ACTIVE:
		state = obj->state;
		raw_spin_unlock_irqrestore(&db->lock, flags);
		debug_print_object(obj, "free");
		debug_object_fixup(descr->fixup_free, addr, state);
		return;
	default:
		hlist_del(&obj->node);
		raw_spin_unlock_irqrestore(&db->lock, flags);
		free_object(obj);
		return;
	}
out_unlock:
	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_free);

/**
 * debug_object_assert_init - debug checks when object should be init-ed
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 */
void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		raw_spin_unlock_irqrestore(&db->lock, flags);
		/*
		 * Maybe the object is static, and we let the type specific
		 * code confirm. Track this static object if true, else invoke
		 * fixup.
		 */
		if (descr->is_static_object && descr->is_static_object(addr)) {
			/* Track this static object */
			debug_object_init(addr, descr);
		} else {
			debug_print_object(&o, "assert_init");
			debug_object_fixup(descr->fixup_assert_init, addr,
					   ODEBUG_STATE_NOTAVAILABLE);
		}
		return;
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
}
EXPORT_SYMBOL_GPL(debug_object_assert_init);
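
/*
 * Example (illustrative sketch): cancel/stop style helpers which may
 * legally see a never-initialized object can assert initialization
 * first; names are hypothetical.
 *
 *	static void my_timer_cancel(struct my_timer *t)
 *	{
 *		debug_object_assert_init(t, &my_timer_descr);
 *		...
 *	}
 */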

/**
 * debug_object_active_state - debug checks object usage state machine
 * @addr:	address of the object
 * @descr:	pointer to an object specific debug description structure
 * @expect:	expected state
 * @next:	state to move to if expected state is found
 */
void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
			  unsigned int expect, unsigned int next)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	bool print_object = false;

	if (!debug_objects_enabled)
		return;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (obj) {
		switch (obj->state) {
		case ODEBUG_STATE_ACTIVE:
			if (obj->astate == expect)
				obj->astate = next;
			else
				print_object = true;
			break;

		default:
			print_object = true;
			break;
		}
	}

	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (!obj) {
		struct debug_obj o = { .object = addr,
				       .state = ODEBUG_STATE_NOTAVAILABLE,
				       .descr = descr };

		debug_print_object(&o, "active_state");
	} else if (print_object) {
		debug_print_object(obj, "active_state");
	}
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
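
/*
 * Example (illustrative sketch): a user that subdivides the "active"
 * state can assert transitions between its own sub-states; the enum
 * and names are hypothetical.
 *
 *	enum { MY_STATE_QUEUED, MY_STATE_RUNNING };
 *
 *	static void my_obj_run(struct my_obj *o)
 *	{
 *		debug_object_active_state(o, &my_descr,
 *					  MY_STATE_QUEUED, MY_STATE_RUNNING);
 *		...
 *	}
 */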

#ifdef CONFIG_DEBUG_OBJECTS_FREE
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
	const struct debug_obj_descr *descr;
	enum debug_obj_state state;
	struct debug_bucket *db;
	struct hlist_node *tmp;
	struct debug_obj *obj;
	int cnt, objs_checked = 0;

	saddr = (unsigned long) address;
	eaddr = saddr + size;
	paddr = saddr & ODEBUG_CHUNK_MASK;
	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
	chunks >>= ODEBUG_CHUNK_SHIFT;

	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
		db = get_bucket(paddr);

repeat:
		cnt = 0;
		raw_spin_lock_irqsave(&db->lock, flags);
		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
			cnt++;
			oaddr = (unsigned long) obj->object;
			if (oaddr < saddr || oaddr >= eaddr)
				continue;

			switch (obj->state) {
			case ODEBUG_STATE_ACTIVE:
				descr = obj->descr;
				state = obj->state;
				raw_spin_unlock_irqrestore(&db->lock, flags);
				debug_print_object(obj, "free");
				debug_object_fixup(descr->fixup_free,
						   (void *) oaddr, state);
				goto repeat;
			default:
				hlist_del(&obj->node);
				__free_object(obj);
				break;
			}
		}
		raw_spin_unlock_irqrestore(&db->lock, flags);

		if (cnt > debug_objects_maxchain)
			debug_objects_maxchain = cnt;

		objs_checked += cnt;
	}

	if (objs_checked > debug_objects_maxchecked)
		debug_objects_maxchecked = objs_checked;

	/* Schedule work to actually kmem_cache_free() objects */
	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
		WRITE_ONCE(obj_freeing, true);
		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
	}
}

void debug_check_no_obj_freed(const void *address, unsigned long size)
{
	if (debug_objects_enabled)
		__debug_check_no_obj_freed(address, size);
}
#endif

#ifdef CONFIG_DEBUG_FS

static int debug_stats_show(struct seq_file *m, void *v)
{
	int cpu, obj_percpu_free = 0;

	for_each_possible_cpu(cpu)
		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);

	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(debug_stats);

static int __init debug_objects_init_debugfs(void)
{
	struct dentry *dbgdir;

	if (!debug_objects_enabled)
		return 0;

	dbgdir = debugfs_create_dir("debug_objects", NULL);

	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);

	return 0;
}
__initcall(debug_objects_init_debugfs);

#else
static inline void debug_objects_init_debugfs(void) { }
#endif

#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST

/* Random data structure for the self test */
struct self_test {
	unsigned long	dummy1[6];
	int		static_init;
	unsigned long	dummy2[3];
};

static __initconst const struct debug_obj_descr descr_type_test;

static bool __init is_static_object(void *addr)
{
	struct self_test *obj = addr;

	return obj->static_init;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static bool __init fixup_init(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_init(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown non-static object is activated
 */
static bool __init fixup_activate(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		return true;
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_activate(obj, &descr_type_test);
		return true;

	default:
		return false;
	}
}

/*
 * fixup_destroy is called when:
 * - an active object is destroyed
 */
static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_destroy(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static bool __init fixup_free(void *addr, enum debug_obj_state state)
{
	struct self_test *obj = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		debug_object_deactivate(obj, &descr_type_test);
		debug_object_free(obj, &descr_type_test);
		return true;
	default:
		return false;
	}
}

static int __init
check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
{
	struct debug_bucket *db;
	struct debug_obj *obj;
	unsigned long flags;
	int res = -EINVAL;

	db = get_bucket((unsigned long) addr);

	raw_spin_lock_irqsave(&db->lock, flags);

	obj = lookup_object(addr, db);
	if (!obj && state != ODEBUG_STATE_NONE) {
		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
		goto out;
	}
	if (obj && obj->state != state) {
		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
		       obj->state, state);
		goto out;
	}
	if (fixups != debug_objects_fixups) {
		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
		       fixups, debug_objects_fixups);
		goto out;
	}
	if (warnings != debug_objects_warnings) {
		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
		       warnings, debug_objects_warnings);
		goto out;
	}
	res = 0;
out:
	raw_spin_unlock_irqrestore(&db->lock, flags);
	if (res)
		debug_objects_enabled = 0;
	return res;
}

static __initconst const struct debug_obj_descr descr_type_test = {
	.name			= "selftest",
	.is_static_object	= is_static_object,
	.fixup_init		= fixup_init,
	.fixup_activate		= fixup_activate,
	.fixup_destroy		= fixup_destroy,
	.fixup_free		= fixup_free,
};

static __initdata struct self_test obj = { .static_init = 0 };

static void __init debug_objects_selftest(void)
{
	int fixups, oldfixups, warnings, oldwarnings;
	unsigned long flags;

	local_irq_save(flags);

	fixups = oldfixups = debug_objects_fixups;
	warnings = oldwarnings = debug_objects_warnings;
	descr_test = &descr_type_test;

	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
		goto out;
	debug_object_destroy(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_deactivate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

	obj.static_init = 1;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
		goto out;
	debug_object_free(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
		goto out;

#ifdef CONFIG_DEBUG_OBJECTS_FREE
	debug_object_init(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
		goto out;
	debug_object_activate(&obj, &descr_type_test);
	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
		goto out;
	__debug_check_no_obj_freed(&obj, sizeof(obj));
	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
		goto out;
#endif
	pr_info("selftest passed\n");

out:
	debug_objects_fixups = oldfixups;
	debug_objects_warnings = oldwarnings;
	descr_test = NULL;

	local_irq_restore(flags);
}
#else
static inline void debug_objects_selftest(void) { }
#endif

/*
 * Called during early boot to initialize the hash buckets and link
 * the static object pool objects into the pool list. After this call
 * the object tracker is fully operational.
 */
void __init debug_objects_early_init(void)
{
	int i;

	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
		raw_spin_lock_init(&obj_hash[i].lock);

	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
}

/*
 * Convert the statically allocated objects to dynamic ones:
 */
static int __init debug_objects_replace_static_objects(void)
{
	struct debug_bucket *db = obj_hash;
	struct hlist_node *tmp;
	struct debug_obj *obj, *new;
	HLIST_HEAD(objects);
	int i, cnt = 0;

	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
		if (!obj)
			goto free;
		hlist_add_head(&obj->node, &objects);
	}

	/*
	 * debug_objects_mem_init() is now called early, when only one CPU
	 * is up and interrupts are disabled, so it is safe to replace the
	 * active object references.
	 */

	/* Remove the statically allocated objects from the pool */
	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
		hlist_del(&obj->node);
	/* Move the allocated objects to the pool */
	hlist_move_list(&objects, &obj_pool);

	/* Replace the active object references */
	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
		hlist_move_list(&db->list, &objects);

		hlist_for_each_entry(obj, &objects, node) {
			new = hlist_entry(obj_pool.first, typeof(*obj), node);
			hlist_del(&new->node);
			/* copy object data */
			*new = *obj;
			hlist_add_head(&new->node, &db->list);
			cnt++;
		}
	}

	pr_debug("%d of %d active objects replaced\n",
		 cnt, obj_pool_used);
	return 0;
free:
	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
		hlist_del(&obj->node);
		kmem_cache_free(obj_cache, obj);
	}
	return -ENOMEM;
}

/*
 * Called after the kmem_caches are functional to setup a dedicated
 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 * prevents the debug code from being called on kmem_cache_free() for
 * the debug tracker objects, avoiding recursive calls.
 */
void __init debug_objects_mem_init(void)
{
	int cpu, extras;

	if (!debug_objects_enabled)
		return;

	/*
	 * Initialize the percpu object pools
	 *
	 * Initialization is not strictly necessary, but was done for
	 * completeness.
	 */
	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));

	obj_cache = kmem_cache_create("debug_objects_cache",
				      sizeof (struct debug_obj), 0,
				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
				      NULL);

	if (!obj_cache || debug_objects_replace_static_objects()) {
		debug_objects_enabled = 0;
		kmem_cache_destroy(obj_cache);
		pr_warn("out of memory.\n");
	} else
		debug_objects_selftest();

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
				  object_cpu_offline);
#endif

	/*
	 * Increase the thresholds for allocating and freeing objects
	 * according to the number of possible CPUs available in the system.
	 */
	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
	debug_objects_pool_size += extras;
	debug_objects_pool_min_level += extras;
}