]>
Commit | Line | Data |
---|---|---|
b34b9563 | 1 | /* |
716154c5 BB |
2 | * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. |
3 | * Copyright (C) 2007 The Regents of the University of California. | |
4 | * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). | |
5 | * Written by Brian Behlendorf <behlendorf1@llnl.gov>. | |
715f6251 | 6 | * UCRL-CODE-235197 |
7 | * | |
716154c5 | 8 | * This file is part of the SPL, Solaris Porting Layer. |
3d6af2dd | 9 | * For details, see <http://zfsonlinux.org/>. |
715f6251 | 10 | * |
716154c5 BB |
11 | * The SPL is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the | |
13 | * Free Software Foundation; either version 2 of the License, or (at your | |
14 | * option) any later version. | |
15 | * | |
16 | * The SPL is distributed in the hope that it will be useful, but WITHOUT | |
715f6251 | 17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 | * for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License along | |
716154c5 | 22 | * with the SPL. If not, see <http://www.gnu.org/licenses/>. |
b34b9563 | 23 | */ |
715f6251 | 24 | |
e5b9b344 | 25 | #include <sys/debug.h> |
c3eabc75 | 26 | #include <sys/sysmacros.h> |
f4b37741 | 27 | #include <sys/kmem.h> |
e5b9b344 | 28 | #include <sys/vmem.h> |
c3eabc75 BB |
29 | #include <linux/mm.h> |
30 | #include <linux/ratelimit.h> | |
31 | ||
32 | /* | |
33 | * As a general rule kmem_alloc() allocations should be small, preferably | |
34 | * just a few pages since they must by physically contiguous. Therefore, a | |
35 | * rate limited warning will be printed to the console for any kmem_alloc() | |
36 | * which exceeds a reasonable threshold. | |
37 | * | |
38 | * The default warning threshold is set to eight pages but capped at 32K to | |
39 | * accommodate systems using large pages. This value was selected to be small | |
40 | * enough to ensure the largest allocations are quickly noticed and fixed. | |
41 | * But large enough to avoid logging any warnings when a allocation size is | |
42 | * larger than optimal but not a serious concern. Since this value is tunable, | |
43 | * developers are encouraged to set it lower when testing so any new largish | |
44 | * allocations are quickly caught. These warnings may be disabled by setting | |
45 | * the threshold to zero. | |
46 | */ | |
47 | unsigned int spl_kmem_alloc_warn = MAX(8 * PAGE_SIZE, 32 * 1024); | |
48 | module_param(spl_kmem_alloc_warn, uint, 0644); | |
49 | MODULE_PARM_DESC(spl_kmem_alloc_warn, | |
50 | "Warning threshold in bytes for a kmem_alloc()"); | |
51 | EXPORT_SYMBOL(spl_kmem_alloc_warn); | |
52 | ||
53 | /* | |
54 | * Large kmem_alloc() allocations will fail if they exceed KMALLOC_MAX_SIZE. | |
55 | * Allocations which are marginally smaller than this limit may succeed but | |
56 | * should still be avoided due to the expense of locating a contiguous range | |
57 | * of free pages. Therefore, a maximum kmem size with reasonable safely | |
58 | * margin of 4x is set. Kmem_alloc() allocations larger than this maximum | |
59 | * will quickly fail. Vmem_alloc() allocations less than or equal to this | |
60 | * value will use kmalloc(), but shift to vmalloc() when exceeding this value. | |
61 | */ | |
62 | unsigned int spl_kmem_alloc_max = (KMALLOC_MAX_SIZE >> 2); | |
63 | module_param(spl_kmem_alloc_max, uint, 0644); | |
64 | MODULE_PARM_DESC(spl_kmem_alloc_max, | |
65 | "Maximum size in bytes for a kmem_alloc()"); | |
66 | EXPORT_SYMBOL(spl_kmem_alloc_max); | |
4ab13d3b | 67 | |
/*
 * Kernel memory debugging hooks are not implemented by this shim;
 * always report debugging as disabled.
 */
int
kmem_debugging(void)
{
	return (0);
}
EXPORT_SYMBOL(kmem_debugging);
74 | ||
e6de04b7 BB |
75 | char * |
76 | kmem_vasprintf(const char *fmt, va_list ap) | |
77 | { | |
78 | va_list aq; | |
79 | char *ptr; | |
80 | ||
e6de04b7 | 81 | do { |
2c762de8 | 82 | va_copy(aq, ap); |
e6de04b7 | 83 | ptr = kvasprintf(GFP_KERNEL, fmt, aq); |
2c762de8 | 84 | va_end(aq); |
e6de04b7 | 85 | } while (ptr == NULL); |
e6de04b7 | 86 | |
b34b9563 | 87 | return (ptr); |
e6de04b7 BB |
88 | } |
89 | EXPORT_SYMBOL(kmem_vasprintf); | |
90 | ||
b868e22f BB |
91 | char * |
92 | kmem_asprintf(const char *fmt, ...) | |
93 | { | |
e6de04b7 | 94 | va_list ap; |
b868e22f BB |
95 | char *ptr; |
96 | ||
b868e22f | 97 | do { |
2c762de8 | 98 | va_start(ap, fmt); |
e6de04b7 | 99 | ptr = kvasprintf(GFP_KERNEL, fmt, ap); |
2c762de8 | 100 | va_end(ap); |
b868e22f | 101 | } while (ptr == NULL); |
b868e22f | 102 | |
b34b9563 | 103 | return (ptr); |
b868e22f BB |
104 | } |
105 | EXPORT_SYMBOL(kmem_asprintf); | |
106 | ||
10129680 BB |
107 | static char * |
108 | __strdup(const char *str, int flags) | |
109 | { | |
110 | char *ptr; | |
111 | int n; | |
112 | ||
113 | n = strlen(str); | |
c3eabc75 | 114 | ptr = kmalloc(n + 1, kmem_flags_convert(flags)); |
10129680 BB |
115 | if (ptr) |
116 | memcpy(ptr, str, n + 1); | |
117 | ||
b34b9563 | 118 | return (ptr); |
10129680 BB |
119 | } |
120 | ||
121 | char * | |
122 | strdup(const char *str) | |
123 | { | |
b34b9563 | 124 | return (__strdup(str, KM_SLEEP)); |
10129680 BB |
125 | } |
126 | EXPORT_SYMBOL(strdup); | |
127 | ||
/*
 * Release a string obtained from strdup(), kmem_asprintf(), or
 * kmem_vasprintf().
 */
void
strfree(char *str)
{
	kfree(str);
}
EXPORT_SYMBOL(strfree);
134 | ||
f1ca4da6 | 135 | /* |
c3eabc75 BB |
136 | * Limit the number of large allocation stack traces dumped to not more than |
137 | * 5 every 60 seconds to prevent denial-of-service attacks from debug code. | |
138 | */ | |
139 | DEFINE_RATELIMIT_STATE(kmem_alloc_ratelimit_state, 60 * HZ, 5); | |
140 | ||
141 | /* | |
142 | * General purpose unified implementation of kmem_alloc(). It is an | |
143 | * amalgamation of Linux and Illumos allocator design. It should never be | |
144 | * exported to ensure that code using kmem_alloc()/kmem_zalloc() remains | |
145 | * relatively portable. Consumers may only access this function through | |
146 | * wrappers that enforce the common flags to ensure portability. | |
147 | */ | |
148 | inline void * | |
149 | spl_kmem_alloc_impl(size_t size, int flags, int node) | |
150 | { | |
151 | gfp_t lflags = kmem_flags_convert(flags); | |
152 | void *ptr; | |
153 | ||
154 | /* | |
155 | * Log abnormally large allocations and rate limit the console output. | |
156 | * Allocations larger than spl_kmem_alloc_warn should be performed | |
157 | * through the vmem_alloc()/vmem_zalloc() interfaces. | |
158 | */ | |
159 | if ((spl_kmem_alloc_warn > 0) && (size > spl_kmem_alloc_warn) && | |
160 | !(flags & KM_VMEM) && __ratelimit(&kmem_alloc_ratelimit_state)) { | |
161 | printk(KERN_WARNING | |
162 | "Large kmem_alloc(%lu, 0x%x), please file an issue at:\n" | |
163 | "https://github.com/zfsonlinux/zfs/issues/new\n", | |
164 | (unsigned long)size, flags); | |
165 | dump_stack(); | |
166 | } | |
167 | ||
168 | /* | |
169 | * Use a loop because kmalloc_node() can fail when GFP_KERNEL is used | |
170 | * unlike kmem_alloc() with KM_SLEEP on Illumos. | |
171 | */ | |
172 | do { | |
173 | /* | |
174 | * Calling kmalloc_node() when the size >= spl_kmem_alloc_max | |
175 | * is unsafe. This must fail for all for kmem_alloc() and | |
176 | * kmem_zalloc() callers. | |
177 | * | |
178 | * For vmem_alloc() and vmem_zalloc() callers it is permissible | |
179 | * to use __vmalloc(). However, in general use of __vmalloc() | |
180 | * is strongly discouraged because a global lock must be | |
181 | * acquired. Contention on this lock can significantly | |
182 | * impact performance so frequently manipulating the virtual | |
183 | * address space is strongly discouraged. | |
184 | */ | |
185 | if (unlikely(size > spl_kmem_alloc_max)) { | |
186 | if (flags & KM_VMEM) { | |
c2fa0945 | 187 | ptr = spl_vmalloc(size, lflags, PAGE_KERNEL); |
c3eabc75 BB |
188 | } else { |
189 | return (NULL); | |
190 | } | |
191 | } else { | |
192 | ptr = kmalloc_node(size, lflags, node); | |
193 | } | |
194 | ||
195 | if (likely(ptr) || (flags & KM_NOSLEEP)) | |
196 | return (ptr); | |
197 | ||
198 | if (unlikely(__ratelimit(&kmem_alloc_ratelimit_state))) { | |
199 | printk(KERN_WARNING | |
200 | "Possible memory allocation deadlock: " | |
201 | "size=%lu lflags=0x%x", | |
202 | (unsigned long)size, lflags); | |
203 | dump_stack(); | |
204 | } | |
205 | ||
206 | /* | |
207 | * Use cond_resched() instead of congestion_wait() to avoid | |
208 | * deadlocking systems where there are no block devices. | |
209 | */ | |
210 | cond_resched(); | |
211 | } while (1); | |
212 | ||
213 | return (NULL); | |
214 | } | |
215 | ||
216 | inline void | |
217 | spl_kmem_free_impl(const void *buf, size_t size) | |
218 | { | |
219 | if (is_vmalloc_addr(buf)) | |
220 | vfree(buf); | |
221 | else | |
222 | kfree(buf); | |
223 | } | |
224 | ||
225 | /* | |
226 | * Memory allocation and accounting for kmem_* * style allocations. When | |
227 | * DEBUG_KMEM is enabled the total memory allocated will be tracked and | |
228 | * any memory leaked will be reported during module unload. | |
229 | * | |
230 | * ./configure --enable-debug-kmem | |
f1ca4da6 | 231 | */ |
232 | #ifdef DEBUG_KMEM | |
d04c8a56 | 233 | |
f1ca4da6 | 234 | /* Shim layer memory accounting */ |
b34b9563 | 235 | #ifdef HAVE_ATOMIC64_T |
550f1705 | 236 | atomic64_t kmem_alloc_used = ATOMIC64_INIT(0); |
a0f6da3d | 237 | unsigned long long kmem_alloc_max = 0; |
b34b9563 | 238 | #else /* HAVE_ATOMIC64_T */ |
d04c8a56 BB |
239 | atomic_t kmem_alloc_used = ATOMIC_INIT(0); |
240 | unsigned long long kmem_alloc_max = 0; | |
b34b9563 | 241 | #endif /* HAVE_ATOMIC64_T */ |
79b31f36 | 242 | |
ff449ac4 | 243 | EXPORT_SYMBOL(kmem_alloc_used); |
244 | EXPORT_SYMBOL(kmem_alloc_max); | |
ff449ac4 | 245 | |
c3eabc75 BB |
246 | inline void * |
247 | spl_kmem_alloc_debug(size_t size, int flags, int node) | |
248 | { | |
249 | void *ptr; | |
250 | ||
251 | ptr = spl_kmem_alloc_impl(size, flags, node); | |
252 | if (ptr) { | |
253 | kmem_alloc_used_add(size); | |
254 | if (unlikely(kmem_alloc_used_read() > kmem_alloc_max)) | |
255 | kmem_alloc_max = kmem_alloc_used_read(); | |
256 | } | |
257 | ||
258 | return (ptr); | |
259 | } | |
260 | ||
261 | inline void | |
262 | spl_kmem_free_debug(const void *ptr, size_t size) | |
263 | { | |
264 | kmem_alloc_used_sub(size); | |
265 | spl_kmem_free_impl(ptr, size); | |
266 | } | |
267 | ||
b34b9563 BB |
268 | /* |
269 | * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked | |
055ffd98 BB |
270 | * but also the location of every alloc and free. When the SPL module is |
271 | * unloaded a list of all leaked addresses and where they were allocated | |
272 | * will be dumped to the console. Enabling this feature has a significant | |
273 | * impact on performance but it makes finding memory leaks straight forward. | |
274 | * | |
275 | * Not surprisingly with debugging enabled the xmem_locks are very highly | |
276 | * contended particularly on xfree(). If we want to run with this detailed | |
277 | * debugging enabled for anything other than debugging we need to minimize | |
278 | * the contention by moving to a lock per xmem_table entry model. | |
c3eabc75 BB |
279 | * |
280 | * ./configure --enable-debug-kmem-tracking | |
a0f6da3d | 281 | */ |
b34b9563 | 282 | #ifdef DEBUG_KMEM_TRACKING |
a0f6da3d | 283 | |
c3eabc75 BB |
284 | #include <linux/hash.h> |
285 | #include <linux/ctype.h> | |
286 | ||
b34b9563 BB |
287 | #define KMEM_HASH_BITS 10 |
288 | #define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS) | |
a0f6da3d | 289 | |
a0f6da3d | 290 | typedef struct kmem_debug { |
b34b9563 BB |
291 | struct hlist_node kd_hlist; /* Hash node linkage */ |
292 | struct list_head kd_list; /* List of all allocations */ | |
293 | void *kd_addr; /* Allocation pointer */ | |
294 | size_t kd_size; /* Allocation size */ | |
295 | const char *kd_func; /* Allocation function */ | |
296 | int kd_line; /* Allocation line */ | |
a0f6da3d | 297 | } kmem_debug_t; |
298 | ||
c3eabc75 BB |
299 | static spinlock_t kmem_lock; |
300 | static struct hlist_head kmem_table[KMEM_TABLE_SIZE]; | |
301 | static struct list_head kmem_list; | |
d6a26c6a | 302 | |
a0f6da3d | 303 | static kmem_debug_t * |
b34b9563 BB |
304 | kmem_del_init(spinlock_t *lock, struct hlist_head *table, |
305 | int bits, const void *addr) | |
a0f6da3d | 306 | { |
307 | struct hlist_head *head; | |
308 | struct hlist_node *node; | |
309 | struct kmem_debug *p; | |
310 | unsigned long flags; | |
a0f6da3d | 311 | |
312 | spin_lock_irqsave(lock, flags); | |
313 | ||
b1424add BB |
314 | head = &table[hash_ptr((void *)addr, bits)]; |
315 | hlist_for_each(node, head) { | |
316 | p = list_entry(node, struct kmem_debug, kd_hlist); | |
a0f6da3d | 317 | if (p->kd_addr == addr) { |
318 | hlist_del_init(&p->kd_hlist); | |
319 | list_del_init(&p->kd_list); | |
320 | spin_unlock_irqrestore(lock, flags); | |
b34b9563 | 321 | return (p); |
a0f6da3d | 322 | } |
323 | } | |
324 | ||
325 | spin_unlock_irqrestore(lock, flags); | |
326 | ||
8d9a23e8 | 327 | return (NULL); |
a0f6da3d | 328 | } |
329 | ||
c3eabc75 BB |
330 | inline void * |
331 | spl_kmem_alloc_track(size_t size, int flags, | |
332 | const char *func, int line, int node) | |
a0f6da3d | 333 | { |
334 | void *ptr = NULL; | |
335 | kmem_debug_t *dptr; | |
336 | unsigned long irq_flags; | |
a0f6da3d | 337 | |
c3eabc75 BB |
338 | dptr = kmalloc(sizeof (kmem_debug_t), kmem_flags_convert(flags)); |
339 | if (dptr == NULL) | |
340 | return (NULL); | |
a0f6da3d | 341 | |
c3eabc75 BB |
342 | dptr->kd_func = __strdup(func, flags); |
343 | if (dptr->kd_func == NULL) { | |
344 | kfree(dptr); | |
345 | return (NULL); | |
346 | } | |
a0f6da3d | 347 | |
c3eabc75 BB |
348 | ptr = spl_kmem_alloc_debug(size, flags, node); |
349 | if (ptr == NULL) { | |
350 | kfree(dptr->kd_func); | |
351 | kfree(dptr); | |
352 | return (NULL); | |
353 | } | |
a0f6da3d | 354 | |
c3eabc75 BB |
355 | INIT_HLIST_NODE(&dptr->kd_hlist); |
356 | INIT_LIST_HEAD(&dptr->kd_list); | |
a0f6da3d | 357 | |
c3eabc75 BB |
358 | dptr->kd_addr = ptr; |
359 | dptr->kd_size = size; | |
360 | dptr->kd_line = line; | |
a0f6da3d | 361 | |
c3eabc75 BB |
362 | spin_lock_irqsave(&kmem_lock, irq_flags); |
363 | hlist_add_head(&dptr->kd_hlist, | |
364 | &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]); | |
365 | list_add_tail(&dptr->kd_list, &kmem_list); | |
366 | spin_unlock_irqrestore(&kmem_lock, irq_flags); | |
a0f6da3d | 367 | |
8d9a23e8 | 368 | return (ptr); |
a0f6da3d | 369 | } |
a0f6da3d | 370 | |
c3eabc75 BB |
371 | inline void |
372 | spl_kmem_free_track(const void *ptr, size_t size) | |
a0f6da3d | 373 | { |
374 | kmem_debug_t *dptr; | |
a0f6da3d | 375 | |
10129680 | 376 | /* Must exist in hash due to kmem_alloc() */ |
8d9a23e8 | 377 | dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); |
c3eabc75 BB |
378 | ASSERT3P(dptr, !=, NULL); |
379 | ASSERT3S(dptr->kd_size, ==, size); | |
a0f6da3d | 380 | |
c8e60837 | 381 | kfree(dptr->kd_func); |
a0f6da3d | 382 | kfree(dptr); |
383 | ||
c3eabc75 | 384 | spl_kmem_free_debug(ptr, size); |
a0f6da3d | 385 | } |
c3eabc75 BB |
386 | #endif /* DEBUG_KMEM_TRACKING */ |
387 | #endif /* DEBUG_KMEM */ | |
a0f6da3d | 388 | |
c3eabc75 BB |
389 | /* |
390 | * Public kmem_alloc(), kmem_zalloc() and kmem_free() interfaces. | |
391 | */ | |
a0f6da3d | 392 | void * |
c3eabc75 | 393 | spl_kmem_alloc(size_t size, int flags, const char *func, int line) |
a0f6da3d | 394 | { |
c3eabc75 BB |
395 | ASSERT0(flags & ~KM_PUBLIC_MASK); |
396 | ||
397 | #if !defined(DEBUG_KMEM) | |
398 | return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE)); | |
399 | #elif !defined(DEBUG_KMEM_TRACKING) | |
400 | return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE)); | |
401 | #else | |
402 | return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE)); | |
403 | #endif | |
404 | } | |
405 | EXPORT_SYMBOL(spl_kmem_alloc); | |
a0f6da3d | 406 | |
c3eabc75 BB |
407 | void * |
408 | spl_kmem_zalloc(size_t size, int flags, const char *func, int line) | |
409 | { | |
410 | ASSERT0(flags & ~KM_PUBLIC_MASK); | |
a0f6da3d | 411 | |
c3eabc75 | 412 | flags |= KM_ZERO; |
10129680 | 413 | |
c3eabc75 BB |
414 | #if !defined(DEBUG_KMEM) |
415 | return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE)); | |
416 | #elif !defined(DEBUG_KMEM_TRACKING) | |
417 | return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE)); | |
418 | #else | |
419 | return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE)); | |
420 | #endif | |
a0f6da3d | 421 | } |
c3eabc75 | 422 | EXPORT_SYMBOL(spl_kmem_zalloc); |
a0f6da3d | 423 | |
424 | void | |
c3eabc75 | 425 | spl_kmem_free(const void *buf, size_t size) |
a0f6da3d | 426 | { |
c3eabc75 BB |
427 | #if !defined(DEBUG_KMEM) |
428 | return (spl_kmem_free_impl(buf, size)); | |
429 | #elif !defined(DEBUG_KMEM_TRACKING) | |
430 | return (spl_kmem_free_debug(buf, size)); | |
431 | #else | |
432 | return (spl_kmem_free_track(buf, size)); | |
433 | #endif | |
a0f6da3d | 434 | } |
c3eabc75 | 435 | EXPORT_SYMBOL(spl_kmem_free); |
a0f6da3d | 436 | |
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
/*
 * Format a preview of a tracked allocation's contents into 'str'
 * (len must be >= 17) for the leak report.  If at least 'min' leading
 * bytes are printable ASCII they are shown as text; otherwise eight
 * bytes sampled at even offsets 0..14 are shown as hex.
 * NOTE(review): the hex fallback reads kd_addr[0..14] regardless of
 * kd_size — assumes allocations are at least 15 bytes; confirm for
 * this debug-only path.
 */
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	/* Preview at most len-1 bytes, never more than the allocation. */
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/*
	 * Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer.
	 */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/*
			 * Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii.
			 */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	/* Not printable enough: dump even-offset bytes as hex instead. */
	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		    *((uint8_t *)kd->kd_addr),
		    *((uint8_t *)kd->kd_addr + 2),
		    *((uint8_t *)kd->kd_addr + 4),
		    *((uint8_t *)kd->kd_addr + 6),
		    *((uint8_t *)kd->kd_addr + 8),
		    *((uint8_t *)kd->kd_addr + 10),
		    *((uint8_t *)kd->kd_addr + 12),
		    *((uint8_t *)kd->kd_addr + 14));
	}

	return (str);
}
482 | ||
/*
 * Initialize the leak-tracking state: the caller's list/lock pair plus
 * the hash bucket heads.
 * NOTE(review): the loop always initializes the global kmem_table
 * regardless of 'list'/'lock'; 'size' is expected to be
 * KMEM_TABLE_SIZE — confirm if additional tables are ever added.
 */
static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);

	return (0);
}
496 | ||
/*
 * Dump every allocation still outstanding at module unload: address,
 * size, a short preview of the data, and the allocating func:line.
 * Runs with the tracking lock held; acceptable since this is unload
 * time and the list is no longer being modified.
 */
static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];

	spin_lock_irqsave(lock, flags);
	/* Print the column header only when there is something to report. */
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
}
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
fece7c99 | 517 | |
/*
 * Module-load initialization: reset the accounting counter and, when
 * leak tracking is compiled in, set up the tracking structures.
 * Always succeeds.
 */
int
spl_kmem_init(void)
{
#ifdef DEBUG_KMEM
	kmem_alloc_used_set(0);

#ifdef DEBUG_KMEM_TRACKING
	spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

	return (0);
}
d6a26c6a | 531 | |
/*
 * Module-unload teardown: report any leaked bytes and, when leak
 * tracking is compiled in, dump the per-allocation leak list.
 */
void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
	/*
	 * Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time.
	 *
	 * BUG FIX: use %lu to match the unsigned long argument; the
	 * previous %ld was a signed/unsigned format mismatch.
	 */
	if (kmem_alloc_used_read() != 0)
		printk(KERN_WARNING "kmem leaked %lu/%llu bytes\n",
		    (unsigned long)kmem_alloc_used_read(), kmem_alloc_max);

#ifdef DEBUG_KMEM_TRACKING
	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
}