]>
Commit | Line | Data |
---|---|---|
b34b9563 | 1 | /* |
716154c5 BB |
2 | * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. |
3 | * Copyright (C) 2007 The Regents of the University of California. | |
4 | * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). | |
5 | * Written by Brian Behlendorf <behlendorf1@llnl.gov>. | |
715f6251 BB |
6 | * UCRL-CODE-235197 |
7 | * | |
716154c5 | 8 | * This file is part of the SPL, Solaris Porting Layer. |
3d6af2dd | 9 | * For details, see <http://zfsonlinux.org/>. |
715f6251 | 10 | * |
716154c5 BB |
11 | * The SPL is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the | |
13 | * Free Software Foundation; either version 2 of the License, or (at your | |
14 | * option) any later version. | |
15 | * | |
16 | * The SPL is distributed in the hope that it will be useful, but WITHOUT | |
715f6251 BB |
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 | * for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License along | |
716154c5 | 22 | * with the SPL. If not, see <http://www.gnu.org/licenses/>. |
b34b9563 | 23 | */ |
715f6251 | 24 | |
e5b9b344 | 25 | #include <sys/debug.h> |
c3eabc75 | 26 | #include <sys/sysmacros.h> |
f4b37741 | 27 | #include <sys/kmem.h> |
e5b9b344 | 28 | #include <sys/vmem.h> |
c3eabc75 | 29 | #include <linux/mm.h> |
c3eabc75 BB |
30 | |
/*
 * As a general rule kmem_alloc() allocations should be small, preferably
 * just a few pages since they must be physically contiguous. Therefore, a
 * rate limited warning will be printed to the console for any kmem_alloc()
 * which exceeds a reasonable threshold.
 *
 * The default warning threshold is set to sixteen pages but capped at 64K to
 * accommodate systems using large pages. This value was selected to be small
 * enough to ensure the largest allocations are quickly noticed and fixed.
 * But large enough to avoid logging any warnings when an allocation size is
 * larger than optimal but not a serious concern. Since this value is tunable,
 * developers are encouraged to set it lower when testing so any new largish
 * allocations are quickly caught. These warnings may be disabled by setting
 * the threshold to zero.
 */
/* BEGIN CSTYLED */
unsigned int spl_kmem_alloc_warn = MIN(16 * PAGE_SIZE, 64 * 1024);
module_param(spl_kmem_alloc_warn, uint, 0644);
MODULE_PARM_DESC(spl_kmem_alloc_warn,
	"Warning threshold in bytes for a kmem_alloc()");
EXPORT_SYMBOL(spl_kmem_alloc_warn);

/*
 * Large kmem_alloc() allocations will fail if they exceed KMALLOC_MAX_SIZE.
 * Allocations which are marginally smaller than this limit may succeed but
 * should still be avoided due to the expense of locating a contiguous range
 * of free pages. Therefore, a maximum kmem size with reasonable safety
 * margin of 4x is set. Kmem_alloc() allocations larger than this maximum
 * will quickly fail. Vmem_alloc() allocations less than or equal to this
 * value will use kmalloc(), but shift to vmalloc() when exceeding this value.
 */
unsigned int spl_kmem_alloc_max = (KMALLOC_MAX_SIZE >> 2);
module_param(spl_kmem_alloc_max, uint, 0644);
MODULE_PARM_DESC(spl_kmem_alloc_max,
	"Maximum size in bytes for a kmem_alloc()");
EXPORT_SYMBOL(spl_kmem_alloc_max);
/* END CSTYLED */
4ab13d3b | 68 | |
b868e22f BB |
/*
 * Report whether this kmem shim was built with debugging support.
 * This port unconditionally reports that it was not.
 */
int
kmem_debugging(void)
{
	int debug_enabled = 0;

	return (debug_enabled);
}
EXPORT_SYMBOL(kmem_debugging);
75 | ||
e6de04b7 BB |
76 | char * |
77 | kmem_vasprintf(const char *fmt, va_list ap) | |
78 | { | |
79 | va_list aq; | |
80 | char *ptr; | |
81 | ||
e6de04b7 | 82 | do { |
2c762de8 | 83 | va_copy(aq, ap); |
54cccfc2 | 84 | ptr = kvasprintf(kmem_flags_convert(KM_SLEEP), fmt, aq); |
2c762de8 | 85 | va_end(aq); |
e6de04b7 | 86 | } while (ptr == NULL); |
e6de04b7 | 87 | |
b34b9563 | 88 | return (ptr); |
e6de04b7 BB |
89 | } |
90 | EXPORT_SYMBOL(kmem_vasprintf); | |
91 | ||
b868e22f BB |
92 | char * |
93 | kmem_asprintf(const char *fmt, ...) | |
94 | { | |
e6de04b7 | 95 | va_list ap; |
b868e22f BB |
96 | char *ptr; |
97 | ||
b868e22f | 98 | do { |
2c762de8 | 99 | va_start(ap, fmt); |
54cccfc2 | 100 | ptr = kvasprintf(kmem_flags_convert(KM_SLEEP), fmt, ap); |
2c762de8 | 101 | va_end(ap); |
b868e22f | 102 | } while (ptr == NULL); |
b868e22f | 103 | |
b34b9563 | 104 | return (ptr); |
b868e22f BB |
105 | } |
106 | EXPORT_SYMBOL(kmem_asprintf); | |
107 | ||
10129680 BB |
108 | static char * |
109 | __strdup(const char *str, int flags) | |
110 | { | |
111 | char *ptr; | |
112 | int n; | |
113 | ||
114 | n = strlen(str); | |
c3eabc75 | 115 | ptr = kmalloc(n + 1, kmem_flags_convert(flags)); |
10129680 BB |
116 | if (ptr) |
117 | memcpy(ptr, str, n + 1); | |
118 | ||
b34b9563 | 119 | return (ptr); |
10129680 BB |
120 | } |
121 | ||
122 | char * | |
123 | strdup(const char *str) | |
124 | { | |
b34b9563 | 125 | return (__strdup(str, KM_SLEEP)); |
10129680 BB |
126 | } |
127 | EXPORT_SYMBOL(strdup); | |
128 | ||
/*
 * Release a string obtained from strdup()/kmem_asprintf().  Thin wrapper
 * over kfree(); provided for Illumos API compatibility.
 */
void
strfree(char *str)
{
	kfree(str);
}
EXPORT_SYMBOL(strfree);
135 | ||
c3eabc75 BB |
/*
 * General purpose unified implementation of kmem_alloc(). It is an
 * amalgamation of Linux and Illumos allocator design. It should never be
 * exported to ensure that code using kmem_alloc()/kmem_zalloc() remains
 * relatively portable. Consumers may only access this function through
 * wrappers that enforce the common flags to ensure portability.
 *
 * size:  number of bytes requested
 * flags: public KM_* flags (e.g. KM_SLEEP, KM_NOSLEEP, KM_VMEM)
 * node:  preferred NUMA node, or NUMA_NO_NODE
 *
 * Returns the allocated buffer.  NULL is returned only for KM_NOSLEEP
 * failures, or for non-KM_VMEM requests above spl_kmem_alloc_max;
 * KM_SLEEP callers otherwise loop until the allocation succeeds.
 */
inline void *
spl_kmem_alloc_impl(size_t size, int flags, int node)
{
	gfp_t lflags = kmem_flags_convert(flags);
	int use_vmem = 0;
	void *ptr;

	/*
	 * Log abnormally large allocations and rate limit the console output.
	 * Allocations larger than spl_kmem_alloc_warn should be performed
	 * through the vmem_alloc()/vmem_zalloc() interfaces.
	 */
	if ((spl_kmem_alloc_warn > 0) && (size > spl_kmem_alloc_warn) &&
	    !(flags & KM_VMEM)) {
		printk(KERN_WARNING
		    "Large kmem_alloc(%lu, 0x%x), please file an issue at:\n"
		    "https://github.com/zfsonlinux/zfs/issues/new\n",
		    (unsigned long)size, flags);
		dump_stack();
	}

	/*
	 * Use a loop because kmalloc_node() can fail when GFP_KERNEL is used
	 * unlike kmem_alloc() with KM_SLEEP on Illumos.
	 */
	do {
		/*
		 * Calling kmalloc_node() when the size >= spl_kmem_alloc_max
		 * is unsafe.  This must fail for all for kmem_alloc() and
		 * kmem_zalloc() callers.
		 *
		 * For vmem_alloc() and vmem_zalloc() callers it is permissible
		 * to use __vmalloc().  However, in general use of __vmalloc()
		 * is strongly discouraged because a global lock must be
		 * acquired.  Contention on this lock can significantly
		 * impact performance so frequently manipulating the virtual
		 * address space is strongly discouraged.
		 */
		if ((size > spl_kmem_alloc_max) || use_vmem) {
			if (flags & KM_VMEM) {
				ptr = __vmalloc(size, lflags, PAGE_KERNEL);
			} else {
				/* kmem_alloc() may never exceed the max */
				return (NULL);
			}
		} else {
			ptr = kmalloc_node(size, lflags, node);
		}

		/* Success, or a KM_NOSLEEP caller who accepts failure */
		if (likely(ptr) || (flags & KM_NOSLEEP))
			return (ptr);

		/*
		 * For vmem_alloc() and vmem_zalloc() callers retry immediately
		 * using __vmalloc() which is unlikely to fail.
		 */
		if ((flags & KM_VMEM) && (use_vmem == 0)) {
			use_vmem = 1;
			continue;
		}

		/*
		 * Use cond_resched() instead of congestion_wait() to avoid
		 * deadlocking systems where there are no block devices.
		 */
		cond_resched();
	} while (1);

	/* Not reached: every exit from the loop above returns directly. */
	return (NULL);
}
212 | ||
213 | inline void | |
214 | spl_kmem_free_impl(const void *buf, size_t size) | |
215 | { | |
216 | if (is_vmalloc_addr(buf)) | |
217 | vfree(buf); | |
218 | else | |
219 | kfree(buf); | |
220 | } | |
221 | ||
/*
 * Memory allocation and accounting for kmem_* style allocations.  When
 * DEBUG_KMEM is enabled the total memory allocated will be tracked and
 * any memory leaked will be reported during module unload.
 *
 * ./configure --enable-debug-kmem
 */
#ifdef DEBUG_KMEM

/* Shim layer memory accounting */
#ifdef HAVE_ATOMIC64_T
/* Running total of outstanding kmem bytes, and the high-water mark */
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
#else /* HAVE_ATOMIC64_T */
/* Fallback for kernels without atomic64_t: narrower 'used' counter */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
#endif /* HAVE_ATOMIC64_T */

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
ff449ac4 | 242 | |
c3eabc75 BB |
/*
 * Accounting wrapper around spl_kmem_alloc_impl().  On success the request
 * size is added to the running total and the high-water mark is refreshed.
 *
 * NOTE(review): the read-then-store update of kmem_alloc_max is not atomic,
 * so concurrent allocators may record a slightly stale maximum -- presumably
 * acceptable for debug-only statistics; confirm.
 */
inline void *
spl_kmem_alloc_debug(size_t size, int flags, int node)
{
	void *ptr;

	ptr = spl_kmem_alloc_impl(size, flags, node);
	if (ptr) {
		kmem_alloc_used_add(size);
		if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
			kmem_alloc_max = kmem_alloc_used_read();
	}

	return (ptr);
}
257 | ||
/*
 * Accounting wrapper around spl_kmem_free_impl(): subtract 'size' from
 * the outstanding-bytes counter, then release the buffer.
 */
inline void
spl_kmem_free_debug(const void *ptr, size_t size)
{
	kmem_alloc_used_sub(size);
	spl_kmem_free_impl(ptr, size);
}
264 | ||
b34b9563 BB |
/*
 * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free. When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console. Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straight forward.
 *
 * Not surprisingly with debugging enabled the xmem_locks are very highly
 * contended particularly on xfree(). If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 *
 * ./configure --enable-debug-kmem-tracking
 */
#ifdef DEBUG_KMEM_TRACKING

#include <linux/hash.h>
#include <linux/ctype.h>

/* Hash-table geometry for the per-allocation tracking records */
#define	KMEM_HASH_BITS		10
#define	KMEM_TABLE_SIZE		(1 << KMEM_HASH_BITS)

/* One record per outstanding allocation, linked in both structures below */
typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

/* Guards both the hash table and the global allocation list below */
static spinlock_t kmem_lock;
static struct hlist_head kmem_table[KMEM_TABLE_SIZE];
static struct list_head kmem_list;
d6a26c6a | 299 | |
a0f6da3d | 300 | static kmem_debug_t * |
b34b9563 BB |
301 | kmem_del_init(spinlock_t *lock, struct hlist_head *table, |
302 | int bits, const void *addr) | |
a0f6da3d BB |
303 | { |
304 | struct hlist_head *head; | |
305 | struct hlist_node *node; | |
306 | struct kmem_debug *p; | |
307 | unsigned long flags; | |
a0f6da3d BB |
308 | |
309 | spin_lock_irqsave(lock, flags); | |
310 | ||
b1424add BB |
311 | head = &table[hash_ptr((void *)addr, bits)]; |
312 | hlist_for_each(node, head) { | |
313 | p = list_entry(node, struct kmem_debug, kd_hlist); | |
a0f6da3d BB |
314 | if (p->kd_addr == addr) { |
315 | hlist_del_init(&p->kd_hlist); | |
316 | list_del_init(&p->kd_list); | |
317 | spin_unlock_irqrestore(lock, flags); | |
b34b9563 | 318 | return (p); |
a0f6da3d BB |
319 | } |
320 | } | |
321 | ||
322 | spin_unlock_irqrestore(lock, flags); | |
323 | ||
8d9a23e8 | 324 | return (NULL); |
a0f6da3d BB |
325 | } |
326 | ||
c3eabc75 BB |
/*
 * Tracking wrapper around spl_kmem_alloc_debug().  Allocates a kmem_debug_t
 * record describing the allocation (caller function/line, size, address)
 * and inserts it into the hash table and the global list so leaks can be
 * reported at module unload.  Returns NULL if the record, its copied
 * function name, or the requested buffer itself cannot be allocated; on
 * failure all partial allocations are released.
 */
inline void *
spl_kmem_alloc_track(size_t size, int flags,
    const char *func, int line, int node)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;

	dptr = kmalloc(sizeof (kmem_debug_t), kmem_flags_convert(flags));
	if (dptr == NULL)
		return (NULL);

	/* Copy the caller name; __strdup() uses kmalloc(), freed via kfree() */
	dptr->kd_func = __strdup(func, flags);
	if (dptr->kd_func == NULL) {
		kfree(dptr);
		return (NULL);
	}

	ptr = spl_kmem_alloc_debug(size, flags, node);
	if (ptr == NULL) {
		kfree(dptr->kd_func);
		kfree(dptr);
		return (NULL);
	}

	INIT_HLIST_NODE(&dptr->kd_hlist);
	INIT_LIST_HEAD(&dptr->kd_list);

	dptr->kd_addr = ptr;
	dptr->kd_size = size;
	dptr->kd_line = line;

	/* Publish the record under the lock guarding table and list */
	spin_lock_irqsave(&kmem_lock, irq_flags);
	hlist_add_head(&dptr->kd_hlist,
	    &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
	list_add_tail(&dptr->kd_list, &kmem_list);
	spin_unlock_irqrestore(&kmem_lock, irq_flags);

	return (ptr);
}
a0f6da3d | 367 | |
c3eabc75 BB |
/*
 * Tracking wrapper around spl_kmem_free_debug().  Removes and frees the
 * kmem_debug_t record associated with 'ptr'.  The record must exist and
 * its recorded size must match 'size', otherwise the ASSERTs fire
 * (indicating a double free, an untracked pointer, or a size mismatch).
 */
inline void
spl_kmem_free_track(const void *ptr, size_t size)
{
	kmem_debug_t *dptr;

	/* Ignore NULL pointer since we haven't tracked it at all */
	if (ptr == NULL)
		return;

	/* Must exist in hash due to kmem_alloc() */
	dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
	ASSERT3P(dptr, !=, NULL);
	ASSERT3S(dptr->kd_size, ==, size);

	kfree(dptr->kd_func);
	kfree(dptr);

	spl_kmem_free_debug(ptr, size);
}
c3eabc75 BB |
387 | #endif /* DEBUG_KMEM_TRACKING */ |
388 | #endif /* DEBUG_KMEM */ | |
a0f6da3d | 389 | |
c3eabc75 BB |
/*
 * Public kmem_alloc(), kmem_zalloc() and kmem_free() interfaces.
 */

/*
 * Public kmem_alloc() entry point.  'func' and 'line' identify the caller
 * and are only consumed by the DEBUG_KMEM_TRACKING build; the backend is
 * selected at compile time (raw impl, accounting, or full tracking).
 * Only public KM_* flags are permitted.
 */
void *
spl_kmem_alloc(size_t size, int flags, const char *func, int line)
{
	ASSERT0(flags & ~KM_PUBLIC_MASK);

#if !defined(DEBUG_KMEM)
	return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE));
#elif !defined(DEBUG_KMEM_TRACKING)
	return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE));
#else
	return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE));
#endif
}
EXPORT_SYMBOL(spl_kmem_alloc);
a0f6da3d | 407 | |
c3eabc75 BB |
/*
 * Public kmem_zalloc() entry point: identical to spl_kmem_alloc() except
 * KM_ZERO is OR'd into the flags so the returned memory is zeroed.
 */
void *
spl_kmem_zalloc(size_t size, int flags, const char *func, int line)
{
	ASSERT0(flags & ~KM_PUBLIC_MASK);

	flags |= KM_ZERO;

#if !defined(DEBUG_KMEM)
	return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE));
#elif !defined(DEBUG_KMEM_TRACKING)
	return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE));
#else
	return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE));
#endif
}
EXPORT_SYMBOL(spl_kmem_zalloc);
a0f6da3d BB |
424 | |
/*
 * Public kmem_free() entry point.  'size' must match the original request
 * (Illumos contract); the compile-time-selected backend releases the
 * buffer and, when enabled, updates accounting/tracking state.
 */
void
spl_kmem_free(const void *buf, size_t size)
{
#if !defined(DEBUG_KMEM)
	return (spl_kmem_free_impl(buf, size));
#elif !defined(DEBUG_KMEM_TRACKING)
	return (spl_kmem_free_debug(buf, size));
#else
	return (spl_kmem_free_track(buf, size));
#endif
}
EXPORT_SYMBOL(spl_kmem_free);
a0f6da3d | 437 | |
e5b9b344 BB |
438 | #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING) |
/*
 * Render the beginning of the tracked allocation 'kd' into 'str' (capacity
 * 'len', at least 17 bytes) for the leak report.  If more than 'min'
 * leading bytes are printable the data is shown as ASCII; otherwise eight
 * bytes are dumped as hex.
 *
 * NOTE(review): the hex fallback samples bytes at even offsets
 * (0, 2, 4, ..., 14) rather than the first eight consecutive bytes, and
 * may read up to offset 14 even for smaller allocations -- confirm this
 * is intentional before relying on the dump contents.
 */
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/*
	 * Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer.
	 */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/*
			 * Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii.
			 */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		    *((uint8_t *)kd->kd_addr),
		    *((uint8_t *)kd->kd_addr + 2),
		    *((uint8_t *)kd->kd_addr + 4),
		    *((uint8_t *)kd->kd_addr + 6),
		    *((uint8_t *)kd->kd_addr + 8),
		    *((uint8_t *)kd->kd_addr + 10),
		    *((uint8_t *)kd->kd_addr + 12),
		    *((uint8_t *)kd->kd_addr + 14));
	}

	return (str);
}
483 | ||
e5b9b344 BB |
/*
 * Initialize leak-tracking state: the allocation list, its spinlock, and
 * 'size' hash buckets.  Always returns 0.
 *
 * NOTE(review): the bucket loop always initializes the global kmem_table[]
 * regardless of the 'list'/'lock' arguments, so this helper is only usable
 * for the kmem table -- confirm before reusing it for another table.
 */
static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);

	return (0);
}
497 | ||
e5b9b344 BB |
/*
 * Dump every still-tracked allocation (address, size, a preview of its
 * contents, and the allocating function:line) to the console.  Called at
 * module unload to report leaks; an empty list prints nothing.
 */
static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];

	spin_lock_irqsave(lock, flags);
	/* Print the column header only when there is something to report */
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list) {
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);
	}

	spin_unlock_irqrestore(lock, flags);
}
e5b9b344 | 518 | #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ |
fece7c99 | 519 | |
e5b9b344 BB |
/*
 * Module init hook: reset the accounting counter and, when leak tracking
 * is compiled in, initialize the tracking structures.  Always returns 0.
 */
int
spl_kmem_init(void)
{
#ifdef DEBUG_KMEM
	kmem_alloc_used_set(0);

#ifdef DEBUG_KMEM_TRACKING
	spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

	return (0);
}
d6a26c6a | 536 | |
e5b9b344 BB |
537 | void |
538 | spl_kmem_fini(void) | |
2fb9b26a | 539 | { |
ff449ac4 | 540 | #ifdef DEBUG_KMEM |
b34b9563 BB |
541 | /* |
542 | * Display all unreclaimed memory addresses, including the | |
ff449ac4 BB |
543 | * allocation size and the first few bytes of what's located |
544 | * at that address to aid in debugging. Performance is not | |
b34b9563 BB |
545 | * a serious concern here since it is module unload time. |
546 | */ | |
d04c8a56 | 547 | if (kmem_alloc_used_read() != 0) |
8d9a23e8 | 548 | printk(KERN_WARNING "kmem leaked %ld/%llu bytes\n", |
c3eabc75 | 549 | (unsigned long)kmem_alloc_used_read(), kmem_alloc_max); |
ff449ac4 | 550 | |
c3eabc75 | 551 | #ifdef DEBUG_KMEM_TRACKING |
ff449ac4 | 552 | spl_kmem_fini_tracking(&kmem_list, &kmem_lock); |
c3eabc75 | 553 | #endif /* DEBUG_KMEM_TRACKING */ |
ff449ac4 | 554 | #endif /* DEBUG_KMEM */ |
5d86345d | 555 | } |