/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */
e5b9b344 | 25 | #include <sys/debug.h> |
c3eabc75 | 26 | #include <sys/sysmacros.h> |
f4b37741 | 27 | #include <sys/kmem.h> |
e5b9b344 | 28 | #include <sys/vmem.h> |
c3eabc75 BB |
29 | #include <linux/mm.h> |
30 | #include <linux/ratelimit.h> | |
31 | ||
32 | /* | |
33 | * As a general rule kmem_alloc() allocations should be small, preferably | |
34 | * just a few pages since they must by physically contiguous. Therefore, a | |
35 | * rate limited warning will be printed to the console for any kmem_alloc() | |
36 | * which exceeds a reasonable threshold. | |
37 | * | |
cb81c0c5 | 38 | * The default warning threshold is set to sixteen pages but capped at 64K to |
c3eabc75 BB |
39 | * accommodate systems using large pages. This value was selected to be small |
40 | * enough to ensure the largest allocations are quickly noticed and fixed. | |
41 | * But large enough to avoid logging any warnings when a allocation size is | |
42 | * larger than optimal but not a serious concern. Since this value is tunable, | |
43 | * developers are encouraged to set it lower when testing so any new largish | |
44 | * allocations are quickly caught. These warnings may be disabled by setting | |
45 | * the threshold to zero. | |
46 | */ | |
cb81c0c5 | 47 | unsigned int spl_kmem_alloc_warn = MIN(16 * PAGE_SIZE, 64 * 1024); |
c3eabc75 BB |
48 | module_param(spl_kmem_alloc_warn, uint, 0644); |
49 | MODULE_PARM_DESC(spl_kmem_alloc_warn, | |
50 | "Warning threshold in bytes for a kmem_alloc()"); | |
51 | EXPORT_SYMBOL(spl_kmem_alloc_warn); | |
52 | ||
53 | /* | |
54 | * Large kmem_alloc() allocations will fail if they exceed KMALLOC_MAX_SIZE. | |
55 | * Allocations which are marginally smaller than this limit may succeed but | |
56 | * should still be avoided due to the expense of locating a contiguous range | |
57 | * of free pages. Therefore, a maximum kmem size with reasonable safely | |
58 | * margin of 4x is set. Kmem_alloc() allocations larger than this maximum | |
59 | * will quickly fail. Vmem_alloc() allocations less than or equal to this | |
60 | * value will use kmalloc(), but shift to vmalloc() when exceeding this value. | |
61 | */ | |
62 | unsigned int spl_kmem_alloc_max = (KMALLOC_MAX_SIZE >> 2); | |
63 | module_param(spl_kmem_alloc_max, uint, 0644); | |
64 | MODULE_PARM_DESC(spl_kmem_alloc_max, | |
65 | "Maximum size in bytes for a kmem_alloc()"); | |
66 | EXPORT_SYMBOL(spl_kmem_alloc_max); | |
4ab13d3b | 67 | |
/*
 * Solaris-compatible kmem_debugging() shim: this port never enables the
 * Solaris kmem debugging facilities, so always report them as disabled.
 */
int
kmem_debugging(void)
{
    return (0);
}
EXPORT_SYMBOL(kmem_debugging);
74 | ||
e6de04b7 BB |
75 | char * |
76 | kmem_vasprintf(const char *fmt, va_list ap) | |
77 | { | |
78 | va_list aq; | |
79 | char *ptr; | |
80 | ||
e6de04b7 | 81 | do { |
2c762de8 | 82 | va_copy(aq, ap); |
54cccfc2 | 83 | ptr = kvasprintf(kmem_flags_convert(KM_SLEEP), fmt, aq); |
2c762de8 | 84 | va_end(aq); |
e6de04b7 | 85 | } while (ptr == NULL); |
e6de04b7 | 86 | |
b34b9563 | 87 | return (ptr); |
e6de04b7 BB |
88 | } |
89 | EXPORT_SYMBOL(kmem_vasprintf); | |
90 | ||
b868e22f BB |
91 | char * |
92 | kmem_asprintf(const char *fmt, ...) | |
93 | { | |
e6de04b7 | 94 | va_list ap; |
b868e22f BB |
95 | char *ptr; |
96 | ||
b868e22f | 97 | do { |
2c762de8 | 98 | va_start(ap, fmt); |
54cccfc2 | 99 | ptr = kvasprintf(kmem_flags_convert(KM_SLEEP), fmt, ap); |
2c762de8 | 100 | va_end(ap); |
b868e22f | 101 | } while (ptr == NULL); |
b868e22f | 102 | |
b34b9563 | 103 | return (ptr); |
b868e22f BB |
104 | } |
105 | EXPORT_SYMBOL(kmem_asprintf); | |
106 | ||
/*
 * Internal strdup() honoring the caller's kmem flags.  Returns NULL
 * when the backing kmalloc() fails (e.g. with KM_NOSLEEP).
 */
static char *
__strdup(const char *str, int flags)
{
    size_t len;
    char *copy;

    len = strlen(str) + 1;
    copy = kmalloc(len, kmem_flags_convert(flags));
    if (copy != NULL)
        memcpy(copy, str, len);

    return (copy);
}
120 | ||
121 | char * | |
122 | strdup(const char *str) | |
123 | { | |
b34b9563 | 124 | return (__strdup(str, KM_SLEEP)); |
10129680 BB |
125 | } |
126 | EXPORT_SYMBOL(strdup); | |
127 | ||
/*
 * Release a string obtained from strdup() or kmem_asprintf().
 */
void
strfree(char *str)
{
    kfree(str);
}
EXPORT_SYMBOL(strfree);
134 | ||
f1ca4da6 | 135 | /* |
c3eabc75 BB |
136 | * Limit the number of large allocation stack traces dumped to not more than |
137 | * 5 every 60 seconds to prevent denial-of-service attacks from debug code. | |
138 | */ | |
139 | DEFINE_RATELIMIT_STATE(kmem_alloc_ratelimit_state, 60 * HZ, 5); | |
140 | ||
141 | /* | |
142 | * General purpose unified implementation of kmem_alloc(). It is an | |
143 | * amalgamation of Linux and Illumos allocator design. It should never be | |
144 | * exported to ensure that code using kmem_alloc()/kmem_zalloc() remains | |
145 | * relatively portable. Consumers may only access this function through | |
146 | * wrappers that enforce the common flags to ensure portability. | |
147 | */ | |
148 | inline void * | |
149 | spl_kmem_alloc_impl(size_t size, int flags, int node) | |
150 | { | |
151 | gfp_t lflags = kmem_flags_convert(flags); | |
c7db36a3 | 152 | int use_vmem = 0; |
c3eabc75 BB |
153 | void *ptr; |
154 | ||
155 | /* | |
156 | * Log abnormally large allocations and rate limit the console output. | |
157 | * Allocations larger than spl_kmem_alloc_warn should be performed | |
158 | * through the vmem_alloc()/vmem_zalloc() interfaces. | |
159 | */ | |
160 | if ((spl_kmem_alloc_warn > 0) && (size > spl_kmem_alloc_warn) && | |
161 | !(flags & KM_VMEM) && __ratelimit(&kmem_alloc_ratelimit_state)) { | |
162 | printk(KERN_WARNING | |
163 | "Large kmem_alloc(%lu, 0x%x), please file an issue at:\n" | |
164 | "https://github.com/zfsonlinux/zfs/issues/new\n", | |
165 | (unsigned long)size, flags); | |
166 | dump_stack(); | |
167 | } | |
168 | ||
169 | /* | |
170 | * Use a loop because kmalloc_node() can fail when GFP_KERNEL is used | |
171 | * unlike kmem_alloc() with KM_SLEEP on Illumos. | |
172 | */ | |
173 | do { | |
174 | /* | |
175 | * Calling kmalloc_node() when the size >= spl_kmem_alloc_max | |
176 | * is unsafe. This must fail for all for kmem_alloc() and | |
177 | * kmem_zalloc() callers. | |
178 | * | |
179 | * For vmem_alloc() and vmem_zalloc() callers it is permissible | |
180 | * to use __vmalloc(). However, in general use of __vmalloc() | |
181 | * is strongly discouraged because a global lock must be | |
182 | * acquired. Contention on this lock can significantly | |
183 | * impact performance so frequently manipulating the virtual | |
184 | * address space is strongly discouraged. | |
185 | */ | |
c7db36a3 | 186 | if ((size > spl_kmem_alloc_max) || use_vmem) { |
c3eabc75 | 187 | if (flags & KM_VMEM) { |
b4ad50ac | 188 | ptr = __vmalloc(size, lflags, PAGE_KERNEL); |
c3eabc75 BB |
189 | } else { |
190 | return (NULL); | |
191 | } | |
192 | } else { | |
193 | ptr = kmalloc_node(size, lflags, node); | |
194 | } | |
195 | ||
196 | if (likely(ptr) || (flags & KM_NOSLEEP)) | |
197 | return (ptr); | |
198 | ||
c7db36a3 BB |
199 | /* |
200 | * For vmem_alloc() and vmem_zalloc() callers retry immediately | |
b4ad50ac | 201 | * using __vmalloc() which is unlikely to fail. |
c7db36a3 BB |
202 | */ |
203 | if ((flags & KM_VMEM) && (use_vmem == 0)) { | |
204 | use_vmem = 1; | |
205 | continue; | |
206 | } | |
207 | ||
c3eabc75 BB |
208 | if (unlikely(__ratelimit(&kmem_alloc_ratelimit_state))) { |
209 | printk(KERN_WARNING | |
210 | "Possible memory allocation deadlock: " | |
211 | "size=%lu lflags=0x%x", | |
212 | (unsigned long)size, lflags); | |
213 | dump_stack(); | |
214 | } | |
215 | ||
216 | /* | |
217 | * Use cond_resched() instead of congestion_wait() to avoid | |
218 | * deadlocking systems where there are no block devices. | |
219 | */ | |
220 | cond_resched(); | |
221 | } while (1); | |
222 | ||
223 | return (NULL); | |
224 | } | |
225 | ||
226 | inline void | |
227 | spl_kmem_free_impl(const void *buf, size_t size) | |
228 | { | |
229 | if (is_vmalloc_addr(buf)) | |
230 | vfree(buf); | |
231 | else | |
232 | kfree(buf); | |
233 | } | |
234 | ||
235 | /* | |
236 | * Memory allocation and accounting for kmem_* * style allocations. When | |
237 | * DEBUG_KMEM is enabled the total memory allocated will be tracked and | |
238 | * any memory leaked will be reported during module unload. | |
239 | * | |
240 | * ./configure --enable-debug-kmem | |
f1ca4da6 BB |
241 | */ |
242 | #ifdef DEBUG_KMEM | |
d04c8a56 | 243 | |
f1ca4da6 | 244 | /* Shim layer memory accounting */ |
b34b9563 | 245 | #ifdef HAVE_ATOMIC64_T |
550f1705 | 246 | atomic64_t kmem_alloc_used = ATOMIC64_INIT(0); |
a0f6da3d | 247 | unsigned long long kmem_alloc_max = 0; |
b34b9563 | 248 | #else /* HAVE_ATOMIC64_T */ |
d04c8a56 BB |
249 | atomic_t kmem_alloc_used = ATOMIC_INIT(0); |
250 | unsigned long long kmem_alloc_max = 0; | |
b34b9563 | 251 | #endif /* HAVE_ATOMIC64_T */ |
79b31f36 | 252 | |
ff449ac4 BB |
253 | EXPORT_SYMBOL(kmem_alloc_used); |
254 | EXPORT_SYMBOL(kmem_alloc_max); | |
ff449ac4 | 255 | |
c3eabc75 BB |
256 | inline void * |
257 | spl_kmem_alloc_debug(size_t size, int flags, int node) | |
258 | { | |
259 | void *ptr; | |
260 | ||
261 | ptr = spl_kmem_alloc_impl(size, flags, node); | |
262 | if (ptr) { | |
263 | kmem_alloc_used_add(size); | |
264 | if (unlikely(kmem_alloc_used_read() > kmem_alloc_max)) | |
265 | kmem_alloc_max = kmem_alloc_used_read(); | |
266 | } | |
267 | ||
268 | return (ptr); | |
269 | } | |
270 | ||
271 | inline void | |
272 | spl_kmem_free_debug(const void *ptr, size_t size) | |
273 | { | |
274 | kmem_alloc_used_sub(size); | |
275 | spl_kmem_free_impl(ptr, size); | |
276 | } | |
277 | ||
b34b9563 BB |
278 | /* |
279 | * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked | |
055ffd98 BB |
280 | * but also the location of every alloc and free. When the SPL module is |
281 | * unloaded a list of all leaked addresses and where they were allocated | |
282 | * will be dumped to the console. Enabling this feature has a significant | |
283 | * impact on performance but it makes finding memory leaks straight forward. | |
284 | * | |
285 | * Not surprisingly with debugging enabled the xmem_locks are very highly | |
286 | * contended particularly on xfree(). If we want to run with this detailed | |
287 | * debugging enabled for anything other than debugging we need to minimize | |
288 | * the contention by moving to a lock per xmem_table entry model. | |
c3eabc75 BB |
289 | * |
290 | * ./configure --enable-debug-kmem-tracking | |
a0f6da3d | 291 | */ |
b34b9563 | 292 | #ifdef DEBUG_KMEM_TRACKING |
a0f6da3d | 293 | |
c3eabc75 BB |
294 | #include <linux/hash.h> |
295 | #include <linux/ctype.h> | |
296 | ||
b34b9563 BB |
297 | #define KMEM_HASH_BITS 10 |
298 | #define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS) | |
a0f6da3d | 299 | |
a0f6da3d | 300 | typedef struct kmem_debug { |
b34b9563 BB |
301 | struct hlist_node kd_hlist; /* Hash node linkage */ |
302 | struct list_head kd_list; /* List of all allocations */ | |
303 | void *kd_addr; /* Allocation pointer */ | |
304 | size_t kd_size; /* Allocation size */ | |
305 | const char *kd_func; /* Allocation function */ | |
306 | int kd_line; /* Allocation line */ | |
a0f6da3d BB |
307 | } kmem_debug_t; |
308 | ||
c3eabc75 BB |
309 | static spinlock_t kmem_lock; |
310 | static struct hlist_head kmem_table[KMEM_TABLE_SIZE]; | |
311 | static struct list_head kmem_list; | |
d6a26c6a | 312 | |
a0f6da3d | 313 | static kmem_debug_t * |
b34b9563 BB |
314 | kmem_del_init(spinlock_t *lock, struct hlist_head *table, |
315 | int bits, const void *addr) | |
a0f6da3d BB |
316 | { |
317 | struct hlist_head *head; | |
318 | struct hlist_node *node; | |
319 | struct kmem_debug *p; | |
320 | unsigned long flags; | |
a0f6da3d BB |
321 | |
322 | spin_lock_irqsave(lock, flags); | |
323 | ||
b1424add BB |
324 | head = &table[hash_ptr((void *)addr, bits)]; |
325 | hlist_for_each(node, head) { | |
326 | p = list_entry(node, struct kmem_debug, kd_hlist); | |
a0f6da3d BB |
327 | if (p->kd_addr == addr) { |
328 | hlist_del_init(&p->kd_hlist); | |
329 | list_del_init(&p->kd_list); | |
330 | spin_unlock_irqrestore(lock, flags); | |
b34b9563 | 331 | return (p); |
a0f6da3d BB |
332 | } |
333 | } | |
334 | ||
335 | spin_unlock_irqrestore(lock, flags); | |
336 | ||
8d9a23e8 | 337 | return (NULL); |
a0f6da3d BB |
338 | } |
339 | ||
c3eabc75 BB |
340 | inline void * |
341 | spl_kmem_alloc_track(size_t size, int flags, | |
342 | const char *func, int line, int node) | |
a0f6da3d BB |
343 | { |
344 | void *ptr = NULL; | |
345 | kmem_debug_t *dptr; | |
346 | unsigned long irq_flags; | |
a0f6da3d | 347 | |
c3eabc75 BB |
348 | dptr = kmalloc(sizeof (kmem_debug_t), kmem_flags_convert(flags)); |
349 | if (dptr == NULL) | |
350 | return (NULL); | |
a0f6da3d | 351 | |
c3eabc75 BB |
352 | dptr->kd_func = __strdup(func, flags); |
353 | if (dptr->kd_func == NULL) { | |
354 | kfree(dptr); | |
355 | return (NULL); | |
356 | } | |
a0f6da3d | 357 | |
c3eabc75 BB |
358 | ptr = spl_kmem_alloc_debug(size, flags, node); |
359 | if (ptr == NULL) { | |
360 | kfree(dptr->kd_func); | |
361 | kfree(dptr); | |
362 | return (NULL); | |
363 | } | |
a0f6da3d | 364 | |
c3eabc75 BB |
365 | INIT_HLIST_NODE(&dptr->kd_hlist); |
366 | INIT_LIST_HEAD(&dptr->kd_list); | |
a0f6da3d | 367 | |
c3eabc75 BB |
368 | dptr->kd_addr = ptr; |
369 | dptr->kd_size = size; | |
370 | dptr->kd_line = line; | |
a0f6da3d | 371 | |
c3eabc75 BB |
372 | spin_lock_irqsave(&kmem_lock, irq_flags); |
373 | hlist_add_head(&dptr->kd_hlist, | |
374 | &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]); | |
375 | list_add_tail(&dptr->kd_list, &kmem_list); | |
376 | spin_unlock_irqrestore(&kmem_lock, irq_flags); | |
a0f6da3d | 377 | |
8d9a23e8 | 378 | return (ptr); |
a0f6da3d | 379 | } |
a0f6da3d | 380 | |
c3eabc75 BB |
381 | inline void |
382 | spl_kmem_free_track(const void *ptr, size_t size) | |
a0f6da3d BB |
383 | { |
384 | kmem_debug_t *dptr; | |
a0f6da3d | 385 | |
aeb9baa6 G |
386 | /* Ignore NULL pointer since we haven't tracked it at all*/ |
387 | if (ptr == NULL) | |
388 | return; | |
389 | ||
10129680 | 390 | /* Must exist in hash due to kmem_alloc() */ |
8d9a23e8 | 391 | dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); |
c3eabc75 BB |
392 | ASSERT3P(dptr, !=, NULL); |
393 | ASSERT3S(dptr->kd_size, ==, size); | |
a0f6da3d | 394 | |
c8e60837 | 395 | kfree(dptr->kd_func); |
a0f6da3d BB |
396 | kfree(dptr); |
397 | ||
c3eabc75 | 398 | spl_kmem_free_debug(ptr, size); |
a0f6da3d | 399 | } |
c3eabc75 BB |
400 | #endif /* DEBUG_KMEM_TRACKING */ |
401 | #endif /* DEBUG_KMEM */ | |
a0f6da3d | 402 | |
c3eabc75 BB |
403 | /* |
404 | * Public kmem_alloc(), kmem_zalloc() and kmem_free() interfaces. | |
405 | */ | |
a0f6da3d | 406 | void * |
c3eabc75 | 407 | spl_kmem_alloc(size_t size, int flags, const char *func, int line) |
a0f6da3d | 408 | { |
c3eabc75 BB |
409 | ASSERT0(flags & ~KM_PUBLIC_MASK); |
410 | ||
411 | #if !defined(DEBUG_KMEM) | |
412 | return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE)); | |
413 | #elif !defined(DEBUG_KMEM_TRACKING) | |
414 | return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE)); | |
415 | #else | |
416 | return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE)); | |
417 | #endif | |
418 | } | |
419 | EXPORT_SYMBOL(spl_kmem_alloc); | |
a0f6da3d | 420 | |
c3eabc75 BB |
421 | void * |
422 | spl_kmem_zalloc(size_t size, int flags, const char *func, int line) | |
423 | { | |
424 | ASSERT0(flags & ~KM_PUBLIC_MASK); | |
a0f6da3d | 425 | |
c3eabc75 | 426 | flags |= KM_ZERO; |
10129680 | 427 | |
c3eabc75 BB |
428 | #if !defined(DEBUG_KMEM) |
429 | return (spl_kmem_alloc_impl(size, flags, NUMA_NO_NODE)); | |
430 | #elif !defined(DEBUG_KMEM_TRACKING) | |
431 | return (spl_kmem_alloc_debug(size, flags, NUMA_NO_NODE)); | |
432 | #else | |
433 | return (spl_kmem_alloc_track(size, flags, func, line, NUMA_NO_NODE)); | |
434 | #endif | |
a0f6da3d | 435 | } |
c3eabc75 | 436 | EXPORT_SYMBOL(spl_kmem_zalloc); |
a0f6da3d BB |
437 | |
438 | void | |
c3eabc75 | 439 | spl_kmem_free(const void *buf, size_t size) |
a0f6da3d | 440 | { |
c3eabc75 BB |
441 | #if !defined(DEBUG_KMEM) |
442 | return (spl_kmem_free_impl(buf, size)); | |
443 | #elif !defined(DEBUG_KMEM_TRACKING) | |
444 | return (spl_kmem_free_debug(buf, size)); | |
445 | #else | |
446 | return (spl_kmem_free_track(buf, size)); | |
447 | #endif | |
a0f6da3d | 448 | } |
c3eabc75 | 449 | EXPORT_SYMBOL(spl_kmem_free); |
a0f6da3d | 450 | |
e5b9b344 BB |
451 | #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING) |
452 | static char * | |
453 | spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) | |
fece7c99 | 454 | { |
e5b9b344 BB |
455 | int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size; |
456 | int i, flag = 1; | |
fece7c99 | 457 | |
e5b9b344 BB |
458 | ASSERT(str != NULL && len >= 17); |
459 | memset(str, 0, len); | |
fece7c99 | 460 | |
b34b9563 BB |
461 | /* |
462 | * Check for a fully printable string, and while we are at | |
463 | * it place the printable characters in the passed buffer. | |
464 | */ | |
e5b9b344 BB |
465 | for (i = 0; i < size; i++) { |
466 | str[i] = ((char *)(kd->kd_addr))[i]; | |
467 | if (isprint(str[i])) { | |
468 | continue; | |
469 | } else { | |
b34b9563 BB |
470 | /* |
471 | * Minimum number of printable characters found | |
472 | * to make it worthwhile to print this as ascii. | |
473 | */ | |
e5b9b344 BB |
474 | if (i > min) |
475 | break; | |
8b45dda2 | 476 | |
e5b9b344 BB |
477 | flag = 0; |
478 | break; | |
479 | } | |
480 | } | |
06089b9e | 481 | |
e5b9b344 BB |
482 | if (!flag) { |
483 | sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x", | |
b34b9563 BB |
484 | *((uint8_t *)kd->kd_addr), |
485 | *((uint8_t *)kd->kd_addr + 2), | |
486 | *((uint8_t *)kd->kd_addr + 4), | |
487 | *((uint8_t *)kd->kd_addr + 6), | |
488 | *((uint8_t *)kd->kd_addr + 8), | |
489 | *((uint8_t *)kd->kd_addr + 10), | |
490 | *((uint8_t *)kd->kd_addr + 12), | |
491 | *((uint8_t *)kd->kd_addr + 14)); | |
e5b9b344 | 492 | } |
8b45dda2 | 493 | |
b34b9563 | 494 | return (str); |
8b45dda2 BB |
495 | } |
496 | ||
e5b9b344 BB |
497 | static int |
498 | spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size) | |
8b45dda2 | 499 | { |
e5b9b344 | 500 | int i; |
8b45dda2 | 501 | |
e5b9b344 BB |
502 | spin_lock_init(lock); |
503 | INIT_LIST_HEAD(list); | |
8b45dda2 | 504 | |
e5b9b344 BB |
505 | for (i = 0; i < size; i++) |
506 | INIT_HLIST_HEAD(&kmem_table[i]); | |
8b45dda2 | 507 | |
e5b9b344 | 508 | return (0); |
fece7c99 BB |
509 | } |
510 | ||
e5b9b344 BB |
511 | static void |
512 | spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock) | |
fece7c99 | 513 | { |
e5b9b344 BB |
514 | unsigned long flags; |
515 | kmem_debug_t *kd; | |
516 | char str[17]; | |
a1502d76 | 517 | |
e5b9b344 BB |
518 | spin_lock_irqsave(lock, flags); |
519 | if (!list_empty(list)) | |
520 | printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address", | |
b34b9563 | 521 | "size", "data", "func", "line"); |
fece7c99 | 522 | |
e5b9b344 BB |
523 | list_for_each_entry(kd, list, kd_list) |
524 | printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr, | |
b34b9563 BB |
525 | (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), |
526 | kd->kd_func, kd->kd_line); | |
fece7c99 | 527 | |
e5b9b344 | 528 | spin_unlock_irqrestore(lock, flags); |
fece7c99 | 529 | } |
e5b9b344 | 530 | #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ |
fece7c99 | 531 | |
/*
 * Module init hook: reset the allocation accounting and, when leak
 * tracking is compiled in, set up the tracking tables.
 */
int
spl_kmem_init(void)
{
#ifdef DEBUG_KMEM
    kmem_alloc_used_set(0);

#ifdef DEBUG_KMEM_TRACKING
    spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

    return (0);
}
d6a26c6a | 545 | |
/*
 * Module unload hook.  With DEBUG_KMEM enabled any bytes still
 * accounted as allocated are reported as a leak, and with tracking
 * enabled each leaked allocation is itemized on the console.
 *
 * Fixes: the leak message used %ld for a value explicitly cast to
 * unsigned long; the matching conversion is %lu.
 */
void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
	/*
	 * Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time.
	 */
	if (kmem_alloc_used_read() != 0)
		printk(KERN_WARNING "kmem leaked %lu/%llu bytes\n",
		    (unsigned long)kmem_alloc_used_read(), kmem_alloc_max);

#ifdef DEBUG_KMEM_TRACKING
	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
#endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
}