git.proxmox.com Git - mirror_spl.git/blame - modules/spl/spl-kmem.c
Just use CONFIG_SLUB to detect SLUB use
715f6251 1/*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
f4b37741 27#include <sys/kmem.h>
f1ca4da6 28
937879f1 29#ifdef DEBUG_SUBSYSTEM
30#undef DEBUG_SUBSYSTEM
31#endif
32
33#define DEBUG_SUBSYSTEM S_KMEM
34
f1ca4da6 35/*
36 * Memory allocation interfaces
37 */
38#ifdef DEBUG_KMEM
39/* Shim layer memory accounting */
c19c06f3 40atomic64_t kmem_alloc_used;
41unsigned long kmem_alloc_max = 0;
42atomic64_t vmem_alloc_used;
43unsigned long vmem_alloc_max = 0;
44int kmem_warning_flag = 1;
5c2bb9b2 45atomic64_t kmem_cache_alloc_failed;
79b31f36 46
d6a26c6a 47spinlock_t kmem_lock;
48struct hlist_head kmem_table[KMEM_TABLE_SIZE];
49struct list_head kmem_list;
50
13cdca65 51spinlock_t vmem_lock;
52struct hlist_head vmem_table[VMEM_TABLE_SIZE];
53struct list_head vmem_list;
54
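/* The counters and the kmem/vmem hash tables above track every
 * outstanding allocation made through the shim (the tracking itself
 * is driven from the allocation paths elsewhere in the SPL). At
 * module unload kmem_fini() walks the lists and reports any leaked
 * allocations together with the peak usage recorded in
 * kmem_alloc_max/vmem_alloc_max. */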
79b31f36 55EXPORT_SYMBOL(kmem_alloc_used);
56EXPORT_SYMBOL(kmem_alloc_max);
57EXPORT_SYMBOL(vmem_alloc_used);
58EXPORT_SYMBOL(vmem_alloc_max);
c19c06f3 59EXPORT_SYMBOL(kmem_warning_flag);
60
d6a26c6a 61EXPORT_SYMBOL(kmem_lock);
62EXPORT_SYMBOL(kmem_table);
63EXPORT_SYMBOL(kmem_list);
64
13cdca65 65EXPORT_SYMBOL(vmem_lock);
66EXPORT_SYMBOL(vmem_table);
67EXPORT_SYMBOL(vmem_list);
68
c19c06f3 69int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
70#else
71int kmem_set_warning(int flag) { return 0; }
f1ca4da6 72#endif
c19c06f3 73EXPORT_SYMBOL(kmem_set_warning);
f1ca4da6 74
75/*
76 * Slab allocation interfaces
77 *
 78 * While the linux slab implementation was inspired by solaris, the
 79 * API has diverged in ways which complicate this shim layer. For
 80 * one thing the same symbol names are used with different arguments
 81 * in the prototypes. To deal with this we must use the preprocessor
 82 * to re-order arguments. Happily for us standard C says,
 83 * "Macros appearing in their own expansion are not reexpanded", so
 84 * this does not result in infinite recursion. Additionally, the
 85 * function pointers registered by solaris differ from those used
 86 * by linux, so a lookup and mapping from a linux style callback to a
 87 * solaris style callback is needed. There is some overhead in this
 88 * operation which isn't horrible, but it needs to be kept in mind.
89 */
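/*
 * Illustration only: the authoritative macros live in the SPL headers
 * (e.g. sys/kmem.h) and their exact argument lists may differ, but the
 * shim is assumed to remap the identically named Solaris calls onto
 * the __kmem_cache_* functions defined below, along the lines of:
 *
 *   #define kmem_cache_create(name, size, align, ctor, dtor, \
 *                             reclaim, priv, vmp, flags) \
 *           __kmem_cache_create(name, size, align, ctor, dtor, \
 *                               reclaim, priv, vmp, flags)
 *
 * The macros are #undef'ed further down so this file can still call
 * the native linux kmem_cache_* functions directly.
 */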
d6a26c6a 90#define KCC_MAGIC 0x7a7a7a7a
91#define KCC_POISON 0x77
92
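/* Per-cache control block which maps a linux kmem_cache_t back to the
 * Solaris style constructor/destructor/reclaim callbacks registered
 * for it. Entries are hashed by cache pointer in kmem_cache_table and
 * linked on kmem_cache_list. The kcc_magic field is verified on every
 * lookup and the block is filled with KCC_POISON before being freed
 * to help catch use of a stale control block. */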
f1ca4da6 93typedef struct kmem_cache_cb {
d6a26c6a 94 int kcc_magic;
c30df9c8 95 struct hlist_node kcc_hlist;
f1ca4da6 96 struct list_head kcc_list;
97 kmem_cache_t * kcc_cache;
98 kmem_constructor_t kcc_constructor;
99 kmem_destructor_t kcc_destructor;
100 kmem_reclaim_t kcc_reclaim;
101 void * kcc_private;
102 void * kcc_vmp;
d6a26c6a 103 atomic_t kcc_ref;
f1ca4da6 104} kmem_cache_cb_t;
105
c30df9c8 106#define KMEM_CACHE_HASH_BITS 10
107#define KMEM_CACHE_TABLE_SIZE (1 << KMEM_CACHE_HASH_BITS)
108
109struct hlist_head kmem_cache_table[KMEM_CACHE_TABLE_SIZE];
110struct list_head kmem_cache_list;
111static struct rw_semaphore kmem_cache_sem;
112
57d86234 113#ifdef HAVE_SET_SHRINKER
f1ca4da6 114static struct shrinker *kmem_cache_shrinker;
57d86234 115#else
116static int kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask);
117static struct shrinker kmem_cache_shrinker = {
118 .shrink = kmem_cache_generic_shrinker,
119 .seeks = KMC_DEFAULT_SEEKS,
120};
121#endif
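/* Older kernels provide set_shrinker()/remove_shrinker(), which
 * allocate and manage the struct shrinker internally; newer kernels
 * expect the caller to define a struct shrinker and pass it to
 * register_shrinker()/unregister_shrinker(). HAVE_SET_SHRINKER is
 * presumably set by the SPL configure checks accordingly. */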
f1ca4da6 122
c30df9c8 123/* Function must be called while holding the kmem_cache_sem
f1ca4da6 124 * Because kmem_cache_t is an opaque datatype we're forced to
 125 * match pointers to identify specific cache entries.
126 */
127static kmem_cache_cb_t *
128kmem_cache_find_cache_cb(kmem_cache_t *cache)
129{
c30df9c8 130 struct hlist_head *head;
131 struct hlist_node *node;
f1ca4da6 132 kmem_cache_cb_t *kcc;
d6a26c6a 133#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
c30df9c8 134 ASSERT(rwsem_is_locked(&kmem_cache_sem));
d6a26c6a 135#endif
f1ca4da6 136
c30df9c8 137 head = &kmem_cache_table[hash_ptr(cache, KMEM_CACHE_HASH_BITS)];
138 hlist_for_each_entry_rcu(kcc, node, head, kcc_hlist)
139 if (kcc->kcc_cache == cache)
f1ca4da6 140 return kcc;
141
142 return NULL;
143}
144
145static kmem_cache_cb_t *
146kmem_cache_add_cache_cb(kmem_cache_t *cache,
147 kmem_constructor_t constructor,
148 kmem_destructor_t destructor,
149 kmem_reclaim_t reclaim,
150 void *priv, void *vmp)
151{
152 kmem_cache_cb_t *kcc;
153
154 kcc = (kmem_cache_cb_t *)kmalloc(sizeof(*kcc), GFP_KERNEL);
155 if (kcc) {
d6a26c6a 156 kcc->kcc_magic = KCC_MAGIC;
f1ca4da6 157 kcc->kcc_cache = cache;
158 kcc->kcc_constructor = constructor;
159 kcc->kcc_destructor = destructor;
160 kcc->kcc_reclaim = reclaim;
161 kcc->kcc_private = priv;
162 kcc->kcc_vmp = vmp;
d6a26c6a 163 atomic_set(&kcc->kcc_ref, 0);
c30df9c8 164 down_write(&kmem_cache_sem);
165 hlist_add_head_rcu(&kcc->kcc_hlist, &kmem_cache_table[
166 hash_ptr(cache, KMEM_CACHE_HASH_BITS)]);
167 list_add_tail(&kcc->kcc_list, &kmem_cache_list);
168 up_write(&kmem_cache_sem);
f1ca4da6 169 }
170
171 return kcc;
172}
173
174static void
175kmem_cache_remove_cache_cb(kmem_cache_cb_t *kcc)
176{
c30df9c8 177 down_write(&kmem_cache_sem);
d6a26c6a 178 ASSERT(atomic_read(&kcc->kcc_ref) == 0);
c30df9c8 179 hlist_del_init(&kcc->kcc_hlist);
180 list_del_init(&kcc->kcc_list);
181 up_write(&kmem_cache_sem);
f1ca4da6 182
c30df9c8 183 if (kcc) {
d6a26c6a 184 memset(kcc, KCC_POISON, sizeof(*kcc));
185 kfree(kcc);
186 }
f1ca4da6 187}
188
57d86234 189#ifdef HAVE_3ARG_KMEM_CACHE_CREATE_CTOR
f1ca4da6 190static void
57d86234 191kmem_cache_generic_constructor(void *ptr, kmem_cache_t *cache,
192 unsigned long flags)
f1ca4da6 193{
194 kmem_cache_cb_t *kcc;
d61e12af 195 kmem_constructor_t constructor;
196 void *private;
f1ca4da6 197
d6a26c6a 198 /* Ensure constructor verify calls are not passed to the registered
 199 * constructors. Forwarding them may not be safe because the Solaris
 200 * constructors are not aware of how to handle the SLAB_CTOR_VERIFY flag
201 */
57d86234 202 ASSERT(flags & SLAB_CTOR_CONSTRUCTOR);
203
d6a26c6a 204 if (flags & SLAB_CTOR_VERIFY)
205 return;
206
4efd4118 207 if (flags & SLAB_CTOR_ATOMIC)
208 flags = KM_NOSLEEP;
209 else
210 flags = KM_SLEEP;
57d86234 211#else
212static void
213kmem_cache_generic_constructor(kmem_cache_t *cache, void *ptr)
214{
215 kmem_cache_cb_t *kcc;
216 kmem_constructor_t constructor;
217 void *private;
218 int flags = KM_NOSLEEP;
219#endif
d6a26c6a 220 /* We can be called with interrupts disabled so it is critical that
221 * this function and the registered constructor never sleep.
222 */
c30df9c8 223 while (!down_read_trylock(&kmem_cache_sem));
f1ca4da6 224
225 /* Callback list must be in sync with linux slab caches */
226 kcc = kmem_cache_find_cache_cb(cache);
937879f1 227 ASSERT(kcc);
d6a26c6a 228 ASSERT(kcc->kcc_magic == KCC_MAGIC);
229 atomic_inc(&kcc->kcc_ref);
937879f1 230
d61e12af 231 constructor = kcc->kcc_constructor;
232 private = kcc->kcc_private;
0a6fd143 233
c30df9c8 234 up_read(&kmem_cache_sem);
d61e12af 235
236 if (constructor)
237 constructor(ptr, private, (int)flags);
238
d6a26c6a 239 atomic_dec(&kcc->kcc_ref);
240
f1ca4da6 241 /* Linux constructor has no return code, silently eat it */
242}
243
244static void
245kmem_cache_generic_destructor(void *ptr, kmem_cache_t *cache, unsigned long flags)
246{
247 kmem_cache_cb_t *kcc;
d61e12af 248 kmem_destructor_t destructor;
249 void *private;
f1ca4da6 250
4efd4118 251 /* No valid destructor flags */
252 ASSERT(flags == 0);
253
d6a26c6a 254 /* We can be called with interrupts disabled so it is critical that
 255 * this function and the registered destructor never sleep.
256 */
c30df9c8 257 while (!down_read_trylock(&kmem_cache_sem));
f1ca4da6 258
259 /* Callback list must be in sync with linux slab caches */
260 kcc = kmem_cache_find_cache_cb(cache);
937879f1 261 ASSERT(kcc);
d6a26c6a 262 ASSERT(kcc->kcc_magic == KCC_MAGIC);
263 atomic_inc(&kcc->kcc_ref);
937879f1 264
d61e12af 265 destructor = kcc->kcc_destructor;
266 private = kcc->kcc_private;
0a6fd143 267
c30df9c8 268 up_read(&kmem_cache_sem);
d61e12af 269
270 /* Solaris destructor takes no flags, silently eat them */
271 if (destructor)
272 destructor(ptr, private);
d6a26c6a 273
274 atomic_dec(&kcc->kcc_ref);
f1ca4da6 275}
276
57d86234 277/* Arguments are ignored */
f1ca4da6 278static int
279kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
280{
281 kmem_cache_cb_t *kcc;
282 int total = 0;
283
284 /* Under linux a shrinker is not tightly coupled with a slab
 285 * cache. In fact linux systematically tries calling all
 286 * registered shrinker callbacks until its target reclamation level
 287 * is reached. Because of this we register only one shrinker
 288 * function in the shim layer for all slab caches, and we always
 289 * attempt to shrink all caches when this generic shrinker is called.
290 */
c30df9c8 291 down_read(&kmem_cache_sem);
f1ca4da6 292
c30df9c8 293 list_for_each_entry(kcc, &kmem_cache_list, kcc_list) {
d6a26c6a 294 ASSERT(kcc);
295 ASSERT(kcc->kcc_magic == KCC_MAGIC);
296
297 /* Take a reference on the cache in question. If that
 298 * cache is contended simply skip it; it may already be
 299 * in the process of a reclaim or the ctor/dtor may be
 300 * running. In either case it's best to skip it.
301 */
302 atomic_inc(&kcc->kcc_ref);
303 if (atomic_read(&kcc->kcc_ref) > 1) {
304 atomic_dec(&kcc->kcc_ref);
305 continue;
306 }
307
f1ca4da6 308 /* Under linux the desired number and gfp type of objects
 309 * are passed to the reclaiming function as a suggested reclaim
310 * target. I do not pass these args on because reclaim
311 * policy is entirely up to the owner under solaris. We only
312 * pass on the pre-registered private data.
313 */
314 if (kcc->kcc_reclaim)
315 kcc->kcc_reclaim(kcc->kcc_private);
316
d6a26c6a 317 atomic_dec(&kcc->kcc_ref);
f1ca4da6 318 total += 1;
319 }
320
 321 /* Under linux we should return the remaining number of entries in
322 * the cache. Unfortunately, I don't see an easy way to safely
323 * emulate this behavior so I'm returning one entry per cache which
324 * was registered with the generic shrinker. This should fake out
325 * the linux VM when it attempts to shrink caches.
326 */
c30df9c8 327 up_read(&kmem_cache_sem);
d6a26c6a 328
f1ca4da6 329 return total;
330}
331
 332/* Ensure the kmem_cache_create/destroy/alloc/free macros are
 333 * removed here to prevent a recursive substitution; we want to call
 334 * the native linux versions.
335 */
336#undef kmem_cache_create
337#undef kmem_cache_destroy
5c2bb9b2 338#undef kmem_cache_alloc
57d86234 339#undef kmem_cache_free
f1ca4da6 340
341kmem_cache_t *
342__kmem_cache_create(char *name, size_t size, size_t align,
f1b59d26 343 kmem_constructor_t constructor,
344 kmem_destructor_t destructor,
345 kmem_reclaim_t reclaim,
f1ca4da6 346 void *priv, void *vmp, int flags)
347{
348 kmem_cache_t *cache;
349 kmem_cache_cb_t *kcc;
350 int shrinker_flag = 0;
c19c06f3 351 char *cache_name;
937879f1 352 ENTRY;
f1ca4da6 353
937879f1 354 /* XXX: - Option currently unsupported by shim layer */
355 ASSERT(!vmp);
4efd4118 356 ASSERT(flags == 0);
f1ca4da6 357
c19c06f3 358 cache_name = kzalloc(strlen(name) + 1, GFP_KERNEL);
359 if (cache_name == NULL)
937879f1 360 RETURN(NULL);
c19c06f3 361
362 strcpy(cache_name, name);
57d86234 363
c30df9c8 364 /* When the kernel slab layer is implemented by the slub allocator
 365 * it is possible that similarly sized slab caches will be merged.
366 * For our implementation we must make sure this never
367 * happens because we require a unique cache address to
368 * use as a hash key when looking up the constructor,
369 * destructor, and shrinker registered for each unique
370 * type of slab cache. Passing any of the following flags
371 * will prevent the slub merging.
372 *
373 * SLAB_RED_ZONE
374 * SLAB_POISON
375 * SLAB_STORE_USER
376 * SLAB_TRACE
377 * SLAB_DESTROY_BY_RCU
378 */
475cdc78 379#ifdef CONFIG_SLUB
c30df9c8 380 flags |= SLAB_STORE_USER;
381#endif
382
57d86234 383#ifdef HAVE_KMEM_CACHE_CREATE_DTOR
c19c06f3 384 cache = kmem_cache_create(cache_name, size, align, flags,
f1ca4da6 385 kmem_cache_generic_constructor,
386 kmem_cache_generic_destructor);
57d86234 387#else
388 cache = kmem_cache_create(cache_name, size, align, flags, NULL);
389#endif
f1ca4da6 390 if (cache == NULL)
937879f1 391 RETURN(NULL);
f1ca4da6 392
393 /* Register shared shrinker function on initial cache create */
c30df9c8 394 down_read(&kmem_cache_sem);
395 if (list_empty(&kmem_cache_list)) {
57d86234 396#ifdef HAVE_SET_SHRINKER
c30df9c8 397 kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
398 kmem_cache_generic_shrinker);
f1ca4da6 399 if (kmem_cache_shrinker == NULL) {
400 kmem_cache_destroy(cache);
c30df9c8 401 up_read(&kmem_cache_sem);
937879f1 402 RETURN(NULL);
f1ca4da6 403 }
57d86234 404#else
405 register_shrinker(&kmem_cache_shrinker);
 406#endif
 /* Note that a shrinker is now registered so it can be
 * unregistered should the callback setup below fail. */
 shrinker_flag = 1;
f1ca4da6 407 }
c30df9c8 408 up_read(&kmem_cache_sem);
f1ca4da6 409
410 kcc = kmem_cache_add_cache_cb(cache, constructor, destructor,
411 reclaim, priv, vmp);
412 if (kcc == NULL) {
413 if (shrinker_flag) /* New shrinker registered must be removed */
57d86234 414#ifdef HAVE_SET_SHRINKER
f1ca4da6 415 remove_shrinker(kmem_cache_shrinker);
57d86234 416#else
417 unregister_shrinker(&kmem_cache_shrinker);
418#endif
f1ca4da6 419
420 kmem_cache_destroy(cache);
937879f1 421 RETURN(NULL);
f1ca4da6 422 }
423
937879f1 424 RETURN(cache);
f1ca4da6 425}
f1b59d26 426EXPORT_SYMBOL(__kmem_cache_create);
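/*
 * Illustrative Solaris-style usage serviced by this shim (the my_*
 * names are hypothetical). Note the ASSERTs above: a NULL vmp and
 * zero flags are currently required by the shim.
 *
 *   kmem_cache_t *cp = kmem_cache_create("my_cache", sizeof (my_t), 0,
 *                                        my_ctor, my_dtor, my_reclaim,
 *                                        NULL, NULL, 0);
 *   my_t *obj = kmem_cache_alloc(cp, KM_SLEEP);
 *   ...
 *   kmem_cache_free(cp, obj);
 *   kmem_cache_destroy(cp);
 */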
f1ca4da6 427
e4f1d29f 428/* Return code provided despite Solaris's void return. There should be no
429 * harm here since the Solaris versions will ignore it anyway. */
430int
f1ca4da6 431__kmem_cache_destroy(kmem_cache_t *cache)
432{
433 kmem_cache_cb_t *kcc;
c19c06f3 434 char *name;
e4f1d29f 435 int rc;
937879f1 436 ENTRY;
f1ca4da6 437
c30df9c8 438 down_read(&kmem_cache_sem);
f1ca4da6 439 kcc = kmem_cache_find_cache_cb(cache);
d6a26c6a 440 if (kcc == NULL) {
c30df9c8 441 up_read(&kmem_cache_sem);
937879f1 442 RETURN(-EINVAL);
d6a26c6a 443 }
444 atomic_inc(&kcc->kcc_ref);
c30df9c8 445 up_read(&kmem_cache_sem);
f1ca4da6 446
c19c06f3 447 name = (char *)kmem_cache_name(cache);
57d86234 448
449#ifdef HAVE_KMEM_CACHE_DESTROY_INT
e4f1d29f 450 rc = kmem_cache_destroy(cache);
57d86234 451#else
452 kmem_cache_destroy(cache);
453 rc = 0;
454#endif
d6a26c6a 455
456 atomic_dec(&kcc->kcc_ref);
f1ca4da6 457 kmem_cache_remove_cache_cb(kcc);
c19c06f3 458 kfree(name);
f1ca4da6 459
460 /* Unregister generic shrinker on removal of all caches */
c30df9c8 461 down_read(&kmem_cache_sem);
462 if (list_empty(&kmem_cache_list))
57d86234 463#ifdef HAVE_SET_SHRINKER
464 remove_shrinker(kmem_cache_shrinker);
465#else
466 unregister_shrinker(&kmem_cache_shrinker);
467#endif
f1ca4da6 468
c30df9c8 469 up_read(&kmem_cache_sem);
937879f1 470 RETURN(rc);
f1ca4da6 471}
f1b59d26 472EXPORT_SYMBOL(__kmem_cache_destroy);
f1ca4da6 473
5c2bb9b2 474/* Under Solaris if the KM_SLEEP flag is passed we absolutely must
475 * sleep until we are allocated the memory. Under Linux you can still
476 * get a memory allocation failure, so I'm forced to keep requesting
477 * the memory even if the system is under substantial memory pressure
 478 * or fragmentation prevents the allocation from succeeding. This is
479 * not the correct fix, or even a good one. But it will do for now.
480 */
481void *
482__kmem_cache_alloc(kmem_cache_t *cache, gfp_t flags)
483{
57d86234 484 void *obj;
5c2bb9b2 485 ENTRY;
486
487restart:
57d86234 488 obj = kmem_cache_alloc(cache, flags);
489 if ((obj == NULL) && (flags & KM_SLEEP)) {
c6dc93d6 490#ifdef DEBUG_KMEM
5c2bb9b2 491 atomic64_inc(&kmem_cache_alloc_failed);
c6dc93d6 492#endif /* DEBUG_KMEM */
57d86234 493 GOTO(restart, obj);
5c2bb9b2 494 }
495
c30df9c8 496 /* When kernel destructor support is missing we must not register
 497 * the provided constructor with the kernel, since it would then be
 498 * called more often than the destructor which we only call on free.
 499 * Thus we manually call the proper constructor here when there is
 500 * no destructor support. */
57d86234 501#ifndef HAVE_KMEM_CACHE_CREATE_DTOR
502#ifdef HAVE_3ARG_KMEM_CACHE_CREATE_CTOR
503 kmem_cache_generic_constructor(obj, cache, flags);
504#else
505 kmem_cache_generic_constructor(cache, obj);
c30df9c8 506#endif /* HAVE_3ARG_KMEM_CACHE_CREATE_CTOR */
 507#endif /* HAVE_KMEM_CACHE_CREATE_DTOR */
57d86234 508
509 RETURN(obj);
5c2bb9b2 510}
511EXPORT_SYMBOL(__kmem_cache_alloc);
512
57d86234 513void
514__kmem_cache_free(kmem_cache_t *cache, void *obj)
515{
516#ifndef HAVE_KMEM_CACHE_CREATE_DTOR
517 kmem_cache_generic_destructor(obj, cache, 0);
518#endif
519 kmem_cache_free(cache, obj);
520}
521EXPORT_SYMBOL(__kmem_cache_free);
522
f1b59d26 523void
937879f1 524__kmem_reap(void)
525{
526 ENTRY;
f1b59d26 527 /* Since there's no easy hook into linux to force all the registered
f1ca4da6 528 * shrinkers to run we just run the ones registered for this shim */
529 kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
937879f1 530 EXIT;
f1ca4da6 531}
f1b59d26 532EXPORT_SYMBOL(__kmem_reap);
5d86345d 533
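/* Module initialization: set up the cache callback hash table and
 * list and, when DEBUG_KMEM is defined, the allocation tracking state
 * used for leak reporting in kmem_fini(). */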
534int
535kmem_init(void)
536{
c30df9c8 537 int i;
937879f1 538 ENTRY;
d6a26c6a 539
c30df9c8 540 init_rwsem(&kmem_cache_sem);
541 INIT_LIST_HEAD(&kmem_cache_list);
542
543 for (i = 0; i < KMEM_CACHE_TABLE_SIZE; i++)
544 INIT_HLIST_HEAD(&kmem_cache_table[i]);
545
5d86345d 546#ifdef DEBUG_KMEM
c30df9c8 547 atomic64_set(&kmem_alloc_used, 0);
548 atomic64_set(&vmem_alloc_used, 0);
d6a26c6a 549
c30df9c8 550 spin_lock_init(&kmem_lock);
551 INIT_LIST_HEAD(&kmem_list);
d6a26c6a 552
c30df9c8 553 for (i = 0; i < KMEM_TABLE_SIZE; i++)
554 INIT_HLIST_HEAD(&kmem_table[i]);
13cdca65 555
c30df9c8 556 spin_lock_init(&vmem_lock);
557 INIT_LIST_HEAD(&vmem_list);
13cdca65 558
c30df9c8 559 for (i = 0; i < VMEM_TABLE_SIZE; i++)
560 INIT_HLIST_HEAD(&vmem_table[i]);
5c2bb9b2 561
c30df9c8 562 atomic64_set(&kmem_cache_alloc_failed, 0);
5d86345d 563#endif
937879f1 564 RETURN(0);
5d86345d 565}
566
c6dc93d6 567#ifdef DEBUG_KMEM
568static char *
569sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
d6a26c6a 570{
571 int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
572 int i, flag = 1;
573
574 ASSERT(str != NULL && len >= 17);
575 memset(str, 0, len);
576
577 /* Check for a fully printable string, and while we are at
578 * it place the printable characters in the passed buffer. */
579 for (i = 0; i < size; i++) {
580 str[i] = ((char *)(kd->kd_addr))[i];
581 if (isprint(str[i])) {
582 continue;
583 } else {
584 /* Minimum number of printable characters found
585 * to make it worthwhile to print this as ascii. */
586 if (i > min)
587 break;
588
589 flag = 0;
590 break;
591 }
592
593 }
594
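 /* The data was not cleanly printable, so fall back to a hex
 * sample of every other byte from the first 16 bytes, which
 * fits the 16 character (plus terminating NULL) buffer. */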
595 if (!flag) {
596 sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
597 *((uint8_t *)kd->kd_addr),
598 *((uint8_t *)kd->kd_addr + 2),
599 *((uint8_t *)kd->kd_addr + 4),
600 *((uint8_t *)kd->kd_addr + 6),
601 *((uint8_t *)kd->kd_addr + 8),
602 *((uint8_t *)kd->kd_addr + 10),
603 *((uint8_t *)kd->kd_addr + 12),
604 *((uint8_t *)kd->kd_addr + 14));
605 }
606
607 return str;
608}
c6dc93d6 609#endif /* DEBUG_KMEM */
d6a26c6a 610
5d86345d 611void
612kmem_fini(void)
613{
937879f1 614 ENTRY;
5d86345d 615#ifdef DEBUG_KMEM
d6a26c6a 616 {
617 unsigned long flags;
618 kmem_debug_t *kd;
619 char str[17];
620
d6a26c6a 621 /* Display all unreclaimed memory addresses, including the
622 * allocation size and the first few bytes of what's located
623 * at that address to aid in debugging. Performance is not
624 * a serious concern here since it is module unload time. */
13cdca65 625 if (atomic64_read(&kmem_alloc_used) != 0)
626 CWARN("kmem leaked %ld/%ld bytes\n",
 627 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
628
d6a26c6a 629 spin_lock_irqsave(&kmem_lock, flags);
630 if (!list_empty(&kmem_list))
631 CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
632 "address", "size", "data", "func", "line");
633
13cdca65 634 list_for_each_entry(kd, &kmem_list, kd_list)
d6a26c6a 635 CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
13cdca65 636 kd->kd_addr, kd->kd_size,
d6a26c6a 637 sprintf_addr(kd, str, 17, 8),
638 kd->kd_func, kd->kd_line);
13cdca65 639
d6a26c6a 640 spin_unlock_irqrestore(&kmem_lock, flags);
641
642 if (atomic64_read(&vmem_alloc_used) != 0)
643 CWARN("vmem leaked %ld/%ld bytes\n",
 644 atomic64_read(&vmem_alloc_used), vmem_alloc_max);
13cdca65 645
646 spin_lock_irqsave(&vmem_lock, flags);
647 if (!list_empty(&vmem_list))
648 CDEBUG(D_WARNING, "%-16s %-5s %-16s %s:%s\n",
649 "address", "size", "data", "func", "line");
650
651 list_for_each_entry(kd, &vmem_list, kd_list)
652 CDEBUG(D_WARNING, "%p %-5d %-16s %s:%d\n",
653 kd->kd_addr, kd->kd_size,
654 sprintf_addr(kd, str, 17, 8),
655 kd->kd_func, kd->kd_line);
656
657 spin_unlock_irqrestore(&vmem_lock, flags);
d6a26c6a 658 }
5d86345d 659#endif
937879f1 660 EXIT;
5d86345d 661}