/*
 *  This file is part of the SPL:  Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#include <sys/kmem.h>

#ifdef DEBUG_SUBSYSTEM
# undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_KMEM
/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * all allocations will be tracked when they are allocated and
 * freed.  When the SPL module is unloaded a list of all leaked
 * addresses and where they were allocated will be dumped to the
 * console.  Enabling this feature has a significant impact on
 * performance but it makes finding memory leaks straightforward.
 */
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
int kmem_warning_flag = 1;

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
EXPORT_SYMBOL(kmem_warning_flag);
# ifdef DEBUG_KMEM_TRACKING

/* XXX - Not too surprisingly, with debugging enabled the xmem_locks are
 * very highly contended, particularly on xfree().  If we want to run with
 * this detailed debugging enabled for anything other than debugging we
 * need to minimize the contention by moving to a lock per xmem_table
 * entry model.
 */
# define KMEM_HASH_BITS		10
# define KMEM_TABLE_SIZE	(1 << KMEM_HASH_BITS)

# define VMEM_HASH_BITS		10
# define VMEM_TABLE_SIZE	(1 << VMEM_HASH_BITS)
typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;

EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
# endif
int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
#else
int kmem_set_warning(int flag) { return 0; }
#endif
EXPORT_SYMBOL(kmem_set_warning);
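/*
 * Illustrative usage sketch (not part of the original file; the size
 * and function name are hypothetical): with DEBUG_KMEM enabled, every
 * kmem_alloc()/kmem_free() pair is charged to and credited from
 * kmem_alloc_used above, so an allocation with no matching free is
 * reported as a leak at module unload time.
 */
static void
example_kmem_accounting(void)
{
	size_t size = 128;
	void *buf;

	/* Charged to kmem_alloc_used; may also bump kmem_alloc_max. */
	buf = kmem_alloc(size, KM_SLEEP);
	if (buf == NULL)
		return;

	/* Solaris-style kmem_free() takes the original allocation size
	 * so the accounting above can be decremented exactly. */
	kmem_free(buf, size);
}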
/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly
 *    destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 *
 * XXX: Improve the partial slab list by carefully maintaining a
 *      strict ordering of fullest to emptiest slabs based on
 *      the slab reference count.  This guarantees that when freeing
 *      slabs back to the system we need only linearly traverse the
 *      last N slabs in the list to discover all the freeable slabs.
 *
 * XXX: NUMA awareness for optionally allocating memory close to a
 *      particular core.  This can be advantageous if you know the slab
 *      object will be short lived and primarily accessed from one core.
 *
 * XXX: Slab coloring may also yield performance improvements and would
 *      be desirable to implement.
 */
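/*
 * Illustrative sketch of point (1) above (hypothetical type and
 * functions, not part of this file): a constructor/destructor pair
 * lets a cache pay mutex_init()/mutex_destroy() once per object
 * lifetime rather than on every allocation.  The Solaris-style ctor
 * signature and the SPL mutex API used here are assumptions.
 */
typedef struct example_obj {
	kmutex_t eo_lock;
} example_obj_t;

static int
example_obj_ctor(void *obj, void *priv, int kmflags)
{
	example_obj_t *eo = obj;

	/* Runs once when the object's slab is created */
	mutex_init(&eo->eo_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

static void
example_obj_dtor(void *obj, void *priv)
{
	example_obj_t *eo = obj;

	/* Runs once when the object's slab is released */
	mutex_destroy(&eo->eo_lock);
}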
struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */

static int spl_cache_flush(spl_kmem_cache_t *skc,
    spl_kmem_magazine_t *skm, int flush);

#ifdef HAVE_SET_SHRINKER
static struct shrinker *spl_kmem_cache_shrinker;
#else
static int spl_kmem_cache_generic_shrinker(int nr_to_scan,
    unsigned int gfp_mask);
static struct shrinker spl_kmem_cache_shrinker = {
	.shrink = spl_kmem_cache_generic_shrinker,
	.seeks = KMC_DEFAULT_SEEKS,
};
#endif
#ifdef DEBUG_KMEM
# ifdef DEBUG_KMEM_TRACKING

static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
    void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kmem_debug *p;
	unsigned long flags;
	ENTRY;

	spin_lock_irqsave(lock, flags);

	head = &table[hash_ptr(addr, bits)];
	hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
		if (p->kd_addr == addr) {
			hlist_del_init(&p->kd_hlist);
			list_del_init(&p->kd_list);
			spin_unlock_irqrestore(lock, flags);
			RETURN(p);
		}
	}

	spin_unlock_irqrestore(lock, flags);

	RETURN(NULL);
}
void *
kmem_alloc_track(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;
	ENTRY;

	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
	    flags & ~__GFP_ZERO);

	if (dptr == NULL) {
		CWARN("kmem_alloc(%ld, 0x%x) debug failed\n",
		    sizeof(kmem_debug_t), flags);
	} else {
		/* Marked unlikely because we should never be doing this;
		 * we tolerate up to 2 pages but a single page is best. */
		if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
			CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
			    (unsigned long long) size, flags,
			    atomic64_read(&kmem_alloc_used), kmem_alloc_max);

		/* We use kstrdup() below because the string pointed to by
		 * __FUNCTION__ might not be available by the time we want
		 * to print it since the module might have been unloaded. */
		dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
		if (unlikely(dptr->kd_func == NULL)) {
			kfree(dptr);
			CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) "
			    "(%lld/%llu)\n", (unsigned long long) size, flags,
			    atomic64_read(&kmem_alloc_used), kmem_alloc_max);
			goto out;
		}

		/* Use the correct allocator */
		if (node_alloc) {
			ASSERT(!(flags & __GFP_ZERO));
			ptr = kmalloc_node(size, flags, node);
		} else if (flags & __GFP_ZERO) {
			ptr = kzalloc(size, flags & ~__GFP_ZERO);
		} else {
			ptr = kmalloc(size, flags);
		}

		if (unlikely(ptr == NULL)) {
			kfree(dptr->kd_func);
			kfree(dptr);
			CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
			    (unsigned long long) size, flags,
			    atomic64_read(&kmem_alloc_used), kmem_alloc_max);
			goto out;
		}

		atomic64_add(size, &kmem_alloc_used);
		if (unlikely(atomic64_read(&kmem_alloc_used) >
		    kmem_alloc_max))
			kmem_alloc_max =
			    atomic64_read(&kmem_alloc_used);

		INIT_HLIST_NODE(&dptr->kd_hlist);
		INIT_LIST_HEAD(&dptr->kd_list);

		dptr->kd_addr = ptr;
		dptr->kd_size = size;
		dptr->kd_line = line;

		spin_lock_irqsave(&kmem_lock, irq_flags);
		hlist_add_head_rcu(&dptr->kd_hlist,
		    &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
		list_add_tail(&dptr->kd_list, &kmem_list);
		spin_unlock_irqrestore(&kmem_lock, irq_flags);

		CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
		    "(%lld/%llu)\n", (unsigned long long) size, flags,
		    ptr, atomic64_read(&kmem_alloc_used),
		    kmem_alloc_max);
	}
out:
	RETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
void
kmem_free_track(void *ptr, size_t size)
{
	kmem_debug_t *dptr;
	ENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);

	ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	atomic64_sub(size, &kmem_alloc_used);

	CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, atomic64_read(&kmem_alloc_used),
	    kmem_alloc_max);

	kfree(dptr->kd_func);

	memset(dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset(ptr, 0x5a, size);
	kfree(ptr);

	EXIT;
}
EXPORT_SYMBOL(kmem_free_track);
void *
vmem_alloc_track(size_t size, int flags, const char *func, int line)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;
	ENTRY;

	ASSERT(flags & KM_SLEEP);

	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
	if (dptr == NULL) {
		CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
		    sizeof(kmem_debug_t), flags);
	} else {
		/* We use kstrdup() below because the string pointed to by
		 * __FUNCTION__ might not be available by the time we want
		 * to print it, since the module might have been unloaded. */
		dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
		if (unlikely(dptr->kd_func == NULL)) {
			kfree(dptr);
			CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) "
			    "(%lld/%llu)\n", (unsigned long long) size, flags,
			    atomic64_read(&vmem_alloc_used), vmem_alloc_max);
			goto out;
		}

		ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
		    PAGE_KERNEL);

		if (unlikely(ptr == NULL)) {
			kfree(dptr->kd_func);
			kfree(dptr);
			CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
			    (unsigned long long) size, flags,
			    atomic64_read(&vmem_alloc_used), vmem_alloc_max);
			goto out;
		}

		if (flags & __GFP_ZERO)
			memset(ptr, 0, size);

		atomic64_add(size, &vmem_alloc_used);
		if (unlikely(atomic64_read(&vmem_alloc_used) >
		    vmem_alloc_max))
			vmem_alloc_max =
			    atomic64_read(&vmem_alloc_used);

		INIT_HLIST_NODE(&dptr->kd_hlist);
		INIT_LIST_HEAD(&dptr->kd_list);

		dptr->kd_addr = ptr;
		dptr->kd_size = size;
		dptr->kd_line = line;

		spin_lock_irqsave(&vmem_lock, irq_flags);
		hlist_add_head_rcu(&dptr->kd_hlist,
		    &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
		list_add_tail(&dptr->kd_list, &vmem_list);
		spin_unlock_irqrestore(&vmem_lock, irq_flags);

		CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
		    "(%lld/%llu)\n", (unsigned long long) size, flags,
		    ptr, atomic64_read(&vmem_alloc_used),
		    vmem_alloc_max);
	}
out:
	RETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
void
vmem_free_track(void *ptr, size_t size)
{
	kmem_debug_t *dptr;
	ENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
	ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	atomic64_sub(size, &vmem_alloc_used);
	CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, atomic64_read(&vmem_alloc_used),
	    vmem_alloc_max);

	kfree(dptr->kd_func);

	memset(dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset(ptr, 0x5a, size);
	vfree(ptr);

	EXIT;
}
EXPORT_SYMBOL(vmem_free_track);
# else /* DEBUG_KMEM_TRACKING */

void *
kmem_alloc_debug(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
	void *ptr;
	ENTRY;

	/* Marked unlikely because we should never be doing this;
	 * we tolerate up to 2 pages but a single page is best. */
	if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
		CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
		    (unsigned long long) size, flags,
		    atomic64_read(&kmem_alloc_used), kmem_alloc_max);

	/* Use the correct allocator */
	if (node_alloc) {
		ASSERT(!(flags & __GFP_ZERO));
		ptr = kmalloc_node(size, flags, node);
	} else if (flags & __GFP_ZERO) {
		ptr = kzalloc(size, flags & (~__GFP_ZERO));
	} else {
		ptr = kmalloc(size, flags);
	}

	if (ptr == NULL) {
		CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
		    (unsigned long long) size, flags,
		    atomic64_read(&kmem_alloc_used), kmem_alloc_max);
	} else {
		atomic64_add(size, &kmem_alloc_used);
		if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max))
			kmem_alloc_max = atomic64_read(&kmem_alloc_used);

		CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
		    "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
		    atomic64_read(&kmem_alloc_used), kmem_alloc_max);
	}
	RETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
kmem_free_debug(void *ptr, size_t size)
{
	ENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	atomic64_sub(size, &kmem_alloc_used);

	CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, atomic64_read(&kmem_alloc_used),
	    kmem_alloc_max);

	memset(ptr, 0x5a, size);
	kfree(ptr);

	EXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
	void *ptr;
	ENTRY;

	ASSERT(flags & KM_SLEEP);

	ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
	    PAGE_KERNEL);
	if (ptr == NULL) {
		CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
		    (unsigned long long) size, flags,
		    atomic64_read(&vmem_alloc_used), vmem_alloc_max);
	} else {
		if (flags & __GFP_ZERO)
			memset(ptr, 0, size);

		atomic64_add(size, &vmem_alloc_used);

		if (unlikely(atomic64_read(&vmem_alloc_used) > vmem_alloc_max))
			vmem_alloc_max = atomic64_read(&vmem_alloc_used);

		CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
		    "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
		    atomic64_read(&vmem_alloc_used), vmem_alloc_max);
	}
	RETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
vmem_free_debug(void *ptr, size_t size)
{
	ENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	atomic64_sub(size, &vmem_alloc_used);

	CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, atomic64_read(&vmem_alloc_used),
	    vmem_alloc_max);

	memset(ptr, 0x5a, size);
	vfree(ptr);

	EXIT;
}
EXPORT_SYMBOL(vmem_free_debug);

# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	void *ptr;

	if (skc->skc_flags & KMC_KMEM) {
		if (size > (2 * PAGE_SIZE)) {
			ptr = (void *)__get_free_pages(flags, get_order(size));
		} else {
			ptr = kmem_alloc(size, flags);
		}
	} else {
		ptr = vmem_alloc(size, flags);
	}

	return ptr;
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	if (skc->skc_flags & KMC_KMEM) {
		if (size > (2 * PAGE_SIZE))
			free_pages((unsigned long)ptr, get_order(size));
		else
			kmem_free(ptr, size);
	} else {
		vmem_free(ptr, size);
	}
}
/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * KMC_ONSLAB                       KMC_OFFSLAB
 *
 * +------------------------+       +-----------------+
 * | spl_kmem_slab_t --+-+  |       | spl_kmem_slab_t |---+-+
 * | skc_obj_size    <-+ |  |       +-----------------+   | |
 * | spl_kmem_obj_t      |  |                             | |
 * | skc_obj_size    <---+  |       +-----------------+   | |
 * | spl_kmem_obj_t      |  |       | skc_obj_size    | <-+ |
 * | ...                 v  |       | spl_kmem_obj_t  |     |
 * +------------------------+       +-----------------+     v
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	spl_kmem_obj_t *sko, *n;
	void *base, *obj;
	int i, align, size, rc = 0;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		RETURN(NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;

	align = skc->skc_obj_align;
	size = P2ROUNDUP(skc->skc_obj_size, align) +
	    P2ROUNDUP(sizeof(spl_kmem_obj_t), align);

	for (i = 0; i < sks->sks_objs; i++) {
		if (skc->skc_flags & KMC_OFFSLAB) {
			obj = kv_alloc(skc, size, flags);
			if (!obj)
				GOTO(out, rc = -ENOMEM);
		} else {
			obj = base +
			    P2ROUNDUP(sizeof(spl_kmem_slab_t), align) +
			    (i * size);
		}

		sko = obj + P2ROUNDUP(skc->skc_obj_size, align);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	list_for_each_entry(sko, &sks->sks_free_list, sko_list)
		if (skc->skc_ctor)
			skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
out:
	if (rc) {
		if (skc->skc_flags & KMC_OFFSLAB)
			list_for_each_entry_safe(sko, n, &sks->sks_free_list,
			    sko_list)
				kv_free(skc, sko->sko_addr, size);

		kv_free(skc, base, skc->skc_slab_size);
		sks = NULL;
	}

	RETURN(sks);
}
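/*
 * Worked sketch of the KMC_ONSLAB layout pictured above (hypothetical
 * helper, not part of this file): object N starts after the rounded-up
 * slab header, and its spl_kmem_obj_t trailer sits at the object base
 * plus the rounded-up object size, exactly as computed in
 * spl_slab_alloc() above.
 */
static void *
example_onslab_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks, int n)
{
	int align = skc->skc_obj_align;
	int span = P2ROUNDUP(skc->skc_obj_size, align) +
	    P2ROUNDUP(sizeof(spl_kmem_obj_t), align);

	/* Skip the slab header, then step over N object+trailer pairs */
	return ((void *)sks + P2ROUNDUP(sizeof(spl_kmem_slab_t), align) +
	    (n * span));
}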
/*
 * Remove a slab from the complete or partial list.  It must be called
 * with the 'skc->skc_lock' held, but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;
	spl_kmem_obj_t *sko, *n;
	ENTRY;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);

	/* Run destructors as the slab is being released */
	list_for_each_entry_safe(sko, n, &sks->sks_free_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
		list_del(&sko->sko_list);

		if (skc->skc_dtor)
			skc->skc_dtor(sko->sko_addr, skc->skc_private);

		if (skc->skc_flags & KMC_OFFSLAB)
			list_add(&sko->sko_list, sko_list);
	}

	list_add(&sks->sks_list, sks_list);
	EXIT;
}
/*
 * Traverses all the partial slabs attached to a cache and frees those
 * which are currently empty and have not been touched for
 * skc_delay seconds.  This is to avoid thrashing.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc, int flag)
{
	spl_kmem_slab_t *sks, *m;
	spl_kmem_obj_t *sko, *n;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);
	int size;
	ENTRY;

	/*
	 * Move empty slabs and objects which have not been touched in
	 * skc_delay seconds on to private lists to be freed outside
	 * the spin lock.  This delay time is important to avoid
	 * thrashing, however when flag is set the delay will not be
	 * used.  Empty slabs will be at the end of the skc_partial_list.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list,
	    sks_list) {
		if (sks->sks_ref > 0)
			break;

		if (flag || time_after(jiffies,
		    sks->sks_age + skc->skc_delay * HZ))
			spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * We only have a list of spl_kmem_obj_t's if they are located off
	 * the slab, otherwise they get freed with the spl_kmem_slab_t.
	 */
	if (!list_empty(&sko_list)) {
		ASSERT(skc->skc_flags & KMC_OFFSLAB);

		size = P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) +
		    P2ROUNDUP(sizeof(spl_kmem_obj_t), skc->skc_obj_align);

		list_for_each_entry_safe(sko, n, &sko_list, sko_list)
			kv_free(skc, sko->sko_addr, size);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list)
		kv_free(skc, sks, skc->skc_slab_size);

	EXIT;
}
/*
 * Called regularly on all caches to age objects out of the magazines
 * which have not been accessed in skc->skc_delay seconds.  This prevents
 * idle magazines from holding memory which might be better used by
 * other caches or parts of the system.  The delay is present to
 * prevent thrashing the magazine.
 */
static void
spl_magazine_age(void *data)
{
	spl_kmem_cache_t *skc = data;
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];

	if (skm->skm_avail > 0 &&
	    time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
		(void)spl_cache_flush(skc, skm, skm->skm_refill);
}
/*
 * Called regularly to keep a downward pressure on the size of idle
 * magazines and to release free slabs from the cache.  This function
 * never calls the registered reclaim function; that only occurs
 * under memory pressure or with a direct call to spl_kmem_reap().
 */
static void
spl_cache_age(void *data)
{
	spl_kmem_cache_t *skc =
	    spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	spl_on_each_cpu(spl_magazine_age, skc, 1);
	spl_slab_reclaim(skc, 0);

	if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
		schedule_delayed_work(&skc->skc_work, 2 * skc->skc_delay * HZ);
}
/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.  Also for
 * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
 * lower than this and we will fail.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	int sks_size, obj_size, max_size, align;

	if (skc->skc_flags & KMC_OFFSLAB) {
		*objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
		*size = sizeof(spl_kmem_slab_t);
	} else {
		align = skc->skc_obj_align;
		sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align);
		obj_size = P2ROUNDUP(skc->skc_obj_size, align) +
		    P2ROUNDUP(sizeof(spl_kmem_obj_t), align);

		if (skc->skc_flags & KMC_KMEM)
			max_size = ((uint64_t)1 << (MAX_ORDER - 1)) * PAGE_SIZE;
		else
			max_size = (32 * 1024 * 1024);

		for (*size = PAGE_SIZE; *size <= max_size; *size += PAGE_SIZE) {
			*objs = (*size - sks_size) / obj_size;
			if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
				RETURN(0);
		}

		/*
		 * Unable to satisfy the target objects per slab; fall back
		 * to allocating a maximally sized slab and use it if it can
		 * contain the minimum object count.  If not, fail.
		 */
		*size = max_size;
		*objs = (*size - sks_size) / obj_size;
		if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
			RETURN(0);
	}

	RETURN(-ENOSPC);
}
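/*
 * Illustrative helper (hypothetical, not part of this file) restating
 * the on-slab capacity formula used above: the slab header is carved
 * off first, and the remainder is divided by the aligned size of one
 * object plus its spl_kmem_obj_t trailer.
 */
static uint32_t
example_objs_per_slab(spl_kmem_cache_t *skc, uint32_t slab_size)
{
	int align = skc->skc_obj_align;
	uint32_t sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align);
	uint32_t obj_size = P2ROUNDUP(skc->skc_obj_size, align) +
	    P2ROUNDUP(sizeof(spl_kmem_obj_t), align);

	return ((slab_size - sks_size) / obj_size);
}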
/*
 * Make a guess at a reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	int size, align = skc->skc_obj_align;
	ENTRY;

	/* Per-magazine sizes below assume a 4KiB page size */
	if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4MiB per-magazine */
	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2MiB per-magazine */
	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE))
		size = 64;	/* Minimum 256KiB per-magazine */
	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128KiB per-magazine */
	else
		size = 256;

	RETURN(size);
}
/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof(spl_kmem_magazine_t) +
	    sizeof(void *) * skc->skc_mag_size;
	ENTRY;

	skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_age = jiffies;
	}

	RETURN(skm);
}
/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	int size = sizeof(spl_kmem_magazine_t) +
	    sizeof(void *) * skm->skm_size;
	ENTRY;

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);

	kmem_free(skm, size);
	EXIT;
}
static void
__spl_magazine_create(void *data)
{
	spl_kmem_cache_t *skc = data;
	int id = smp_processor_id();

	skc->skc_mag[id] = spl_magazine_alloc(skc, cpu_to_node(id));
	ASSERT(skc->skc_mag[id]);
}
/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	ENTRY;

	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
	spl_on_each_cpu(__spl_magazine_create, skc, 1);

	RETURN(0);
}
static void
__spl_magazine_destroy(void *data)
{
	spl_kmem_cache_t *skc = data;
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];

	(void)spl_cache_flush(skc, skm, skm->skm_avail);
	spl_magazine_free(skm);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	ENTRY;
	spl_on_each_cpu(__spl_magazine_destroy, skc, 1);
	EXIT;
}
/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_NOTOUCH	Disable cache object aging (unsupported)
 *	KMC_NODEBUG	Disable debugging (unsupported)
 *	KMC_NOMAGAZINE	Disable magazine (unsupported)
 *	KMC_NOHASH	Disable hashing (unsupported)
 *	KMC_QCACHE	Disable qcache (unsupported)
 *	KMC_KMEM	Force kmem backed cache
 *	KMC_VMEM	Force vmem backed cache
 *	KMC_OFFSLAB	Locate objects off the slab
 */
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor,
    spl_kmem_dtor_t dtor,
    spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags)
{
	spl_kmem_cache_t *skc;
	int rc, kmem_flags = KM_SLEEP;
	ENTRY;

	ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
	ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
	ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);

	/* We may be called when there is a non-zero preempt_count or
	 * interrupts are disabled, in which case we must not sleep.
	 */
	if (current_thread_info()->preempt_count || irqs_disabled())
		kmem_flags = KM_NOSLEEP;

	/* Allocate new cache memory and initialize. */
	skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);
	if (skc == NULL)
		RETURN(NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
	if (skc->skc_name == NULL) {
		kmem_free(skc, sizeof(*skc));
		RETURN(NULL);
	}
	strncpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_reclaim = reclaim;
	skc->skc_private = priv;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	skc->skc_delay = SPL_KMEM_CACHE_DELAY;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	spin_lock_init(&skc->skc_lock);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;

	if (align) {
		ASSERT((align & (align - 1)) == 0);	/* Power of two */
		ASSERT(align >= SPL_KMEM_CACHE_ALIGN);	/* Minimum size */
		skc->skc_obj_align = align;
	}

	/* If none passed select a cache type based on object size */
	if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
		if (P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) <
		    (PAGE_SIZE / 8))
			skc->skc_flags |= KMC_KMEM;
		else
			skc->skc_flags |= KMC_VMEM;
	}

	rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
	if (rc)
		GOTO(out, rc);

	rc = spl_magazine_create(skc);
	if (rc)
		GOTO(out, rc);

	spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
	schedule_delayed_work(&skc->skc_work, 2 * skc->skc_delay * HZ);

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	RETURN(skc);
out:
	kmem_free(skc->skc_name, skc->skc_name_size);
	kmem_free(skc, sizeof(*skc));
	RETURN(NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
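/*
 * Usage sketch (hypothetical names, not part of this file): create a
 * cache of 512-byte objects with default alignment and backing store,
 * then tear it down.  A ctor/dtor pair such as the example earlier in
 * this file could be passed in place of the NULLs.
 */
static void
example_cache_lifecycle(void)
{
	spl_kmem_cache_t *cache;

	cache = spl_kmem_cache_create("example_cache", 512, 0,
	    NULL, NULL, NULL, NULL, NULL, 0);
	if (cache == NULL)
		return;

	/* ... allocate and free objects from the cache ... */

	spl_kmem_cache_destroy(cache);
}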
/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed work */
	ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	cancel_delayed_work(&skc->skc_work);
	flush_scheduled_work();

	/* Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy. */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	spl_magazine_destroy(skc);
	spl_slab_reclaim(skc, 1);
	spin_lock(&skc->skc_lock);

	/* Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	kmem_free(skc->skc_name, skc->skc_name_size);
	spin_unlock(&skc->skc_lock);

	kmem_free(skc, sizeof(*skc));

	EXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return sko->sko_addr;
}
/*
 * No available objects on any slabs, create a new slab.  Since this
 * is an expensive operation we do it without holding the spinlock and
 * only briefly acquire it when we link in the fully allocated and
 * constructed slab.
 */
static spl_kmem_slab_t *
spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	local_irq_enable();

	/*
	 * Before allocating a new slab check if the slab is being reaped.
	 * If it is there is a good chance we can wait until it finishes
	 * and then use one of the newly freed but not aged-out slabs.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		schedule();
		GOTO(out, sks = NULL);
	}

	/* Allocate a new slab for the cache */
	sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
	if (sks == NULL)
		GOTO(out, sks = NULL);

	/* Link the new empty slab in to the end of skc_partial_list. */
	spin_lock(&skc->skc_lock);
	skc->skc_slab_total++;
	skc->skc_obj_total += sks->sks_objs;
	list_add_tail(&sks->sks_list, &skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
out:
	local_irq_disable();
	RETURN(sks);
}
/*
 * Refill a per-cpu magazine with objects from the slabs for this
 * cache.  Ideally the magazine can be repopulated using existing
 * objects which have been released, however if we are unable to
 * locate enough free objects new slabs of objects will be created.
 */
static int
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int rc = 0, refill;
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			sks = spl_cache_grow(skc, flags);
			if (!sks)
				GOTO(out, rc);

			/* Rescheduled to a different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				GOTO(out, rc);

			/* Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping, so recalculate max refill. */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/* Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it. */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(rc < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	/* Returns the number of entries added to cache */
	RETURN(rc);
}
/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
	ASSERT(sko->sko_magic == SKO_MAGIC);

	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/* Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail. */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/* Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation. */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}

	EXIT;
}
/*
 * Release a batch of objects from a per-cpu magazine back to their
 * respective slabs.  This occurs when we exceed the magazine size,
 * are under memory pressure, when the cache is idle, or during
 * cache cleanup.  The flush argument contains the number of entries
 * to remove from the magazine.
 */
static int
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	int i, count = MIN(flush, skm->skm_avail);
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * XXX: Currently we simply return objects from the magazine to
	 * the slabs in fifo order.  The ideal thing to do from a memory
	 * fragmentation standpoint is to cheaply determine the set of
	 * objects in the magazine which will result in the largest
	 * number of free slabs if released from the magazine.
	 */
	spin_lock(&skc->skc_lock);
	for (i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof(void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);

	RETURN(count);
}
/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	unsigned long irq_flags;
	void *obj = NULL;
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	ASSERT(flags & KM_SLEEP);
	atomic_inc(&skc->skc_ref);
	local_irq_save(irq_flags);

restart:
	/* Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we needed to grow the cache. */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
	    skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
	    skm->skm_size, skm->skm_refill, skm->skm_avail);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
		skm->skm_age = jiffies;
	} else {
		/* Per-CPU cache empty, directly allocate from
		 * the slab and refill the per-CPU cache. */
		(void)spl_cache_refill(skc, skm, flags);
		GOTO(restart, obj = NULL);
	}

	local_irq_restore(irq_flags);

	ASSERT(((unsigned long)(obj) % skc->skc_obj_align) == 0);

	/* Pre-emptively migrate object to CPU L1 cache */
	prefetchw(obj);
	atomic_dec(&skc->skc_ref);

	RETURN(obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entries from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	atomic_inc(&skc->skc_ref);
	local_irq_save(flags);

	/* Safe to update per-cpu structure without lock, but since
	 * no remote memory allocation tracking is being performed
	 * it is entirely possible to allocate an object from one
	 * CPU cache and return it to another. */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/* Per-CPU cache full, flush it to make space */
	if (unlikely(skm->skm_avail >= skm->skm_size))
		(void)spl_cache_flush(skc, skm, skm->skm_refill);

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);
	atomic_dec(&skc->skc_ref);

	EXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
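/*
 * Usage sketch (hypothetical, not part of this file): a typical hot
 * path pairing the two calls above.  Objects normally come from the
 * calling CPU's magazine and are returned to whichever CPU's magazine
 * is local at free time, which need not be the same one.
 */
static void
example_cache_use(spl_kmem_cache_t *cache)
{
	void *obj;

	/* May sleep while a new slab is grown, so KM_SLEEP only */
	obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
	if (obj == NULL)
		return;

	/* ... use the constructed object ... */

	spl_kmem_cache_free(cache, obj);
}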
/*
 * The generic shrinker function for all caches.  Under Linux a shrinker
 * may not be tightly coupled with a slab cache.  In fact Linux always
 * systematically tries calling all registered shrinker callbacks which
 * report that they contain unused objects.  Because of this we only
 * register one shrinker function in the shim layer for all slab caches.
 * We always attempt to shrink all caches when this generic shrinker
 * is called.  The shrinker should return the number of free objects
 * in the cache when called with nr_to_scan == 0 but not attempt to
 * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
 * objects should be freed; because Solaris semantics are to free
 * all available objects we may free more objects than requested.
 */
static int
spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
	spl_kmem_cache_t *skc;
	int unused = 0;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		if (nr_to_scan)
			spl_kmem_cache_reap_now(skc);

		/*
		 * Presume everything alloc'ed is reclaimable; this ensures
		 * we are called again with nr_to_scan > 0 so we can try to
		 * reclaim.  The exact number is not important either, so
		 * we forgo taking this already highly contended lock.
		 */
		unused += skc->skc_obj_alloc;
	}
	up_read(&spl_kmem_cache_sem);

	return (unused * sysctl_vfs_cache_pressure) / 100;
}
/*
 * Call the registered reclaim function for a cache.  Depending on how
 * many and which objects are released it may simply repopulate the
 * local magazine which will then need to age-out.  Objects which cannot
 * fit in the magazine will be released back to their slabs which will
 * also need to age out before being released.  This is all just best
 * effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/* Prevent concurrent cache reaping when contended */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		EXIT;
		return;
	}

	atomic_inc(&skc->skc_ref);

	if (skc->skc_reclaim)
		skc->skc_reclaim(skc->skc_private);

	spl_slab_reclaim(skc, 0);
	clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
	atomic_dec(&skc->skc_ref);

	EXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
}
EXPORT_SYMBOL(spl_kmem_reap);
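/*
 * Sketch of the reclaim hook driven by the reap paths above
 * (hypothetical callback, not part of this file): a cache owner
 * registers a function of this shape at spl_kmem_cache_create() time
 * and, when it fires, drops private references to idle objects so
 * that spl_slab_reclaim() can return their emptied slabs to the
 * system.
 */
static void
example_cache_reclaim(void *priv)
{
	/* Release idle objects held privately via 'priv' back to the
	 * cache with spl_kmem_cache_free() so their slabs can empty. */
}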
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/* Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer. */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/* Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii. */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		    *((uint8_t *)kd->kd_addr),
		    *((uint8_t *)kd->kd_addr + 2),
		    *((uint8_t *)kd->kd_addr + 4),
		    *((uint8_t *)kd->kd_addr + 6),
		    *((uint8_t *)kd->kd_addr + 8),
		    *((uint8_t *)kd->kd_addr + 10),
		    *((uint8_t *)kd->kd_addr + 12),
		    *((uint8_t *)kd->kd_addr + 14));
	}

	return str;
}
static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;
	ENTRY;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);

	RETURN(0);
}
static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];
	ENTRY;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
	EXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
#define spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
int
spl_kmem_init(void)
{
	int rc = 0;
	ENTRY;

	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);

#ifdef HAVE_SET_SHRINKER
	spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
	    spl_kmem_cache_generic_shrinker);
	if (spl_kmem_cache_shrinker == NULL)
		RETURN(rc = -ENOMEM);
#else
	register_shrinker(&spl_kmem_cache_shrinker);
#endif

#ifdef DEBUG_KMEM
	atomic64_set(&kmem_alloc_used, 0);
	atomic64_set(&vmem_alloc_used, 0);

	spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
	spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif
	RETURN(rc);
}
void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
	/* Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time. */
	if (atomic64_read(&kmem_alloc_used) != 0)
		CWARN("kmem leaked %ld/%ld bytes\n",
		    atomic64_read(&kmem_alloc_used), kmem_alloc_max);

	if (atomic64_read(&vmem_alloc_used) != 0)
		CWARN("vmem leaked %ld/%ld bytes\n",
		    atomic64_read(&vmem_alloc_used), vmem_alloc_max);

	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
	spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */
	ENTRY;

#ifdef HAVE_SET_SHRINKER
	remove_shrinker(spl_kmem_cache_shrinker);
#else
	unregister_shrinker(&spl_kmem_cache_shrinker);
#endif

	EXIT;
}