/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 * This file is part of the SPL, Solaris Porting Layer.
 * For details, see <http://zfsonlinux.org/>.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/mm_compat.h>
#include <linux/wait_compat.h>
#include <linux/prefetch.h>
/*
 * Within the scope of the spl-kmem-cache.c file the kmem_cache_* definitions
 * are removed to allow access to the real Linux slab allocator.
 */
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free
/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant.  This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define	smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define	smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif
/*
 * Cache expiration was implemented because it was part of the default Solaris
 * kmem_cache behavior.  The idea is that per-cpu objects which haven't been
 * accessed in several seconds should be returned to their slabs.  The Linux
 * slab, on the other hand, never moves objects back to the slabs unless there
 * is memory pressure on the system.  By default the Linux method is enabled
 * because it has been shown to improve responsiveness on low memory systems.
 * This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
 */
unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
EXPORT_SYMBOL(spl_kmem_cache_expire);
module_param(spl_kmem_cache_expire, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
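/*
 * Usage sketch (not part of the original interface documentation): because
 * the parameter is writable (0644) it may be set when the module is loaded
 * or changed at runtime through sysfs, e.g. to enable both policies:
 *
 *	modprobe spl spl_kmem_cache_expire=3
 *	echo 3 > /sys/module/spl/parameters/spl_kmem_cache_expire
 */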
/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory.  They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock.  This
 * can improve performance on highly contended caches.  However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size.  When this value is set to 0 the magazine size will be
 * automatically determined based on the object size.  Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e. per CPU).  Magazines
 * may never be entirely disabled in this implementation.
 */
unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
	"Default magazine size (2-256), set automatically (0)");
/*
 * The default behavior is to report the number of objects remaining in the
 * cache.  This allows the Linux VM to repeatedly reclaim objects from the
 * cache when memory is low to satisfy other memory allocations.  Alternately,
 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
 * is reclaimed.  This may increase the likelihood of out of memory events.
 */
unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
	"Minimal number of objects per slab");

unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory.  However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages.
 */
#if PAGE_SIZE == 4096
unsigned int spl_kmem_cache_slab_limit = 16384;
#else
unsigned int spl_kmem_cache_slab_limit = 0;
#endif
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
	"Objects less than N bytes use the Linux slab");
/*
 * This value defaults to a threshold designed to avoid allocations which
 * have been deemed costly by the kernel.
 */
unsigned int spl_kmem_cache_kmem_limit =
	((1 << (PAGE_ALLOC_COSTLY_ORDER - 1)) * PAGE_SIZE) /
	SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_kmem_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
	"Objects less than N bytes use kmalloc");
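/*
 * Worked example (illustrative values, not asserted by the code): with 4K
 * pages, a PAGE_ALLOC_COSTLY_ORDER of 3, and SPL_KMEM_CACHE_OBJ_PER_SLAB of
 * 8 the default is ((1 << 2) * 4096) / 8 = 2048 bytes, so objects up to 2K
 * are placed on kmem (kmalloc) backed slabs and anything larger falls
 * through to vmem backed slabs.
 */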
/*
 * The number of threads available to allocate new slabs for caches.  This
 * should not need to be tuned but it is available for performance analysis.
 */
unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
	"Number of spl_kmem_cache threads");
/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 */
struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */
taskq_t *spl_kmem_cache_taskq;		/* Task queue for ageing / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
	spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	if (skc->skc_flags & KMC_KMEM) {
		ptr = (void *)__get_free_pages(lflags, get_order(size));
	} else {
		ptr = __vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL);
	}

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}
static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;

	if (skc->skc_flags & KMC_KMEM) {
		free_pages((unsigned long)ptr, get_order(size));
	} else {
		vfree(ptr);
	}
}
/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each offslab object taking into account alignment
 * restrictions and the power-of-two requirement of kv_alloc().
 */
static inline uint32_t
spl_offslab_size(spl_kmem_cache_t *skc)
{
	return (1UL << (fls64(spl_obj_size(skc)) + 1));
}
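/*
 * Illustrative example (the numbers are hypothetical): if the aligned object
 * plus its spl_kmem_obj_t header comes to roughly 9000 bytes, fls64(9000)
 * is 14, so each off-slab object is carved from a 1 << 15 = 32K
 * power-of-two region as required by kv_alloc().
 */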
/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * KMC_ONSLAB                       KMC_OFFSLAB
 *
 * +------------------------+       +-----------------+
 * | spl_kmem_slab_t --+-+  |       | spl_kmem_slab_t |---+-+
 * | skc_obj_size    <-+ |  |       +-----------------+   | |
 * | spl_kmem_obj_t      |  |                             | |
 * | skc_obj_size    <---+  |       +-----------------+   | |
 * | spl_kmem_obj_t      |  |       | skc_obj_size    | <-+ |
 * | ...                 v  |       | spl_kmem_obj_t  |     |
 * +------------------------+       +-----------------+     v
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	spl_kmem_obj_t *sko, *n;
	void *base, *obj;
	uint32_t obj_size, offslab_size = 0;
	int i, rc = 0;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;

	obj_size = spl_obj_size(skc);

	if (skc->skc_flags & KMC_OFFSLAB)
		offslab_size = spl_offslab_size(skc);

	for (i = 0; i < sks->sks_objs; i++) {
		if (skc->skc_flags & KMC_OFFSLAB) {
			obj = kv_alloc(skc, offslab_size, flags);
			if (!obj) {
				rc = -ENOMEM;
				goto out;
			}
		} else {
			obj = base + spl_sks_size(skc) + (i * obj_size);
		}

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}
out:
	if (rc) {
		/* Unwind any off-slab objects allocated before the failure */
		if (skc->skc_flags & KMC_OFFSLAB)
			list_for_each_entry_safe(sko,
			    n, &sks->sks_free_list, sko_list)
				kv_free(skc, sko->sko_addr, offslab_size);

		kv_free(skc, base, skc->skc_slab_size);
		sks = NULL;
	}

	return (sks);
}
/*
 * Remove a slab from the complete or partial list.  It must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects into the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}
/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks, *m;
	spl_kmem_obj_t *sko, *n;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);
	uint32_t size = 0;

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock.  All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {
		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are
	 * run, any offslab objects are freed, and the slabs themselves
	 * are freed.  This is all done outside the skc->skc_lock since
	 * this allows the destructor to sleep, and allows us to perform
	 * a conditional reschedule when freeing a large number of
	 * objects and slabs back to the system.
	 */
	if (skc->skc_flags & KMC_OFFSLAB)
		size = spl_offslab_size(skc);

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);

		if (skc->skc_flags & KMC_OFFSLAB)
			kv_free(skc, sko->sko_addr, size);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}
/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}
/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}
/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	int i, count = MIN(flush, skm->skm_avail);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	for (i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);
}

static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);
	__spl_cache_flush(skc, skm, flush);
	spin_unlock(&skc->skc_lock);
}
static void
spl_magazine_age(void *data)
{
	spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_cpu == smp_processor_id());
	ASSERT(irqs_disabled());

	/* There are no available objects or they are too young to age out */
	if ((skm->skm_avail == 0) ||
	    time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
		return;

	/*
	 * Because we're executing in interrupt context we may have
	 * interrupted the holder of this lock.  To avoid a potential
	 * deadlock return if the lock is contended.
	 */
	if (!spin_trylock(&skc->skc_lock))
		return;

	__spl_cache_flush(skc, skm, skm->skm_refill);
	spin_unlock(&skc->skc_lock);
}
/*
 * Called regularly to keep a downward pressure on the cache.
 *
 * Objects older than skc->skc_delay seconds in the per-cpu magazines will
 * be returned to the caches.  This is done to prevent idle magazines from
 * holding memory which could be better used elsewhere.  The delay is
 * present to prevent thrashing the magazine.
 *
 * The newly released objects may result in empty partial slabs.  Those
 * slabs should be released to the system.  Otherwise moving the objects
 * out of the magazines is just wasted work.
 */
static void
spl_cache_age(void *data)
{
	spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
	taskqid_t id = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/* Dynamically disabled at run time */
	if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
		return;

	atomic_inc(&skc->skc_ref);

	if (!(skc->skc_flags & KMC_NOMAGAZINE))
		on_each_cpu(spl_magazine_age, skc, 1);

	spl_slab_reclaim(skc);

	while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
		id = taskq_dispatch_delay(
		    spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
		    ddi_get_lbolt() + skc->skc_delay / 3 * HZ);

		/* Destroy issued after dispatch, immediately cancel it */
		if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
			taskq_cancel_id(spl_kmem_cache_taskq, id);
	}

	spin_lock(&skc->skc_lock);
	skc->skc_taskqid = id;
	spin_unlock(&skc->skc_lock);

	atomic_dec(&skc->skc_ref);
}
/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.  Also for
 * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min;
 * below this we will fail.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	if (skc->skc_flags & KMC_OFFSLAB) {
		tgt_objs = spl_kmem_cache_obj_per_slab;
		tgt_size = P2ROUNDUP(sizeof (spl_kmem_slab_t), PAGE_SIZE);

		if ((skc->skc_flags & KMC_KMEM) &&
		    (spl_obj_size(skc) > (SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE)))
			return (-ENOSPC);
	} else {
		sks_size = spl_sks_size(skc);
		obj_size = spl_obj_size(skc);
		max_size = (spl_kmem_cache_max_size * 1024 * 1024);
		tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

		/*
		 * KMC_KMEM slabs are allocated by __get_free_pages() which
		 * rounds up to the nearest order.  Knowing this the size
		 * should be rounded up to the next power of two with a hard
		 * maximum defined by the maximum allowed allocation order.
		 */
		if (skc->skc_flags & KMC_KMEM) {
			max_size = SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE;
			tgt_size = MIN(max_size,
			    PAGE_SIZE * (1 << MAX(get_order(tgt_size) - 1, 1)));
		}

		if (tgt_size <= max_size) {
			tgt_objs = (tgt_size - sks_size) / obj_size;
		} else {
			tgt_objs = (max_size - sks_size) / obj_size;
			tgt_size = (tgt_objs * obj_size) + sks_size;
		}
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}
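/*
 * Worked example (illustrative figures only): a kmem backed cache with a
 * 1 KiB aligned object footprint and an 8 object target starts from a
 * tgt_size of roughly 8 KiB plus the slab header.  The get_order() based
 * rounding then settles on a power-of-two number of pages, and tgt_objs is
 * recomputed from whichever size was finally chosen, so the actual object
 * count per slab may differ slightly from the target.
 */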
/*
 * Make a guess at a reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128KiB per-magazine */
	else
		size = 256;

	return (size);
}
/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_age = jiffies;
		skm->skm_cpu = cpu;
	}

	return (skm);
}
/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}
/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i;

	if (skc->skc_flags & KMC_NOMAGAZINE)
		return (0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}
/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i;

	if (skc->skc_flags & KMC_NOMAGAZINE)
		return;

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}
/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_NOTOUCH	Disable cache object aging (unsupported)
 *	KMC_NODEBUG	Disable debugging (unsupported)
 *	KMC_NOHASH	Disable hashing (unsupported)
 *	KMC_QCACHE	Disable qcache (unsupported)
 *	KMC_NOMAGAZINE	Enabled for kmem/vmem, Disabled for Linux slab
 *	KMC_KMEM	Force kmem backed cache
 *	KMC_VMEM	Force vmem backed cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_OFFSLAB	Locate objects off the slab
 */
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/* Unsupported flags */
	ASSERT0(flags & KMC_NOMAGAZINE);
	ASSERT0(flags & KMC_NOHASH);
	ASSERT0(flags & KMC_QCACHE);

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = (char *)kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strncpy(skc->skc_name, name, skc->skc_name_size);
	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_reclaim = reclaim;
	skc->skc_private = priv;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	skc->skc_delay = SPL_KMEM_CACHE_DELAY;
	skc->skc_reap = SPL_KMEM_CACHE_REAP;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {
		/*
		 * Objects smaller than spl_kmem_cache_slab_limit can
		 * use the Linux slab for better space-efficiency.  By
		 * default this functionality is disabled until its
		 * performance characteristics are fully understood.
		 */
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit)
			skc->skc_flags |= KMC_SLAB;

		/*
		 * Small objects, less than spl_kmem_cache_kmem_limit per
		 * object, should use kmem because their slabs are small.
		 */
		else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit)
			skc->skc_flags |= KMC_KMEM;

		/*
		 * All other objects are considered large and are placed
		 * on vmem backed slabs.
		 */
		else
			skc->skc_flags |= KMC_VMEM;
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) {
			rc = EINVAL;
			goto out;
		}

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of the SLAB_USERCOPY flag.
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL) {
			rc = ENOMEM;
			goto out;
		}

#if defined(HAVE_KMEM_CACHE_ALLOCFLAGS)
		skc->skc_linux_cache->allocflags |= __GFP_COMP;
#elif defined(HAVE_KMEM_CACHE_GFPFLAGS)
		skc->skc_linux_cache->gfpflags |= __GFP_COMP;
#endif
		skc->skc_flags |= KMC_NOMAGAZINE;
	}

	if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
		skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
		    spl_cache_age, skc, TQ_SLEEP,
		    ddi_get_lbolt() + skc->skc_delay / 3 * HZ);

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
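/*
 * Typical caller usage, sketched for illustration only (my_obj_t, my_ctor,
 * and my_dtor are hypothetical and not defined by the SPL):
 *
 *	skc = spl_kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
 *	    my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *	obj = spl_kmem_cache_alloc(skc, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(skc, obj);
 *	spl_kmem_cache_destroy(skc);
 *
 * Passing 0 for the alignment and flags lets the implementation pick
 * SPL_KMEM_CACHE_ALIGN and an appropriate backing store automatically.
 */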
/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}
/*
 * Generic slab allocation function run by the global work queues.
 * It is responsible for allocating a new slab, linking it into the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
		wake_up_all(&skc->skc_waitq);
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}
static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	(void) __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();

	kfree(ska);
}
/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}
/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);
	*obj = NULL;

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * To reduce the overhead of context switches and improve NUMA
	 * locality, try to allocate a new slab in the current process
	 * context with the KM_NOSLEEP flag.  If that fails, launch a taskq
	 * to perform the allocation.
	 *
	 * However, this can't be applied to KMC_VMEM due to a bug that
	 * __vmalloc() doesn't honor gfp flags in page table allocation.
	 */
	if (!(skc->skc_flags & KMC_VMEM)) {
		rc = __spl_cache_grow(skc, flags | KM_NOSLEEP);
		if (rc == 0)
			return (0);
	}

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}
/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to different CPU skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (obj);
}
/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}
/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
		skm->skm_age = jiffies;
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (obj && skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entire magazines back to the
 * slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object directly from the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked.  However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but since no
	 * remote memory allocation tracking is being performed it is
	 * entirely possible to allocate an object from one CPU cache and
	 * return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object,
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
 * The generic shrinker function for all caches.  Under Linux a shrinker
 * may not be tightly coupled with a slab cache.  In fact Linux always
 * systematically tries calling all registered shrinker callbacks which
 * report that they contain unused objects.  Because of this we only
 * register one shrinker function in the shim layer for all slab caches.
 * We always attempt to shrink all caches when this generic shrinker
 * is called.
 *
 * If sc->nr_to_scan is zero, the caller is requesting a query of the
 * number of objects which can potentially be freed.  If it is nonzero,
 * the request is to free that many objects.
 *
 * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
 * in struct shrinker and also require the shrinker to return the number
 * of objects freed.
 *
 * Older kernels require the shrinker to return the number of freeable
 * objects following the freeing of nr_to_free.
 *
 * Linux semantics differ from those under Solaris, which are to
 * free all available objects which may (and probably will) be more
 * objects than the requested nr_to_scan.
 */
static spl_shrinker_t
__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
    struct shrink_control *sc)
{
	spl_kmem_cache_t *skc;
	int alloc = 0;

	/*
	 * No shrinking in a transaction context.  Can cause deadlocks.
	 */
	if (sc->nr_to_scan && spl_fstrans_check())
		return (SHRINK_STOP);

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		if (sc->nr_to_scan) {
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
			uint64_t oldalloc = skc->skc_obj_alloc;
			spl_kmem_cache_reap_now(skc,
			    MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
			if (oldalloc > skc->skc_obj_alloc)
				alloc += oldalloc - skc->skc_obj_alloc;
#else
			spl_kmem_cache_reap_now(skc,
			    MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1));
			alloc += skc->skc_obj_alloc;
#endif /* HAVE_SPLIT_SHRINKER_CALLBACK */
		} else {
			/* Request to query number of freeable objects */
			alloc += skc->skc_obj_alloc;
		}
	}
	up_read(&spl_kmem_cache_sem);

	/*
	 * When KMC_RECLAIM_ONCE is set allow only a single reclaim pass.
	 * This functionality only exists to work around a rare issue where
	 * shrink_slabs() is repeatedly invoked by many cores causing the
	 * system to thrash.
	 */
	if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
		return (SHRINK_STOP);

	return (MAX(alloc, 0));
}

SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
/*
 * Call the registered reclaim function for a cache.  Depending on how
 * many and which objects are released it may simply repopulate the
 * local magazine which will then need to age-out.  Objects which cannot
 * fit in the magazine will be released back to their slabs, which will
 * also need to age out before being released.  This is all just best
 * effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	atomic_inc(&skc->skc_ref);

	/*
	 * Execute the registered reclaim callback if it exists.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		if (skc->skc_reclaim)
			skc->skc_reclaim(skc->skc_private);
		goto out;
	}

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/*
	 * When a reclaim function is available it may be invoked repeatedly
	 * until at least a single slab can be freed.  This ensures that we
	 * do free memory back to the system.  This helps minimize the chance
	 * of an OOM event when the bulk of memory is used by the slab.
	 *
	 * When free slabs are already available the reclaim callback will be
	 * skipped.  Additionally, if no forward progress is detected despite
	 * a reclaim function the cache will be skipped to avoid deadlock.
	 *
	 * Longer term this would be the correct place to add the code which
	 * repacks the slabs in order to minimize fragmentation.
	 */
	if (skc->skc_reclaim) {
		uint64_t objects = UINT64_MAX;
		uint64_t do_reclaim;

		do {
			spin_lock(&skc->skc_lock);
			do_reclaim =
			    (skc->skc_slab_total > 0) &&
			    ((skc->skc_slab_total-skc->skc_slab_alloc) == 0) &&
			    (skc->skc_obj_alloc < objects);

			objects = skc->skc_obj_alloc;
			spin_unlock(&skc->skc_lock);

			if (do_reclaim)
				skc->skc_reclaim(skc->skc_private);

		} while (do_reclaim);
	}

	/* Reclaim from the magazine and free all now empty slabs. */
	if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
		spl_kmem_magazine_t *skm;
		unsigned long irq_flags;

		local_irq_save(irq_flags);
		skm = skc->skc_mag[smp_processor_id()];
		spl_cache_flush(skc, skm, skm->skm_avail);
		local_irq_restore(irq_flags);
	}

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	struct shrink_control sc;

	sc.nr_to_scan = KMC_REAP_CHUNK;
	sc.gfp_mask = GFP_KERNEL;

	(void) __spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);
int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
	spl_register_shrinker(&spl_kmem_cache_shrinker);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	spl_unregister_shrinker(&spl_kmem_cache_shrinker);
	taskq_destroy(spl_kmem_cache_taskq);
}