/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <sys/string.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of the spl-kmem-cache.c file the kmem_cache_*
 * definitions are removed to allow access to the real Linux slab
 * allocator.
 */
#undef	kmem_cache_destroy
#undef	kmem_cache_create
#undef	kmem_cache_alloc
#undef	kmem_cache_free

/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant. This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define	smp_mb__before_atomic(x)	smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define	smp_mb__after_atomic(x)	smp_mb__after_clear_bit(x)
#endif
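
/*
 * For reference, these barriers are used later in this file to order a
 * plain clear_bit() against surrounding memory operations, e.g. in
 * __spl_cache_grow():
 *
 *	smp_mb__before_atomic();
 *	clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
 *	smp_mb__after_atomic();
 */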

/* BEGIN CSTYLED */
/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory. They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock. This
 * can improve performance on highly contended caches. However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size. When this value is set to 0 the magazine size will be
 * automatically determined based on the object size. Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e. per cpu). Magazines
 * may never be entirely disabled in this implementation.
 */
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
	"Default magazine size (2-256), set automatically (0)");
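
/*
 * For example, a fixed 64-object magazine could be requested at module
 * load time with "modprobe spl spl_kmem_cache_magazine_size=64". The
 * parameter is read-only (0444), so it cannot be changed at runtime.
 */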

static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory. However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred. A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
static unsigned int spl_kmem_cache_slab_limit =
	SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
	"Objects less than N bytes use the Linux slab");

/*
 * The number of threads available to allocate new slabs for caches. This
 * should not need to be tuned but it is available for performance analysis.
 */
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
	"Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs. I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors. Recent versions of the Linux
 *    kernel have removed support for destructors. This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc. We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab. Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations. Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features. It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches. This will seriously
 * constrain the size of the slab caches and their performance.
 */

struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */
static taskq_t *spl_kmem_cache_taskq;	/* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made. Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations. However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
#ifdef HAVE_RECLAIM_STATE_RECLAIMED
		current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
	vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
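
/*
 * Worked example (illustrative numbers only): with skc_obj_align = 8 and
 * skc_obj_size = 20, the object rounds up to 24 bytes; if
 * sizeof (spl_kmem_obj_t) were 40 it would round to 40, so each aligned
 * object would consume 64 bytes of slab space.
 */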

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator. It is far better to do a few large
 * allocations and then subdivide it ourselves. Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages. We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node(). This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches. Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size    <-+ |  |
 * | spl_kmem_obj_t      |  |
 * | skc_obj_size    <---+  |
 * | spl_kmem_obj_t      |  |
 * | ...                 v  |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	void *base;
	uint32_t obj_size;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	for (int i = 0; i < sks->sks_objs; i++) {
		void *obj = base + spl_sks_size(skc) + (i * obj_size);

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	return (sks);
}

/*
 * Remove a slab from the complete or partial list. This must be called
 * with 'skc->skc_lock' held, but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list. Finally add the slab
	 * and all its objects into the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks = NULL, *m = NULL;
	spl_kmem_obj_t *sko = NULL, *n = NULL;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock. All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {

		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are run,
	 * and the slabs themselves are freed. This is all done outside the
	 * skc->skc_lock since this allows the destructor to sleep, and
	 * allows us to perform a conditional reschedule when freeing a
	 * large number of objects and slabs back to the system.
	 */

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}

/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}

/*
 * Release objects from the per-cpu magazine back to their slab. The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	int count = MIN(flush, skm->skm_avail);
	for (int i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);
}

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	sks_size = spl_sks_size(skc);
	obj_size = spl_obj_size(skc);
	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

	if (tgt_size <= max_size) {
		tgt_objs = (tgt_size - sks_size) / obj_size;
	} else {
		tgt_objs = (max_size - sks_size) / obj_size;
		tgt_size = (tgt_objs * obj_size) + sks_size;
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}

/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine. Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128KiB per-magazine */
	else
		size = 256;

	return (size);
}
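
/*
 * For example, with 4KiB pages a cache whose spl_obj_size() works out to
 * 8KiB falls in the "obj_size > PAGE_SIZE" bucket above, giving a
 * 64-object magazine and therefore up to 64 * 8KiB = 512KiB of cached
 * objects per CPU.
 */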

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_cpu = cpu;
	}

	return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 *	name		cache name
 *	size		cache object size
 *	align		cache object alignment
 *	ctor		cache object constructor
 *	dtor		cache object destructor
 *	reclaim		cache object reclaim
 *	priv		cache private data for ctor/dtor/reclaim
 *	vmp		unused must be NULL
 *	flags
 *		KMC_KVMEM	Force kvmem backed SPL cache
 *		KMC_SLAB	Force Linux slab backed cache
 *		KMC_NODEBUG	Disable debugging (unsupported)
 */
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT(vmp == NULL);
	ASSERT(reclaim == NULL);

	might_sleep();

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strlcpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
	    GFP_KERNEL);
	if (rc != 0) {
		kfree(skc->skc_name);
		kfree(skc);
		return (NULL);
	}

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit) {
			/*
			 * Objects smaller than spl_kmem_cache_slab_limit can
			 * use the Linux slab for better space-efficiency.
			 */
			skc->skc_flags |= KMC_SLAB;
		} else {
			/*
			 * All other objects are considered large and are
			 * placed on kvmem backed slabs.
			 */
			skc->skc_flags |= KMC_KVMEM;
		}
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & KMC_KVMEM) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > spl_kmem_cache_slab_limit)
			goto out;

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of SLAB_USERCOPY flag
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL)
			goto out;
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	percpu_counter_destroy(&skc->skc_linux_alloc);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
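
/*
 * Illustrative usage sketch (not part of the original source, hence
 * compiled out): how a caller might create, use, and destroy one of
 * these caches. The my_obj type and the ctor/dtor bodies are
 * hypothetical; the signatures follow the spl_kmem_ctor_t and
 * spl_kmem_dtor_t types used by spl_kmem_cache_create() above.
 */
#if 0
typedef struct my_obj {
	kmutex_t	mo_lock;	/* expensive to init, hence the ctor */
} my_obj_t;

static int
my_obj_ctor(void *buf, void *priv, int kmflags)
{
	my_obj_t *mo = buf;

	mutex_init(&mo->mo_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

static void
my_obj_dtor(void *buf, void *priv)
{
	my_obj_t *mo = buf;

	mutex_destroy(&mo->mo_lock);
}

static void
my_obj_example(void)
{
	/* reclaim and vmp must be NULL; align 0 keeps the default */
	spl_kmem_cache_t *cache = spl_kmem_cache_create("my_obj_cache",
	    sizeof (my_obj_t), 0, my_obj_ctor, my_obj_dtor, NULL, NULL,
	    NULL, 0);

	my_obj_t *mo = spl_kmem_cache_alloc(cache, KM_SLEEP);
	/* ... use mo ... */
	spl_kmem_cache_free(cache, mo);
	spl_kmem_cache_destroy(cache);
}
#endif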

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete. This is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & KMC_KVMEM) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
	percpu_counter_destroy(&skc->skc_linux_alloc);

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache. This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}

/*
 * Generic slab allocation function run by the global work queues.
 * It is responsible for allocating a new slab, linking it into the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	int error = __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();
	if (error == 0)
		wake_up_all(&skc->skc_waitq);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab. Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	*obj = NULL;

	/*
	 * Since we can't sleep attempt an emergency allocation to satisfy
	 * the request. The only alternative is to fail the allocation, but
	 * it's preferable to try. The use of KM_NOSLEEP is expected to be
	 * rare.
	 */
	if (flags & KM_NOSLEEP)
		return (spl_emergency_alloc(skc, flags, obj));

	might_sleep();

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * Note: It would be nice to reduce the overhead of context switch
	 * and improve NUMA locality, by trying to allocate a new slab in the
	 * current process context with KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug that
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
	 */

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue. This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked. We must be careful to minimize the use
	 * of emergency objects which are more expensive to track. Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked. From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created. On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to different CPU skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache. We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full. Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order. Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab. All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		if (obj != NULL) {
			/*
			 * Even though we leave everything up to the
			 * underlying cache we still keep track of
			 * how many objects we've allocated in it for
			 * better debuggability.
			 */
			percpu_counter_inc(&skc->skc_linux_alloc);
		}
		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);

/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from. We may need to flush entire magazines back to the
 * slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		percpu_counter_dec(&skc->skc_linux_alloc);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked. However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but since no
	 * remote memory allocation tracking is being performed it is
	 * entirely possible to allocate an object from one CPU cache
	 * and return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object,
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released it may simply
 * repopulate the local magazine which will then need to age-out. Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released. This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	if (skc->skc_flags & KMC_SLAB)
		return;

	atomic_inc(&skc->skc_ref);

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/* Reclaim from the magazine and free all now empty slabs. */
	unsigned long irq_flags;
	local_irq_save(irq_flags);
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
	spl_cache_flush(skc, skm, skm->skm_avail);
	local_irq_restore(irq_flags);

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
1416
3ec34e55
BL
1417/*
1418 * This is stubbed out for code consistency with other platforms. There
1419 * is existing logic to prevent concurrent reaping so while this is ugly
1420 * it should do no harm.
1421 */
1422int
493b6e56 1423spl_kmem_cache_reap_active(void)
3ec34e55
BL
1424{
1425 return (0);
1426}
1427EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_t *skc = NULL;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		spl_kmem_cache_reap_now(skc);
	}
	up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	if (spl_kmem_cache_taskq == NULL)
		return (-ENOMEM);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	taskq_destroy(spl_kmem_cache_taskq);
}