/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of the spl-kmem-cache.c file, the kmem_cache_*
 * definitions are removed to allow access to the real Linux slab allocator.
 */
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free


/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant. This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif

/* BEGIN CSTYLED */
/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory. They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock. This
 * can improve performance on highly contended caches. However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released, this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size. When this value is set to 0 the magazine size will be
 * automatically determined based on the object size. Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e., per CPU). Magazines
 * may never be entirely disabled in this implementation.
 */
static unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
    "Default magazine size (2-256), set automatically (0)");

static unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

static unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory. However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred. A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages and
 * to also work well on architectures using larger 64K page sizes.
 */
static unsigned int spl_kmem_cache_slab_limit = 16384;
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
    "Objects less than N bytes use the Linux slab");

/*
 * The number of threads available to allocate new slabs for caches. This
 * should not need to be tuned but it is available for performance analysis.
 */
static unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
    "Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs. I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation, the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32-bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 */

struct list_head spl_kmem_cache_list;   /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
static taskq_t *spl_kmem_cache_taskq;   /* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	gfp_t lflags = kmem_flags_convert(flags);
	void *ptr;

	ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
#ifdef HAVE_RECLAIM_STATE_RECLAIMED
		current->reclaim_state->reclaimed += size >> PAGE_SHIFT;
#else
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
#endif
	vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
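
/*
 * Worked example with illustrative sizes (the real sizeof (spl_kmem_obj_t)
 * is architecture dependent): for skc_obj_size = 100, skc_obj_align = 8,
 * and a supposed 32-byte spl_kmem_obj_t:
 *
 *   spl_obj_size = P2ROUNDUP(100, 8) + P2ROUNDUP(32, 8)
 *                = 104 + 32 = 136 bytes per object slot
 */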

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
	return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
	return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t));
}

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size    <-+ |  |
 * | spl_kmem_obj_t      |  |
 * | skc_obj_size    <---+  |
 * | spl_kmem_obj_t      |  |
 * | ...                 v  |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	void *base;
	uint32_t obj_size;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return (NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	for (int i = 0; i < sks->sks_objs; i++) {
		void *obj = base + spl_sks_size(skc) + (i * obj_size);

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	return (sks);
}
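
/*
 * Worked layout example with illustrative numbers: for base = 0x10000,
 * spl_sks_size() = 64, and spl_obj_size() = 136 the loop above places:
 *
 *   obj[0] at 0x10000 + 64 + 0 * 136 = 0x10040
 *   obj[1] at 0x10000 + 64 + 1 * 136 = 0x100c8
 *
 * with each object's spl_kmem_obj_t header at the end of its slot, which
 * is exactly where spl_sko_from_obj() expects to find it.
 */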

/*
 * Remove a slab from the complete or partial list.  This must be called
 * with 'skc->skc_lock' held, but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects into the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
	spl_kmem_slab_t *sks = NULL, *m = NULL;
	spl_kmem_obj_t *sko = NULL, *n = NULL;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);

	/*
	 * Empty slabs and objects must be moved to a private list so they
	 * can be safely freed outside the spin lock.  All empty slabs are
	 * at the end of skc->skc_partial_list, therefore once a non-empty
	 * slab is found we can stop scanning.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m,
	    &skc->skc_partial_list, sks_list) {

		if (sks->sks_ref > 0)
			break;

		spl_slab_free(sks, &sks_list, &sko_list);
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are run,
	 * and the slabs themselves are freed.  This is all done outside the
	 * skc->skc_lock since this allows the destructor to sleep, and
	 * allows us to perform a conditional reschedule when freeing a
	 * large number of objects and slabs back to the system.
	 */
	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < ske->ske_obj)
			node = node->rb_left;
		else if (address > ske->ske_obj)
			node = node->rb_right;
		else
			return (ske);
	}

	return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return (0);
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return (1);
}
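
/*
 * The tree is keyed on the emergency object's address, so for any ske
 * successfully inserted above, the following round trip holds and is all
 * spl_emergency_free() needs to recognize an emergency object from the
 * bare pointer handed back by the caller:
 *
 *   spl_emergency_search(root, (void *)ske->ske_obj) == ske
 */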

/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	gfp_t lflags = kmem_flags_convert(flags);
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);
	int empty;

	/* Last chance: use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		return (-EEXIST);

	ske = kmalloc(sizeof (*ske), lflags);
	if (ske == NULL)
		return (-ENOMEM);

	ske->ske_obj = __get_free_pages(lflags, order);
	if (ske->ske_obj == 0) {
		kfree(ske);
		return (-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		free_pages(ske->ske_obj, order);
		kfree(ske);
		return (-EINVAL);
	}

	*obj = (void *)ske->ske_obj;

	return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	int order = get_order(skc->skc_obj_size);

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (ske) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (ske == NULL)
		return (-ENOENT);

	free_pages(ske->ske_obj, order);
	kfree(ske);

	return (0);
}

/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	int count = MIN(flush, skm->skm_avail);
	for (int i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof (void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);
}
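
/*
 * For example, flushing a full magazine with skm_avail = skm_size = 64
 * and flush = skm_refill = 32 releases skm_objs[0] through skm_objs[31]
 * back to their slabs, then slides the surviving 32 pointers down to the
 * front of skm_objs (the magazine sizes here are illustrative).
 */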

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

	sks_size = spl_sks_size(skc);
	obj_size = spl_obj_size(skc);
	max_size = (spl_kmem_cache_max_size * 1024 * 1024);
	tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

	if (tgt_size <= max_size) {
		tgt_objs = (tgt_size - sks_size) / obj_size;
	} else {
		tgt_objs = (max_size - sks_size) / obj_size;
		tgt_size = (tgt_objs * obj_size) + sks_size;
	}

	if (tgt_objs == 0)
		return (-ENOSPC);

	*objs = tgt_objs;
	*size = tgt_size;

	return (0);
}
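
/*
 * Worked example with hypothetical values: for spl_kmem_cache_obj_per_slab
 * = 8, spl_obj_size() = 32832 (a 32 KiB object plus a 64-byte aligned
 * header), and spl_sks_size() = 64:
 *
 *   tgt_size = 8 * 32832 + 64 = 262720 bytes (~256 KiB)
 *
 * which is far below the default max_size, so the slab keeps all 8
 * objects.  Only when tgt_size would exceed max_size is tgt_objs
 * recomputed downward to fit.
 */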

/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	if (spl_kmem_cache_magazine_size > 0)
		return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

	/* Per-magazine sizes below assume a 4 KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4 MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2 MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;	/* Minimum 256 KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128 KiB per-magazine */
	else
		size = 256;

	return (size);
}
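
/*
 * Worked example: with 4 KiB pages, a 256 KiB object falls in the
 * (PAGE_SIZE * 32, PAGE_SIZE * 256] bucket above, so the magazine holds
 * 16 objects, i.e. up to 16 * 256 KiB = 4 MiB cached per CPU for that
 * cache.  The smallest objects in the same bucket, just over 128 KiB,
 * give the 2 MiB minimum noted in the comment.
 */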

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof (spl_kmem_magazine_t) +
	    sizeof (void *) * skc->skc_mag_size;

	skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_cpu = cpu;
	}

	return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);
	kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
	    num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_possible_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			kfree(skc->skc_mag);
			return (-ENOMEM);
		}
	}

	return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i = 0;

	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	for_each_possible_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_KVMEM	Force kvmem backed SPL cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_NODEBUG	Disable debugging (unsupported)
 */
spl_kmem_cache_t *
spl_kmem_cache_create(const char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
	gfp_t lflags = kmem_flags_convert(KM_SLEEP);
	spl_kmem_cache_t *skc;
	int rc;

	/*
	 * Unsupported flags
	 */
	ASSERT(vmp == NULL);
	ASSERT(reclaim == NULL);

	might_sleep();

	skc = kzalloc(sizeof (*skc), lflags);
	if (skc == NULL)
		return (NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = kmalloc(skc->skc_name_size, lflags);
	if (skc->skc_name == NULL) {
		kfree(skc);
		return (NULL);
	}
	strlcpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_private = priv;
	skc->skc_vmp = vmp;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
	    GFP_KERNEL);
	if (rc != 0) {
		/* Also release the name buffer allocated above */
		kfree(skc->skc_name);
		kfree(skc);
		return (NULL);
	}

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit) {
			/*
			 * Objects smaller than spl_kmem_cache_slab_limit can
			 * use the Linux slab for better space-efficiency.
			 */
			skc->skc_flags |= KMC_SLAB;
		} else {
			/*
			 * All other objects are considered large and are
			 * placed on kvmem backed slabs.
			 */
			skc->skc_flags |= KMC_KVMEM;
		}
	}

	/*
	 * Given the type of slab, allocate the required resources.
	 */
	if (skc->skc_flags & KMC_KVMEM) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			goto out;

		rc = spl_magazine_create(skc);
		if (rc)
			goto out;
	} else {
		unsigned long slabflags = 0;

		if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE))
			goto out;

#if defined(SLAB_USERCOPY)
		/*
		 * Required for PAX-enabled kernels if the slab is to be
		 * used for copying between user and kernel space.
		 */
		slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
		/*
		 * Newer grsec patchset uses kmem_cache_create_usercopy()
		 * instead of the SLAB_USERCOPY flag.
		 */
		skc->skc_linux_cache = kmem_cache_create_usercopy(
		    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, slabflags, NULL);
#endif
		if (skc->skc_linux_cache == NULL)
			goto out;
	}

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return (skc);
out:
	kfree(skc->skc_name);
	percpu_counter_destroy(&skc->skc_linux_alloc);
	kfree(skc);
	return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
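
/*
 * Minimal usage sketch (the my_* names are hypothetical, not part of this
 * file, and the Solaris-style int-returning constructor signature is
 * assumed): a cache whose objects carry an embedded mutex that is
 * expensive to initialize, which is exactly the ctor/dtor case described
 * in the comment at the top of this file.
 *
 *	static int
 *	my_obj_ctor(void *obj, void *priv, int kmflags)
 *	{
 *		my_obj_t *mo = obj;
 *		mutex_init(&mo->mo_lock, NULL, MUTEX_DEFAULT, NULL);
 *		return (0);
 *	}
 *
 *	static void
 *	my_obj_dtor(void *obj, void *priv)
 *	{
 *		my_obj_t *mo = obj;
 *		mutex_destroy(&mo->mo_lock);
 *	}
 *
 *	cache = spl_kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
 *	    my_obj_ctor, my_obj_dtor, NULL, NULL, NULL, 0);
 *	...
 *	spl_kmem_cache_destroy(cache);
 */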

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & KMC_KVMEM) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
	percpu_counter_destroy(&skc->skc_linux_alloc);

	spin_unlock(&skc->skc_lock);

	kfree(skc->skc_name);
	kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return (sko->sko_addr);
}

/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	fstrans_cookie_t cookie = spl_fstrans_mark();
	sks = spl_slab_alloc(skc, flags);
	spl_fstrans_unmark(cookie);

	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);

		smp_mb__before_atomic();
		clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
		smp_mb__after_atomic();
	}
	spin_unlock(&skc->skc_lock);

	return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;

	int error = __spl_cache_grow(skc, ska->ska_flags);

	atomic_dec(&skc->skc_ref);
	smp_mb__before_atomic();
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	smp_mb__after_atomic();
	if (error == 0)
		wake_up_all(&skc->skc_waitq);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc = 0;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);

	*obj = NULL;

	/*
	 * Since we can't sleep, attempt an emergency allocation to satisfy
	 * the request.  The only alternative is to fail the allocation, but
	 * it's preferable to try.  The use of KM_NOSLEEP is expected to
	 * be rare.
	 */
	if (flags & KM_NOSLEEP)
		return (spl_emergency_alloc(skc, flags, obj));

	might_sleep();

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		return (rc ? rc : -EAGAIN);
	}

	/*
	 * Note: It would be nice to reduce the overhead of context switching
	 * and improve NUMA locality by trying to allocate a new slab in the
	 * current process context with the KM_NOSLEEP flag.
	 *
	 * However, this can't be applied to vmem/kvmem due to a bug that
	 * spl_vmalloc() doesn't honor gfp flags in page table allocation.
	 */

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
		if (ska == NULL) {
			clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
			smp_mb__after_atomic();
			wake_up_all(&skc->skc_waitq);
			return (-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ / 10);

		if (!remaining) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	return (rc);
}
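
/*
 * Putting the pieces together: a KM_SLEEP caller with no partial slab
 * dispatches at most one asynchronous __spl_cache_grow() (serialized by
 * the GROWING bit), then waits up to HZ / 10.  If the grower is still
 * stuck after that, the cache is flagged DEADLOCKED and callers are
 * diverted to spl_emergency_alloc() until the grower completes and
 * clears the flag.
 */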

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				return (obj);

			if (rc)
				goto out;

			/* Rescheduled to a different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				goto out;

			/*
			 * Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill.
			 */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/*
		 * Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it.
		 */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
		    ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/*
	 * Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail.
	 */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/*
	 * Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation.
	 */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;

	ASSERT0(flags & ~KM_PUBLIC_MASK);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;
		do {
			obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		if (obj != NULL) {
			/*
			 * Even though we leave everything up to the
			 * underlying cache we still keep track of
			 * how many objects we've allocated in it for
			 * better debuggability.
			 */
			percpu_counter_inc(&skc->skc_linux_alloc);
		}
		goto ret;
	}

	local_irq_disable();

restart:
	/*
	 * Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we needed to grow the cache.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if ((obj == NULL) && !(flags & KM_NOSLEEP))
			goto restart;

		local_irq_enable();
		goto ret;
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
	/* Pre-emptively migrate object to CPU L1 cache */
	if (obj) {
		if (skc->skc_ctor)
			skc->skc_ctor(obj, skc->skc_private, flags);
		else
			prefetchw(obj);
	}

	return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
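
/*
 * Allocation/free round trip, sketched with a hypothetical cache and
 * object type (see the creation example above):
 *
 *	my_obj_t *obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, obj);
 *
 * KM_SLEEP callers may block but are never handed NULL; KM_NOSLEEP
 * callers must handle a NULL return.
 */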

/*
 * Free an object back to the local per-cpu magazine.  There is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush an entire magazine back to the
 * slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	int do_reclaim = 0;
	int do_emergency = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/*
	 * Run the destructor
	 */
	if (skc->skc_dtor)
		skc->skc_dtor(obj, skc->skc_private);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		kmem_cache_free(skc->skc_linux_cache, obj);
		percpu_counter_dec(&skc->skc_linux_alloc);
		return;
	}

	/*
	 * While a cache has outstanding emergency objects all freed objects
	 * must be checked.  However, since emergency objects will never use
	 * a virtual address these objects can be safely excluded as an
	 * optimization.
	 */
	if (!is_vmalloc_addr(obj)) {
		spin_lock(&skc->skc_lock);
		do_emergency = (skc->skc_obj_emergency > 0);
		spin_unlock(&skc->skc_lock);

		if (do_emergency && (spl_emergency_free(skc, obj) == 0))
			return;
	}

	local_irq_save(flags);

	/*
	 * Safe to update per-cpu structure without lock, but since no
	 * remote memory allocation tracking is being performed, it is
	 * entirely possible to allocate an object from one CPU cache
	 * and return it to another.
	 */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * Per-CPU cache full, flush it to make space for this object,
	 * this may result in an empty slab which can be reclaimed once
	 * interrupts are re-enabled.
	 */
	if (unlikely(skm->skm_avail >= skm->skm_size)) {
		spl_cache_flush(skc, skm, skm->skm_refill);
		do_reclaim = 1;
	}

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);

	if (do_reclaim)
		spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released it may simply
 * repopulate the local magazine which will then need to age-out.  Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released.  This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	if (skc->skc_flags & KMC_SLAB)
		return;

	atomic_inc(&skc->skc_ref);

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		goto out;

	/* Reclaim from the magazine and free all now empty slabs. */
	unsigned long irq_flags;
	local_irq_save(irq_flags);
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
	spl_cache_flush(skc, skm, skm->skm_avail);
	local_irq_restore(irq_flags);

	spl_slab_reclaim(skc);
	clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_atomic();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

/*
 * This is stubbed out for code consistency with other platforms.  There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
	return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_t *skc = NULL;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		spl_kmem_cache_reap_now(skc);
	}
	up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    spl_kmem_cache_kmem_threads, maxclsyspri,
	    spl_kmem_cache_kmem_threads * 8, INT_MAX,
	    TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	if (spl_kmem_cache_taskq == NULL)
		return (-ENOMEM);

	return (0);
}

void
spl_kmem_cache_fini(void)
{
	taskq_destroy(spl_kmem_cache_taskq);
}