/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf. DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://zfsonlinux.org/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Kmem Implementation.
\*****************************************************************************/
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_KMEM

/*
 * Within the scope of spl-kmem.c file the kmem_cache_* definitions
 * are removed to allow access to the real Linux slab allocator.
 */
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free
/*
 * Cache expiration was implemented because it was part of the default Solaris
 * kmem_cache behavior.  The idea is that per-cpu objects which haven't been
 * accessed in several seconds should be returned to the cache.  On the other
 * hand Linux slabs never move objects back to the slabs unless there is
 * memory pressure on the system.  By default the Linux method is enabled
 * because it has been shown to improve responsiveness on low memory systems.
 * This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
 */
unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
EXPORT_SYMBOL(spl_kmem_cache_expire);
module_param(spl_kmem_cache_expire, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
/*
 * The default behavior is to report the number of objects remaining in the
 * cache.  This allows the Linux VM to repeatedly reclaim objects from the
 * cache when memory is low to satisfy other memory allocations.  Alternately,
 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
 * is reclaimed.  This may increase the likelihood of out of memory events.
 */
unsigned int spl_kmem_cache_reclaim = 0;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
    "Minimal number of objects per slab");

unsigned int spl_kmem_cache_max_size = 32;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory.  However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred.  A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages.
 */
#if defined(_LP64) && !defined(DEBUG_KMEM) /* guard reconstructed; may differ upstream */
unsigned int spl_kmem_cache_slab_limit = 16384;
#else
unsigned int spl_kmem_cache_slab_limit = 0;
#endif
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
    "Objects less than N bytes use the Linux slab");

unsigned int spl_kmem_cache_kmem_limit = (PAGE_SIZE / 4);
module_param(spl_kmem_cache_kmem_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
    "Objects less than N bytes use the kmalloc");
/*
 * The minimum amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_min
 * multiplied by the number of zones and is sized based on that.
 */
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);

/*
 * The desired amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_low
 * multiplied by the number of zones and is sized based on that.
 * Assuming all zones are being used roughly equally, when we drop
 * below this threshold asynchronous page reclamation is triggered.
 */
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);

/*
 * When above this amount of memory measured in pages the system is
 * determined to have enough free memory.  This is similar to Linux's
 * zone->pages_high multiplied by the number of zones and is sized based
 * on that.  Assuming all zones are being used roughly equally, when
 * asynchronous page reclamation reaches this threshold it stops.
 */
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);

/* Unused always 0 in this implementation */
pgcnt_t needfree = 0;
EXPORT_SYMBOL(needfree);

pgcnt_t swapfs_minfree = 0;
EXPORT_SYMBOL(swapfs_minfree);

pgcnt_t swapfs_reserve = 0;
EXPORT_SYMBOL(swapfs_reserve);
vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);
#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(first_online_pgdat_fn);
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_online_pgdat_fn);
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
next_zone_t next_zone_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_zone_fn);
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
EXPORT_SYMBOL(pgdat_list_addr);
# endif /* HAVE_PGDAT_LIST */

#endif /* HAVE_PGDAT_HELPERS */
#ifdef NEED_GET_ZONE_COUNTS
# ifndef HAVE_GET_ZONE_COUNTS
get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_zone_counts_fn);
# endif /* HAVE_GET_ZONE_COUNTS */

unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	get_zone_counts(&active, &inactive, &free);
	switch (item) {
	case SPL_NR_FREE_PAGES:	return free;
	case SPL_NR_INACTIVE:	return inactive;
	case SPL_NR_ACTIVE:	return active;
	default:		ASSERT(0); /* Unsupported */
	}

	return 0;
}
#else
# ifdef HAVE_GLOBAL_PAGE_STATE
unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
	unsigned long pages = 0;

	switch (item) {
	case SPL_NR_FREE_PAGES:
#  ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
		pages += global_page_state(NR_FREE_PAGES);
#  endif
		break;
	case SPL_NR_INACTIVE:
#  ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
		pages += global_page_state(NR_INACTIVE);
#  endif
#  ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
		pages += global_page_state(NR_INACTIVE_ANON);
#  endif
#  ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
		pages += global_page_state(NR_INACTIVE_FILE);
#  endif
		break;
	case SPL_NR_ACTIVE:
#  ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
		pages += global_page_state(NR_ACTIVE);
#  endif
#  ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
		pages += global_page_state(NR_ACTIVE_ANON);
#  endif
#  ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
		pages += global_page_state(NR_ACTIVE_FILE);
#  endif
		break;
	default:
		ASSERT(0); /* Unsupported */
	}

	return (pages);
}
# else
#  error "Both global_page_state() and get_zone_counts() unavailable"
# endif /* HAVE_GLOBAL_PAGE_STATE */
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
#ifndef HAVE_SHRINK_DCACHE_MEMORY
shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_dcache_memory_fn);
#endif /* HAVE_SHRINK_DCACHE_MEMORY */

#ifndef HAVE_SHRINK_ICACHE_MEMORY
shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_icache_memory_fn);
#endif /* HAVE_SHRINK_ICACHE_MEMORY */
size_t
spl_kmem_availrmem(void)
{
	/* The amount of easily available memory */
	return (spl_global_page_state(SPL_NR_FREE_PAGES) +
	    spl_global_page_state(SPL_NR_INACTIVE));
}
EXPORT_SYMBOL(spl_kmem_availrmem);

size_t
vmem_size(vmem_t *vmp, int typemask)
{
	ASSERT3P(vmp, ==, NULL);
	ASSERT3S(typemask & VMEM_ALLOC, ==, VMEM_ALLOC);
	ASSERT3S(typemask & VMEM_FREE, ==, VMEM_FREE);

	return (VMALLOC_TOTAL);
}
EXPORT_SYMBOL(vmem_size);

int
kmem_debugging(void)
{
	return 0;
}
EXPORT_SYMBOL(kmem_debugging);
#ifndef HAVE_KVASPRINTF
/* Simplified asprintf. */
char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(kvasprintf);
#endif /* HAVE_KVASPRINTF */
char *
kmem_vasprintf(const char *fmt, va_list ap)
{
	va_list aq;
	char *ptr;

	do {
		va_copy(aq, ap);
		ptr = kvasprintf(GFP_KERNEL, fmt, aq);
		va_end(aq);
	} while (ptr == NULL);

	return ptr;
}
EXPORT_SYMBOL(kmem_vasprintf);

char *
kmem_asprintf(const char *fmt, ...)
{
	va_list ap;
	char *ptr;

	do {
		va_start(ap, fmt);
		ptr = kvasprintf(GFP_KERNEL, fmt, ap);
		va_end(ap);
	} while (ptr == NULL);

	return ptr;
}
EXPORT_SYMBOL(kmem_asprintf);

static char *
__strdup(const char *str, int flags)
{
	char *ptr;
	int n;

	n = strlen(str);
	ptr = kmalloc_nofail(n + 1, flags);
	if (ptr)
		memcpy(ptr, str, n + 1);

	return ptr;
}

char *
strdup(const char *str)
{
	return __strdup(str, KM_SLEEP);
}
EXPORT_SYMBOL(strdup);

void
strfree(char *str)
{
	kfree(str);
}
EXPORT_SYMBOL(strfree);
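
/*
 * Illustrative only (not part of the original source): a typical caller
 * pairs kmem_asprintf() with strfree(); the name and format string below
 * are hypothetical:
 *
 *	char *name = kmem_asprintf("spl-cache-%d", id);
 *	...
 *	strfree(name);
 */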
/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * the SPL will keep track of the total memory allocated, and
 * report any memory leaked when the module is unloaded.
 */
#ifdef DEBUG_KMEM

/* Shim layer memory accounting */
# ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
# else  /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
# endif /* HAVE_ATOMIC64_T */

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
/*
 * When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straightforward.
 *
 * Not surprisingly with debugging enabled the xmem_locks are very highly
 * contended particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 */
# ifdef DEBUG_KMEM_TRACKING

# define KMEM_HASH_BITS		10
# define KMEM_TABLE_SIZE	(1 << KMEM_HASH_BITS)

# define VMEM_HASH_BITS		10
# define VMEM_TABLE_SIZE	(1 << VMEM_HASH_BITS)
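
/*
 * Illustrative only (not part of the original source): with 10 hash bits
 * each table has 1 << 10 = 1024 buckets, and an allocation address is
 * mapped to a bucket with the kernel's hash_ptr() helper, e.g.:
 *
 *	struct hlist_head *head = &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)];
 */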
typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;

EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
    const void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kmem_debug *p;
	unsigned long flags;
	SENTRY;

	spin_lock_irqsave(lock, flags);

	head = &table[hash_ptr((void *)addr, bits)];
	hlist_for_each(node, head) {
		p = list_entry(node, struct kmem_debug, kd_hlist);
		if (p->kd_addr == addr) {
			hlist_del_init(&p->kd_hlist);
			list_del_init(&p->kd_list);
			spin_unlock_irqrestore(lock, flags);
			SRETURN(p);
		}
	}

	spin_unlock_irqrestore(lock, flags);

	SRETURN(NULL);
}
void *
kmem_alloc_track(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;
	SENTRY;

	/* Function may be called with KM_NOSLEEP so failure is possible */
	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
	    flags & ~__GFP_ZERO);

	if (unlikely(dptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
		    "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    sizeof(kmem_debug_t), flags, func, line,
		    kmem_alloc_used_read(), kmem_alloc_max);
	} else {
		/*
		 * Marked unlikely because we should never be doing this,
		 * we tolerate up to 2 pages but a single page is best.
		 */
		if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
			    "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
			    (unsigned long long) size, flags, func, line,
			    kmem_alloc_used_read(), kmem_alloc_max);
			spl_debug_dumpstack(NULL);
		}

		/*
		 * We use __strdup() below because the string pointed to by
		 * __FUNCTION__ might not be available by the time we want
		 * to print it since the module might have been unloaded.
		 * This can only fail in the KM_NOSLEEP case.
		 */
		dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
		if (unlikely(dptr->kd_func == NULL)) {
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
			    "debug __strdup() at %s:%d failed (%lld/%llu)\n",
			    func, line, kmem_alloc_used_read(), kmem_alloc_max);
			goto out;
		}

		/* Use the correct allocator */
		if (node_alloc) {
			ASSERT(!(flags & __GFP_ZERO));
			ptr = kmalloc_node_nofail(size, flags, node);
		} else if (flags & __GFP_ZERO) {
			ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
		} else {
			ptr = kmalloc_nofail(size, flags);
		}

		if (unlikely(ptr == NULL)) {
			kfree(dptr->kd_func);
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
			    "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
			    (unsigned long long) size, flags, func, line,
			    kmem_alloc_used_read(), kmem_alloc_max);
			goto out;
		}

		kmem_alloc_used_add(size);
		if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
			kmem_alloc_max = kmem_alloc_used_read();

		INIT_HLIST_NODE(&dptr->kd_hlist);
		INIT_LIST_HEAD(&dptr->kd_list);

		dptr->kd_addr = ptr;
		dptr->kd_size = size;
		dptr->kd_line = line;

		spin_lock_irqsave(&kmem_lock, irq_flags);
		hlist_add_head(&dptr->kd_hlist,
		    &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
		list_add_tail(&dptr->kd_list, &kmem_list);
		spin_unlock_irqrestore(&kmem_lock, irq_flags);

		SDEBUG_LIMIT(SD_INFO,
		    "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line, ptr,
		    kmem_alloc_used_read(), kmem_alloc_max);
	}
out:
	SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
void
kmem_free_track(const void *ptr, size_t size)
{
	kmem_debug_t *dptr;
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);

	/* Must exist in hash due to kmem_alloc() */
	ASSERT(dptr);

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	kmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, kmem_alloc_used_read(),
	    kmem_alloc_max);

	kfree(dptr->kd_func);

	memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset((void *)ptr, 0x5a, size);
	kfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);
void *
vmem_alloc_track(size_t size, int flags, const char *func, int line)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;
	SENTRY;

	ASSERT(flags & KM_SLEEP);

	/* Function may be called with KM_NOSLEEP so failure is possible */
	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
	    flags & ~__GFP_ZERO);
	if (unlikely(dptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
		    "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    sizeof(kmem_debug_t), flags, func, line,
		    vmem_alloc_used_read(), vmem_alloc_max);
	} else {
		/*
		 * We use __strdup() below because the string pointed to by
		 * __FUNCTION__ might not be available by the time we want
		 * to print it, since the module might have been unloaded.
		 * This can never fail because we have already asserted
		 * that flags is KM_SLEEP.
		 */
		dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
		if (unlikely(dptr->kd_func == NULL)) {
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
			    "debug __strdup() at %s:%d failed (%lld/%llu)\n",
			    func, line, vmem_alloc_used_read(), vmem_alloc_max);
			goto out;
		}

		/* Use the correct allocator */
		if (flags & __GFP_ZERO) {
			ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
		} else {
			ptr = vmalloc_nofail(size, flags);
		}

		if (unlikely(ptr == NULL)) {
			kfree(dptr->kd_func);
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
			    "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
			    (unsigned long long) size, flags, func, line,
			    vmem_alloc_used_read(), vmem_alloc_max);
			goto out;
		}

		vmem_alloc_used_add(size);
		if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
			vmem_alloc_max = vmem_alloc_used_read();

		INIT_HLIST_NODE(&dptr->kd_hlist);
		INIT_LIST_HEAD(&dptr->kd_list);

		dptr->kd_addr = ptr;
		dptr->kd_size = size;
		dptr->kd_line = line;

		spin_lock_irqsave(&vmem_lock, irq_flags);
		hlist_add_head(&dptr->kd_hlist,
		    &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
		list_add_tail(&dptr->kd_list, &vmem_list);
		spin_unlock_irqrestore(&vmem_lock, irq_flags);

		SDEBUG_LIMIT(SD_INFO,
		    "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    ptr, vmem_alloc_used_read(), vmem_alloc_max);
	}
out:
	SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
void
vmem_free_track(const void *ptr, size_t size)
{
	kmem_debug_t *dptr;
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);

	/* Must exist in hash due to vmem_alloc() */
	ASSERT(dptr);

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	vmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, vmem_alloc_used_read(),
	    vmem_alloc_max);

	kfree(dptr->kd_func);

	memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset((void *)ptr, 0x5a, size);
	vfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);
# else /* DEBUG_KMEM_TRACKING */

void *
kmem_alloc_debug(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
	void *ptr;
	SENTRY;

	/*
	 * Marked unlikely because we should never be doing this,
	 * we tolerate up to 2 pages but a single page is best.
	 */
	if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
		SDEBUG(SD_CONSOLE | SD_WARNING,
		    "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    kmem_alloc_used_read(), kmem_alloc_max);
		spl_debug_dumpstack(NULL);
	}

	/* Use the correct allocator */
	if (node_alloc) {
		ASSERT(!(flags & __GFP_ZERO));
		ptr = kmalloc_node_nofail(size, flags, node);
	} else if (flags & __GFP_ZERO) {
		ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
	} else {
		ptr = kmalloc_nofail(size, flags);
	}

	if (unlikely(ptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    kmem_alloc_used_read(), kmem_alloc_max);
	} else {
		kmem_alloc_used_add(size);
		if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
			kmem_alloc_max = kmem_alloc_used_read();

		SDEBUG_LIMIT(SD_INFO,
		    "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line, ptr,
		    kmem_alloc_used_read(), kmem_alloc_max);
	}

	SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);

void
kmem_free_debug(const void *ptr, size_t size)
{
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	kmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, kmem_alloc_used_read(),
	    kmem_alloc_max);

	kfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
	void *ptr;
	SENTRY;

	ASSERT(flags & KM_SLEEP);

	/* Use the correct allocator */
	if (flags & __GFP_ZERO) {
		ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
	} else {
		ptr = vmalloc_nofail(size, flags);
	}

	if (unlikely(ptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    vmem_alloc_used_read(), vmem_alloc_max);
	} else {
		vmem_alloc_used_add(size);
		if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
			vmem_alloc_max = vmem_alloc_used_read();

		SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
		    "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
		    vmem_alloc_used_read(), vmem_alloc_max);
	}

	SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);

void
vmem_free_debug(const void *ptr, size_t size)
{
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	vmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, vmem_alloc_used_read(),
	    vmem_alloc_max);

	vfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);

# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutex's, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 *
 * XXX: Improve the partial slab list by carefully maintaining a
 *      strict ordering of fullest to emptiest slabs based on
 *      the slab reference count.  This guarantees that when freeing
 *      slabs back to the system we need only linearly traverse the
 *      last N slabs in the list to discover all the freeable slabs.
 *
 * XXX: NUMA awareness for optionally allocating memory close to a
 *      particular core.  This can be advantageous if you know the slab
 *      object will be short lived and primarily accessed from one core.
 *
 * XXX: Slab coloring may also yield performance improvements and would
 *      be desirable to implement.
 */
struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */
taskq_t *spl_kmem_cache_taskq;		/* Task queue for ageing / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
    spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	void *ptr;

	ASSERT(ISP2(size));

	if (skc->skc_flags & KMC_KMEM)
		ptr = (void *)__get_free_pages(flags | __GFP_COMP,
		    get_order(size));
	else
		ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return ptr;
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
	ASSERT(ISP2(size));

	/*
	 * The Linux direct reclaim path uses this out of band value to
	 * determine if forward progress is being made.  Normally this is
	 * incremented by kmem_freepages() which is part of the various
	 * Linux slab implementations.  However, since we are using none
	 * of that infrastructure we are responsible for incrementing it.
	 */
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;

	if (skc->skc_flags & KMC_KMEM)
		free_pages((unsigned long)ptr, get_order(size));
	else
		vfree(ptr);
}
/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t);
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);
}

/*
 * Lookup the spl_kmem_object_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t);
}

/*
 * Required space for each offslab object taking into account alignment
 * restrictions and the power-of-two requirement of kv_alloc().
 */
static inline uint32_t
spl_offslab_size(spl_kmem_cache_t *skc)
{
	return 1UL << (fls64(spl_obj_size(skc)) + 1);
}
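
/*
 * Illustrative only (not part of the original source): fls64() returns
 * the 1-based index of the highest set bit, so for a padded object size
 * of 10000 bytes fls64() is 14 and the off-slab allocation is rounded
 * up to 1 << 15 = 32768 bytes, always a power of two as kv_alloc()
 * requires.
 */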
/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects in to one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide it ourselves.  Now which allocator
 * we use requires balancing a few trade offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) its cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 *   KMC_ONSLAB                       KMC_OFFSLAB
 *
 *   +------------------------+      +-----------------+
 *   | spl_kmem_slab_t --+-+  |      | spl_kmem_slab_t |---+-+
 *   | skc_obj_size    <-+ |  |      +-----------------+   | |
 *   | spl_kmem_obj_t      |  |                            | |
 *   | skc_obj_size    <---+  |      +-----------------+   | |
 *   | spl_kmem_obj_t      |  |      | skc_obj_size    | <-+ |
 *   | ...                 v  |      | spl_kmem_obj_t  |     |
 *   +------------------------+      +-----------------+     v
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	spl_kmem_obj_t *sko, *n;
	void *base, *obj;
	uint32_t obj_size, offslab_size = 0;
	int i, rc = 0;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		SRETURN(NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;

	obj_size = spl_obj_size(skc);

	if (skc->skc_flags & KMC_OFFSLAB)
		offslab_size = spl_offslab_size(skc);

	for (i = 0; i < sks->sks_objs; i++) {
		if (skc->skc_flags & KMC_OFFSLAB) {
			obj = kv_alloc(skc, offslab_size, flags);
			if (!obj)
				SGOTO(out, rc = -ENOMEM);
		} else {
			obj = base + spl_sks_size(skc) + (i * obj_size);
		}

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	list_for_each_entry(sko, &sks->sks_free_list, sko_list)
		if (skc->skc_ctor)
			skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
out:
	if (rc) {
		if (skc->skc_flags & KMC_OFFSLAB)
			list_for_each_entry_safe(sko, n, &sks->sks_free_list,
			    sko_list)
				kv_free(skc, sko->sko_addr, offslab_size);

		kv_free(skc, base, skc->skc_slab_size);
		sks = NULL;
	}

	SRETURN(sks);
}
/*
 * Remove a slab from complete or partial list, it must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;
	SENTRY;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects in to the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);

	SEXIT;
}
/*
 * Traverses all the partial slabs attached to a cache and free those
 * which are currently empty, and have not been touched for
 * skc_delay seconds to avoid thrashing.  The count argument is
 * passed to optionally cap the number of slabs reclaimed, a count
 * of zero means try and reclaim everything.  When flag is set we
 * always free an available slab regardless of age.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
{
	spl_kmem_slab_t *sks, *m;
	spl_kmem_obj_t *sko, *n;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);
	uint32_t size = 0;
	int i = 0;
	SENTRY;

	/*
	 * Move empty slabs and objects which have not been touched in
	 * skc_delay seconds on to private lists to be freed outside
	 * the spin lock.  This delay time is important to avoid thrashing
	 * however when flag is set the delay will not be used.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list,
	    sks_list) {
		/*
		 * All empty slabs are at the end of skc->skc_partial_list,
		 * therefore once a non-empty slab is found we can stop
		 * scanning.  Additionally, stop when reaching the target
		 * reclaim 'count' if a non-zero threshold is given.
		 */
		if ((sks->sks_ref > 0) || (count && i >= count))
			break;

		if (time_after(jiffies, sks->sks_age + skc->skc_delay * HZ) ||
		    flag) {
			spl_slab_free(sks, &sks_list, &sko_list);
			i++;
		}
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are
	 * run, any offslab objects are freed, and the slabs themselves
	 * are freed.  This is all done outside the skc->skc_lock since
	 * this allows the destructor to sleep, and allows us to perform
	 * a conditional reschedule when freeing a large number of
	 * objects and slabs back to the system.
	 */
	if (skc->skc_flags & KMC_OFFSLAB)
		size = spl_offslab_size(skc);

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);

		if (skc->skc_dtor)
			skc->skc_dtor(sko->sko_addr, skc->skc_private);

		if (skc->skc_flags & KMC_OFFSLAB)
			kv_free(skc, sko->sko_addr, size);
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
	}

	SEXIT;
}
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
	struct rb_node *node = root->rb_node;
	spl_kmem_emergency_t *ske;
	unsigned long address = (unsigned long)obj;

	while (node) {
		ske = container_of(node, spl_kmem_emergency_t, ske_node);

		if (address < (unsigned long)ske->ske_obj)
			node = node->rb_left;
		else if (address > (unsigned long)ske->ske_obj)
			node = node->rb_right;
		else
			return ske;
	}

	return NULL;
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;
	spl_kmem_emergency_t *ske_tmp;
	unsigned long address = (unsigned long)ske->ske_obj;

	while (*new) {
		ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

		parent = *new;
		if (address < (unsigned long)ske_tmp->ske_obj)
			new = &((*new)->rb_left);
		else if (address > (unsigned long)ske_tmp->ske_obj)
			new = &((*new)->rb_right);
		else
			return 0;
	}

	rb_link_node(&ske->ske_node, parent, new);
	rb_insert_color(&ske->ske_node, root);

	return 1;
}
/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
	spl_kmem_emergency_t *ske;
	int empty;
	SENTRY;

	/* Last chance use a partial slab if one now exists */
	spin_lock(&skc->skc_lock);
	empty = list_empty(&skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
	if (!empty)
		SRETURN(-EEXIST);

	ske = kmalloc(sizeof(*ske), flags);
	if (ske == NULL)
		SRETURN(-ENOMEM);

	ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
	if (ske->ske_obj == NULL) {
		kfree(ske);
		SRETURN(-ENOMEM);
	}

	spin_lock(&skc->skc_lock);
	empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
	if (likely(empty)) {
		skc->skc_obj_total++;
		skc->skc_obj_emergency++;
		if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
			skc->skc_obj_emergency_max = skc->skc_obj_emergency;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(!empty)) {
		kfree(ske->ske_obj);
		kfree(ske);
		SRETURN(-EINVAL);
	}

	if (skc->skc_ctor)
		skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);

	*obj = ske->ske_obj;

	SRETURN(0);
}
/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_emergency_t *ske;
	SENTRY;

	spin_lock(&skc->skc_lock);
	ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
	if (likely(ske)) {
		rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
		skc->skc_obj_emergency--;
		skc->skc_obj_total--;
	}
	spin_unlock(&skc->skc_lock);

	if (unlikely(ske == NULL))
		SRETURN(-ENOENT);

	if (skc->skc_dtor)
		skc->skc_dtor(ske->ske_obj, skc->skc_private);

	kfree(ske->ske_obj);
	kfree(ske);

	SRETURN(0);
}
/*
 * Release objects from the per-cpu magazine back to their slab.  The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
__spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	int i, count = MIN(flush, skm->skm_avail);
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	for (i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	    sizeof(void *) * skm->skm_avail);

	SEXIT;
}

static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	spin_lock(&skc->skc_lock);
	__spl_cache_flush(skc, skm, flush);
	spin_unlock(&skc->skc_lock);
}
static void
spl_magazine_age(void *data)
{
	spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
	spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_cpu == smp_processor_id());
	ASSERT(irqs_disabled());

	/* There are no available objects or they are too young to age out */
	if ((skm->skm_avail == 0) ||
	    time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
		return;

	/*
	 * Because we're executing in interrupt context we may have
	 * interrupted the holder of this lock.  To avoid a potential
	 * deadlock return if the lock is contended.
	 */
	if (!spin_trylock(&skc->skc_lock))
		return;

	__spl_cache_flush(skc, skm, skm->skm_refill);
	spin_unlock(&skc->skc_lock);
}
/*
 * Called regularly to keep a downward pressure on the cache.
 *
 * Objects older than skc->skc_delay seconds in the per-cpu magazines will
 * be returned to the caches.  This is done to prevent idle magazines from
 * holding memory which could be better used elsewhere.  The delay is
 * present to prevent thrashing the magazine.
 *
 * The newly released objects may result in empty partial slabs.  Those
 * slabs should be released to the system.  Otherwise moving the objects
 * out of the magazines is just wasted work.
 */
static void
spl_cache_age(void *data)
{
	spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
	taskqid_t id = 0;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	/* Dynamically disabled at run time */
	if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
		return;

	atomic_inc(&skc->skc_ref);

	if (!(skc->skc_flags & KMC_NOMAGAZINE))
		on_each_cpu(spl_magazine_age, skc, 1);

	spl_slab_reclaim(skc, skc->skc_reap, 0);

	while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
		id = taskq_dispatch_delay(
		    spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
		    ddi_get_lbolt() + skc->skc_delay / 3 * HZ);

		/* Destroy issued after dispatch immediately cancel it */
		if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
			taskq_cancel_id(spl_kmem_cache_taskq, id);
	}

	spin_lock(&skc->skc_lock);
	skc->skc_taskqid = id;
	spin_unlock(&skc->skc_lock);

	atomic_dec(&skc->skc_ref);
}
/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.  Also for
 * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min,
 * lower than this and we will fail.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size;

	if (skc->skc_flags & KMC_OFFSLAB) {
		*objs = spl_kmem_cache_obj_per_slab;
		*size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE);
		SRETURN(0);
	} else {
		sks_size = spl_sks_size(skc);
		obj_size = spl_obj_size(skc);

		if (skc->skc_flags & KMC_KMEM)
			max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
		else
			max_size = (spl_kmem_cache_max_size * 1024 * 1024);

		/* Power of two sized slab */
		for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
			*objs = (*size - sks_size) / obj_size;
			if (*objs >= spl_kmem_cache_obj_per_slab)
				SRETURN(0);
		}

		/*
		 * Unable to satisfy target objects per slab, fall back to
		 * allocating a maximally sized slab and assuming it can
		 * contain the minimum objects count use it.  If not fail.
		 */
		*size = max_size;
		*objs = (*size - sks_size) / obj_size;
		if (*objs >= (spl_kmem_cache_obj_per_slab_min))
			SRETURN(0);
	}

	SRETURN(-ENOSPC);
}
/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;

	/* Per-magazine sizes below assume a 4KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;   /* Minimum 4MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;  /* Minimum 2MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;  /* Minimum 256KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128; /* Minimum 128KiB per-magazine */
	else
		size = 256;

	SRETURN(size);
}
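
/*
 * Illustrative only (not part of the original source): with 4KiB pages,
 * a padded object size of 2048 bytes falls between PAGE_SIZE/4 and
 * PAGE_SIZE, so the table above selects a 128-entry magazine, i.e.
 * roughly 128 * 2048 bytes = 256KiB of cached objects per cpu before
 * objects age back to their slabs.
 */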
/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof(spl_kmem_magazine_t) +
	    sizeof(void *) * skc->skc_mag_size;

	skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		skm->skm_age = jiffies;
		skm->skm_cpu = cpu;
	}

	return skm;
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	int size = sizeof(spl_kmem_magazine_t) +
	    sizeof(void *) * skm->skm_size;

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);

	kmem_free(skm, size);
}
/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i;
	SENTRY;

	if (skc->skc_flags & KMC_NOMAGAZINE)
		SRETURN(0);

	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_online_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, i);
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			SRETURN(-ENOMEM);
		}
	}

	SRETURN(0);
}
/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i;
	SENTRY;

	if (skc->skc_flags & KMC_NOMAGAZINE) {
		SEXIT;
		return;
	}

	for_each_online_cpu(i) {
		skm = skc->skc_mag[i];
		spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	SEXIT;
}
/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused must be NULL
 * flags
 *	KMC_NOTOUCH	Disable cache object aging (unsupported)
 *	KMC_NODEBUG	Disable debugging (unsupported)
 *	KMC_NOHASH	Disable hashing (unsupported)
 *	KMC_QCACHE	Disable qcache (unsupported)
 *	KMC_NOMAGAZINE	Enabled for kmem/vmem, Disabled for Linux slab
 *	KMC_KMEM	Force kmem backed cache
 *	KMC_VMEM	Force vmem backed cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_OFFSLAB	Locate objects off the slab
 */
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor,
    spl_kmem_dtor_t dtor,
    spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags)
{
	spl_kmem_cache_t *skc;
	int rc;
	SENTRY;

	ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
	ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
	ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
	ASSERT(vmp == NULL);

	/*
	 * Allocate memory for a new cache and initialize it.  Unfortunately,
	 * this usually ends up being a large allocation of ~32k because
	 * we need to allocate enough memory for the worst case number of
	 * cpus in the magazine, skc_mag[NR_CPUS].  Because of this we
	 * explicitly pass KM_NODEBUG to suppress the kmem warning.
	 */
	skc = kmem_zalloc(sizeof(*skc), KM_SLEEP | KM_NODEBUG);
	if (skc == NULL)
		SRETURN(NULL);

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
	if (skc->skc_name == NULL) {
		kmem_free(skc, sizeof(*skc));
		SRETURN(NULL);
	}
	strncpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_reclaim = reclaim;
	skc->skc_private = priv;
	skc->skc_linux_cache = NULL;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	skc->skc_delay = SPL_KMEM_CACHE_DELAY;
	skc->skc_reap = SPL_KMEM_CACHE_REAP;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	skc->skc_emergency_tree = RB_ROOT;
	spin_lock_init(&skc->skc_lock);
	init_waitqueue_head(&skc->skc_waitq);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;
	skc->skc_obj_deadlock = 0;
	skc->skc_obj_emergency = 0;
	skc->skc_obj_emergency_max = 0;

	/*
	 * Verify the requested alignment restriction is sane.
	 */
	if (align) {
		VERIFY(ISP2(align));
		VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
		VERIFY3U(align, <=, PAGE_SIZE);
		skc->skc_obj_align = align;
	}

	/*
	 * When no specific type of slab is requested (kmem, vmem, or
	 * linuxslab) then select a cache type based on the object size
	 * and default tunables.
	 */
	if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {

		/*
		 * Objects smaller than spl_kmem_cache_slab_limit can
		 * use the Linux slab for better space-efficiency.  By
		 * default this functionality is disabled until its
		 * performance characteristics are fully understood.
		 */
		if (spl_kmem_cache_slab_limit &&
		    size <= (size_t)spl_kmem_cache_slab_limit)
			skc->skc_flags |= KMC_SLAB;

		/*
		 * Small objects, less than spl_kmem_cache_kmem_limit per
		 * object should use kmem because their slabs are small.
		 */
		else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit)
			skc->skc_flags |= KMC_KMEM;

		/*
		 * All other objects are considered large and are placed
		 * on vmem backed slabs.
		 */
		else
			skc->skc_flags |= KMC_VMEM;
	}

	/*
	 * Given the type of slab allocate the required resources.
	 */
	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		rc = spl_slab_size(skc,
		    &skc->skc_slab_objs, &skc->skc_slab_size);
		if (rc)
			SGOTO(out, rc);

		rc = spl_magazine_create(skc);
		if (rc)
			SGOTO(out, rc);
	} else {
		skc->skc_linux_cache = kmem_cache_create(
		    skc->skc_name, size, align, 0, NULL);
		if (skc->skc_linux_cache == NULL)
			SGOTO(out, rc = ENOMEM);

		kmem_cache_set_allocflags(skc, __GFP_COMP);
		skc->skc_flags |= KMC_NOMAGAZINE;
	}

	if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
		skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
		    spl_cache_age, skc, TQ_SLEEP,
		    ddi_get_lbolt() + skc->skc_delay / 3 * HZ);

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	SRETURN(skc);
out:
	kmem_free(skc->skc_name, skc->skc_name_size);
	kmem_free(skc, sizeof(*skc));
	SRETURN(NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
	ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	taskqid_t id;
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed tasks */
	VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	spin_lock(&skc->skc_lock);
	id = skc->skc_taskqid;
	spin_unlock(&skc->skc_lock);

	taskq_cancel_id(spl_kmem_cache_taskq, id);

	/*
	 * Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy.
	 */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
		spl_magazine_destroy(skc);
		spl_slab_reclaim(skc, 0, 1);
	} else {
		ASSERT(skc->skc_flags & KMC_SLAB);
		kmem_cache_destroy(skc->skc_linux_cache);
	}

	spin_lock(&skc->skc_lock);

	/*
	 * Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
	 */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT3U(skc->skc_obj_emergency, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	kmem_free(skc->skc_name, skc->skc_name_size);
	spin_unlock(&skc->skc_lock);

	kmem_free(skc, sizeof(*skc));

	SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
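
/*
 * Illustrative only (not part of the original source): a minimal cache
 * lifecycle as seen by an SPL consumer.  The object type and the
 * constructor/destructor names below are hypothetical, and the exact
 * ctor/dtor signatures are assumptions for this sketch;
 * spl_kmem_cache_free() is defined later in this file.
 *
 *	spl_kmem_cache_t *cache;
 *
 *	cache = spl_kmem_cache_create("my-cache", sizeof(my_obj_t), 0,
 *	    my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *
 *	my_obj_t *obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, obj);
 *	spl_kmem_cache_destroy(cache);
 */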
/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return sko->sko_addr;
}
/*
 * Generic slab allocation function to run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static void
spl_cache_grow_work(void *data)
{
	spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
	spl_kmem_cache_t *skc = ska->ska_cache;
	spl_kmem_slab_t *sks;

	sks = spl_slab_alloc(skc, ska->ska_flags | __GFP_NORETRY | KM_NODEBUG);
	spin_lock(&skc->skc_lock);
	if (sks) {
		skc->skc_slab_total++;
		skc->skc_obj_total += sks->sks_objs;
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
	}

	atomic_dec(&skc->skc_ref);
	clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
	clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
	wake_up_all(&skc->skc_waitq);
	spin_unlock(&skc->skc_lock);

	kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
	return !test_bit(KMC_BIT_GROWING, &skc->skc_flags);
}
/*
 * No available objects on any slabs, create a new slab.  Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
	int remaining, rc;
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT((skc->skc_flags & KMC_SLAB) == 0);
	might_sleep();
	*obj = NULL;

	/*
	 * Before allocating a new slab wait for any reaping to complete and
	 * then return so the local magazine can be rechecked for new objects.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
		    TASK_UNINTERRUPTIBLE);
		SRETURN(rc ? rc : -EAGAIN);
	}

	/*
	 * This is handled by dispatching a work request to the global work
	 * queue.  This allows us to asynchronously allocate a new slab while
	 * retaining the ability to safely fall back to a smaller synchronous
	 * allocations to ensure forward progress is always maintained.
	 */
	if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
		spl_kmem_alloc_t *ska;

		ska = kmalloc(sizeof(*ska), flags);
		if (ska == NULL) {
			clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
			wake_up_all(&skc->skc_waitq);
			SRETURN(-ENOMEM);
		}

		atomic_inc(&skc->skc_ref);
		ska->ska_cache = skc;
		ska->ska_flags = flags & ~__GFP_FS;
		taskq_init_ent(&ska->ska_tqe);
		taskq_dispatch_ent(spl_kmem_cache_taskq,
		    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
	}

	/*
	 * The goal here is to only detect the rare case where a virtual slab
	 * allocation has deadlocked.  We must be careful to minimize the use
	 * of emergency objects which are more expensive to track.  Therefore,
	 * we set a very long timeout for the asynchronous allocation and if
	 * the timeout is reached the cache is flagged as deadlocked.  From
	 * this point only new emergency objects will be allocated until the
	 * asynchronous allocation completes and clears the deadlocked flag.
	 */
	if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
		rc = spl_emergency_alloc(skc, flags, obj);
	} else {
		remaining = wait_event_timeout(skc->skc_waitq,
		    spl_cache_grow_wait(skc), HZ);

		if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) {
			spin_lock(&skc->skc_lock);
			if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
				set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
				skc->skc_obj_deadlock++;
			}
			spin_unlock(&skc->skc_lock);
		}

		rc = -ENOMEM;
	}

	SRETURN(rc);
}
/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released; however, if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int count = 0, rc, refill;
	void *obj = NULL;
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available, we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			local_irq_enable();
			rc = spl_cache_grow(skc, flags, &obj);
			local_irq_disable();

			/* Emergency object for immediate use by caller */
			if (rc == 0 && obj != NULL)
				SRETURN(obj);

			if (rc)
				SGOTO(out, rc);

			/* Rescheduled to different CPU, skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				SGOTO(out, rc);

			/* Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill. */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		    spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/* Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it. */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++count) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(count < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] =
			    spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	SRETURN(NULL);
}
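
/*
 * Worked example of the refill sizing above (illustrative numbers only):
 * with a magazine of skm_size = 32, skm_avail = 20 and skm_refill = 16,
 * the initial pass computes refill = MIN(16, 32 - 20) = 12, so at most
 * twelve objects are taken from the partial slabs.  The recalculation
 * performed after sleeping in spl_cache_grow() shrinks this further if
 * allocations on this CPU consumed magazine slots in the meantime.
 */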
/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = spl_sko_from_obj(skc, obj);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/* Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail. */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/* Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation. */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}

	SEXIT;
}
/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	void *obj = NULL;
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	ASSERT(flags & KM_SLEEP);
	atomic_inc(&skc->skc_ref);

	/*
	 * Allocate directly from a Linux slab.  All optimizations are left
	 * to the underlying cache; we only need to guarantee that KM_SLEEP
	 * callers will never fail.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		struct kmem_cache *slc = skc->skc_linux_cache;

		do {
			obj = kmem_cache_alloc(slc, flags | __GFP_COMP);
			if (obj && skc->skc_ctor)
				skc->skc_ctor(obj, skc->skc_private, flags);

		} while ((obj == NULL) && !(flags & KM_NOSLEEP));

		atomic_dec(&skc->skc_ref);
		SRETURN(obj);
	}

restart:
	local_irq_disable();

	/* Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache. */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
	    skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
	    skm->skm_size, skm->skm_refill, skm->skm_avail);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
		skm->skm_age = jiffies;
	} else {
		obj = spl_cache_refill(skc, skm, flags);
		if (obj == NULL)
			SGOTO(restart, obj = NULL);
	}

	local_irq_enable();
	ASSERT(obj);
	ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

	/* Pre-emptively migrate object to CPU L1 cache */
	prefetchw(obj);
	atomic_dec(&skc->skc_ref);

	SRETURN(obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
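
/*
 * Illustrative call site (hypothetical my_cache/my_obj_t, assuming the
 * cache was created elsewhere with spl_kmem_cache_create()).  KM_SLEEP
 * is asserted above, so the allocation may block but will not fail:
 *
 *	my_obj_t *p = spl_kmem_cache_alloc(my_cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(my_cache, p);
 */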
/*
 * Free an object back to the local per-cpu magazine.  There is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush the entire magazine back to
 * the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	atomic_inc(&skc->skc_ref);

	/*
	 * Free the object back to the underlying Linux slab.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		if (skc->skc_dtor)
			skc->skc_dtor(obj, skc->skc_private);

		kmem_cache_free(skc->skc_linux_cache, obj);
		SGOTO(out, 0);
	}

	/*
	 * Only virtual slabs may have emergency objects and these objects
	 * are guaranteed to have physical addresses.  They must be removed
	 * from the tree of emergency objects and then freed.
	 */
	if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
		SGOTO(out, spl_emergency_free(skc, obj));

	local_irq_save(flags);

	/* Safe to update per-cpu structure without lock, but
	 * because no remote memory allocation tracking is performed
	 * it is entirely possible to allocate an object from one
	 * CPU cache and return it to another. */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/* Per-CPU cache full, flush it to make space */
	if (unlikely(skm->skm_avail >= skm->skm_size))
		spl_cache_flush(skc, skm, skm->skm_refill);

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);
out:
	atomic_dec(&skc->skc_ref);

	SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
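
/*
 * Example of the flush above (illustrative numbers only): with
 * skm_size = 32 and skm_refill = 16, a free which finds the magazine
 * full (skm_avail == 32) first returns 16 objects to their slabs via
 * spl_cache_flush(), then stores the freed object, leaving 17 of 32
 * slots in use.
 */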
/*
 * The generic shrinker function for all caches.  Under Linux a shrinker
 * may not be tightly coupled with a slab cache.  In fact Linux always
 * systematically tries calling all registered shrinker callbacks which
 * report that they contain unused objects.  Because of this we only
 * register one shrinker function in the shim layer for all slab caches.
 * We always attempt to shrink all caches when this generic shrinker
 * is called.  The shrinker should return the number of free objects
 * in the cache when called with nr_to_scan == 0 but not attempt to
 * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
 * objects should be freed, which differs from Solaris semantics.
 * Solaris semantics are to free all available objects which may (and
 * probably will) be more objects than the requested nr_to_scan.
 */
static int
__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
    struct shrink_control *sc)
{
	spl_kmem_cache_t *skc;
	int alloc = 0;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		if (sc->nr_to_scan)
			spl_kmem_cache_reap_now(skc,
			    MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));

		/*
		 * Presume everything alloc'ed is reclaimable; this ensures
		 * we are called again with nr_to_scan > 0 so we can try to
		 * reclaim.  The exact number is not important either, so
		 * we forgo taking this already highly contended lock.
		 */
		alloc += skc->skc_obj_alloc;
	}
	up_read(&spl_kmem_cache_sem);

	/*
	 * When KMC_RECLAIM_ONCE is set allow only a single reclaim pass.
	 * This functionality only exists to work around a rare issue where
	 * shrink_slabs() is repeatedly invoked by many cores causing the
	 * system to thrash.
	 */
	if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
		return (-1);

	return (MAX(alloc, 0));
}

SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
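
/*
 * Worked example of the per-cache scan count above (illustrative
 * numbers only): for sc->nr_to_scan = 128 and a cache with
 * skc_slab_objs = 8, fls64(8) = 4, so MAX(128 >> 4, 1) = 8 objects
 * are requested from that cache.  Caches with more objects per slab
 * shift by more and are therefore asked for proportionally fewer.
 */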
/*
 * Call the registered reclaim function for a cache.  Depending on how
 * many and which objects are released it may simply repopulate the
 * local magazine which will then need to age-out.  Objects which cannot
 * fit in the magazine will be released back to their slabs which will
 * also need to age out before being released.  This is all just best
 * effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	atomic_inc(&skc->skc_ref);

	/*
	 * Execute the registered reclaim callback if it exists.  The
	 * per-cpu caches will be drained when KMC_EXPIRE_MEM is set.
	 */
	if (skc->skc_flags & KMC_SLAB) {
		if (skc->skc_reclaim)
			skc->skc_reclaim(skc->skc_private);

		if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
			kmem_cache_shrink(skc->skc_linux_cache);

		SGOTO(out, 0);
	}

	/*
	 * Prevent concurrent cache reaping when contended.
	 */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		SGOTO(out, 0);

	/*
	 * When a reclaim function is available it may be invoked repeatedly
	 * until at least a single slab can be freed.  This ensures that we
	 * do free memory back to the system.  This helps minimize the chance
	 * of an OOM event when the bulk of memory is used by the slab.
	 *
	 * When free slabs are already available the reclaim callback will be
	 * skipped.  Additionally, if no forward progress is detected despite
	 * a reclaim function the cache will be skipped to avoid deadlock.
	 *
	 * Longer term this would be the correct place to add the code which
	 * repacks the slabs in order to minimize fragmentation.
	 */
	if (skc->skc_reclaim) {
		uint64_t objects = UINT64_MAX;
		int do_reclaim;

		do {
			spin_lock(&skc->skc_lock);
			do_reclaim =
			    (skc->skc_slab_total > 0) &&
			    ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) &&
			    (skc->skc_obj_alloc < objects);

			objects = skc->skc_obj_alloc;
			spin_unlock(&skc->skc_lock);

			if (do_reclaim)
				skc->skc_reclaim(skc->skc_private);

		} while (do_reclaim);
	}

	/* Reclaim from the magazine then the slabs ignoring age and delay. */
	if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
		spl_kmem_magazine_t *skm;
		unsigned long irq_flags;

		local_irq_save(irq_flags);
		skm = skc->skc_mag[smp_processor_id()];
		spl_cache_flush(skc, skm, skm->skm_avail);
		local_irq_restore(irq_flags);
	}

	spl_slab_reclaim(skc, count, 1);
	clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
	atomic_dec(&skc->skc_ref);

	SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
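
/*
 * Example trace of the reclaim loop above (illustrative numbers only):
 * with skc_slab_total = 4 and skc_slab_alloc = 4 there are no free
 * slabs, so the callback runs; it keeps running while skc_obj_alloc
 * decreases on each pass.  The loop exits once a slab becomes entirely
 * free (slab_total - slab_alloc != 0) or skc_obj_alloc stops falling,
 * which is the no-forward-progress guard mentioned above.
 */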
/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	struct shrink_control sc;

	sc.nr_to_scan = KMC_REAP_CHUNK;
	sc.gfp_mask = GFP_KERNEL;

	__spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/* Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer. */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/* Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii. */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		    *((uint8_t *)kd->kd_addr),
		    *((uint8_t *)kd->kd_addr + 2),
		    *((uint8_t *)kd->kd_addr + 4),
		    *((uint8_t *)kd->kd_addr + 6),
		    *((uint8_t *)kd->kd_addr + 8),
		    *((uint8_t *)kd->kd_addr + 10),
		    *((uint8_t *)kd->kd_addr + 12),
		    *((uint8_t *)kd->kd_addr + 14));
	}

	return (str);
}
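
/*
 * Example of the hex fallback above: note it samples every other byte,
 * so a region whose first sixteen bytes are de ad be ef 01 02 03 04
 * 05 06 07 08 09 0a 0b 0c is rendered as "debe01030507090b"
 * (offsets 0, 2, 4, ..., 14).
 */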
static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);

	return (0);
}

static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];
	SENTRY;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
	SEXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
#define spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
static void
spl_kmem_init_globals(void)
{
	struct zone *zone;

	/* For now all zones are included, it may be wise to restrict
	 * this to normal and highmem zones if we see problems. */
	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		minfree += min_wmark_pages(zone);
		desfree += low_wmark_pages(zone);
		lotsfree += high_wmark_pages(zone);
	}

	/* Solaris default values */
	swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
	swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
}
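
/*
 * Worked example of the Solaris defaults above, assuming 4 KiB pages
 * (PAGE_SHIFT = 12) and 1 GiB of memory (physmem = 262144 pages):
 * swapfs_minfree = MAX(512, 32768) = 32768 pages (1/8 of memory) and
 * swapfs_reserve = MIN(1024, 16384) = 1024 pages (4 MiB).
 */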
/*
 * Called at module init when it is safe to use spl_kallsyms_lookup_name()
 */
int
spl_kmem_init_kallsyms_lookup(void)
{
#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
	first_online_pgdat_fn = (first_online_pgdat_t)
		spl_kallsyms_lookup_name("first_online_pgdat");
	if (!first_online_pgdat_fn) {
		printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
		return -EFAULT;
	}
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
	next_online_pgdat_fn = (next_online_pgdat_t)
		spl_kallsyms_lookup_name("next_online_pgdat");
	if (!next_online_pgdat_fn) {
		printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
		return -EFAULT;
	}
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
	next_zone_fn = (next_zone_t)
		spl_kallsyms_lookup_name("next_zone");
	if (!next_zone_fn) {
		printk(KERN_ERR "Error: Unknown symbol next_zone\n");
		return -EFAULT;
	}
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
	pgdat_list_addr = *(struct pglist_data **)
		spl_kallsyms_lookup_name("pgdat_list");
	if (!pgdat_list_addr) {
		printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
		return -EFAULT;
	}
# endif /* HAVE_PGDAT_LIST */
#endif /* HAVE_PGDAT_HELPERS */

#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
	get_zone_counts_fn = (get_zone_counts_t)
		spl_kallsyms_lookup_name("get_zone_counts");
	if (!get_zone_counts_fn) {
		printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
		return -EFAULT;
	}
#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */

	/*
	 * It is now safe to initialize the global tunings which rely on
	 * the use of the for_each_zone() macro.  This macro in turn
	 * depends on the *_pgdat symbols which are now available.
	 */
	spl_kmem_init_globals();

#ifndef HAVE_SHRINK_DCACHE_MEMORY
	/* When shrink_dcache_memory_fn == NULL support is disabled */
	shrink_dcache_memory_fn = (shrink_dcache_memory_t)
		spl_kallsyms_lookup_name("shrink_dcache_memory");
#endif /* HAVE_SHRINK_DCACHE_MEMORY */

#ifndef HAVE_SHRINK_ICACHE_MEMORY
	/* When shrink_icache_memory_fn == NULL support is disabled */
	shrink_icache_memory_fn = (shrink_icache_memory_t)
		spl_kallsyms_lookup_name("shrink_icache_memory");
#endif /* HAVE_SHRINK_ICACHE_MEMORY */

	return 0;
}
int
spl_kmem_init(void)
{
	int rc = 0;
	SENTRY;

#ifdef DEBUG_KMEM
	kmem_alloc_used_set(0);
	vmem_alloc_used_set(0);

	spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
	spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM */

	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);
	spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
	    1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);

	spl_register_shrinker(&spl_kmem_cache_shrinker);

	SRETURN(rc);
}

void
spl_kmem_fini(void)
{
	SENTRY;

	spl_unregister_shrinker(&spl_kmem_cache_shrinker);
	taskq_destroy(spl_kmem_cache_taskq);

#ifdef DEBUG_KMEM
	/* Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time. */
	if (kmem_alloc_used_read() != 0)
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "kmem leaked %ld/%ld bytes\n",
		    kmem_alloc_used_read(), kmem_alloc_max);

	if (vmem_alloc_used_read() != 0)
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "vmem leaked %ld/%ld bytes\n",
		    vmem_alloc_used_read(), vmem_alloc_max);

	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
	spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */

	SEXIT;
}