/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Kmem Implementation.
\*****************************************************************************/
#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_KMEM
/*
 * The minimum amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_min
 * multiplied by the number of zones and is sized based on that.
 */
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);

/*
 * The desired amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_low
 * multiplied by the number of zones and is sized based on that.
 * Assuming all zones are being used roughly equally, when we drop
 * below this threshold asynchronous page reclamation is triggered.
 */
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);

/*
 * When above this amount of memory measured in pages the system is
 * determined to have enough free memory.  This is similar to Linux's
 * zone->pages_high multiplied by the number of zones and is sized based
 * on that.  Assuming all zones are being used roughly equally, when
 * asynchronous page reclamation reaches this threshold it stops.
 */
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);

/* Unused, always 0 in this implementation */
pgcnt_t needfree = 0;
EXPORT_SYMBOL(needfree);

pgcnt_t swapfs_minfree = 0;
EXPORT_SYMBOL(swapfs_minfree);

pgcnt_t swapfs_reserve = 0;
EXPORT_SYMBOL(swapfs_reserve);
vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);
#ifndef HAVE_GET_VMALLOC_INFO
get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_vmalloc_info_fn);
#endif /* HAVE_GET_VMALLOC_INFO */

#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(first_online_pgdat_fn);
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_online_pgdat_fn);
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
next_zone_t next_zone_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_zone_fn);
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
EXPORT_SYMBOL(pgdat_list_addr);
# endif /* HAVE_PGDAT_LIST */

#endif /* HAVE_PGDAT_HELPERS */

#ifdef NEED_GET_ZONE_COUNTS
# ifndef HAVE_GET_ZONE_COUNTS
get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_zone_counts_fn);
# endif /* HAVE_GET_ZONE_COUNTS */
unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
        unsigned long active;
        unsigned long inactive;
        unsigned long free;

        get_zone_counts(&active, &inactive, &free);
        switch (item) {
        case SPL_NR_FREE_PAGES: return free;
        case SPL_NR_INACTIVE:   return inactive;
        case SPL_NR_ACTIVE:     return active;
        default:                ASSERT(0); /* Unsupported */
        }

        return 0;
}
#else
# ifdef HAVE_GLOBAL_PAGE_STATE
unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
        unsigned long pages = 0;

        switch (item) {
        case SPL_NR_FREE_PAGES:
# ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
                pages += global_page_state(NR_FREE_PAGES);
# endif
                break;
        case SPL_NR_INACTIVE:
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
                pages += global_page_state(NR_INACTIVE);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
                pages += global_page_state(NR_INACTIVE_ANON);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
                pages += global_page_state(NR_INACTIVE_FILE);
# endif
                break;
        case SPL_NR_ACTIVE:
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
                pages += global_page_state(NR_ACTIVE);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
                pages += global_page_state(NR_ACTIVE_ANON);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
                pages += global_page_state(NR_ACTIVE_FILE);
# endif
                break;
        default:
                ASSERT(0); /* Unsupported */
        }

        return pages;
}
# else
# error "Both global_page_state() and get_zone_counts() unavailable"
# endif /* HAVE_GLOBAL_PAGE_STATE */
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
invalidate_inodes_t invalidate_inodes_fn = SYMBOL_POISON;
EXPORT_SYMBOL(invalidate_inodes_fn);
#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */

#ifndef HAVE_SHRINK_DCACHE_MEMORY
shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_dcache_memory_fn);
#endif /* HAVE_SHRINK_DCACHE_MEMORY */

#ifndef HAVE_SHRINK_ICACHE_MEMORY
shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_icache_memory_fn);
#endif /* HAVE_SHRINK_ICACHE_MEMORY */
pgcnt_t
spl_kmem_availrmem(void)
{
        /* The amount of easily available memory */
        return (spl_global_page_state(SPL_NR_FREE_PAGES) +
                spl_global_page_state(SPL_NR_INACTIVE));
}
EXPORT_SYMBOL(spl_kmem_availrmem);
size_t
vmem_size(vmem_t *vmp, int typemask)
{
        struct vmalloc_info vmi;
        size_t size = 0;

        ASSERT(vmp == NULL);
        ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE));

        get_vmalloc_info(&vmi);
        if (typemask & VMEM_ALLOC)
                size += (size_t)vmi.used;

        if (typemask & VMEM_FREE)
                size += (size_t)(VMALLOC_TOTAL - vmi.used);

        return size;
}
EXPORT_SYMBOL(vmem_size);
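
/*
 * Example (illustrative sketch, not part of the original source): the
 * total size of the virtual address space can be queried with both
 * type bits set, e.g.
 *
 *     size_t total = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
 *
 * which by the arithmetic above always equals VMALLOC_TOTAL.
 */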
int
kmem_debugging(void)
{
        return 0;
}
EXPORT_SYMBOL(kmem_debugging);
#ifndef HAVE_KVASPRINTF
/* Simplified asprintf. */
char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
        unsigned int len;
        char *p;
        va_list aq;

        va_copy(aq, ap);
        len = vsnprintf(NULL, 0, fmt, aq);
        va_end(aq);

        p = kmalloc(len+1, gfp);
        if (!p)
                return NULL;

        vsnprintf(p, len+1, fmt, ap);

        return p;
}
EXPORT_SYMBOL(kvasprintf);
#endif /* HAVE_KVASPRINTF */
char *
kmem_vasprintf(const char *fmt, va_list ap)
{
        va_list aq;
        char *ptr;

        do {
                va_copy(aq, ap);
                ptr = kvasprintf(GFP_KERNEL, fmt, aq);
                va_end(aq);
        } while (ptr == NULL);

        return ptr;
}
EXPORT_SYMBOL(kmem_vasprintf);
char *
kmem_asprintf(const char *fmt, ...)
{
        va_list ap;
        char *ptr;

        do {
                va_start(ap, fmt);
                ptr = kvasprintf(GFP_KERNEL, fmt, ap);
                va_end(ap);
        } while (ptr == NULL);

        return ptr;
}
EXPORT_SYMBOL(kmem_asprintf);
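
/*
 * Example usage (illustrative sketch, not part of the original source;
 * the format arguments are hypothetical).  kmem_asprintf() loops until
 * the allocation succeeds, so the returned string is never NULL and
 * must be released with strfree():
 *
 *     char *name = kmem_asprintf("%s-%d", "worker", 42);
 *     ...
 *     strfree(name);
 */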
static char *
__strdup(const char *str, int flags)
{
        char *ptr;
        int n;

        n = strlen(str);
        ptr = kmalloc_nofail(n + 1, flags);
        if (ptr)
                memcpy(ptr, str, n + 1);

        return ptr;
}

char *
strdup(const char *str)
{
        return __strdup(str, KM_SLEEP);
}
EXPORT_SYMBOL(strdup);

void
strfree(char *str)
{
        kfree(str);
}
EXPORT_SYMBOL(strfree);
/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * the SPL will keep track of the total memory allocated, and
 * report any memory leaked when the module is unloaded.
 */
#ifdef DEBUG_KMEM

/* Shim layer memory accounting */
# ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
# else /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
# endif /* HAVE_ATOMIC64_T */

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
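
/*
 * The kmem_alloc_used_add()/_sub()/_read() helpers used throughout this
 * file are provided by the kmem headers.  As a rough sketch (an assumption
 * for illustration, not the authoritative definitions) they reduce to the
 * matching atomic operations, e.g. with HAVE_ATOMIC64_T:
 *
 *     # define kmem_alloc_used_add(size)  atomic64_add(size, &kmem_alloc_used)
 *     # define kmem_alloc_used_sub(size)  atomic64_sub(size, &kmem_alloc_used)
 *     # define kmem_alloc_used_read()     atomic64_read(&kmem_alloc_used)
 */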
/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straightforward.
 *
 * Not surprisingly with debugging enabled the xmem_locks are very highly
 * contended particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 */
# ifdef DEBUG_KMEM_TRACKING

# define KMEM_HASH_BITS         10
# define KMEM_TABLE_SIZE        (1 << KMEM_HASH_BITS)

# define VMEM_HASH_BITS         10
# define VMEM_TABLE_SIZE        (1 << VMEM_HASH_BITS)

typedef struct kmem_debug {
        struct hlist_node kd_hlist;     /* Hash node linkage */
        struct list_head kd_list;       /* List of all allocations */
        void *kd_addr;                  /* Allocation pointer */
        size_t kd_size;                 /* Allocation size */
        const char *kd_func;            /* Allocation function */
        int kd_line;                    /* Allocation line */
} kmem_debug_t;

spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;

EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
    const void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kmem_debug *p;
        unsigned long flags;
        SENTRY;

        spin_lock_irqsave(lock, flags);

        head = &table[hash_ptr(addr, bits)];
        hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
                if (p->kd_addr == addr) {
                        hlist_del_init(&p->kd_hlist);
                        list_del_init(&p->kd_list);
                        spin_unlock_irqrestore(lock, flags);
                        SRETURN(p);
                }
        }

        spin_unlock_irqrestore(lock, flags);

        SRETURN(NULL);
}
void *
kmem_alloc_track(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
        void *ptr = NULL;
        kmem_debug_t *dptr;
        unsigned long irq_flags;
        SENTRY;

        /* Function may be called with KM_NOSLEEP so failure is possible */
        dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
            flags & ~__GFP_ZERO);

        if (unlikely(dptr == NULL)) {
                SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
                    "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
                    sizeof(kmem_debug_t), flags, func, line,
                    kmem_alloc_used_read(), kmem_alloc_max);
        } else {
                /*
                 * Marked unlikely because we should never be doing this,
                 * we tolerate up to 2 pages but a single page is best.
                 */
                if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
                        SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
                            "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
                            (unsigned long long) size, flags, func, line,
                            kmem_alloc_used_read(), kmem_alloc_max);
                        spl_debug_dumpstack(NULL);
                }

                /*
                 * We use __strdup() below because the string pointed to by
                 * __FUNCTION__ might not be available by the time we want
                 * to print it since the module might have been unloaded.
                 * This can only fail in the KM_NOSLEEP case.
                 */
                dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
                if (unlikely(dptr->kd_func == NULL)) {
                        kfree(dptr);
                        SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
                            "debug __strdup() at %s:%d failed (%lld/%llu)\n",
                            func, line, kmem_alloc_used_read(), kmem_alloc_max);
                        goto out;
                }

                /* Use the correct allocator */
                if (node_alloc) {
                        ASSERT(!(flags & __GFP_ZERO));
                        ptr = kmalloc_node_nofail(size, flags, node);
                } else if (flags & __GFP_ZERO) {
                        ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
                } else {
                        ptr = kmalloc_nofail(size, flags);
                }

                if (unlikely(ptr == NULL)) {
                        kfree(dptr->kd_func);
                        kfree(dptr);
                        SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
                            "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
                            (unsigned long long) size, flags, func, line,
                            kmem_alloc_used_read(), kmem_alloc_max);
                        goto out;
                }

                kmem_alloc_used_add(size);
                if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
                        kmem_alloc_max = kmem_alloc_used_read();

                INIT_HLIST_NODE(&dptr->kd_hlist);
                INIT_LIST_HEAD(&dptr->kd_list);

                dptr->kd_addr = ptr;
                dptr->kd_size = size;
                dptr->kd_line = line;

                spin_lock_irqsave(&kmem_lock, irq_flags);
                hlist_add_head_rcu(&dptr->kd_hlist,
                    &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
                list_add_tail(&dptr->kd_list, &kmem_list);
                spin_unlock_irqrestore(&kmem_lock, irq_flags);

                SDEBUG_LIMIT(SD_INFO,
                    "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
                    (unsigned long long) size, flags, func, line, ptr,
                    kmem_alloc_used_read(), kmem_alloc_max);
        }
out:
        SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);
void
kmem_free_track(const void *ptr, size_t size)
{
        kmem_debug_t *dptr;
        SENTRY;

        ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
            (unsigned long long) size);

        dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);

        /* Must exist in hash due to kmem_alloc() */
        ASSERT(dptr);

        /* Size must match */
        ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
            "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
            (unsigned long long) size, dptr->kd_func, dptr->kd_line);

        kmem_alloc_used_sub(size);
        SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
            (unsigned long long) size, kmem_alloc_used_read(),
            kmem_alloc_max);

        kfree(dptr->kd_func);

        memset(dptr, 0x5a, sizeof(kmem_debug_t));
        kfree(dptr);

        memset((void *)ptr, 0x5a, size);
        kfree(ptr);

        SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);
void *
vmem_alloc_track(size_t size, int flags, const char *func, int line)
{
        void *ptr = NULL;
        kmem_debug_t *dptr;
        unsigned long irq_flags;
        SENTRY;

        ASSERT(flags & KM_SLEEP);

        /* Function may be called with KM_NOSLEEP so failure is possible */
        dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
            flags & ~__GFP_ZERO);
        if (unlikely(dptr == NULL)) {
                SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
                    "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
                    sizeof(kmem_debug_t), flags, func, line,
                    vmem_alloc_used_read(), vmem_alloc_max);
        } else {
                /*
                 * We use __strdup() below because the string pointed to by
                 * __FUNCTION__ might not be available by the time we want
                 * to print it, since the module might have been unloaded.
                 * This can never fail because we have already asserted
                 * that flags is KM_SLEEP.
                 */
                dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
                if (unlikely(dptr->kd_func == NULL)) {
                        kfree(dptr);
                        SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
                            "debug __strdup() at %s:%d failed (%lld/%llu)\n",
                            func, line, vmem_alloc_used_read(), vmem_alloc_max);
                        goto out;
                }

                /* Use the correct allocator */
                if (flags & __GFP_ZERO) {
                        ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
                } else {
                        ptr = vmalloc_nofail(size, flags);
                }

                if (unlikely(ptr == NULL)) {
                        kfree(dptr->kd_func);
                        kfree(dptr);
                        SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
                            "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
                            (unsigned long long) size, flags, func, line,
                            vmem_alloc_used_read(), vmem_alloc_max);
                        goto out;
                }

                vmem_alloc_used_add(size);
                if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
                        vmem_alloc_max = vmem_alloc_used_read();

                INIT_HLIST_NODE(&dptr->kd_hlist);
                INIT_LIST_HEAD(&dptr->kd_list);

                dptr->kd_addr = ptr;
                dptr->kd_size = size;
                dptr->kd_line = line;

                spin_lock_irqsave(&vmem_lock, irq_flags);
                hlist_add_head_rcu(&dptr->kd_hlist,
                    &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
                list_add_tail(&dptr->kd_list, &vmem_list);
                spin_unlock_irqrestore(&vmem_lock, irq_flags);

                SDEBUG_LIMIT(SD_INFO,
                    "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
                    (unsigned long long) size, flags, func, line,
                    ptr, vmem_alloc_used_read(), vmem_alloc_max);
        }
out:
        SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);
void
vmem_free_track(const void *ptr, size_t size)
{
        kmem_debug_t *dptr;
        SENTRY;

        ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
            (unsigned long long) size);

        dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);

        /* Must exist in hash due to vmem_alloc() */
        ASSERT(dptr);

        /* Size must match */
        ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
            "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
            (unsigned long long) size, dptr->kd_func, dptr->kd_line);

        vmem_alloc_used_sub(size);
        SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
            (unsigned long long) size, vmem_alloc_used_read(),
            vmem_alloc_max);

        kfree(dptr->kd_func);

        memset(dptr, 0x5a, sizeof(kmem_debug_t));
        kfree(dptr);

        memset((void *)ptr, 0x5a, size);
        vfree(ptr);

        SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);
# else /* DEBUG_KMEM_TRACKING */

void *
kmem_alloc_debug(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
        void *ptr;
        SENTRY;

        /*
         * Marked unlikely because we should never be doing this,
         * we tolerate up to 2 pages but a single page is best.
         */
        if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
                SDEBUG(SD_CONSOLE | SD_WARNING,
                    "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
                    (unsigned long long) size, flags, func, line,
                    kmem_alloc_used_read(), kmem_alloc_max);
                spl_debug_dumpstack(NULL);
        }

        /* Use the correct allocator */
        if (node_alloc) {
                ASSERT(!(flags & __GFP_ZERO));
                ptr = kmalloc_node_nofail(size, flags, node);
        } else if (flags & __GFP_ZERO) {
                ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
        } else {
                ptr = kmalloc_nofail(size, flags);
        }

        if (unlikely(ptr == NULL)) {
                SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
                    "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
                    (unsigned long long) size, flags, func, line,
                    kmem_alloc_used_read(), kmem_alloc_max);
        } else {
                kmem_alloc_used_add(size);
                if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
                        kmem_alloc_max = kmem_alloc_used_read();

                SDEBUG_LIMIT(SD_INFO,
                    "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
                    (unsigned long long) size, flags, func, line, ptr,
                    kmem_alloc_used_read(), kmem_alloc_max);
        }

        SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
kmem_free_debug(const void *ptr, size_t size)
{
        SENTRY;

        ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
            (unsigned long long) size);

        kmem_alloc_used_sub(size);
        SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
            (unsigned long long) size, kmem_alloc_used_read(),
            kmem_alloc_max);

        kfree(ptr);

        SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);
void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
        void *ptr;
        SENTRY;

        ASSERT(flags & KM_SLEEP);

        /* Use the correct allocator */
        if (flags & __GFP_ZERO) {
                ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
        } else {
                ptr = vmalloc_nofail(size, flags);
        }

        if (unlikely(ptr == NULL)) {
                SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
                    "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
                    (unsigned long long) size, flags, func, line,
                    vmem_alloc_used_read(), vmem_alloc_max);
        } else {
                vmem_alloc_used_add(size);
                if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
                        vmem_alloc_max = vmem_alloc_used_read();

                SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
                    "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
                    vmem_alloc_used_read(), vmem_alloc_max);
        }

        SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
vmem_free_debug(const void *ptr, size_t size)
{
        SENTRY;

        ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
            (unsigned long long) size);

        vmem_alloc_used_sub(size);
        SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
            (unsigned long long) size, vmem_alloc_used_read(),
            vmem_alloc_max);

        vfree(ptr);

        SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);

# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 *
 * XXX: Improve the partial slab list by carefully maintaining a
 *      strict ordering of fullest to emptiest slabs based on
 *      the slab reference count.  This guarantees that when freeing
 *      slabs back to the system we need only linearly traverse the
 *      last N slabs in the list to discover all the freeable slabs.
 *
 * XXX: NUMA awareness for optionally allocating memory close to a
 *      particular core.  This can be advantageous if you know the slab
 *      object will be short lived and primarily accessed from one core.
 *
 * XXX: Slab coloring may also yield performance improvements and would
 *      be desirable to implement.
 */
struct list_head spl_kmem_cache_list;   /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */

static int spl_cache_flush(spl_kmem_cache_t *skc,
    spl_kmem_magazine_t *skm, int flush);

SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
    spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
        void *ptr;

        ASSERT(ISP2(size));

        if (skc->skc_flags & KMC_KMEM)
                ptr = (void *)__get_free_pages(flags, get_order(size));
        else
                ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);

        /* Resulting allocated memory will be page aligned */
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

        return ptr;
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
        ASSERT(ISP2(size));

        /*
         * The Linux direct reclaim path uses this out of band value to
         * determine if forward progress is being made.  Normally this is
         * incremented by kmem_freepages() which is part of the various
         * Linux slab implementations.  However, since we are using none
         * of that infrastructure we are responsible for incrementing it.
         */
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;

        if (skc->skc_flags & KMC_KMEM)
                free_pages((unsigned long)ptr, get_order(size));
        else
                vfree(ptr);
}
/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
        return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
               skc->skc_obj_align, uint32_t);
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
        uint32_t align = skc->skc_obj_align;

        return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
               P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);
}

/*
 * Lookup the spl_kmem_object_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
        return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
               skc->skc_obj_align, uint32_t);
}

/*
 * Required space for each offslab object taking in to account alignment
 * restrictions and the power-of-two requirement of kv_alloc().
 */
static inline uint32_t
spl_offslab_size(spl_kmem_cache_t *skc)
{
        return 1UL << (highbit(spl_obj_size(skc)) + 1);
}
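
/*
 * Worked example (illustrative, with assumed sizes): for a cache of
 * 320-byte objects with 8-byte alignment, and assuming a 32-byte
 * spl_kmem_obj_t, spl_obj_size() = P2ROUNDUP(320, 8) +
 * P2ROUNDUP(32, 8) = 352 bytes.  spl_sko_from_obj() then finds the
 * spl_kmem_obj_t at obj + 320, and spl_offslab_size() rounds 352 up
 * to the next power of two and doubles it:
 * 1 << (highbit(352) + 1) = 1024 bytes.
 */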
/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects in to one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide it ourselves.  Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 *   KMC_ONSLAB                     KMC_OFFSLAB
 *
 *   +------------------------+     +-----------------+
 *   | spl_kmem_slab_t --+-+  |     | spl_kmem_slab_t |---+-+
 *   | skc_obj_size    <-+ |  |     +-----------------+   | |
 *   | spl_kmem_obj_t      |  |                           | |
 *   | skc_obj_size    <---+  |     +-----------------+   | |
 *   | spl_kmem_obj_t      |  |     | skc_obj_size    | <-+ |
 *   | ...                 v  |     | spl_kmem_obj_t  |     |
 *   +------------------------+     +-----------------+     v
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_slab_t *sks;
        spl_kmem_obj_t *sko, *n;
        void *base, *obj;
        uint32_t obj_size, offslab_size = 0;
        int i, rc = 0;

        base = kv_alloc(skc, skc->skc_slab_size, flags);
        if (base == NULL)
                SRETURN(NULL);

        sks = (spl_kmem_slab_t *)base;
        sks->sks_magic = SKS_MAGIC;
        sks->sks_objs = skc->skc_slab_objs;
        sks->sks_age = jiffies;
        sks->sks_cache = skc;
        INIT_LIST_HEAD(&sks->sks_list);
        INIT_LIST_HEAD(&sks->sks_free_list);
        sks->sks_ref = 0;

        obj_size = spl_obj_size(skc);

        if (skc->skc_flags & KMC_OFFSLAB)
                offslab_size = spl_offslab_size(skc);

        for (i = 0; i < sks->sks_objs; i++) {
                if (skc->skc_flags & KMC_OFFSLAB) {
                        obj = kv_alloc(skc, offslab_size, flags);
                        if (!obj)
                                SGOTO(out, rc = -ENOMEM);
                } else {
                        obj = base + spl_sks_size(skc) + (i * obj_size);
                }

                ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
                sko = spl_sko_from_obj(skc, obj);
                sko->sko_addr = obj;
                sko->sko_magic = SKO_MAGIC;
                sko->sko_slab = sks;
                INIT_LIST_HEAD(&sko->sko_list);
                list_add_tail(&sko->sko_list, &sks->sks_free_list);
        }

        list_for_each_entry(sko, &sks->sks_free_list, sko_list)
                if (skc->skc_ctor)
                        skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
out:
        if (rc) {
                if (skc->skc_flags & KMC_OFFSLAB)
                        list_for_each_entry_safe(sko, n, &sks->sks_free_list,
                            sko_list)
                                kv_free(skc, sko->sko_addr, offslab_size);

                kv_free(skc, base, skc->skc_slab_size);
                sks = NULL;
        }

        SRETURN(sks);
}
/*
 * Remove a slab from complete or partial list, it must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
        spl_kmem_cache_t *skc;
        SENTRY;

        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(sks->sks_ref == 0);

        skc = sks->sks_cache;
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(spin_is_locked(&skc->skc_lock));

        /*
         * Update slab/objects counters in the cache, then remove the
         * slab from the skc->skc_partial_list.  Finally add the slab
         * and all its objects in to the private work lists where the
         * destructors will be called and the memory freed to the system.
         */
        skc->skc_obj_total -= sks->sks_objs;
        skc->skc_slab_total--;
        list_del(&sks->sks_list);
        list_add(&sks->sks_list, sks_list);
        list_splice_init(&sks->sks_free_list, sko_list);

        SEXIT;
}
/*
 * Traverses all the partial slabs attached to a cache and frees those
 * which are currently empty, and have not been touched for
 * skc_delay seconds to avoid thrashing.  The count argument is
 * passed to optionally cap the number of slabs reclaimed, a count
 * of zero means try and reclaim everything.  When flag is set we
 * always free an available slab regardless of age.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
{
        spl_kmem_slab_t *sks, *m;
        spl_kmem_obj_t *sko, *n;
        LIST_HEAD(sks_list);
        LIST_HEAD(sko_list);
        uint32_t size = 0;
        int i = 0;
        SENTRY;

        /*
         * Move empty slabs and objects which have not been touched in
         * skc_delay seconds on to private lists to be freed outside
         * the spin lock.  This delay time is important to avoid thrashing
         * however when flag is set the delay will not be used.
         */
        spin_lock(&skc->skc_lock);
        list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list,
            sks_list) {
                /*
                 * All empty slabs are at the end of skc->skc_partial_list,
                 * therefore once a non-empty slab is found we can stop
                 * scanning.  Additionally, stop when reaching the target
                 * reclaim 'count' if a non-zero threshold is given.
                 */
                if ((sks->sks_ref > 0) || (count && i >= count))
                        break;

                if (time_after(jiffies, sks->sks_age + skc->skc_delay * HZ) ||
                    flag) {
                        spl_slab_free(sks, &sks_list, &sko_list);
                        i++;
                }
        }
        spin_unlock(&skc->skc_lock);

        /*
         * The following two loops ensure all the object destructors are
         * run, any offslab objects are freed, and the slabs themselves
         * are freed.  This is all done outside the skc->skc_lock since
         * this allows the destructor to sleep, and allows us to perform
         * a conditional reschedule when freeing a large number of
         * objects and slabs back to the system.
         */
        if (skc->skc_flags & KMC_OFFSLAB)
                size = spl_offslab_size(skc);

        list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
                ASSERT(sko->sko_magic == SKO_MAGIC);

                if (skc->skc_dtor)
                        skc->skc_dtor(sko->sko_addr, skc->skc_private);

                if (skc->skc_flags & KMC_OFFSLAB)
                        kv_free(skc, sko->sko_addr, size);

                cond_resched();
        }

        list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
                ASSERT(sks->sks_magic == SKS_MAGIC);
                kv_free(skc, sks, skc->skc_slab_size);

                cond_resched();
        }

        SEXIT;
}
static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
        struct rb_node *node = root->rb_node;
        spl_kmem_emergency_t *ske;
        unsigned long address = (unsigned long)obj;

        while (node) {
                ske = container_of(node, spl_kmem_emergency_t, ske_node);

                if (address < (unsigned long)ske->ske_obj)
                        node = node->rb_left;
                else if (address > (unsigned long)ske->ske_obj)
                        node = node->rb_right;
                else
                        return ske;
        }

        return NULL;
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        spl_kmem_emergency_t *ske_tmp;
        unsigned long address = (unsigned long)ske->ske_obj;

        while (*new) {
                ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

                parent = *new;
                if (address < (unsigned long)ske_tmp->ske_obj)
                        new = &((*new)->rb_left);
                else if (address > (unsigned long)ske_tmp->ske_obj)
                        new = &((*new)->rb_right);
                else
                        return 0;
        }

        rb_link_node(&ske->ske_node, parent, new);
        rb_insert_color(&ske->ske_node, root);

        return 1;
}
/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
        spl_kmem_emergency_t *ske;
        int empty;
        SENTRY;

        /* Last chance use a partial slab if one now exists */
        spin_lock(&skc->skc_lock);
        empty = list_empty(&skc->skc_partial_list);
        spin_unlock(&skc->skc_lock);
        if (!empty)
                SRETURN(-EEXIST);

        ske = kmalloc(sizeof(*ske), flags);
        if (ske == NULL)
                SRETURN(-ENOMEM);

        ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
        if (ske->ske_obj == NULL) {
                kfree(ske);
                SRETURN(-ENOMEM);
        }

        spin_lock(&skc->skc_lock);
        empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
        if (likely(empty)) {
                skc->skc_obj_total++;
                skc->skc_obj_emergency++;
                if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
                        skc->skc_obj_emergency_max = skc->skc_obj_emergency;
        }
        spin_unlock(&skc->skc_lock);

        if (unlikely(!empty)) {
                kfree(ske->ske_obj);
                kfree(ske);
                SRETURN(-EINVAL);
        }

        if (skc->skc_ctor)
                skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);

        *obj = ske->ske_obj;

        SRETURN(0);
}
/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_emergency_t *ske;
        SENTRY;

        spin_lock(&skc->skc_lock);
        ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
        if (likely(ske)) {
                rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
                skc->skc_obj_emergency--;
                skc->skc_obj_total--;
        }
        spin_unlock(&skc->skc_lock);

        if (unlikely(ske == NULL))
                SRETURN(-ENOENT);

        if (skc->skc_dtor)
                skc->skc_dtor(ske->ske_obj, skc->skc_private);

        kfree(ske->ske_obj);
        kfree(ske);

        SRETURN(0);
}
/*
 * Called regularly on all caches to age objects out of the magazines
 * which have not been accessed in skc->skc_delay seconds.  This prevents
 * idle magazines from holding memory which might be better used by
 * other caches or parts of the system.  The delay is present to
 * prevent thrashing the magazine.
 */
static void
spl_magazine_age(void *data)
{
        spl_kmem_magazine_t *skm =
                spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
        spl_kmem_cache_t *skc = skm->skm_cache;

        ASSERT(skm->skm_magic == SKM_MAGIC);
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skc->skc_mag[skm->skm_cpu] == skm);

        if (skm->skm_avail > 0 &&
            time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
                (void)spl_cache_flush(skc, skm, skm->skm_refill);

        if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
                schedule_delayed_work_on(skm->skm_cpu, &skm->skm_work,
                    skc->skc_delay / 3 * HZ);
}
/*
 * Called regularly to keep a downward pressure on the size of idle
 * magazines and to release free slabs from the cache.  This function
 * never calls the registered reclaim function, that only occurs
 * under memory pressure or with a direct call to spl_kmem_reap().
 */
static void
spl_cache_age(void *data)
{
        spl_kmem_cache_t *skc =
                spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);

        ASSERT(skc->skc_magic == SKC_MAGIC);
        spl_slab_reclaim(skc, skc->skc_reap, 0);

        if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
                schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
}
/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.  Also for
 * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN,
 * lower than this and we will fail.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
        uint32_t sks_size, obj_size, max_size;

        if (skc->skc_flags & KMC_OFFSLAB) {
                *objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
                *size = sizeof(spl_kmem_slab_t);
        } else {
                sks_size = spl_sks_size(skc);
                obj_size = spl_obj_size(skc);

                if (skc->skc_flags & KMC_KMEM)
                        max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
                else
                        max_size = (32 * 1024 * 1024);

                /* Power of two sized slab */
                for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
                        *objs = (*size - sks_size) / obj_size;
                        if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
                                SRETURN(0);
                }

                /*
                 * Unable to satisfy target objects per slab, fall back to
                 * allocating a maximally sized slab and assuming it can
                 * contain the minimum objects count use it.  If not fail.
                 */
                *size = max_size;
                *objs = (*size - sks_size) / obj_size;
                if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
                        SRETURN(0);
        }

        SRETURN(-ENOSPC);
}
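
/*
 * Worked example (illustrative, with assumed constants): with 4 KiB
 * pages, a target of SPL_KMEM_CACHE_OBJ_PER_SLAB = 32 objects, a
 * 352-byte spl_obj_size() and a 32-byte spl_sks_size(), the loop above
 * tries 4 KiB -> 11 objects, 8 KiB -> 23 objects, then settles on
 * 16 KiB -> (16384 - 32) / 352 = 46 objects >= 32.
 */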
/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
        uint32_t obj_size = spl_obj_size(skc);
        int size;
        SENTRY;

        /* Per-magazine sizes below assume a 4Kib page size */
        if (obj_size > (PAGE_SIZE * 256))
                size = 4;   /* Minimum 4Mib per-magazine */
        else if (obj_size > (PAGE_SIZE * 32))
                size = 16;  /* Minimum 2Mib per-magazine */
        else if (obj_size > (PAGE_SIZE))
                size = 64;  /* Minimum 256Kib per-magazine */
        else if (obj_size > (PAGE_SIZE / 4))
                size = 128; /* Minimum 128Kib per-magazine */
        else
                size = 256;

        SRETURN(size);
}
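
/*
 * Example (illustrative, assuming 4 KiB pages): a 2 KiB object is larger
 * than PAGE_SIZE / 4 but not larger than PAGE_SIZE, so each per-cpu
 * magazine caches up to 128 objects (256 KiB per CPU), and
 * spl_magazine_create() below sets the refill batch to (128 + 1) / 2 = 64.
 */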
/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
        spl_kmem_magazine_t *skm;
        int size = sizeof(spl_kmem_magazine_t) +
                   sizeof(void *) * skc->skc_mag_size;
        SENTRY;

        skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
        if (skm) {
                skm->skm_magic = SKM_MAGIC;
                skm->skm_avail = 0;
                skm->skm_size = skc->skc_mag_size;
                skm->skm_refill = skc->skc_mag_refill;
                skm->skm_cache = skc;
                spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
                skm->skm_age = jiffies;
                skm->skm_cpu = cpu;
        }

        SRETURN(skm);
}
/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
        int size = sizeof(spl_kmem_magazine_t) +
                   sizeof(void *) * skm->skm_size;

        SENTRY;
        ASSERT(skm->skm_magic == SKM_MAGIC);
        ASSERT(skm->skm_avail == 0);

        kmem_free(skm, size);
        SEXIT;
}
/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
        int i;
        SENTRY;

        skc->skc_mag_size = spl_magazine_size(skc);
        skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

        for_each_online_cpu(i) {
                skc->skc_mag[i] = spl_magazine_alloc(skc, i);
                if (!skc->skc_mag[i]) {
                        for (i--; i >= 0; i--)
                                spl_magazine_free(skc->skc_mag[i]);

                        SRETURN(-ENOMEM);
                }
        }

        /* Only after everything is allocated schedule magazine work */
        for_each_online_cpu(i)
                schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
                    skc->skc_delay / 3 * HZ);

        SRETURN(0);
}
/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
        spl_kmem_magazine_t *skm;
        int i;
        SENTRY;

        for_each_online_cpu(i) {
                skm = skc->skc_mag[i];
                (void)spl_cache_flush(skc, skm, skm->skm_avail);
                spl_magazine_free(skm);
        }

        SEXIT;
}
/*
 * Create an object cache based on the following arguments:
 * name         cache name
 * size         cache object size
 * align        cache object alignment
 * ctor         cache object constructor
 * dtor         cache object destructor
 * reclaim      cache object reclaim
 * priv         cache private data for ctor/dtor/reclaim
 * vmp          unused must be NULL
 * flags
 *      KMC_NOTOUCH     Disable cache object aging (unsupported)
 *      KMC_NODEBUG     Disable debugging (unsupported)
 *      KMC_NOMAGAZINE  Disable magazine (unsupported)
 *      KMC_NOHASH      Disable hashing (unsupported)
 *      KMC_QCACHE      Disable qcache (unsupported)
 *      KMC_KMEM        Force kmem backed cache
 *      KMC_VMEM        Force vmem backed cache
 *      KMC_OFFSLAB     Locate objects off the slab
 */
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor,
    spl_kmem_dtor_t dtor,
    spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags)
{
        spl_kmem_cache_t *skc;
        int rc;
        SENTRY;

        ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
        ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
        ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
        ASSERT(vmp == NULL);

        /*
         * Allocate memory for a new cache and initialize it.  Unfortunately,
         * this usually ends up being a large allocation of ~32k because
         * we need to allocate enough memory for the worst case number of
         * cpus in the magazine, skc_mag[NR_CPUS].  Because of this we
         * explicitly pass KM_NODEBUG to suppress the kmem warning
         */
        skc = kmem_zalloc(sizeof(*skc), KM_SLEEP | KM_NODEBUG);
        if (skc == NULL)
                SRETURN(NULL);

        skc->skc_magic = SKC_MAGIC;
        skc->skc_name_size = strlen(name) + 1;
        skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
        if (skc->skc_name == NULL) {
                kmem_free(skc, sizeof(*skc));
                SRETURN(NULL);
        }
        strncpy(skc->skc_name, name, skc->skc_name_size);

        skc->skc_ctor = ctor;
        skc->skc_dtor = dtor;
        skc->skc_reclaim = reclaim;
        skc->skc_private = priv;

        skc->skc_flags = flags;
        skc->skc_obj_size = size;
        skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
        skc->skc_delay = SPL_KMEM_CACHE_DELAY;
        skc->skc_reap = SPL_KMEM_CACHE_REAP;
        atomic_set(&skc->skc_ref, 0);

        INIT_LIST_HEAD(&skc->skc_list);
        INIT_LIST_HEAD(&skc->skc_complete_list);
        INIT_LIST_HEAD(&skc->skc_partial_list);
        skc->skc_emergency_tree = RB_ROOT;
        spin_lock_init(&skc->skc_lock);
        init_waitqueue_head(&skc->skc_waitq);
        skc->skc_slab_fail = 0;
        skc->skc_slab_create = 0;
        skc->skc_slab_destroy = 0;
        skc->skc_slab_total = 0;
        skc->skc_slab_alloc = 0;
        skc->skc_slab_max = 0;
        skc->skc_obj_total = 0;
        skc->skc_obj_alloc = 0;
        skc->skc_obj_max = 0;
        skc->skc_obj_deadlock = 0;
        skc->skc_obj_emergency = 0;
        skc->skc_obj_emergency_max = 0;

        if (align) {
                VERIFY(ISP2(align));
                VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); /* Min alignment */
                VERIFY3U(align, <=, PAGE_SIZE);            /* Max alignment */
                skc->skc_obj_align = align;
        }

        /* If none passed select a cache type based on object size */
        if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
                if (spl_obj_size(skc) < (PAGE_SIZE / 8))
                        skc->skc_flags |= KMC_KMEM;
                else
                        skc->skc_flags |= KMC_VMEM;
        }

        rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
        if (rc)
                SGOTO(out, rc);

        rc = spl_magazine_create(skc);
        if (rc)
                SGOTO(out, rc);

        spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
        schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);

        down_write(&spl_kmem_cache_sem);
        list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
        up_write(&spl_kmem_cache_sem);

        SRETURN(skc);
out:
        kmem_free(skc->skc_name, skc->skc_name_size);
        kmem_free(skc, sizeof(*skc));
        SRETURN(NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
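
/*
 * Example usage (illustrative sketch, not part of the original source;
 * the object type and callback names are hypothetical):
 *
 *     typedef struct obj { kmutex_t o_lock; } obj_t;
 *
 *     static int
 *     obj_ctor(void *buf, void *priv, int flags)
 *     {
 *             mutex_init(&((obj_t *)buf)->o_lock, NULL, MUTEX_DEFAULT, NULL);
 *             return 0;
 *     }
 *
 *     static void
 *     obj_dtor(void *buf, void *priv)
 *     {
 *             mutex_destroy(&((obj_t *)buf)->o_lock);
 *     }
 *
 *     cache = spl_kmem_cache_create("obj_cache", sizeof(obj_t), 0,
 *         obj_ctor, obj_dtor, NULL, NULL, NULL, 0);
 */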
/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
        ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);
/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
        DECLARE_WAIT_QUEUE_HEAD(wq);
        int i;
        SENTRY;

        ASSERT(skc->skc_magic == SKC_MAGIC);

        down_write(&spl_kmem_cache_sem);
        list_del_init(&skc->skc_list);
        up_write(&spl_kmem_cache_sem);

        /* Cancel and wait for any pending delayed work */
        VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        cancel_delayed_work_sync(&skc->skc_work);
        for_each_online_cpu(i)
                cancel_delayed_work_sync(&skc->skc_mag[i]->skm_work);

        flush_scheduled_work();

        /* Wait until all current callers complete, this is mainly
         * to catch the case where a low memory situation triggers a
         * cache reaping action which races with this destroy. */
        wait_event(wq, atomic_read(&skc->skc_ref) == 0);

        spl_magazine_destroy(skc);
        spl_slab_reclaim(skc, 0, 1);
        spin_lock(&skc->skc_lock);

        /* Validate there are no objects in use and free all the
         * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
        ASSERT3U(skc->skc_slab_alloc, ==, 0);
        ASSERT3U(skc->skc_obj_alloc, ==, 0);
        ASSERT3U(skc->skc_slab_total, ==, 0);
        ASSERT3U(skc->skc_obj_total, ==, 0);
        ASSERT3U(skc->skc_obj_emergency, ==, 0);
        ASSERT(list_empty(&skc->skc_complete_list));

        kmem_free(skc->skc_name, skc->skc_name_size);
        spin_unlock(&skc->skc_lock);

        kmem_free(skc, sizeof(*skc));

        SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
        spl_kmem_obj_t *sko;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(spin_is_locked(&skc->skc_lock));

        sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
        ASSERT(sko->sko_magic == SKO_MAGIC);
        ASSERT(sko->sko_addr != NULL);

        /* Remove from sks_free_list */
        list_del_init(&sko->sko_list);

        sks->sks_age = jiffies;
        sks->sks_ref++;
        skc->skc_obj_alloc++;

        /* Track max obj usage statistics */
        if (skc->skc_obj_alloc > skc->skc_obj_max)
                skc->skc_obj_max = skc->skc_obj_alloc;

        /* Track max slab usage statistics */
        if (sks->sks_ref == 1) {
                skc->skc_slab_alloc++;

                if (skc->skc_slab_alloc > skc->skc_slab_max)
                        skc->skc_slab_max = skc->skc_slab_alloc;
        }

        return sko->sko_addr;
}
/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it in to the list
 * of partial slabs, and then waking any waiters.
 */
static void
spl_cache_grow_work(void *data)
{
        spl_kmem_alloc_t *ska =
                spl_get_work_data(data, spl_kmem_alloc_t, ska_work.work);
        spl_kmem_cache_t *skc = ska->ska_cache;
        spl_kmem_slab_t *sks;

        sks = spl_slab_alloc(skc, ska->ska_flags | __GFP_NORETRY | KM_NODEBUG);
        spin_lock(&skc->skc_lock);
        if (sks) {
                skc->skc_slab_total++;
                skc->skc_obj_total += sks->sks_objs;
                list_add_tail(&sks->sks_list, &skc->skc_partial_list);
        }

        atomic_dec(&skc->skc_ref);
        clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
        clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
        wake_up_all(&skc->skc_waitq);
        spin_unlock(&skc->skc_lock);

        kfree(ska);
}
/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
        return !test_bit(KMC_BIT_GROWING, &skc->skc_flags);
}

static int
spl_cache_reclaim_wait(void *word)
{
        schedule();
        return 0;
}

/*
 * No available objects on any slabs, create a new slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
        int remaining, rc = 0;
        SENTRY;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        *obj = NULL;

        /*
         * Before allocating a new slab wait for any reaping to complete and
         * then return so the local magazine can be rechecked for new objects.
         */
        if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
                rc = wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
                    spl_cache_reclaim_wait, TASK_UNINTERRUPTIBLE);
                SRETURN(rc ? rc : -EAGAIN);
        }

        /*
         * This is handled by dispatching a work request to the global work
         * queue.  This allows us to asynchronously allocate a new slab while
         * retaining the ability to safely fall back to smaller synchronous
         * allocations to ensure forward progress is always maintained.
         */
        if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
                spl_kmem_alloc_t *ska;

                ska = kmalloc(sizeof(*ska), flags);
                if (ska == NULL) {
                        clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
                        wake_up_all(&skc->skc_waitq);
                        SRETURN(-ENOMEM);
                }

                atomic_inc(&skc->skc_ref);
                ska->ska_cache = skc;
                ska->ska_flags = flags & ~__GFP_FS;
                spl_init_delayed_work(&ska->ska_work, spl_cache_grow_work, ska);
                schedule_delayed_work(&ska->ska_work, 0);
        }

        /*
         * The goal here is to only detect the rare case where a virtual slab
         * allocation has deadlocked.  We must be careful to minimize the use
         * of emergency objects which are more expensive to track.  Therefore,
         * we set a very long timeout for the asynchronous allocation and if
         * the timeout is reached the cache is flagged as deadlocked.  From
         * this point only new emergency objects will be allocated until the
         * asynchronous allocation completes and clears the deadlocked flag.
         */
        if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
                rc = spl_emergency_alloc(skc, flags, obj);
        } else {
                remaining = wait_event_timeout(skc->skc_waitq,
                    spl_cache_grow_wait(skc), HZ);

                if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) {
                        spin_lock(&skc->skc_lock);
                        if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
                                set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
                                skc->skc_obj_deadlock++;
                        }
                        spin_unlock(&skc->skc_lock);
                }

                rc = -ENOMEM;
        }

        SRETURN(rc);
}
/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created.  On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
        spl_kmem_slab_t *sks;
        int count = 0, rc, refill;
        void *obj = NULL;
        SENTRY;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skm->skm_magic == SKM_MAGIC);

        refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
        spin_lock(&skc->skc_lock);

        while (refill > 0) {
                /* No slabs available we may need to grow the cache */
                if (list_empty(&skc->skc_partial_list)) {
                        spin_unlock(&skc->skc_lock);

                        local_irq_enable();
                        rc = spl_cache_grow(skc, flags, &obj);
                        local_irq_disable();

                        /* Emergency object for immediate use by caller */
                        if (rc == 0 && obj != NULL)
                                SRETURN(obj);

                        if (rc)
                                SGOTO(out, rc);

                        /* Rescheduled to different CPU skm is not local */
                        if (skm != skc->skc_mag[smp_processor_id()])
                                SGOTO(out, rc);

                        /* Potentially rescheduled to the same CPU but
                         * allocations may have occurred from this CPU while
                         * we were sleeping so recalculate max refill. */
                        refill = MIN(refill, skm->skm_size - skm->skm_avail);

                        spin_lock(&skc->skc_lock);
                        continue;
                }

                /* Grab the next available slab */
                sks = list_entry((&skc->skc_partial_list)->next,
                    spl_kmem_slab_t, sks_list);
                ASSERT(sks->sks_magic == SKS_MAGIC);
                ASSERT(sks->sks_ref < sks->sks_objs);
                ASSERT(!list_empty(&sks->sks_free_list));

                /* Consume as many objects as needed to refill the requested
                 * cache.  We must also be careful not to overfill it. */
                while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
                    ++count) {
                        ASSERT(skm->skm_avail < skm->skm_size);
                        ASSERT(count < skm->skm_size);
                        skm->skm_objs[skm->skm_avail++] =
                            spl_cache_obj(skc, sks);
                }

                /* Move slab to skc_complete_list when full */
                if (sks->sks_ref == sks->sks_objs) {
                        list_del(&sks->sks_list);
                        list_add(&sks->sks_list, &skc->skc_complete_list);
                }
        }

        spin_unlock(&skc->skc_lock);
out:
        SRETURN(NULL);
}
/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_slab_t *sks = NULL;
        spl_kmem_obj_t *sko = NULL;
        SENTRY;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(spin_is_locked(&skc->skc_lock));

        sko = spl_sko_from_obj(skc, obj);
        ASSERT(sko->sko_magic == SKO_MAGIC);
        sks = sko->sko_slab;
        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(sks->sks_cache == skc);
        list_add(&sko->sko_list, &sks->sks_free_list);

        sks->sks_age = jiffies;
        sks->sks_ref--;
        skc->skc_obj_alloc--;

        /* Move slab to skc_partial_list when no longer full.  Slabs
         * are added to the head to keep the partial list in quasi-full
         * sorted order.  Fuller at the head, emptier at the tail. */
        if (sks->sks_ref == (sks->sks_objs - 1)) {
                list_del(&sks->sks_list);
                list_add(&sks->sks_list, &skc->skc_partial_list);
        }

        /* Move empty slabs to the end of the partial list so
         * they can be easily found and freed during reclamation. */
        if (sks->sks_ref == 0) {
                list_del(&sks->sks_list);
                list_add_tail(&sks->sks_list, &skc->skc_partial_list);
                skc->skc_slab_alloc--;
        }

        SEXIT;
}
/*
 * Release a batch of objects from a per-cpu magazine back to their
 * respective slabs.  This occurs when we exceed the magazine size,
 * are under memory pressure, when the cache is idle, or during
 * cache cleanup.  The flush argument contains the number of entries
 * to remove from the magazine.
 */
static int
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
        int i, count = MIN(flush, skm->skm_avail);
        SENTRY;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skm->skm_magic == SKM_MAGIC);

        /*
         * XXX: Currently we simply return objects from the magazine to
         * the slabs in fifo order.  The ideal thing to do from a memory
         * fragmentation standpoint is to cheaply determine the set of
         * objects in the magazine which will result in the largest
         * number of free slabs if released from the magazine.
         */
        spin_lock(&skc->skc_lock);
        for (i = 0; i < count; i++)
                spl_cache_shrink(skc, skm->skm_objs[i]);

        skm->skm_avail -= count;
        memmove(skm->skm_objs, &(skm->skm_objs[count]),
            sizeof(void *) * skm->skm_avail);

        spin_unlock(&skc->skc_lock);

        SRETURN(count);
}
/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_magazine_t *skm;
        unsigned long irq_flags;
        void *obj = NULL;
        SENTRY;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        ASSERT(flags & KM_SLEEP);
        atomic_inc(&skc->skc_ref);
        local_irq_save(irq_flags);

restart:
        /* Safe to update per-cpu structure without lock, but
         * in the restart case we must be careful to reacquire
         * the local magazine since this may have changed
         * when we need to grow the cache. */
        skm = skc->skc_mag[smp_processor_id()];
        ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
            skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
            skm->skm_size, skm->skm_refill, skm->skm_avail);

        if (likely(skm->skm_avail)) {
                /* Object available in CPU cache, use it */
                obj = skm->skm_objs[--skm->skm_avail];
                skm->skm_age = jiffies;
        } else {
                obj = spl_cache_refill(skc, skm, flags);
                if (obj == NULL)
                        SGOTO(restart, obj = NULL);
        }

        local_irq_restore(irq_flags);
        ASSERT(obj);
        ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

        /* Pre-emptively migrate object to CPU L1 cache */
        prefetchw(obj);
        atomic_dec(&skc->skc_ref);

        SRETURN(obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
 * Free an object back to the local per-cpu magazine, there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entries from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_magazine_t *skm;
        unsigned long flags;
        SENTRY;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
        atomic_inc(&skc->skc_ref);

        /*
         * Only virtual slabs may have emergency objects and these objects
         * are guaranteed to have physical addresses.  They must be removed
         * from the tree of emergency objects and then freed.
         */
        if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
                SGOTO(out, spl_emergency_free(skc, obj));

        local_irq_save(flags);

        /* Safe to update per-cpu structure without lock, but
         * no remote memory allocation tracking is being performed
         * it is entirely possible to allocate an object from one
         * CPU cache and return it to another. */
        skm = skc->skc_mag[smp_processor_id()];
        ASSERT(skm->skm_magic == SKM_MAGIC);

        /* Per-CPU cache full, flush it to make space */
        if (unlikely(skm->skm_avail >= skm->skm_size))
                (void)spl_cache_flush(skc, skm, skm->skm_refill);

        /* Available space in cache, use it */
        skm->skm_objs[skm->skm_avail++] = obj;

        local_irq_restore(flags);
out:
        atomic_dec(&skc->skc_ref);

        SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_free);
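
/*
 * Example usage (illustrative sketch, not part of the original source;
 * "cache" is the hypothetical handle returned by spl_kmem_cache_create()
 * above):
 *
 *     obj_t *o = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *     ...
 *     spl_kmem_cache_free(cache, o);
 *     spl_kmem_cache_destroy(cache);
 */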
/*
 * The generic shrinker function for all caches.  Under Linux a shrinker
 * may not be tightly coupled with a slab cache.  In fact Linux
 * systematically tries calling all registered shrinker callbacks which
 * report that they contain unused objects.  Because of this we only
 * register one shrinker function in the shim layer for all slab caches.
 * We always attempt to shrink all caches when this generic shrinker
 * is called.  The shrinker should return the number of free objects
 * in the cache when called with nr_to_scan == 0, but not attempt to
 * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
 * objects should be freed, which differs from Solaris semantics.
 * Solaris semantics are to free all available objects, which may (and
 * probably will) be more objects than the requested nr_to_scan.
 */
static int
__spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
    struct shrink_control *sc)
{
	spl_kmem_cache_t *skc;
	int unused = 0;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		if (sc->nr_to_scan)
			spl_kmem_cache_reap_now(skc,
			    MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));

		/*
		 * Presume everything alloc'ed is reclaimable; this ensures
		 * we are called again with nr_to_scan > 0 so we can try to
		 * reclaim.  The exact number is not important either, so
		 * we forgo taking this already highly contended lock.
		 */
		unused += skc->skc_obj_alloc;
	}
	up_read(&spl_kmem_cache_sem);

	return (unused * sysctl_vfs_cache_pressure) / 100;
}

SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
    spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
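
/*
 * Illustrative arithmetic (added commentary, not in the original
 * source): if the caches collectively report unused = 10000 objects
 * and sysctl_vfs_cache_pressure has its default value of 100, the
 * shrinker returns (10000 * 100) / 100 = 10000.  Raising the sysctl
 * to 200 doubles the count reported to the VM and thus the reclaim
 * pressure applied to these caches.
 */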
/*
 * Call the registered reclaim function for a cache.  Depending on how
 * many and which objects are released it may simply repopulate the
 * local magazine, which will then need to age-out.  Objects which cannot
 * fit in the magazine will be released back to their slabs, which will
 * also need to age out before being released.  This is all just best
 * effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
{
	SENTRY;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/* Prevent concurrent cache reaping when contended */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		SEXIT;
		return;
	}

	atomic_inc(&skc->skc_ref);

	/*
	 * When a reclaim function is available it may be invoked repeatedly
	 * until at least a single slab can be freed.  This ensures that we
	 * do free memory back to the system.  This helps minimize the chance
	 * of an OOM event when the bulk of memory is used by the slab.
	 *
	 * When free slabs are already available the reclaim callback will be
	 * skipped.  Additionally, if no forward progress is detected despite
	 * a reclaim function the cache will be skipped to avoid deadlock.
	 *
	 * Longer term this would be the correct place to add the code which
	 * repacks the slabs in order to minimize fragmentation.
	 */
	if (skc->skc_reclaim) {
		uint64_t objects = UINT64_MAX;
		int do_reclaim;

		do {
			spin_lock(&skc->skc_lock);
			do_reclaim =
			    (skc->skc_slab_total > 0) &&
			    ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) &&
			    (skc->skc_obj_alloc < objects);

			objects = skc->skc_obj_alloc;
			spin_unlock(&skc->skc_lock);

			if (do_reclaim)
				skc->skc_reclaim(skc->skc_private);

		} while (do_reclaim);
	}

	/* Reclaim from the cache, ignoring its age and delay. */
	spl_slab_reclaim(skc, count, 1);
	clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
	smp_mb__after_clear_bit();
	wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);

	atomic_dec(&skc->skc_ref);

	SEXIT;
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
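
/*
 * Illustrative sketch (added commentary, not in the original source):
 * the do_reclaim predicate above holds only while the cache has slabs
 * (skc_slab_total > 0), none of them are free (skc_slab_total ==
 * skc_slab_alloc), and the previous pass made progress (skc_obj_alloc
 * dropped below the prior sample held in objects).  A registered
 * reclaim callback which frees nothing is therefore invoked once, after
 * which the loop exits rather than spinning forever.
 */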
/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	struct shrink_control sc;

	sc.nr_to_scan = KMC_REAP_CHUNK;
	sc.gfp_mask = GFP_KERNEL;

	__spl_kmem_cache_generic_shrinker(NULL, &sc);
}
EXPORT_SYMBOL(spl_kmem_reap);
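
/*
 * Illustrative usage sketch (added commentary, not in the original
 * source; the pressure check shown is hypothetical):
 *
 *	if (freemem < desfree)
 *		spl_kmem_reap();
 *
 * spl_kmem_reap() takes no arguments and fans out to every registered
 * cache through the generic shrinker, KMC_REAP_CHUNK objects at a time.
 */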
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/* Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer. */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/* Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii. */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	if (!flag) {
		/* Not printable; emit a hex sample of every other byte */
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		    *((uint8_t *)kd->kd_addr),
		    *((uint8_t *)kd->kd_addr + 2),
		    *((uint8_t *)kd->kd_addr + 4),
		    *((uint8_t *)kd->kd_addr + 6),
		    *((uint8_t *)kd->kd_addr + 8),
		    *((uint8_t *)kd->kd_addr + 10),
		    *((uint8_t *)kd->kd_addr + 12),
		    *((uint8_t *)kd->kd_addr + 14));
	}

	return (str);
}
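
/*
 * Illustrative sketch (added commentary, not in the original source):
 * for a tracked buffer holding the text "zfs rocks!" the loop above
 * copies the printable prefix and the caller sees it verbatim.  For
 * binary contents beginning 0x00 0x01 0x02 ... 0x0f the hex fallback
 * samples the even offsets only, producing "00020406080a0c0e".
 */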
static int
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;
	SENTRY;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);

	SRETURN(0);
}
static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];
	SENTRY;

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		    "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		    (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		    kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
	SEXIT;
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
#define spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
static void
spl_kmem_init_globals(void)
{
	struct zone *zone;

	/* For now all zones are included; it may be wise to restrict
	 * this to normal and highmem zones if we see problems. */
	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		minfree += min_wmark_pages(zone);
		desfree += low_wmark_pages(zone);
		lotsfree += high_wmark_pages(zone);
	}

	/* Solaris default values */
	swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
	swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
}
/*
 * Called at module init when it is safe to use spl_kallsyms_lookup_name()
 */
static int
spl_kmem_init_kallsyms_lookup(void)
{
#ifndef HAVE_GET_VMALLOC_INFO
	get_vmalloc_info_fn = (get_vmalloc_info_t)
		spl_kallsyms_lookup_name("get_vmalloc_info");
	if (!get_vmalloc_info_fn) {
		printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n");
		SRETURN(-EFAULT);
	}
#endif /* HAVE_GET_VMALLOC_INFO */
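
	/*
	 * Added commentary, not in the original source: every #ifndef
	 * block in this function follows the pattern above.  The symbol
	 * address is resolved once at module load, cached in a function
	 * pointer initialized to SYMBOL_POISON, and a missing symbol
	 * fails the module init with -EFAULT.
	 */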
#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
	first_online_pgdat_fn = (first_online_pgdat_t)
		spl_kallsyms_lookup_name("first_online_pgdat");
	if (!first_online_pgdat_fn) {
		printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
		SRETURN(-EFAULT);
	}
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
	next_online_pgdat_fn = (next_online_pgdat_t)
		spl_kallsyms_lookup_name("next_online_pgdat");
	if (!next_online_pgdat_fn) {
		printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
		SRETURN(-EFAULT);
	}
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
	next_zone_fn = (next_zone_t)
		spl_kallsyms_lookup_name("next_zone");
	if (!next_zone_fn) {
		printk(KERN_ERR "Error: Unknown symbol next_zone\n");
		SRETURN(-EFAULT);
	}
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
	pgdat_list_addr = *(struct pglist_data **)
		spl_kallsyms_lookup_name("pgdat_list");
	if (!pgdat_list_addr) {
		printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
		SRETURN(-EFAULT);
	}
# endif /* HAVE_PGDAT_LIST */
#endif /* HAVE_PGDAT_HELPERS */

#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
	get_zone_counts_fn = (get_zone_counts_t)
		spl_kallsyms_lookup_name("get_zone_counts");
	if (!get_zone_counts_fn) {
		printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
		SRETURN(-EFAULT);
	}
#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
	/*
	 * It is now safe to initialize the global tunings which rely on
	 * the use of the for_each_zone() macro.  This macro in turn
	 * depends on the *_pgdat symbols which are now available.
	 */
	spl_kmem_init_globals();

#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
	invalidate_inodes_fn = (invalidate_inodes_t)
		spl_kallsyms_lookup_name("invalidate_inodes");
	if (!invalidate_inodes_fn) {
		printk(KERN_ERR "Error: Unknown symbol invalidate_inodes\n");
		SRETURN(-EFAULT);
	}
#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */

#ifndef HAVE_SHRINK_DCACHE_MEMORY
	/* When shrink_dcache_memory_fn == NULL support is disabled */
	shrink_dcache_memory_fn = (shrink_dcache_memory_t)
		spl_kallsyms_lookup_name("shrink_dcache_memory");
#endif /* HAVE_SHRINK_DCACHE_MEMORY */

#ifndef HAVE_SHRINK_ICACHE_MEMORY
	/* When shrink_icache_memory_fn == NULL support is disabled */
	shrink_icache_memory_fn = (shrink_icache_memory_t)
		spl_kallsyms_lookup_name("shrink_icache_memory");
#endif /* HAVE_SHRINK_ICACHE_MEMORY */
	SRETURN(0);
}

int
spl_kmem_init(void)
{
	int rc = 0;
	SENTRY;

	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);

	spl_register_shrinker(&spl_kmem_cache_shrinker);

#ifdef DEBUG_KMEM
	kmem_alloc_used_set(0);
	vmem_alloc_used_set(0);

	spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
	spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM */
	SRETURN(rc);
}

void
spl_kmem_fini(void)
{
	SENTRY;

#ifdef DEBUG_KMEM
	/* Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time. */
	if (kmem_alloc_used_read() != 0)
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "kmem leaked %ld/%ld bytes\n",
		    kmem_alloc_used_read(), kmem_alloc_max);

	if (vmem_alloc_used_read() != 0)
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "vmem leaked %ld/%ld bytes\n",
		    vmem_alloc_used_read(), vmem_alloc_max);

	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
	spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */

	spl_unregister_shrinker(&spl_kmem_cache_shrinker);