/*
 *  This file is part of the SPL: Solaris Porting Layer.
 *
 *  Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 *  Produced at Lawrence Livermore National Laboratory
 *  Written by:
 *          Brian Behlendorf <behlendorf1@llnl.gov>,
 *          Herb Wartens <wartens2@llnl.gov>,
 *          Jim Garlick <garlick@llnl.gov>
 *
 *  This is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */
#ifdef DEBUG_SUBSYSTEM
# undef DEBUG_SUBSYSTEM
#endif

#define DEBUG_SUBSYSTEM S_KMEM
/*
 * The minimum amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_min
 * multiplied by the number of zones and is sized based on that.
 */
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);
/*
 * The desired amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_low
 * multiplied by the number of zones and is sized based on that.
 * Assuming all zones are being used roughly equally, when we drop
 * below this threshold async page reclamation is triggered.
 */
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);
/*
 * When above this amount of memory measured in pages the system is
 * determined to have enough free memory.  This is similar to Linux's
 * zone->pages_high multiplied by the number of zones and is sized based
 * on that.  Assuming all zones are being used roughly equally, when
 * async page reclamation reaches this threshold it stops.
 */
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);
/* Unused, always 0 in this implementation. */
pgcnt_t needfree = 0;
EXPORT_SYMBOL(needfree);

pgcnt_t swapfs_minfree = 0;
EXPORT_SYMBOL(swapfs_minfree);

pgcnt_t swapfs_reserve = 0;
EXPORT_SYMBOL(swapfs_reserve);
vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);
#ifndef HAVE_GET_VMALLOC_INFO
get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_vmalloc_info_fn);
#endif /* HAVE_GET_VMALLOC_INFO */

#ifndef HAVE_FIRST_ONLINE_PGDAT
first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(first_online_pgdat_fn);
#endif /* HAVE_FIRST_ONLINE_PGDAT */

#ifndef HAVE_NEXT_ONLINE_PGDAT
next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_online_pgdat_fn);
#endif /* HAVE_NEXT_ONLINE_PGDAT */

#ifndef HAVE_NEXT_ZONE
next_zone_t next_zone_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_zone_fn);
#endif /* HAVE_NEXT_ZONE */
#ifndef HAVE_ZONE_STAT_ITEM_FIA
# ifndef HAVE_GET_ZONE_COUNTS
get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_zone_counts_fn);
# endif /* HAVE_GET_ZONE_COUNTS */

unsigned long
spl_global_page_state(int item)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	if (item == NR_FREE_PAGES) {
		get_zone_counts(&active, &inactive, &free);
		return free;
	}

	if (item == NR_INACTIVE) {
		get_zone_counts(&active, &inactive, &free);
		return inactive;
	}

	if (item == NR_ACTIVE) {
		get_zone_counts(&active, &inactive, &free);
		return active;
	}

# ifdef HAVE_GLOBAL_PAGE_STATE
	return global_page_state((enum zone_stat_item)item);
# else
	return 0; /* Unsupported */
# endif /* HAVE_GLOBAL_PAGE_STATE */
}
EXPORT_SYMBOL(spl_global_page_state);
#endif /* HAVE_ZONE_STAT_ITEM_FIA */
pgcnt_t
spl_kmem_availrmem(void)
{
	/* The amount of easily available memory */
	return (spl_global_page_state(NR_FREE_PAGES) +
	        spl_global_page_state(NR_INACTIVE));
}
EXPORT_SYMBOL(spl_kmem_availrmem);
size_t
vmem_size(vmem_t *vmp, int typemask)
{
	struct vmalloc_info vmi;
	size_t size = 0;

	ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE));

	get_vmalloc_info(&vmi);
	if (typemask & VMEM_ALLOC)
		size += (size_t)vmi.used;

	if (typemask & VMEM_FREE)
		size += (size_t)(VMALLOC_TOTAL - vmi.used);

	return size;
}
EXPORT_SYMBOL(vmem_size);
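
/*
 * Illustrative usage sketch (not part of the original source): a caller
 * wanting a rough picture of memory pressure can combine the helpers
 * above.  The vmp argument is ignored by this implementation, and the
 * local variable names below are hypothetical.
 *
 *	size_t vmalloc_used = vmem_size(NULL, VMEM_ALLOC);
 *	size_t vmalloc_free = vmem_size(NULL, VMEM_FREE);
 *	pgcnt_t easy_pages  = spl_kmem_availrmem();
 *
 *	if (easy_pages < desfree)
 *		;	// below the async page reclamation threshold
 */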
/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * all allocations will be tracked when they are allocated and
 * freed.  When the SPL module is unloaded a list of all leaked
 * addresses and where they were allocated will be dumped to the
 * console.  Enabling this feature has a significant impact on
 * performance but it makes finding memory leaks straightforward.
 */
#ifdef DEBUG_KMEM
/* Shim layer memory accounting */
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
int kmem_warning_flag = 1;

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
EXPORT_SYMBOL(kmem_warning_flag);
# ifdef DEBUG_KMEM_TRACKING

/* XXX - Not surprisingly, with debugging enabled the xmem_locks are very
 * highly contended particularly on xfree().  If we want to run with this
 * detailed debugging enabled for anything other than debugging we need to
 * minimize the contention by moving to a lock per xmem_table entry model.
 */
# define KMEM_HASH_BITS		10
# define KMEM_TABLE_SIZE	(1 << KMEM_HASH_BITS)

# define VMEM_HASH_BITS		10
# define VMEM_TABLE_SIZE	(1 << VMEM_HASH_BITS)
typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;
spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;
EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);
# endif /* DEBUG_KMEM_TRACKING */

int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
#else
int kmem_set_warning(int flag) { return 0; }
#endif /* DEBUG_KMEM */
EXPORT_SYMBOL(kmem_set_warning);
/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 *
 * XXX: Improve the partial slab list by carefully maintaining a
 *      strict ordering of fullest to emptiest slabs based on
 *      the slab reference count.  This guarantees that when freeing
 *      slabs back to the system we need only linearly traverse the
 *      last N slabs in the list to discover all the freeable slabs.
 *
 * XXX: NUMA awareness for optionally allocating memory close to a
 *      particular core.  This can be advantageous if you know the slab
 *      object will be short lived and primarily accessed from one core.
 *
 * XXX: Slab coloring may also yield performance improvements and would
 *      be desirable to implement.
 */
struct list_head spl_kmem_cache_list;	/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;	/* Cache list lock */

static int spl_cache_flush(spl_kmem_cache_t *skc,
                           spl_kmem_magazine_t *skm, int flush);

#ifdef HAVE_SET_SHRINKER
static struct shrinker *spl_kmem_cache_shrinker;
#else
static int spl_kmem_cache_generic_shrinker(int nr_to_scan,
                                           unsigned int gfp_mask);
static struct shrinker spl_kmem_cache_shrinker = {
	.shrink = spl_kmem_cache_generic_shrinker,
	.seeks = KMC_DEFAULT_SEEKS,
};
#endif

#ifdef DEBUG_KMEM
# ifdef DEBUG_KMEM_TRACKING

static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
              void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kmem_debug *p;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	head = &table[hash_ptr(addr, bits)];
	hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
		if (p->kd_addr == addr) {
			hlist_del_init(&p->kd_hlist);
			list_del_init(&p->kd_list);
			spin_unlock_irqrestore(lock, flags);
			return p;
		}
	}

	spin_unlock_irqrestore(lock, flags);

	return NULL;
}
void *
kmem_alloc_track(size_t size, int flags, const char *func, int line,
                 int node_alloc, int node)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;

	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
	                                flags & ~__GFP_ZERO);
	if (dptr == NULL) {
		CWARN("kmem_alloc(%ld, 0x%x) debug failed\n",
		      sizeof(kmem_debug_t), flags);
		return NULL;
	}

	/* Marked unlikely because we should never be doing this,
	 * we tolerate up to 2 pages but a single page is best. */
	if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
		CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
		      (unsigned long long) size, flags,
		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);

	/* We use kstrdup() below because the string pointed to by
	 * __FUNCTION__ might not be available by the time we want
	 * to print it since the module might have been unloaded. */
	dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
	if (unlikely(dptr->kd_func == NULL)) {
		kfree(dptr);
		CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) "
		      "(%lld/%llu)\n", (unsigned long long) size, flags,
		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);
		return NULL;
	}

	/* Use the correct allocator */
	if (node_alloc) {
		ASSERT(!(flags & __GFP_ZERO));
		ptr = kmalloc_node(size, flags, node);
	} else if (flags & __GFP_ZERO) {
		ptr = kzalloc(size, flags & ~__GFP_ZERO);
	} else {
		ptr = kmalloc(size, flags);
	}

	if (unlikely(ptr == NULL)) {
		kfree(dptr->kd_func);
		kfree(dptr);
		CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
		      (unsigned long long) size, flags,
		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);
		return NULL;
	}

	atomic64_add(size, &kmem_alloc_used);
	if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max))
		kmem_alloc_max = atomic64_read(&kmem_alloc_used);

	INIT_HLIST_NODE(&dptr->kd_hlist);
	INIT_LIST_HEAD(&dptr->kd_list);

	dptr->kd_addr = ptr;
	dptr->kd_size = size;
	dptr->kd_line = line;

	spin_lock_irqsave(&kmem_lock, irq_flags);
	hlist_add_head_rcu(&dptr->kd_hlist,
	                   &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
	list_add_tail(&dptr->kd_list, &kmem_list);
	spin_unlock_irqrestore(&kmem_lock, irq_flags);

	CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
	             "(%lld/%llu)\n", (unsigned long long) size, flags,
	             ptr, atomic64_read(&kmem_alloc_used),
	             kmem_alloc_max);

	return ptr;
}
EXPORT_SYMBOL(kmem_alloc_track);
void
kmem_free_track(void *ptr, size_t size)
{
	kmem_debug_t *dptr;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	        (unsigned long long) size);

	dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);

	ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	        "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	        (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	atomic64_sub(size, &kmem_alloc_used);

	CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	             (unsigned long long) size, atomic64_read(&kmem_alloc_used),
	             kmem_alloc_max);

	kfree(dptr->kd_func);

	memset(dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset(ptr, 0x5a, size);
	kfree(ptr);
}
EXPORT_SYMBOL(kmem_free_track);
void *
vmem_alloc_track(size_t size, int flags, const char *func, int line)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;

	ASSERT(flags & KM_SLEEP);

	dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
	if (dptr == NULL) {
		CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
		      sizeof(kmem_debug_t), flags);
		return NULL;
	}

	/* We use kstrdup() below because the string pointed to by
	 * __FUNCTION__ might not be available by the time we want
	 * to print it, since the module might have been unloaded. */
	dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
	if (unlikely(dptr->kd_func == NULL)) {
		kfree(dptr);
		CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) "
		      "(%lld/%llu)\n", (unsigned long long) size, flags,
		      atomic64_read(&vmem_alloc_used), vmem_alloc_max);
		return NULL;
	}

	ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
	                PAGE_KERNEL);

	if (unlikely(ptr == NULL)) {
		kfree(dptr->kd_func);
		kfree(dptr);
		CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
		      (unsigned long long) size, flags,
		      atomic64_read(&vmem_alloc_used), vmem_alloc_max);
		return NULL;
	}

	if (flags & __GFP_ZERO)
		memset(ptr, 0, size);

	atomic64_add(size, &vmem_alloc_used);
	if (unlikely(atomic64_read(&vmem_alloc_used) > vmem_alloc_max))
		vmem_alloc_max = atomic64_read(&vmem_alloc_used);

	INIT_HLIST_NODE(&dptr->kd_hlist);
	INIT_LIST_HEAD(&dptr->kd_list);

	dptr->kd_addr = ptr;
	dptr->kd_size = size;
	dptr->kd_line = line;

	spin_lock_irqsave(&vmem_lock, irq_flags);
	hlist_add_head_rcu(&dptr->kd_hlist,
	                   &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
	list_add_tail(&dptr->kd_list, &vmem_list);
	spin_unlock_irqrestore(&vmem_lock, irq_flags);

	CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
	             "(%lld/%llu)\n", (unsigned long long) size, flags,
	             ptr, atomic64_read(&vmem_alloc_used),
	             vmem_alloc_max);

	return ptr;
}
EXPORT_SYMBOL(vmem_alloc_track);
void
vmem_free_track(void *ptr, size_t size)
{
	kmem_debug_t *dptr;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	        (unsigned long long) size);

	dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
	ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	        "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	        (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	atomic64_sub(size, &vmem_alloc_used);
	CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	             (unsigned long long) size, atomic64_read(&vmem_alloc_used),
	             vmem_alloc_max);

	kfree(dptr->kd_func);

	memset(dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset(ptr, 0x5a, size);
	vfree(ptr);
}
EXPORT_SYMBOL(vmem_free_track);
# else /* DEBUG_KMEM_TRACKING */

void *
kmem_alloc_debug(size_t size, int flags, const char *func, int line,
                 int node_alloc, int node)
{
	void *ptr;

	/* Marked unlikely because we should never be doing this,
	 * we tolerate up to 2 pages but a single page is best. */
	if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
		CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
		      (unsigned long long) size, flags,
		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);

	/* Use the correct allocator */
	if (node_alloc) {
		ASSERT(!(flags & __GFP_ZERO));
		ptr = kmalloc_node(size, flags, node);
	} else if (flags & __GFP_ZERO) {
		ptr = kzalloc(size, flags & (~__GFP_ZERO));
	} else {
		ptr = kmalloc(size, flags);
	}

	if (ptr == NULL) {
		CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
		      (unsigned long long) size, flags,
		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);
	} else {
		atomic64_add(size, &kmem_alloc_used);
		if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max))
			kmem_alloc_max = atomic64_read(&kmem_alloc_used);

		CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
		             "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
		             atomic64_read(&kmem_alloc_used), kmem_alloc_max);
	}

	return ptr;
}
EXPORT_SYMBOL(kmem_alloc_debug);
void
kmem_free_debug(void *ptr, size_t size)
{
	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	        (unsigned long long) size);

	atomic64_sub(size, &kmem_alloc_used);

	CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	             (unsigned long long) size, atomic64_read(&kmem_alloc_used),
	             kmem_alloc_max);

	memset(ptr, 0x5a, size);
	kfree(ptr);
}
EXPORT_SYMBOL(kmem_free_debug);
void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
	void *ptr;

	ASSERT(flags & KM_SLEEP);

	ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
	                PAGE_KERNEL);
	if (ptr == NULL) {
		CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
		      (unsigned long long) size, flags,
		      atomic64_read(&vmem_alloc_used), vmem_alloc_max);
	} else {
		if (flags & __GFP_ZERO)
			memset(ptr, 0, size);

		atomic64_add(size, &vmem_alloc_used);

		if (unlikely(atomic64_read(&vmem_alloc_used) > vmem_alloc_max))
			vmem_alloc_max = atomic64_read(&vmem_alloc_used);

		CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
		             "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
		             atomic64_read(&vmem_alloc_used), vmem_alloc_max);
	}

	return ptr;
}
EXPORT_SYMBOL(vmem_alloc_debug);
void
vmem_free_debug(void *ptr, size_t size)
{
	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	        (unsigned long long) size);

	atomic64_sub(size, &vmem_alloc_used);

	CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	             (unsigned long long) size, atomic64_read(&vmem_alloc_used),
	             vmem_alloc_max);

	memset(ptr, 0x5a, size);
	vfree(ptr);
}
EXPORT_SYMBOL(vmem_free_debug);

# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	void *ptr;

	if (skc->skc_flags & KMC_KMEM) {
		if (size > (2 * PAGE_SIZE)) {
			ptr = (void *)__get_free_pages(flags, get_order(size));
		} else {
			ptr = kmem_alloc(size, flags);
		}
	} else {
		ptr = vmem_alloc(size, flags);
	}

	return ptr;
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	if (skc->skc_flags & KMC_KMEM) {
		if (size > (2 * PAGE_SIZE))
			free_pages((unsigned long)ptr, get_order(size));
		else
			kmem_free(ptr, size);
	} else {
		vmem_free(ptr, size);
	}
}
/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects in to one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide it ourselves.  Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 *	KMC_ONSLAB                     KMC_OFFSLAB
 *
 *	+------------------------+     +-----------------+
 *	| spl_kmem_slab_t --+-+  |     | spl_kmem_slab_t |---+-+
 *	| skc_obj_size    <-+ |  |     +-----------------+   | |
 *	| spl_kmem_obj_t      |  |                           | |
 *	| skc_obj_size    <---+  |     +-----------------+   | |
 *	| spl_kmem_obj_t      |  |     | skc_obj_size    | <-+ |
 *	| ...               v |  |     | spl_kmem_obj_t  |     |
 *	+------------------------+     +-----------------+     v
 */
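
/*
 * Illustrative sketch (not part of the original source): for the
 * KMC_ONSLAB layout above, the i-th object and its trailing
 * spl_kmem_obj_t are located with simple offset arithmetic; local
 * names other than the skc_* fields are hypothetical.
 *
 *	align    = skc->skc_obj_align;
 *	obj_size = P2ROUNDUP(skc->skc_obj_size, align) +
 *	           P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
 *	obj      = base + P2ROUNDUP(sizeof(spl_kmem_slab_t), align) +
 *	           (i * obj_size);
 *	sko      = obj + P2ROUNDUP(skc->skc_obj_size, align);
 */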
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	spl_kmem_obj_t *sko, *n;
	void *base, *obj;
	int i, align, size, rc = 0;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		return NULL;

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;

	align = skc->skc_obj_align;
	size = P2ROUNDUP(skc->skc_obj_size, align) +
	       P2ROUNDUP(sizeof(spl_kmem_obj_t), align);

	for (i = 0; i < sks->sks_objs; i++) {
		if (skc->skc_flags & KMC_OFFSLAB) {
			obj = kv_alloc(skc, size, flags);
			if (!obj)
				GOTO(out, rc = -ENOMEM);
		} else {
			obj = base +
			      P2ROUNDUP(sizeof(spl_kmem_slab_t), align) +
			      (i * size);
		}

		sko = obj + P2ROUNDUP(skc->skc_obj_size, align);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	list_for_each_entry(sko, &sks->sks_free_list, sko_list)
		if (skc->skc_ctor)
			skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
out:
	if (rc) {
		if (skc->skc_flags & KMC_OFFSLAB)
			list_for_each_entry_safe(sko, n, &sks->sks_free_list,
			                         sko_list)
				kv_free(skc, sko->sko_addr, size);

		kv_free(skc, base, skc->skc_slab_size);
		sks = NULL;
	}

	return sks;
}
/*
 * Remove a slab from complete or partial list, it must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
              struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects in to the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);
}
/*
 * Traverses all the partial slabs attached to a cache and free those
 * which are currently empty, and have not been touched for
 * skc_delay seconds to avoid thrashing.  The count argument is
 * passed to optionally cap the number of slabs reclaimed, a count
 * of zero means try and reclaim everything.  When flag is set we
 * always free an available slab regardless of age.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
{
	spl_kmem_slab_t *sks, *m;
	spl_kmem_obj_t *sko, *n;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);
	int size = 0, i = 0;

	/*
	 * Move empty slabs and objects which have not been touched in
	 * skc_delay seconds on to private lists to be freed outside
	 * the spin lock.  This delay time is important to avoid thrashing
	 * however when flag is set the delay will not be used.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list, sks_list) {
		/*
		 * All empty slabs are at the end of skc->skc_partial_list,
		 * therefore once a non-empty slab is found we can stop
		 * scanning.  Additionally, stop when reaching the target
		 * reclaim 'count' if a non-zero threshold is given.
		 */
		if ((sks->sks_ref > 0) || (count && i > count))
			break;

		if (time_after(jiffies, sks->sks_age + skc->skc_delay * HZ) || flag) {
			spl_slab_free(sks, &sks_list, &sko_list);
			i++;
		}
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are
	 * run, any offslab objects are freed, and the slabs themselves
	 * are freed.  This is all done outside the skc->skc_lock since
	 * this allows the destructor to sleep, and allows us to perform
	 * a conditional reschedule when freeing a large number of
	 * objects and slabs back to the system.
	 */
	if (skc->skc_flags & KMC_OFFSLAB)
		size = P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) +
		       P2ROUNDUP(sizeof(spl_kmem_obj_t), skc->skc_obj_align);

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);

		if (skc->skc_dtor)
			skc->skc_dtor(sko->sko_addr, skc->skc_private);

		if (skc->skc_flags & KMC_OFFSLAB)
			kv_free(skc, sko->sko_addr, size);

		cond_resched();
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);

		cond_resched();
	}
}
/*
 * Called regularly on all caches to age objects out of the magazines
 * which have not been accessed in skc->skc_delay seconds.  This prevents
 * idle magazines from holding memory which might be better used by
 * other caches or parts of the system.  The delay is present to
 * prevent thrashing the magazine.
 */
static void
spl_magazine_age(void *data)
{
	spl_kmem_magazine_t *skm =
		spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
	spl_kmem_cache_t *skc = skm->skm_cache;
	int i = smp_processor_id();

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_mag[i] == skm);

	if (skm->skm_avail > 0 &&
	    time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
		(void)spl_cache_flush(skc, skm, skm->skm_refill);

	if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
		schedule_delayed_work_on(i, &skm->skm_work,
		                         skc->skc_delay / 3 * HZ);
}
/*
 * Called regularly to keep a downward pressure on the size of idle
 * magazines and to release free slabs from the cache.  This function
 * never calls the registered reclaim function, that only occurs
 * under memory pressure or with a direct call to spl_kmem_reap().
 */
static void
spl_cache_age(void *data)
{
	spl_kmem_cache_t *skc =
		spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	spl_slab_reclaim(skc, skc->skc_reap, 0);

	if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
		schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
}
/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.  Also for
 * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN,
 * lower than this and we will fail.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	int sks_size, obj_size, max_size, align;

	if (skc->skc_flags & KMC_OFFSLAB) {
		*objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
		*size = sizeof(spl_kmem_slab_t);
	} else {
		align = skc->skc_obj_align;
		sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align);
		obj_size = P2ROUNDUP(skc->skc_obj_size, align) +
		           P2ROUNDUP(sizeof(spl_kmem_obj_t), align);

		if (skc->skc_flags & KMC_KMEM)
			max_size = ((uint64_t)1 << (MAX_ORDER - 1)) * PAGE_SIZE;
		else
			max_size = (32 * 1024 * 1024);

		for (*size = PAGE_SIZE; *size <= max_size; *size += PAGE_SIZE) {
			*objs = (*size - sks_size) / obj_size;
			if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
				return 0;
		}

		/*
		 * Unable to satisfy target objects per slab, fallback to
		 * allocating a maximally sized slab and assuming it can
		 * contain the minimum objects count use it.  If not fail.
		 */
		*size = max_size;
		*objs = (*size - sks_size) / obj_size;
		if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
			return 0;

		return -ENOSPC;
	}

	return 0;
}
/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	int size, align = skc->skc_obj_align;

	/* Per-magazine sizes below assume a 4KiB page size */
	if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 256))
		size = 4;	/* Minimum 4MiB per-magazine */
	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 32))
		size = 16;	/* Minimum 2MiB per-magazine */
	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE))
		size = 64;	/* Minimum 256KiB per-magazine */
	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE / 4))
		size = 128;	/* Minimum 128KiB per-magazine */
	else
		size = 256;

	return size;
}
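
/*
 * Worked example (illustration only, for a hypothetical cache): with
 * 4KiB pages, an object whose aligned size is 160KiB exceeds the
 * PAGE_SIZE * 32 (128KiB) threshold above but not PAGE_SIZE * 256, so
 * each per-cpu magazine caches 16 objects, roughly 2.5MiB per magazine.
 */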
/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof(spl_kmem_magazine_t) +
	           sizeof(void *) * skc->skc_mag_size;

	skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
		skm->skm_age = jiffies;
	}

	return skm;
}
/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	int size = sizeof(spl_kmem_magazine_t) +
	           sizeof(void *) * skm->skm_size;

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);

	kmem_free(skm, size);
}
/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i;

	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_online_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i));
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			return -ENOMEM;
		}
	}

	/* Only after everything is allocated schedule magazine work */
	for_each_online_cpu(i)
		schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
		                         skc->skc_delay / 3 * HZ);

	return 0;
}
/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i;

	for_each_online_cpu(i) {
		skm = skc->skc_mag[i];
		(void)spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}
}
/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused must be NULL
 * flags
 *	KMC_NOTOUCH	Disable cache object aging (unsupported)
 *	KMC_NODEBUG	Disable debugging (unsupported)
 *	KMC_NOMAGAZINE	Disable magazine (unsupported)
 *	KMC_NOHASH	Disable hashing (unsupported)
 *	KMC_QCACHE	Disable qcache (unsupported)
 *	KMC_KMEM	Force kmem backed cache
 *	KMC_VMEM	Force vmem backed cache
 *	KMC_OFFSLAB	Locate objects off the slab
 */
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
                      spl_kmem_ctor_t ctor,
                      spl_kmem_dtor_t dtor,
                      spl_kmem_reclaim_t reclaim,
                      void *priv, void *vmp, int flags)
{
	spl_kmem_cache_t *skc;
	int rc, kmem_flags = KM_SLEEP;

	ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
	ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
	ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
	ASSERT(vmp == NULL);

	/* We may be called when there is a non-zero preempt_count or
	 * interrupts are disabled in which case we must not sleep.
	 */
	if (current_thread_info()->preempt_count || irqs_disabled())
		kmem_flags = KM_NOSLEEP;

	/* Allocate new cache memory and initialize. */
	skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);
	if (skc == NULL)
		return NULL;

	skc->skc_magic = SKC_MAGIC;
	skc->skc_name_size = strlen(name) + 1;
	skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
	if (skc->skc_name == NULL) {
		kmem_free(skc, sizeof(*skc));
		return NULL;
	}
	strncpy(skc->skc_name, name, skc->skc_name_size);

	skc->skc_ctor = ctor;
	skc->skc_dtor = dtor;
	skc->skc_reclaim = reclaim;
	skc->skc_private = priv;
	skc->skc_flags = flags;
	skc->skc_obj_size = size;
	skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
	skc->skc_delay = SPL_KMEM_CACHE_DELAY;
	skc->skc_reap = SPL_KMEM_CACHE_REAP;
	atomic_set(&skc->skc_ref, 0);

	INIT_LIST_HEAD(&skc->skc_list);
	INIT_LIST_HEAD(&skc->skc_complete_list);
	INIT_LIST_HEAD(&skc->skc_partial_list);
	spin_lock_init(&skc->skc_lock);
	skc->skc_slab_fail = 0;
	skc->skc_slab_create = 0;
	skc->skc_slab_destroy = 0;
	skc->skc_slab_total = 0;
	skc->skc_slab_alloc = 0;
	skc->skc_slab_max = 0;
	skc->skc_obj_total = 0;
	skc->skc_obj_alloc = 0;
	skc->skc_obj_max = 0;

	if (align) {
		ASSERT((align & (align - 1)) == 0);	/* Power of two */
		ASSERT(align >= SPL_KMEM_CACHE_ALIGN);	/* Minimum size */
		skc->skc_obj_align = align;
	}

	/* If none passed select a cache type based on object size */
	if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
		if (P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) <
		    (PAGE_SIZE / 8))
			skc->skc_flags |= KMC_KMEM;
		else
			skc->skc_flags |= KMC_VMEM;
	}

	rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
	if (rc)
		GOTO(out, rc);

	rc = spl_magazine_create(skc);
	if (rc)
		GOTO(out, rc);

	spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
	schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);

	down_write(&spl_kmem_cache_sem);
	list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
	up_write(&spl_kmem_cache_sem);

	return skc;
out:
	kmem_free(skc->skc_name, skc->skc_name_size);
	kmem_free(skc, sizeof(*skc));
	return NULL;
}
EXPORT_SYMBOL(spl_kmem_cache_create);
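
/*
 * Illustrative usage sketch (not part of the original source): a typical
 * consumer creates a cache once, then allocates and frees objects from
 * it.  The object type and variable names below are hypothetical, and a
 * NULL ctor/dtor/reclaim simply skips those callbacks.
 *
 *	spl_kmem_cache_t *cache;
 *	void *obj;
 *
 *	cache = spl_kmem_cache_create("my_obj_cache", sizeof(my_obj_t), 0,
 *	                              NULL, NULL, NULL, NULL, NULL, 0);
 *	obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	// ... use obj ...
 *	spl_kmem_cache_free(cache, obj);
 *	spl_kmem_cache_destroy(cache);
 */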
/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
	DECLARE_WAIT_QUEUE_HEAD(wq);
	int i;

	ASSERT(skc->skc_magic == SKC_MAGIC);

	down_write(&spl_kmem_cache_sem);
	list_del_init(&skc->skc_list);
	up_write(&spl_kmem_cache_sem);

	/* Cancel and wait for any pending delayed work */
	ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	cancel_delayed_work(&skc->skc_work);
	for_each_online_cpu(i)
		cancel_delayed_work(&skc->skc_mag[i]->skm_work);

	flush_scheduled_work();

	/* Wait until all current callers complete, this is mainly
	 * to catch the case where a low memory situation triggers a
	 * cache reaping action which races with this destroy. */
	wait_event(wq, atomic_read(&skc->skc_ref) == 0);

	spl_magazine_destroy(skc);
	spl_slab_reclaim(skc, 0, 1);
	spin_lock(&skc->skc_lock);

	/* Validate there are no objects in use and free all the
	 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
	ASSERT3U(skc->skc_slab_alloc, ==, 0);
	ASSERT3U(skc->skc_obj_alloc, ==, 0);
	ASSERT3U(skc->skc_slab_total, ==, 0);
	ASSERT3U(skc->skc_obj_total, ==, 0);
	ASSERT(list_empty(&skc->skc_complete_list));

	kmem_free(skc->skc_name, skc->skc_name_size);
	spin_unlock(&skc->skc_lock);

	kmem_free(skc, sizeof(*skc));
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);
/*
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
	spl_kmem_obj_t *sko;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
	ASSERT(sko->sko_magic == SKO_MAGIC);
	ASSERT(sko->sko_addr != NULL);

	/* Remove from sks_free_list */
	list_del_init(&sko->sko_list);

	sks->sks_age = jiffies;
	sks->sks_ref++;
	skc->skc_obj_alloc++;

	/* Track max obj usage statistics */
	if (skc->skc_obj_alloc > skc->skc_obj_max)
		skc->skc_obj_max = skc->skc_obj_alloc;

	/* Track max slab usage statistics */
	if (sks->sks_ref == 1) {
		skc->skc_slab_alloc++;

		if (skc->skc_slab_alloc > skc->skc_slab_max)
			skc->skc_slab_max = skc->skc_slab_alloc;
	}

	return sko->sko_addr;
}
/*
 * No available objects on any slabs, create a new slab.  Since this
 * is an expensive operation we do it without holding the spinlock and
 * only briefly acquire it when we link in the fully allocated and
 * constructed slab.
 */
static spl_kmem_slab_t *
spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	local_irq_enable();

	/*
	 * Before allocating a new slab check if the slab is being reaped.
	 * If it is there is a good chance we can wait until it finishes
	 * and then use one of the newly freed but not aged-out slabs.
	 */
	if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
		GOTO(out, sks = NULL);
	}

	/* Allocate a new slab for the cache */
	sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
	if (sks == NULL)
		GOTO(out, sks = NULL);

	/* Link the new empty slab in to the end of skc_partial_list. */
	spin_lock(&skc->skc_lock);
	skc->skc_slab_total++;
	skc->skc_obj_total += sks->sks_objs;
	list_add_tail(&sks->sks_list, &skc->skc_partial_list);
	spin_unlock(&skc->skc_lock);
out:
	local_irq_disable();

	return sks;
}
/*
 * Refill a per-cpu magazine with objects from the slabs for this
 * cache.  Ideally the magazine can be repopulated using existing
 * objects which have been released, however if we are unable to
 * locate enough free objects new slabs of objects will be created.
 */
static int
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
	spl_kmem_slab_t *sks;
	int rc = 0, refill;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
	spin_lock(&skc->skc_lock);

	while (refill > 0) {
		/* No slabs available we may need to grow the cache */
		if (list_empty(&skc->skc_partial_list)) {
			spin_unlock(&skc->skc_lock);

			sks = spl_cache_grow(skc, flags);
			if (!sks)
				GOTO(out, rc);

			/* Rescheduled to different CPU skm is not local */
			if (skm != skc->skc_mag[smp_processor_id()])
				GOTO(out, rc);

			/* Potentially rescheduled to the same CPU but
			 * allocations may have occurred from this CPU while
			 * we were sleeping so recalculate max refill. */
			refill = MIN(refill, skm->skm_size - skm->skm_avail);

			spin_lock(&skc->skc_lock);
			continue;
		}

		/* Grab the next available slab */
		sks = list_entry((&skc->skc_partial_list)->next,
		                 spl_kmem_slab_t, sks_list);
		ASSERT(sks->sks_magic == SKS_MAGIC);
		ASSERT(sks->sks_ref < sks->sks_objs);
		ASSERT(!list_empty(&sks->sks_free_list));

		/* Consume as many objects as needed to refill the requested
		 * cache.  We must also be careful not to overfill it. */
		while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
			ASSERT(skm->skm_avail < skm->skm_size);
			ASSERT(rc < skm->skm_size);
			skm->skm_objs[skm->skm_avail++] = spl_cache_obj(skc, sks);
		}

		/* Move slab to skc_complete_list when full */
		if (sks->sks_ref == sks->sks_objs) {
			list_del(&sks->sks_list);
			list_add(&sks->sks_list, &skc->skc_complete_list);
		}
	}

	spin_unlock(&skc->skc_lock);
out:
	/* Returns the number of entries added to cache */
	return rc;
}
/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_slab_t *sks = NULL;
	spl_kmem_obj_t *sko = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
	ASSERT(sko->sko_magic == SKO_MAGIC);

	sks = sko->sko_slab;
	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_cache == skc);
	list_add(&sko->sko_list, &sks->sks_free_list);

	sks->sks_age = jiffies;
	sks->sks_ref--;
	skc->skc_obj_alloc--;

	/* Move slab to skc_partial_list when no longer full.  Slabs
	 * are added to the head to keep the partial list in quasi-full
	 * sorted order.  Fuller at the head, emptier at the tail. */
	if (sks->sks_ref == (sks->sks_objs - 1)) {
		list_del(&sks->sks_list);
		list_add(&sks->sks_list, &skc->skc_partial_list);
	}

	/* Move empty slabs to the end of the partial list so
	 * they can be easily found and freed during reclamation. */
	if (sks->sks_ref == 0) {
		list_del(&sks->sks_list);
		list_add_tail(&sks->sks_list, &skc->skc_partial_list);
		skc->skc_slab_alloc--;
	}
}
/*
 * Release a batch of objects from a per-cpu magazine back to their
 * respective slabs.  This occurs when we exceed the magazine size,
 * are under memory pressure, when the cache is idle, or during
 * cache cleanup.  The flush argument contains the number of entries
 * to remove from the magazine.
 */
static int
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
	int i, count = MIN(flush, skm->skm_avail);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/*
	 * XXX: Currently we simply return objects from the magazine to
	 * the slabs in fifo order.  The ideal thing to do from a memory
	 * fragmentation standpoint is to cheaply determine the set of
	 * objects in the magazine which will result in the largest
	 * number of free slabs if released from the magazine.
	 */
	spin_lock(&skc->skc_lock);
	for (i = 0; i < count; i++)
		spl_cache_shrink(skc, skm->skm_objs[i]);

	skm->skm_avail -= count;
	memmove(skm->skm_objs, &(skm->skm_objs[count]),
	        sizeof(void *) * skm->skm_avail);

	spin_unlock(&skc->skc_lock);

	return count;
}
/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_magazine_t *skm;
	unsigned long irq_flags;
	void *obj = NULL;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	ASSERT(flags & KM_SLEEP);
	atomic_inc(&skc->skc_ref);
	local_irq_save(irq_flags);

restart:
	/* Safe to update per-cpu structure without lock, but
	 * in the restart case we must be careful to reacquire
	 * the local magazine since this may have changed
	 * when we need to grow the cache. */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
	        skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
	        skm->skm_size, skm->skm_refill, skm->skm_avail);

	if (likely(skm->skm_avail)) {
		/* Object available in CPU cache, use it */
		obj = skm->skm_objs[--skm->skm_avail];
		skm->skm_age = jiffies;
	} else {
		/* Per-CPU cache empty, directly allocate from
		 * the slab and refill the per-CPU cache. */
		(void)spl_cache_refill(skc, skm, flags);
		GOTO(restart, obj = NULL);
	}

	local_irq_restore(irq_flags);
	ASSERT(obj);
	ASSERT(((unsigned long)(obj) % skc->skc_obj_align) == 0);

	/* Pre-emptively migrate object to CPU L1 cache */
	prefetchw(obj);
	atomic_dec(&skc->skc_ref);

	return obj;
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);
/*
 * Free an object back to the local per-cpu magazine, there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entries from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	spl_kmem_magazine_t *skm;
	unsigned long flags;

	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
	atomic_inc(&skc->skc_ref);
	local_irq_save(flags);

	/* Safe to update per-cpu structure without lock, but because
	 * no remote memory allocation tracking is being performed
	 * it is entirely possible to allocate an object from one
	 * CPU cache and return it to another. */
	skm = skc->skc_mag[smp_processor_id()];
	ASSERT(skm->skm_magic == SKM_MAGIC);

	/* Per-CPU cache full, flush it to make space */
	if (unlikely(skm->skm_avail >= skm->skm_size))
		(void)spl_cache_flush(skc, skm, skm->skm_refill);

	/* Available space in cache, use it */
	skm->skm_objs[skm->skm_avail++] = obj;

	local_irq_restore(flags);
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_free);
/*
 * The generic shrinker function for all caches.  Under Linux a shrinker
 * may not be tightly coupled with a slab cache.  In fact Linux always
 * systematically tries calling all registered shrinker callbacks which
 * report that they contain unused objects.  Because of this we only
 * register one shrinker function in the shim layer for all slab caches.
 * We always attempt to shrink all caches when this generic shrinker
 * is called.  The shrinker should return the number of free objects
 * in the cache when called with nr_to_scan == 0 but not attempt to
 * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
 * objects should be freed, because Solaris semantics are to free
 * all available objects we may free more objects than requested.
 */
static int
spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
{
	spl_kmem_cache_t *skc;
	int unused = 0;

	down_read(&spl_kmem_cache_sem);
	list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
		if (nr_to_scan)
			spl_kmem_cache_reap_now(skc);

		/*
		 * Presume everything alloc'ed is reclaimable, this ensures
		 * we are called again with nr_to_scan > 0 so we can try and
		 * reclaim.  The exact number is not important either so
		 * we forgo taking this already highly contended lock.
		 */
		unused += skc->skc_obj_alloc;
	}
	up_read(&spl_kmem_cache_sem);

	return (unused * sysctl_vfs_cache_pressure) / 100;
}
/*
 * Call the registered reclaim function for a cache.  Depending on how
 * many and which objects are released it may simply repopulate the
 * local magazine which will then need to age-out.  Objects which cannot
 * fit in the magazine will be released back to their slabs which will
 * also need to age out before being released.  This is all just best
 * effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

	/* Prevent concurrent cache reaping when contended */
	if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
		return;

	atomic_inc(&skc->skc_ref);

	if (skc->skc_reclaim)
		skc->skc_reclaim(skc->skc_private);

	spl_slab_reclaim(skc, skc->skc_reap, 0);
	clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
	atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
	spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
}
EXPORT_SYMBOL(spl_kmem_reap);
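
/*
 * Illustrative sketch (not part of the original source): the shrinker
 * contract described above means the kernel first probes with
 * nr_to_scan == 0 and then asks for work in chunks; the loop below is
 * hypothetical and only shows the shape of that interaction.
 *
 *	int n = spl_kmem_cache_generic_shrinker(0, GFP_KERNEL);
 *	while (n > 0) {
 *		spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
 *		n -= KMC_REAP_CHUNK;
 *	}
 */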
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
static char *
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
{
	int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
	int i, flag = 1;

	ASSERT(str != NULL && len >= 17);
	memset(str, 0, len);

	/* Check for a fully printable string, and while we are at
	 * it place the printable characters in the passed buffer. */
	for (i = 0; i < size; i++) {
		str[i] = ((char *)(kd->kd_addr))[i];
		if (isprint(str[i])) {
			continue;
		} else {
			/* Minimum number of printable characters found
			 * to make it worthwhile to print this as ascii. */
			if (i > min)
				break;

			flag = 0;
			break;
		}
	}

	if (!flag) {
		sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
		        *((uint8_t *)kd->kd_addr),
		        *((uint8_t *)kd->kd_addr + 2),
		        *((uint8_t *)kd->kd_addr + 4),
		        *((uint8_t *)kd->kd_addr + 6),
		        *((uint8_t *)kd->kd_addr + 8),
		        *((uint8_t *)kd->kd_addr + 10),
		        *((uint8_t *)kd->kd_addr + 12),
		        *((uint8_t *)kd->kd_addr + 14));
	}

	return str;
}
static void
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
{
	int i;

	spin_lock_init(lock);
	INIT_LIST_HEAD(list);

	for (i = 0; i < size; i++)
		INIT_HLIST_HEAD(&kmem_table[i]);
}
static void
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
{
	unsigned long flags;
	kmem_debug_t *kd;
	char str[17];

	spin_lock_irqsave(lock, flags);
	if (!list_empty(list))
		printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
		       "size", "data", "func", "line");

	list_for_each_entry(kd, list, kd_list)
		printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
		       (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
		       kd->kd_func, kd->kd_line);

	spin_unlock_irqrestore(lock, flags);
}
#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
#define spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
static void
spl_kmem_init_globals(void)
{
	struct zone *zone;

	/* For now all zones are included, it may be wise to restrict
	 * this to normal and highmem zones if we see problems. */
	for_each_zone(zone) {

		if (!populated_zone(zone))
			continue;

		minfree += zone->pages_min;
		desfree += zone->pages_low;
		lotsfree += zone->pages_high;
	}

	/* Solaris default values */
	swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
	swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
}
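
/*
 * Worked example (illustration only): with 4KiB pages PAGE_SHIFT is 12,
 * so 2*1024*1024 >> PAGE_SHIFT is 512 pages (2MiB) and 4*1024*1024 >>
 * PAGE_SHIFT is 1024 pages (4MiB).  On a hypothetical 1GiB system
 * (physmem = 262144 pages) this gives swapfs_minfree = MAX(512, 32768)
 * = 32768 pages and swapfs_reserve = MIN(1024, 16384) = 1024 pages.
 */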
/*
 * Called at module init when it is safe to use spl_kallsyms_lookup_name()
 */
int
spl_kmem_init_kallsyms_lookup(void)
{
#ifndef HAVE_GET_VMALLOC_INFO
	get_vmalloc_info_fn = (get_vmalloc_info_t)
		spl_kallsyms_lookup_name("get_vmalloc_info");
	if (!get_vmalloc_info_fn) {
		printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n");
		return -EFAULT;
	}
#endif /* HAVE_GET_VMALLOC_INFO */

#ifndef HAVE_FIRST_ONLINE_PGDAT
	first_online_pgdat_fn = (first_online_pgdat_t)
		spl_kallsyms_lookup_name("first_online_pgdat");
	if (!first_online_pgdat_fn) {
		printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
		return -EFAULT;
	}
#endif /* HAVE_FIRST_ONLINE_PGDAT */

#ifndef HAVE_NEXT_ONLINE_PGDAT
	next_online_pgdat_fn = (next_online_pgdat_t)
		spl_kallsyms_lookup_name("next_online_pgdat");
	if (!next_online_pgdat_fn) {
		printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
		return -EFAULT;
	}
#endif /* HAVE_NEXT_ONLINE_PGDAT */

#ifndef HAVE_NEXT_ZONE
	next_zone_fn = (next_zone_t)
		spl_kallsyms_lookup_name("next_zone");
	if (!next_zone_fn) {
		printk(KERN_ERR "Error: Unknown symbol next_zone\n");
		return -EFAULT;
	}
#endif /* HAVE_NEXT_ZONE */

#ifndef HAVE_ZONE_STAT_ITEM_FIA
# ifndef HAVE_GET_ZONE_COUNTS
	get_zone_counts_fn = (get_zone_counts_t)
		spl_kallsyms_lookup_name("get_zone_counts");
	if (!get_zone_counts_fn) {
		printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
		return -EFAULT;
	}
# endif /* HAVE_GET_ZONE_COUNTS */
#endif /* HAVE_ZONE_STAT_ITEM_FIA */

	/*
	 * It is now safe to initialize the global tunings which rely on
	 * the use of the for_each_zone() macro.  This macro in turn
	 * depends on the *_pgdat symbols which are now available.
	 */
	spl_kmem_init_globals();

	return 0;
}
int
spl_kmem_init(void)
{
	int rc = 0;

	init_rwsem(&spl_kmem_cache_sem);
	INIT_LIST_HEAD(&spl_kmem_cache_list);

#ifdef HAVE_SET_SHRINKER
	spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
	                                       spl_kmem_cache_generic_shrinker);
	if (spl_kmem_cache_shrinker == NULL)
		RETURN(rc = -ENOMEM);
#else
	register_shrinker(&spl_kmem_cache_shrinker);
#endif

#ifdef DEBUG_KMEM
	atomic64_set(&kmem_alloc_used, 0);
	atomic64_set(&vmem_alloc_used, 0);

	spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
	spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
#endif /* DEBUG_KMEM */

	RETURN(rc);
}
void
spl_kmem_fini(void)
{
#ifdef DEBUG_KMEM
	/* Display all unreclaimed memory addresses, including the
	 * allocation size and the first few bytes of what's located
	 * at that address to aid in debugging.  Performance is not
	 * a serious concern here since it is module unload time. */
	if (atomic64_read(&kmem_alloc_used) != 0)
		CWARN("kmem leaked %ld/%ld bytes\n",
		      atomic64_read(&kmem_alloc_used), kmem_alloc_max);

	if (atomic64_read(&vmem_alloc_used) != 0)
		CWARN("vmem leaked %ld/%ld bytes\n",
		      atomic64_read(&vmem_alloc_used), vmem_alloc_max);

	spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
	spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */

#ifdef HAVE_SET_SHRINKER
	remove_shrinker(spl_kmem_cache_shrinker);
#else
	unregister_shrinker(&spl_kmem_cache_shrinker);
#endif
}