 * This file is part of the SPL: Solaris Porting Layer.
 *
 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
 * Produced at Lawrence Livermore National Laboratory
 *
 * Brian Behlendorf <behlendorf1@llnl.gov>,
 * Herb Wartens <wartens2@llnl.gov>,
 * Jim Garlick <garlick@llnl.gov>
 *
 * This is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#ifdef DEBUG_SUBSYSTEM
# undef DEBUG_SUBSYSTEM

#define DEBUG_SUBSYSTEM S_KMEM
 * The minimum amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_min
 * multiplied by the number of zones and is sized based on that.
EXPORT_SYMBOL(minfree);

 * The desired amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_low
 * multiplied by the number of zones and is sized based on that.
 * Assuming all zones are being used roughly equally, when we drop
 * below this threshold async page reclamation is triggered.
EXPORT_SYMBOL(desfree);

 * When above this amount of memory measured in pages the system is
 * determined to have enough free memory.  This is similar to Linux's
 * zone->pages_high multiplied by the number of zones and is sized based
 * on that.  Assuming all zones are being used roughly equally, when
 * async page reclamation reaches this threshold it stops.
EXPORT_SYMBOL(lotsfree);

/* Unused, always 0 in this implementation */
EXPORT_SYMBOL(needfree);

pgcnt_t swapfs_minfree = 0;
EXPORT_SYMBOL(swapfs_minfree);

pgcnt_t swapfs_reserve = 0;
EXPORT_SYMBOL(swapfs_reserve);
vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);
#ifndef HAVE_GET_VMALLOC_INFO
get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_vmalloc_info_fn);
#endif /* HAVE_GET_VMALLOC_INFO */

#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(first_online_pgdat_fn);
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_online_pgdat_fn);
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
next_zone_t next_zone_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_zone_fn);
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
EXPORT_SYMBOL(pgdat_list_addr);
# endif /* HAVE_PGDAT_LIST */

#endif /* HAVE_PGDAT_HELPERS */

#ifdef NEED_GET_ZONE_COUNTS
# ifndef HAVE_GET_ZONE_COUNTS
get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_zone_counts_fn);
# endif /* HAVE_GET_ZONE_COUNTS */
spl_global_page_state(spl_zone_stat_item_t item)
    unsigned long active;
    unsigned long inactive;

    get_zone_counts(&active, &inactive, &free);

    case SPL_NR_FREE_PAGES: return free;
    case SPL_NR_INACTIVE:   return inactive;
    case SPL_NR_ACTIVE:     return active;
    default:                ASSERT(0); /* Unsupported */
# ifdef HAVE_GLOBAL_PAGE_STATE
spl_global_page_state(spl_zone_stat_item_t item)
    unsigned long pages = 0;

    case SPL_NR_FREE_PAGES:
# ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
        pages += global_page_state(NR_FREE_PAGES);
    case SPL_NR_INACTIVE:
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
        pages += global_page_state(NR_INACTIVE);
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
        pages += global_page_state(NR_INACTIVE_ANON);
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
        pages += global_page_state(NR_INACTIVE_FILE);
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
        pages += global_page_state(NR_ACTIVE);
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
        pages += global_page_state(NR_ACTIVE_ANON);
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
        pages += global_page_state(NR_ACTIVE_FILE);
    ASSERT(0); /* Unsupported */
# error "Both global_page_state() and get_zone_counts() unavailable"
# endif /* HAVE_GLOBAL_PAGE_STATE */
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);
spl_kmem_availrmem(void)
    /* The amount of easily available memory */
    return (spl_global_page_state(SPL_NR_FREE_PAGES) +
        spl_global_page_state(SPL_NR_INACTIVE));
EXPORT_SYMBOL(spl_kmem_availrmem);
vmem_size(vmem_t *vmp, int typemask)
    struct vmalloc_info vmi;

    ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE));

    get_vmalloc_info(&vmi);
    if (typemask & VMEM_ALLOC)
        size += (size_t)vmi.used;

    if (typemask & VMEM_FREE)
        size += (size_t)(VMALLOC_TOTAL - vmi.used);
EXPORT_SYMBOL(vmem_size);
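/*
 * Illustrative note (not from the original source): when typemask includes
 * both VMEM_ALLOC and VMEM_FREE the two terms above sum to VMALLOC_TOTAL,
 * so a call such as vmem_size(NULL, VMEM_ALLOC | VMEM_FREE) reports the
 * size of the entire vmalloc address range, while VMEM_ALLOC alone reports
 * only the portion currently in use according to get_vmalloc_info().
 */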
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * the SPL will keep track of the total memory allocated, and
 * report any memory leaked when the module is unloaded.
/* Shim layer memory accounting */
# ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;

int kmem_warning_flag = 1;

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);
EXPORT_SYMBOL(kmem_warning_flag);
/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straightforward.
 *
 * Not surprisingly with debugging enabled the xmem_locks are very highly
 * contended particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
# ifdef DEBUG_KMEM_TRACKING

# define KMEM_HASH_BITS         10
# define KMEM_TABLE_SIZE        (1 << KMEM_HASH_BITS)

# define VMEM_HASH_BITS         10
# define VMEM_TABLE_SIZE        (1 << VMEM_HASH_BITS)
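/*
 * Illustrative note (not from the original source): with 10 hash bits each
 * tracking table above holds 1 << 10 = 1024 hlist bucket heads, and
 * hash_ptr(addr, bits) from <linux/hash.h> is used below to select the
 * bucket for a given allocation address.
 */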
typedef struct kmem_debug {
    struct hlist_node kd_hlist;     /* Hash node linkage */
    struct list_head kd_list;       /* List of all allocations */
    void *kd_addr;                  /* Allocation pointer */
    size_t kd_size;                 /* Allocation size */
    const char *kd_func;            /* Allocation function */
    int kd_line;                    /* Allocation line */

spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;

EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);

int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
int kmem_set_warning(int flag) { return 0; }
EXPORT_SYMBOL(kmem_set_warning);
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutex's, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 *
 * XXX: Improve the partial slab list by carefully maintaining a
 *      strict ordering of fullest to emptiest slabs based on
 *      the slab reference count.  This guarantees that when freeing
 *      slabs back to the system we need only linearly traverse the
 *      last N slabs in the list to discover all the freeable slabs.
 *
 * XXX: NUMA awareness for optionally allocating memory close to a
 *      particular core.  This can be advantageous if you know the slab
 *      object will be short lived and primarily accessed from one core.
 *
 * XXX: Slab coloring may also yield performance improvements and would
 *      be desirable to implement.
struct list_head spl_kmem_cache_list;   /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */

static int spl_cache_flush(spl_kmem_cache_t *skc,
    spl_kmem_magazine_t *skm, int flush);

#ifdef HAVE_SET_SHRINKER
static struct shrinker *spl_kmem_cache_shrinker;
static int spl_kmem_cache_generic_shrinker(int nr_to_scan,
    unsigned int gfp_mask);
static struct shrinker spl_kmem_cache_shrinker = {
    .shrink = spl_kmem_cache_generic_shrinker,
    .seeks = KMC_DEFAULT_SEEKS,
# ifdef DEBUG_KMEM_TRACKING

static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
    struct hlist_head *head;
    struct hlist_node *node;
    struct kmem_debug *p;

    spin_lock_irqsave(lock, flags);

    head = &table[hash_ptr(addr, bits)];
    hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
        if (p->kd_addr == addr) {
            hlist_del_init(&p->kd_hlist);
            list_del_init(&p->kd_list);
            spin_unlock_irqrestore(lock, flags);

    spin_unlock_irqrestore(lock, flags);
kmem_alloc_track(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
    unsigned long irq_flags;

    dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
        flags & ~__GFP_ZERO);

        CWARN("kmem_alloc(%ld, 0x%x) debug failed\n",
            sizeof(kmem_debug_t), flags);

    /* Marked unlikely because we should never be doing this,
     * we tolerate up to 2 pages but a single page is best. */
    if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
        CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
            (unsigned long long) size, flags,
            kmem_alloc_used_read(), kmem_alloc_max);

    /* We use kstrdup() below because the string pointed to by
     * __FUNCTION__ might not be available by the time we want
     * to print it since the module might have been unloaded. */
    dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
    if (unlikely(dptr->kd_func == NULL)) {
        CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) "
            "(%lld/%llu)\n", (unsigned long long) size, flags,
            kmem_alloc_used_read(), kmem_alloc_max);

    /* Use the correct allocator */
        ASSERT(!(flags & __GFP_ZERO));
        ptr = kmalloc_node_nofail(size, flags, node);
    } else if (flags & __GFP_ZERO) {
        ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
        ptr = kmalloc_nofail(size, flags);

    if (unlikely(ptr == NULL)) {
        kfree(dptr->kd_func);
        CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
            (unsigned long long) size, flags,
            kmem_alloc_used_read(), kmem_alloc_max);

    kmem_alloc_used_add(size);
    if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
        kmem_alloc_max = kmem_alloc_used_read();

    INIT_HLIST_NODE(&dptr->kd_hlist);
    INIT_LIST_HEAD(&dptr->kd_list);

    dptr->kd_size = size;
    dptr->kd_line = line;

    spin_lock_irqsave(&kmem_lock, irq_flags);
    hlist_add_head_rcu(&dptr->kd_hlist,
        &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
    list_add_tail(&dptr->kd_list, &kmem_list);
    spin_unlock_irqrestore(&kmem_lock, irq_flags);

    CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
        "(%lld/%llu)\n", (unsigned long long) size, flags,
        ptr, kmem_alloc_used_read(),
EXPORT_SYMBOL(kmem_alloc_track);
kmem_free_track(void *ptr, size_t size)
    ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
        (unsigned long long) size);

    dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);

    ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */

    /* Size must match */
    ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
        "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
        (unsigned long long) size, dptr->kd_func, dptr->kd_line);

    kmem_alloc_used_sub(size);
    CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
        (unsigned long long) size, kmem_alloc_used_read(),

    kfree(dptr->kd_func);

    memset(dptr, 0x5a, sizeof(kmem_debug_t));

    memset(ptr, 0x5a, size);
EXPORT_SYMBOL(kmem_free_track);
vmem_alloc_track(size_t size, int flags, const char *func, int line)
    unsigned long irq_flags;

    ASSERT(flags & KM_SLEEP);

    dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
        flags & ~__GFP_ZERO);

        CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
            sizeof(kmem_debug_t), flags);

    /* We use kstrdup() below because the string pointed to by
     * __FUNCTION__ might not be available by the time we want
     * to print it, since the module might have been unloaded. */
    dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
    if (unlikely(dptr->kd_func == NULL)) {
        CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) "
            "(%lld/%llu)\n", (unsigned long long) size, flags,
            vmem_alloc_used_read(), vmem_alloc_max);

    ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,

    if (unlikely(ptr == NULL)) {
        kfree(dptr->kd_func);
        CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
            (unsigned long long) size, flags,
            vmem_alloc_used_read(), vmem_alloc_max);

    if (flags & __GFP_ZERO)
        memset(ptr, 0, size);

    vmem_alloc_used_add(size);
    if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
        vmem_alloc_max = vmem_alloc_used_read();

    INIT_HLIST_NODE(&dptr->kd_hlist);
    INIT_LIST_HEAD(&dptr->kd_list);

    dptr->kd_size = size;
    dptr->kd_line = line;

    spin_lock_irqsave(&vmem_lock, irq_flags);
    hlist_add_head_rcu(&dptr->kd_hlist,
        &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
    list_add_tail(&dptr->kd_list, &vmem_list);
    spin_unlock_irqrestore(&vmem_lock, irq_flags);

    CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
        "(%lld/%llu)\n", (unsigned long long) size, flags,
        ptr, vmem_alloc_used_read(),
EXPORT_SYMBOL(vmem_alloc_track);
vmem_free_track(void *ptr, size_t size)
    ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
        (unsigned long long) size);

    dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
    ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */

    /* Size must match */
    ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
        "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
        (unsigned long long) size, dptr->kd_func, dptr->kd_line);

    vmem_alloc_used_sub(size);
    CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
        (unsigned long long) size, vmem_alloc_used_read(),

    kfree(dptr->kd_func);

    memset(dptr, 0x5a, sizeof(kmem_debug_t));

    memset(ptr, 0x5a, size);
EXPORT_SYMBOL(vmem_free_track);
# else /* DEBUG_KMEM_TRACKING */

kmem_alloc_debug(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
    /* Marked unlikely because we should never be doing this,
     * we tolerate up to 2 pages but a single page is best. */
    if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
        CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
            (unsigned long long) size, flags,
            kmem_alloc_used_read(), kmem_alloc_max);

    /* Use the correct allocator */
        ASSERT(!(flags & __GFP_ZERO));
        ptr = kmalloc_node_nofail(size, flags, node);
    } else if (flags & __GFP_ZERO) {
        ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
        ptr = kmalloc_nofail(size, flags);

        CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
            (unsigned long long) size, flags,
            kmem_alloc_used_read(), kmem_alloc_max);

    kmem_alloc_used_add(size);
    if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
        kmem_alloc_max = kmem_alloc_used_read();

    CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
        "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
        kmem_alloc_used_read(), kmem_alloc_max);
EXPORT_SYMBOL(kmem_alloc_debug);
kmem_free_debug(void *ptr, size_t size)
    ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
        (unsigned long long) size);

    kmem_alloc_used_sub(size);
    CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
        (unsigned long long) size, kmem_alloc_used_read(),

    memset(ptr, 0x5a, size);
EXPORT_SYMBOL(kmem_free_debug);
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
    ASSERT(flags & KM_SLEEP);

    ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,

        CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
            (unsigned long long) size, flags,
            vmem_alloc_used_read(), vmem_alloc_max);

    if (flags & __GFP_ZERO)
        memset(ptr, 0, size);

    vmem_alloc_used_add(size);
    if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
        vmem_alloc_max = vmem_alloc_used_read();

    CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
        "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
        vmem_alloc_used_read(), vmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_debug);
vmem_free_debug(void *ptr, size_t size)
    ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
        (unsigned long long) size);

    vmem_alloc_used_sub(size);
    CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
        (unsigned long long) size, vmem_alloc_used_read(),

    memset(ptr, 0x5a, size);
EXPORT_SYMBOL(vmem_free_debug);

# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
    if (skc->skc_flags & KMC_KMEM)
        ptr = (void *)__get_free_pages(flags, get_order(size));
        ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);

    /* Resulting allocated memory will be page aligned */
    ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
    ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

    if (skc->skc_flags & KMC_KMEM)
        free_pages((unsigned long)ptr, get_order(size));
 * Required space for each aligned sks.
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
    return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
        skc->skc_obj_align, uint32_t);

 * Required space for each aligned object.
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
    uint32_t align = skc->skc_obj_align;

    return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
        P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);

 * Look up the spl_kmem_obj_t for an object given that object.
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
    return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
        skc->skc_obj_align, uint32_t);

 * Required space for each offslab object taking into account alignment
 * restrictions and the power-of-two requirement of kv_alloc().
static inline uint32_t
spl_offslab_size(spl_kmem_cache_t *skc)
    return 1UL << (highbit(spl_obj_size(skc)) + 1);
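/*
 * Illustrative example (not from the original source): if spl_obj_size()
 * works out to, say, 96 bytes for a given cache, then highbit(96) == 7 and
 * the offslab allocation size above becomes 1UL << 8 = 256 bytes, which
 * satisfies the power-of-two requirement of kv_alloc().
 */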
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide it ourselves.  Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * KMC_ONSLAB                       KMC_OFFSLAB
 *
 * +------------------------+       +-----------------+
 * | spl_kmem_slab_t --+-+  |       | spl_kmem_slab_t |---+-+
 * | skc_obj_size    <-+ |  |       +-----------------+   | |
 * | spl_kmem_obj_t      |  |                             | |
 * | skc_obj_size    <---+  |       +-----------------+   | |
 * | spl_kmem_obj_t      |  |       | skc_obj_size    | <-+ |
 * | ...                 v  |       | spl_kmem_obj_t  |     |
 * +------------------------+       +-----------------+     v
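/*
 * Illustrative example (not from the original source): for a KMC_ONSLAB
 * cache the base allocation returned by kv_alloc() starts with the
 * spl_kmem_slab_t header, so object i lives at
 *
 *     obj = base + spl_sks_size(skc) + (i * spl_obj_size(skc));
 *
 * and its bookkeeping spl_kmem_obj_t sits immediately after the aligned
 * object, which is exactly what spl_sko_from_obj() computes.
 */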
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
    spl_kmem_slab_t *sks;
    spl_kmem_obj_t *sko, *n;
    uint32_t obj_size, offslab_size = 0;

    base = kv_alloc(skc, skc->skc_slab_size, flags);

    sks = (spl_kmem_slab_t *)base;
    sks->sks_magic = SKS_MAGIC;
    sks->sks_objs = skc->skc_slab_objs;
    sks->sks_age = jiffies;
    sks->sks_cache = skc;
    INIT_LIST_HEAD(&sks->sks_list);
    INIT_LIST_HEAD(&sks->sks_free_list);

    obj_size = spl_obj_size(skc);

    if (skc->skc_flags & KMC_OFFSLAB)
        offslab_size = spl_offslab_size(skc);

    for (i = 0; i < sks->sks_objs; i++) {
        if (skc->skc_flags & KMC_OFFSLAB) {
            obj = kv_alloc(skc, offslab_size, flags);
                GOTO(out, rc = -ENOMEM);
            obj = base + spl_sks_size(skc) + (i * obj_size);

        ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
        sko = spl_sko_from_obj(skc, obj);
        sko->sko_magic = SKO_MAGIC;
        INIT_LIST_HEAD(&sko->sko_list);
        list_add_tail(&sko->sko_list, &sks->sks_free_list);

    list_for_each_entry(sko, &sks->sks_free_list, sko_list)
        skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);

    if (skc->skc_flags & KMC_OFFSLAB)
        list_for_each_entry_safe(sko, n, &sks->sks_free_list,
            kv_free(skc, sko->sko_addr, offslab_size);

    kv_free(skc, base, skc->skc_slab_size);
 * Remove a slab from complete or partial list; it must be called with
 * the 'skc->skc_lock' held but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
    spl_kmem_cache_t *skc;

    ASSERT(sks->sks_magic == SKS_MAGIC);
    ASSERT(sks->sks_ref == 0);

    skc = sks->sks_cache;
    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(spin_is_locked(&skc->skc_lock));

     * Update slab/objects counters in the cache, then remove the
     * slab from the skc->skc_partial_list.  Finally add the slab
     * and all its objects into the private work lists where the
     * destructors will be called and the memory freed to the system.
    skc->skc_obj_total -= sks->sks_objs;
    skc->skc_slab_total--;
    list_del(&sks->sks_list);
    list_add(&sks->sks_list, sks_list);
    list_splice_init(&sks->sks_free_list, sko_list);
 * Traverses all the partial slabs attached to a cache and free those
 * which are currently empty, and have not been touched for
 * skc_delay seconds to avoid thrashing.  The count argument is
 * passed to optionally cap the number of slabs reclaimed, a count
 * of zero means try and reclaim everything.  When flag is set we
 * always free an available slab regardless of age.
spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
    spl_kmem_slab_t *sks, *m;
    spl_kmem_obj_t *sko, *n;

     * Move empty slabs and objects which have not been touched in
     * skc_delay seconds on to private lists to be freed outside
     * the spin lock.  This delay time is important to avoid thrashing
     * however when flag is set the delay will not be used.
    spin_lock(&skc->skc_lock);
    list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list, sks_list) {
         * All empty slabs are at the end of skc->skc_partial_list,
         * therefore once a non-empty slab is found we can stop
         * scanning.  Additionally, stop when reaching the target
         * reclaim 'count' if a non-zero threshold is given.
        if ((sks->sks_ref > 0) || (count && i > count))

        if (time_after(jiffies, sks->sks_age + skc->skc_delay * HZ) || flag) {
            spl_slab_free(sks, &sks_list, &sko_list);
    spin_unlock(&skc->skc_lock);

     * The following two loops ensure all the object destructors are
     * run, any offslab objects are freed, and the slabs themselves
     * are freed.  This is all done outside the skc->skc_lock since
     * this allows the destructor to sleep, and allows us to perform
     * a conditional reschedule when freeing a large number of
     * objects and slabs back to the system.
    if (skc->skc_flags & KMC_OFFSLAB)
        size = spl_offslab_size(skc);

    list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
        ASSERT(sko->sko_magic == SKO_MAGIC);

        skc->skc_dtor(sko->sko_addr, skc->skc_private);

        if (skc->skc_flags & KMC_OFFSLAB)
            kv_free(skc, sko->sko_addr, size);

    list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
        ASSERT(sks->sks_magic == SKS_MAGIC);
        kv_free(skc, sks, skc->skc_slab_size);
 * Called regularly on all caches to age objects out of the magazines
 * which have not been accessed in skc->skc_delay seconds.  This prevents
 * idle magazines from holding memory which might be better used by
 * other caches or parts of the system.  The delay is present to
 * prevent thrashing the magazine.
spl_magazine_age(void *data)
    spl_kmem_magazine_t *skm =
        spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
    spl_kmem_cache_t *skc = skm->skm_cache;
    int i = smp_processor_id();

    ASSERT(skm->skm_magic == SKM_MAGIC);
    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(skc->skc_mag[i] == skm);

    if (skm->skm_avail > 0 &&
        time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
        (void)spl_cache_flush(skc, skm, skm->skm_refill);

    if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
        schedule_delayed_work_on(i, &skm->skm_work,
            skc->skc_delay / 3 * HZ);
 * Called regularly to keep a downward pressure on the size of idle
 * magazines and to release free slabs from the cache.  This function
 * never calls the registered reclaim function, that only occurs
 * under memory pressure or with a direct call to spl_kmem_reap().
spl_cache_age(void *data)
    spl_kmem_cache_t *skc =
        spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);

    ASSERT(skc->skc_magic == SKC_MAGIC);
    spl_slab_reclaim(skc, skc->skc_reap, 0);

    if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
        schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.  Also for
 * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN,
 * lower than this and we will fail.
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
    uint32_t sks_size, obj_size, max_size;

    if (skc->skc_flags & KMC_OFFSLAB) {
        *objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
        *size = sizeof(spl_kmem_slab_t);
        sks_size = spl_sks_size(skc);
        obj_size = spl_obj_size(skc);

        if (skc->skc_flags & KMC_KMEM)
            max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
            max_size = (32 * 1024 * 1024);

        /* Power of two sized slab */
        for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
            *objs = (*size - sks_size) / obj_size;
            if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)

         * Unable to satisfy target objects per slab, fall back to
         * allocating a maximally sized slab and assuming it can
         * contain the minimum objects count use it.  If not fail.
        *objs = (*size - sks_size) / obj_size;
        if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
spl_magazine_size(spl_kmem_cache_t *skc)
    uint32_t obj_size = spl_obj_size(skc);

    /* Per-magazine sizes below assume a 4KiB page size */
    if (obj_size > (PAGE_SIZE * 256))
        size = 4;       /* Minimum 4MiB per-magazine */
    else if (obj_size > (PAGE_SIZE * 32))
        size = 16;      /* Minimum 2MiB per-magazine */
    else if (obj_size > (PAGE_SIZE))
        size = 64;      /* Minimum 256KiB per-magazine */
    else if (obj_size > (PAGE_SIZE / 4))
        size = 128;     /* Minimum 128KiB per-magazine */
 * Allocate a per-cpu magazine to associate with a specific core.
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
    spl_kmem_magazine_t *skm;
    int size = sizeof(spl_kmem_magazine_t) +
        sizeof(void *) * skc->skc_mag_size;

    skm = kmem_alloc_node(size, KM_SLEEP, node);

    skm->skm_magic = SKM_MAGIC;
    skm->skm_size = skc->skc_mag_size;
    skm->skm_refill = skc->skc_mag_refill;
    skm->skm_cache = skc;
    spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
    skm->skm_age = jiffies;
 * Free a per-cpu magazine associated with a specific core.
spl_magazine_free(spl_kmem_magazine_t *skm)
    int size = sizeof(spl_kmem_magazine_t) +
        sizeof(void *) * skm->skm_size;

    ASSERT(skm->skm_magic == SKM_MAGIC);
    ASSERT(skm->skm_avail == 0);

    kmem_free(skm, size);
 * Create all per-cpu magazines of reasonable sizes.
spl_magazine_create(spl_kmem_cache_t *skc)
    skc->skc_mag_size = spl_magazine_size(skc);
    skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

    for_each_online_cpu(i) {
        skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i));
        if (!skc->skc_mag[i]) {
            for (i--; i >= 0; i--)
                spl_magazine_free(skc->skc_mag[i]);

    /* Only after everything is allocated schedule magazine work */
    for_each_online_cpu(i)
        schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
            skc->skc_delay / 3 * HZ);
 * Destroy all per-cpu magazines.
spl_magazine_destroy(spl_kmem_cache_t *skc)
    spl_kmem_magazine_t *skm;

    for_each_online_cpu(i) {
        skm = skc->skc_mag[i];
        (void)spl_cache_flush(skc, skm, skm->skm_avail);
        spl_magazine_free(skm);
 * Create an object cache based on the following arguments:
 * size         cache object size
 * align        cache object alignment
 * ctor         cache object constructor
 * dtor         cache object destructor
 * reclaim      cache object reclaim
 * priv         cache private data for ctor/dtor/reclaim
 * vmp          unused, must be NULL
 *
 * KMC_NOTOUCH          Disable cache object aging (unsupported)
 * KMC_NODEBUG          Disable debugging (unsupported)
 * KMC_NOMAGAZINE       Disable magazine (unsupported)
 * KMC_NOHASH           Disable hashing (unsupported)
 * KMC_QCACHE           Disable qcache (unsupported)
 * KMC_KMEM             Force kmem backed cache
 * KMC_VMEM             Force vmem backed cache
 * KMC_OFFSLAB          Locate objects off the slab
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor,
    spl_kmem_dtor_t dtor,
    spl_kmem_reclaim_t reclaim,
    void *priv, void *vmp, int flags)
    spl_kmem_cache_t *skc;
    int rc, kmem_flags = KM_SLEEP;

    ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
    ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
    ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
    ASSERT(vmp == NULL);

    /* We may be called when there is a non-zero preempt_count or
     * interrupts are disabled in which case we must not sleep.
    if (current_thread_info()->preempt_count || irqs_disabled())
        kmem_flags = KM_NOSLEEP;

    /* Allocate new cache memory and initialize. */
    skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);

    skc->skc_magic = SKC_MAGIC;
    skc->skc_name_size = strlen(name) + 1;
    skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
    if (skc->skc_name == NULL) {
        kmem_free(skc, sizeof(*skc));
    strncpy(skc->skc_name, name, skc->skc_name_size);

    skc->skc_ctor = ctor;
    skc->skc_dtor = dtor;
    skc->skc_reclaim = reclaim;
    skc->skc_private = priv;

    skc->skc_flags = flags;
    skc->skc_obj_size = size;
    skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
    skc->skc_delay = SPL_KMEM_CACHE_DELAY;
    skc->skc_reap = SPL_KMEM_CACHE_REAP;
    atomic_set(&skc->skc_ref, 0);

    INIT_LIST_HEAD(&skc->skc_list);
    INIT_LIST_HEAD(&skc->skc_complete_list);
    INIT_LIST_HEAD(&skc->skc_partial_list);
    spin_lock_init(&skc->skc_lock);
    skc->skc_slab_fail = 0;
    skc->skc_slab_create = 0;
    skc->skc_slab_destroy = 0;
    skc->skc_slab_total = 0;
    skc->skc_slab_alloc = 0;
    skc->skc_slab_max = 0;
    skc->skc_obj_total = 0;
    skc->skc_obj_alloc = 0;
    skc->skc_obj_max = 0;

    VERIFY(ISP2(align));
    VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);  /* Min alignment */
    VERIFY3U(align, <=, PAGE_SIZE);             /* Max alignment */
    skc->skc_obj_align = align;

    /* If none passed select a cache type based on object size */
    if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
        if (spl_obj_size(skc) < (PAGE_SIZE / 8))
            skc->skc_flags |= KMC_KMEM;
            skc->skc_flags |= KMC_VMEM;

    rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);

    rc = spl_magazine_create(skc);

    spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
    schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);

    down_write(&spl_kmem_cache_sem);
    list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
    up_write(&spl_kmem_cache_sem);

    kmem_free(skc->skc_name, skc->skc_name_size);
    kmem_free(skc, sizeof(*skc));
EXPORT_SYMBOL(spl_kmem_cache_create);
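/*
 * Illustrative usage sketch (not from the original source).  The names
 * my_cache, my_obj_t, my_ctor, and my_dtor are hypothetical; align is shown
 * as 0 for the default alignment (an assumption), and a non-zero value must
 * be a power of two between SPL_KMEM_CACHE_ALIGN and PAGE_SIZE per the
 * VERIFYs above:
 *
 *     spl_kmem_cache_t *cache;
 *     my_obj_t *obj;
 *
 *     cache = spl_kmem_cache_create("my_cache", sizeof(my_obj_t), 0,
 *         my_ctor, my_dtor, NULL, NULL, NULL, 0);
 *     obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *     ...
 *     spl_kmem_cache_free(cache, obj);
 *     spl_kmem_cache_destroy(cache);
 *
 * With flags == 0 the backing store is selected automatically above:
 * objects whose padded spl_obj_size() is smaller than PAGE_SIZE / 8 get a
 * KMC_KMEM cache, anything larger a KMC_VMEM cache.
 */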
 * Destroy a cache and all objects associated with the cache.
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
    DECLARE_WAIT_QUEUE_HEAD(wq);

    ASSERT(skc->skc_magic == SKC_MAGIC);

    down_write(&spl_kmem_cache_sem);
    list_del_init(&skc->skc_list);
    up_write(&spl_kmem_cache_sem);

    /* Cancel and wait for any pending delayed work */
    ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
    cancel_delayed_work(&skc->skc_work);
    for_each_online_cpu(i)
        cancel_delayed_work(&skc->skc_mag[i]->skm_work);

    flush_scheduled_work();

    /* Wait until all current callers complete, this is mainly
     * to catch the case where a low memory situation triggers a
     * cache reaping action which races with this destroy. */
    wait_event(wq, atomic_read(&skc->skc_ref) == 0);

    spl_magazine_destroy(skc);
    spl_slab_reclaim(skc, 0, 1);
    spin_lock(&skc->skc_lock);

    /* Validate there are no objects in use and free all the
     * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
    ASSERT3U(skc->skc_slab_alloc, ==, 0);
    ASSERT3U(skc->skc_obj_alloc, ==, 0);
    ASSERT3U(skc->skc_slab_total, ==, 0);
    ASSERT3U(skc->skc_obj_total, ==, 0);
    ASSERT(list_empty(&skc->skc_complete_list));

    kmem_free(skc->skc_name, skc->skc_name_size);
    spin_unlock(&skc->skc_lock);

    kmem_free(skc, sizeof(*skc));
EXPORT_SYMBOL(spl_kmem_cache_destroy);
 * Allocate an object from a slab attached to the cache.  This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
    spl_kmem_obj_t *sko;

    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(sks->sks_magic == SKS_MAGIC);
    ASSERT(spin_is_locked(&skc->skc_lock));

    sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
    ASSERT(sko->sko_magic == SKO_MAGIC);
    ASSERT(sko->sko_addr != NULL);

    /* Remove from sks_free_list */
    list_del_init(&sko->sko_list);

    sks->sks_age = jiffies;

    skc->skc_obj_alloc++;

    /* Track max obj usage statistics */
    if (skc->skc_obj_alloc > skc->skc_obj_max)
        skc->skc_obj_max = skc->skc_obj_alloc;

    /* Track max slab usage statistics */
    if (sks->sks_ref == 1) {
        skc->skc_slab_alloc++;

        if (skc->skc_slab_alloc > skc->skc_slab_max)
            skc->skc_slab_max = skc->skc_slab_alloc;

    return sko->sko_addr;
 * No available objects on any slabs, create a new slab.  Since this
 * is an expensive operation we do it without holding the spinlock and
 * only briefly acquire it when we link in the fully allocated and
static spl_kmem_slab_t *
spl_cache_grow(spl_kmem_cache_t *skc, int flags)
    spl_kmem_slab_t *sks;

    ASSERT(skc->skc_magic == SKC_MAGIC);

     * Before allocating a new slab check if the slab is being reaped.
     * If it is there is a good chance we can wait until it finishes
     * and then use one of the newly freed but not aged-out slabs.
    if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
        GOTO(out, sks = NULL);

    /* Allocate a new slab for the cache */
    sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
        GOTO(out, sks = NULL);

    /* Link the new empty slab into the end of skc_partial_list. */
    spin_lock(&skc->skc_lock);
    skc->skc_slab_total++;
    skc->skc_obj_total += sks->sks_objs;
    list_add_tail(&sks->sks_list, &skc->skc_partial_list);
    spin_unlock(&skc->skc_lock);

    local_irq_disable();
 * Refill a per-cpu magazine with objects from the slabs for this
 * cache.  Ideally the magazine can be repopulated using existing
 * objects which have been released, however if we are unable to
 * locate enough free objects new slabs of objects will be created.
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
    spl_kmem_slab_t *sks;

    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(skm->skm_magic == SKM_MAGIC);

    refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
    spin_lock(&skc->skc_lock);

    while (refill > 0) {
        /* No slabs available we may need to grow the cache */
        if (list_empty(&skc->skc_partial_list)) {
            spin_unlock(&skc->skc_lock);

            sks = spl_cache_grow(skc, flags);

            /* Rescheduled to different CPU skm is not local */
            if (skm != skc->skc_mag[smp_processor_id()])

            /* Potentially rescheduled to the same CPU but
             * allocations may have occurred from this CPU while
             * we were sleeping so recalculate max refill. */
            refill = MIN(refill, skm->skm_size - skm->skm_avail);

            spin_lock(&skc->skc_lock);

        /* Grab the next available slab */
        sks = list_entry((&skc->skc_partial_list)->next,
            spl_kmem_slab_t, sks_list);
        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(sks->sks_ref < sks->sks_objs);
        ASSERT(!list_empty(&sks->sks_free_list));

        /* Consume as many objects as needed to refill the requested
         * cache.  We must also be careful not to overfill it. */
        while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
            ASSERT(skm->skm_avail < skm->skm_size);
            ASSERT(rc < skm->skm_size);
            skm->skm_objs[skm->skm_avail++] = spl_cache_obj(skc, sks);

        /* Move slab to skc_complete_list when full */
        if (sks->sks_ref == sks->sks_objs) {
            list_del(&sks->sks_list);
            list_add(&sks->sks_list, &skc->skc_complete_list);

    spin_unlock(&skc->skc_lock);

    /* Returns the number of entries added to cache */
 * Release an object back to the slab from which it came.
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
    spl_kmem_slab_t *sks = NULL;
    spl_kmem_obj_t *sko = NULL;

    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(spin_is_locked(&skc->skc_lock));

    sko = spl_sko_from_obj(skc, obj);
    ASSERT(sko->sko_magic == SKO_MAGIC);
    sks = sko->sko_slab;
    ASSERT(sks->sks_magic == SKS_MAGIC);
    ASSERT(sks->sks_cache == skc);
    list_add(&sko->sko_list, &sks->sks_free_list);

    sks->sks_age = jiffies;

    skc->skc_obj_alloc--;

    /* Move slab to skc_partial_list when no longer full.  Slabs
     * are added to the head to keep the partial list in quasi-full
     * sorted order.  Fuller at the head, emptier at the tail. */
    if (sks->sks_ref == (sks->sks_objs - 1)) {
        list_del(&sks->sks_list);
        list_add(&sks->sks_list, &skc->skc_partial_list);

    /* Move empty slabs to the end of the partial list so
     * they can be easily found and freed during reclamation. */
    if (sks->sks_ref == 0) {
        list_del(&sks->sks_list);
        list_add_tail(&sks->sks_list, &skc->skc_partial_list);
        skc->skc_slab_alloc--;
 * Release a batch of objects from a per-cpu magazine back to their
 * respective slabs.  This occurs when we exceed the magazine size,
 * are under memory pressure, when the cache is idle, or during
 * cache cleanup.  The flush argument contains the number of entries
 * to remove from the magazine.
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
    int i, count = MIN(flush, skm->skm_avail);

    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(skm->skm_magic == SKM_MAGIC);

     * XXX: Currently we simply return objects from the magazine to
     * the slabs in fifo order.  The ideal thing to do from a memory
     * fragmentation standpoint is to cheaply determine the set of
     * objects in the magazine which will result in the largest
     * number of free slabs if released from the magazine.
    spin_lock(&skc->skc_lock);
    for (i = 0; i < count; i++)
        spl_cache_shrink(skc, skm->skm_objs[i]);

    skm->skm_avail -= count;
    memmove(skm->skm_objs, &(skm->skm_objs[count]),
        sizeof(void *) * skm->skm_avail);

    spin_unlock(&skc->skc_lock);
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
    spl_kmem_magazine_t *skm;
    unsigned long irq_flags;

    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
    ASSERT(flags & KM_SLEEP);
    atomic_inc(&skc->skc_ref);
    local_irq_save(irq_flags);

    /* Safe to update per-cpu structure without lock, but
     * in the restart case we must be careful to reacquire
     * the local magazine since this may have changed
     * when we need to grow the cache. */
    skm = skc->skc_mag[smp_processor_id()];
    ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
        skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
        skm->skm_size, skm->skm_refill, skm->skm_avail);

    if (likely(skm->skm_avail)) {
        /* Object available in CPU cache, use it */
        obj = skm->skm_objs[--skm->skm_avail];
        skm->skm_age = jiffies;
        /* Per-CPU cache empty, directly allocate from
         * the slab and refill the per-CPU cache. */
        (void)spl_cache_refill(skc, skm, flags);
        GOTO(restart, obj = NULL);

    local_irq_restore(irq_flags);

    ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

    /* Pre-emptively migrate object to CPU L1 cache */
    atomic_dec(&skc->skc_ref);
EXPORT_SYMBOL(spl_kmem_cache_alloc);
 * Free an object back to the local per-cpu magazine, there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from.  We may need to flush entries from the magazine
 * back to the slabs to make space.
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
    spl_kmem_magazine_t *skm;
    unsigned long flags;

    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
    atomic_inc(&skc->skc_ref);
    local_irq_save(flags);

    /* Safe to update per-cpu structure without lock, but
     * since no remote memory allocation tracking is being performed
     * it is entirely possible to allocate an object from one
     * CPU cache and return it to another. */
    skm = skc->skc_mag[smp_processor_id()];
    ASSERT(skm->skm_magic == SKM_MAGIC);

    /* Per-CPU cache full, flush it to make space */
    if (unlikely(skm->skm_avail >= skm->skm_size))
        (void)spl_cache_flush(skc, skm, skm->skm_refill);

    /* Available space in cache, use it */
    skm->skm_objs[skm->skm_avail++] = obj;

    local_irq_restore(flags);
    atomic_dec(&skc->skc_ref);
EXPORT_SYMBOL(spl_kmem_cache_free);
 * The generic shrinker function for all caches.  Under Linux a shrinker
 * may not be tightly coupled with a slab cache.  In fact Linux always
 * systematically tries calling all registered shrinker callbacks which
 * report that they contain unused objects.  Because of this we only
 * register one shrinker function in the shim layer for all slab caches.
 * We always attempt to shrink all caches when this generic shrinker
 * is called.  The shrinker should return the number of free objects
 * in the cache when called with nr_to_scan == 0 but not attempt to
 * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
 * objects should be freed, because Solaris semantics are to free
 * all available objects we may free more objects than requested.
spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
    spl_kmem_cache_t *skc;

    down_read(&spl_kmem_cache_sem);
    list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
        spl_kmem_cache_reap_now(skc);

         * Presume everything alloc'ed is reclaimable, this ensures
         * we are called again with nr_to_scan > 0 so we can try and
         * reclaim.  The exact number is not important either so
         * we forgo taking this already highly contended lock.
        unused += skc->skc_obj_alloc;
    up_read(&spl_kmem_cache_sem);

    return (unused * sysctl_vfs_cache_pressure) / 100;
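/*
 * Illustrative example (not from the original source): if the registered
 * caches hold 2000 allocated objects in total and sysctl_vfs_cache_pressure
 * is at its default of 100, the shrinker reports (2000 * 100) / 100 = 2000
 * reclaimable objects; raising the sysctl to 200 doubles the reported
 * pressure for the same cache population.
 */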
 * Call the registered reclaim function for a cache.  Depending on how
 * many and which objects are released it may simply repopulate the
 * local magazine which will then need to age-out.  Objects which cannot
 * fit in the magazine will be released back to their slabs which will
 * also need to age out before being released.  This is all just best
 * effort and we do not want to thrash creating and destroying slabs.
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
    ASSERT(skc->skc_magic == SKC_MAGIC);
    ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

    /* Prevent concurrent cache reaping when contended */
    if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {

    atomic_inc(&skc->skc_ref);

    if (skc->skc_reclaim)
        skc->skc_reclaim(skc->skc_private);

    spl_slab_reclaim(skc, skc->skc_reap, 0);
    clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
    atomic_dec(&skc->skc_ref);
EXPORT_SYMBOL(spl_kmem_cache_reap_now);
 * Reap all free slabs from all registered caches.
    spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
EXPORT_SYMBOL(spl_kmem_reap);
#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
    int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;

    ASSERT(str != NULL && len >= 17);
    memset(str, 0, len);

    /* Check for a fully printable string, and while we are at
     * it place the printable characters in the passed buffer. */
    for (i = 0; i < size; i++) {
        str[i] = ((char *)(kd->kd_addr))[i];
        if (isprint(str[i])) {

    /* Minimum number of printable characters found
     * to make it worthwhile to print this as ascii. */

    sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
        *((uint8_t *)kd->kd_addr),
        *((uint8_t *)kd->kd_addr + 2),
        *((uint8_t *)kd->kd_addr + 4),
        *((uint8_t *)kd->kd_addr + 6),
        *((uint8_t *)kd->kd_addr + 8),
        *((uint8_t *)kd->kd_addr + 10),
        *((uint8_t *)kd->kd_addr + 12),
        *((uint8_t *)kd->kd_addr + 14));
spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
    spin_lock_init(lock);
    INIT_LIST_HEAD(list);

    for (i = 0; i < size; i++)
        INIT_HLIST_HEAD(&kmem_table[i]);
spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
    unsigned long flags;

    spin_lock_irqsave(lock, flags);
    if (!list_empty(list))
        printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
            "size", "data", "func", "line");

    list_for_each_entry(kd, list, kd_list)
        printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
            (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
            kd->kd_func, kd->kd_line);

    spin_unlock_irqrestore(lock, flags);

#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
#define spl_kmem_init_tracking(list, lock, size)
#define spl_kmem_fini_tracking(list, lock)
#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
spl_kmem_init_globals(void)
    /* For now all zones are included, it may be wise to restrict
     * this to normal and highmem zones if we see problems. */
    for_each_zone(zone) {
        if (!populated_zone(zone))

        minfree += min_wmark_pages(zone);
        desfree += low_wmark_pages(zone);
        lotsfree += high_wmark_pages(zone);

    /* Solaris default values */
    swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
    swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
 * Called at module init when it is safe to use spl_kallsyms_lookup_name()
spl_kmem_init_kallsyms_lookup(void)
#ifndef HAVE_GET_VMALLOC_INFO
    get_vmalloc_info_fn = (get_vmalloc_info_t)
        spl_kallsyms_lookup_name("get_vmalloc_info");
    if (!get_vmalloc_info_fn) {
        printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n");
#endif /* HAVE_GET_VMALLOC_INFO */

#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
    first_online_pgdat_fn = (first_online_pgdat_t)
        spl_kallsyms_lookup_name("first_online_pgdat");
    if (!first_online_pgdat_fn) {
        printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
    next_online_pgdat_fn = (next_online_pgdat_t)
        spl_kallsyms_lookup_name("next_online_pgdat");
    if (!next_online_pgdat_fn) {
        printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
    next_zone_fn = (next_zone_t)
        spl_kallsyms_lookup_name("next_zone");
    if (!next_zone_fn) {
        printk(KERN_ERR "Error: Unknown symbol next_zone\n");
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
    pgdat_list_addr = *(struct pglist_data **)
        spl_kallsyms_lookup_name("pgdat_list");
    if (!pgdat_list_addr) {
        printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
# endif /* HAVE_PGDAT_LIST */
#endif /* HAVE_PGDAT_HELPERS */

#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
    get_zone_counts_fn = (get_zone_counts_t)
        spl_kallsyms_lookup_name("get_zone_counts");
    if (!get_zone_counts_fn) {
        printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */

     * It is now safe to initialize the global tunings which rely on
     * the use of the for_each_zone() macro.  This macro in turn
     * depends on the *_pgdat symbols which are now available.
    spl_kmem_init_globals();
    init_rwsem(&spl_kmem_cache_sem);
    INIT_LIST_HEAD(&spl_kmem_cache_list);

#ifdef HAVE_SET_SHRINKER
    spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
        spl_kmem_cache_generic_shrinker);
    if (spl_kmem_cache_shrinker == NULL)
        RETURN(rc = -ENOMEM);
    register_shrinker(&spl_kmem_cache_shrinker);

    kmem_alloc_used_set(0);
    vmem_alloc_used_set(0);

    spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
    spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
    /* Display all unreclaimed memory addresses, including the
     * allocation size and the first few bytes of what's located
     * at that address to aid in debugging.  Performance is not
     * a serious concern here since it is module unload time. */
    if (kmem_alloc_used_read() != 0)
        CWARN("kmem leaked %ld/%ld bytes\n",
            kmem_alloc_used_read(), kmem_alloc_max);

    if (vmem_alloc_used_read() != 0)
        CWARN("vmem leaked %ld/%ld bytes\n",
            vmem_alloc_used_read(), vmem_alloc_max);

    spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
    spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
#endif /* DEBUG_KMEM */

#ifdef HAVE_SET_SHRINKER
    remove_shrinker(spl_kmem_cache_shrinker);
    unregister_shrinker(&spl_kmem_cache_shrinker);