715f6251 1/*
2 * This file is part of the SPL: Solaris Porting Layer.
3 *
4 * Copyright (c) 2008 Lawrence Livermore National Security, LLC.
5 * Produced at Lawrence Livermore National Laboratory
6 * Written by:
7 * Brian Behlendorf <behlendorf1@llnl.gov>,
8 * Herb Wartens <wartens2@llnl.gov>,
9 * Jim Garlick <garlick@llnl.gov>
10 * UCRL-CODE-235197
11 *
12 * This is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
20 * for more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
25 */
26
f4b37741 27#include <sys/kmem.h>
f1ca4da6 28
937879f1 29#ifdef DEBUG_SUBSYSTEM
a0f6da3d 30# undef DEBUG_SUBSYSTEM
937879f1 31#endif
32
33#define DEBUG_SUBSYSTEM S_KMEM
34
36b313da
BB
35/*
36 * The minimum amount of memory measured in pages to be free at all
37 * times on the system. This is similar to Linux's zone->pages_min
 38 * multiplied by the number of zones and is sized based on that.
39 */
40pgcnt_t minfree = 0;
41EXPORT_SYMBOL(minfree);
42
43/*
44 * The desired amount of memory measured in pages to be free at all
45 * times on the system. This is similar to Linux's zone->pages_low
 46 * multiplied by the number of zones and is sized based on that.
47 * Assuming all zones are being used roughly equally, when we drop
48 * below this threshold async page reclamation is triggered.
49 */
50pgcnt_t desfree = 0;
51EXPORT_SYMBOL(desfree);
52
53/*
 54 * When above this amount of memory, measured in pages, the system is
 55 * determined to have enough free memory. This is similar to Linux's
 56 * zone->pages_high multiplied by the number of zones and is sized based
57 * on that. Assuming all zones are being used roughly equally, when
58 * async page reclamation reaches this threshold it stops.
59 */
60pgcnt_t lotsfree = 0;
61EXPORT_SYMBOL(lotsfree);
62
 63/* Unused, always 0 in this implementation */
64pgcnt_t needfree = 0;
65EXPORT_SYMBOL(needfree);
66
36b313da
BB
67pgcnt_t swapfs_minfree = 0;
68EXPORT_SYMBOL(swapfs_minfree);
69
70pgcnt_t swapfs_reserve = 0;
71EXPORT_SYMBOL(swapfs_reserve);
72
36b313da
BB
73vmem_t *heap_arena = NULL;
74EXPORT_SYMBOL(heap_arena);
75
76vmem_t *zio_alloc_arena = NULL;
77EXPORT_SYMBOL(zio_alloc_arena);
78
79vmem_t *zio_arena = NULL;
80EXPORT_SYMBOL(zio_arena);
81
d1ff2312 82#ifndef HAVE_GET_VMALLOC_INFO
96dded38 83get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON;
d1ff2312
BB
84EXPORT_SYMBOL(get_vmalloc_info_fn);
85#endif /* HAVE_GET_VMALLOC_INFO */
86
5232d256
BB
87#ifdef HAVE_PGDAT_HELPERS
88# ifndef HAVE_FIRST_ONLINE_PGDAT
96dded38 89first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
d1ff2312 90EXPORT_SYMBOL(first_online_pgdat_fn);
5232d256 91# endif /* HAVE_FIRST_ONLINE_PGDAT */
36b313da 92
5232d256 93# ifndef HAVE_NEXT_ONLINE_PGDAT
96dded38 94next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
d1ff2312 95EXPORT_SYMBOL(next_online_pgdat_fn);
5232d256 96# endif /* HAVE_NEXT_ONLINE_PGDAT */
36b313da 97
5232d256 98# ifndef HAVE_NEXT_ZONE
96dded38 99next_zone_t next_zone_fn = SYMBOL_POISON;
d1ff2312 100EXPORT_SYMBOL(next_zone_fn);
5232d256
BB
101# endif /* HAVE_NEXT_ZONE */
102
103#else /* HAVE_PGDAT_HELPERS */
104
105# ifndef HAVE_PGDAT_LIST
106struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
107EXPORT_SYMBOL(pgdat_list_addr);
108# endif /* HAVE_PGDAT_LIST */
109
110#endif /* HAVE_PGDAT_HELPERS */
36b313da 111
6ae7fef5 112#ifdef NEED_GET_ZONE_COUNTS
e11d6c5f 113# ifndef HAVE_GET_ZONE_COUNTS
96dded38 114get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
d1ff2312 115EXPORT_SYMBOL(get_zone_counts_fn);
96dded38 116# endif /* HAVE_GET_ZONE_COUNTS */
4ab13d3b 117
e11d6c5f 118unsigned long
6ae7fef5 119spl_global_page_state(spl_zone_stat_item_t item)
4ab13d3b
BB
120{
121 unsigned long active;
122 unsigned long inactive;
123 unsigned long free;
124
6ae7fef5
BB
125 get_zone_counts(&active, &inactive, &free);
126 switch (item) {
127 case SPL_NR_FREE_PAGES: return free;
128 case SPL_NR_INACTIVE: return inactive;
129 case SPL_NR_ACTIVE: return active;
130 default: ASSERT(0); /* Unsupported */
e11d6c5f
BB
131 }
132
6ae7fef5
BB
133 return 0;
134}
135#else
136# ifdef HAVE_GLOBAL_PAGE_STATE
137unsigned long
138spl_global_page_state(spl_zone_stat_item_t item)
139{
140 unsigned long pages = 0;
141
142 switch (item) {
143 case SPL_NR_FREE_PAGES:
144# ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
145 pages += global_page_state(NR_FREE_PAGES);
146# endif
147 break;
148 case SPL_NR_INACTIVE:
149# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
150 pages += global_page_state(NR_INACTIVE);
151# endif
152# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
153 pages += global_page_state(NR_INACTIVE_ANON);
154# endif
155# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
156 pages += global_page_state(NR_INACTIVE_FILE);
157# endif
158 break;
159 case SPL_NR_ACTIVE:
160# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
161 pages += global_page_state(NR_ACTIVE);
162# endif
163# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
164 pages += global_page_state(NR_ACTIVE_ANON);
165# endif
166# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
167 pages += global_page_state(NR_ACTIVE_FILE);
168# endif
169 break;
170 default:
171 ASSERT(0); /* Unsupported */
e11d6c5f
BB
172 }
173
6ae7fef5
BB
174 return pages;
175}
96dded38 176# else
6ae7fef5 177# error "Both global_page_state() and get_zone_counts() unavailable"
96dded38 178# endif /* HAVE_GLOBAL_PAGE_STATE */
6ae7fef5 179#endif /* NEED_GET_ZONE_COUNTS */
e11d6c5f 180EXPORT_SYMBOL(spl_global_page_state);
4ab13d3b 181
e11d6c5f
BB
182pgcnt_t
183spl_kmem_availrmem(void)
184{
4ab13d3b 185 /* The amount of easily available memory */
6ae7fef5
BB
186 return (spl_global_page_state(SPL_NR_FREE_PAGES) +
187 spl_global_page_state(SPL_NR_INACTIVE));
4ab13d3b
BB
188}
189EXPORT_SYMBOL(spl_kmem_availrmem);
190
191size_t
192vmem_size(vmem_t *vmp, int typemask)
193{
d1ff2312
BB
194 struct vmalloc_info vmi;
195 size_t size = 0;
196
4ab13d3b
BB
197 ASSERT(vmp == NULL);
198 ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE));
199
d1ff2312
BB
200 get_vmalloc_info(&vmi);
201 if (typemask & VMEM_ALLOC)
202 size += (size_t)vmi.used;
203
204 if (typemask & VMEM_FREE)
205 size += (size_t)(VMALLOC_TOTAL - vmi.used);
206
207 return size;
4ab13d3b
BB
208}
209EXPORT_SYMBOL(vmem_size);
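/*
 * Example (editor's sketch, not part of the original source): the two
 * typemask bits can be queried independently or together.  With the
 * NULL arena required by the ASSERT above, a caller could estimate
 * virtual address space pressure roughly as follows; 'low_va' is a
 * hypothetical variable used only for illustration.
 *
 *	size_t va_used = vmem_size(NULL, VMEM_ALLOC);
 *	size_t va_free = vmem_size(NULL, VMEM_FREE);
 *	int low_va = (va_free < (va_used + va_free) / 10);
 */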
4ab13d3b 210
f1ca4da6 211/*
2fb9b26a 212 * Memory allocation interfaces and debugging for basic kmem_*
055ffd98
BB
213 * and vmem_* style memory allocation. When DEBUG_KMEM is enabled
214 * the SPL will keep track of the total memory allocated, and
215 * report any memory leaked when the module is unloaded.
f1ca4da6 216 */
217#ifdef DEBUG_KMEM
218/* Shim layer memory accounting */
550f1705 219atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
a0f6da3d 220unsigned long long kmem_alloc_max = 0;
550f1705 221atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
a0f6da3d 222unsigned long long vmem_alloc_max = 0;
c19c06f3 223int kmem_warning_flag = 1;
79b31f36 224
ff449ac4 225EXPORT_SYMBOL(kmem_alloc_used);
226EXPORT_SYMBOL(kmem_alloc_max);
227EXPORT_SYMBOL(vmem_alloc_used);
228EXPORT_SYMBOL(vmem_alloc_max);
229EXPORT_SYMBOL(kmem_warning_flag);
230
055ffd98
BB
231/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
232 * but also the location of every alloc and free. When the SPL module is
233 * unloaded a list of all leaked addresses and where they were allocated
234 * will be dumped to the console. Enabling this feature has a significant
 235 * impact on performance but it makes finding memory leaks straightforward.
236 *
237 * Not surprisingly with debugging enabled the xmem_locks are very highly
238 * contended particularly on xfree(). If we want to run with this detailed
239 * debugging enabled for anything other than debugging we need to minimize
240 * the contention by moving to a lock per xmem_table entry model.
a0f6da3d 241 */
055ffd98 242# ifdef DEBUG_KMEM_TRACKING
a0f6da3d 243
244# define KMEM_HASH_BITS 10
245# define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS)
246
247# define VMEM_HASH_BITS 10
248# define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS)
249
250typedef struct kmem_debug {
251 struct hlist_node kd_hlist; /* Hash node linkage */
252 struct list_head kd_list; /* List of all allocations */
253 void *kd_addr; /* Allocation pointer */
254 size_t kd_size; /* Allocation size */
255 const char *kd_func; /* Allocation function */
256 int kd_line; /* Allocation line */
257} kmem_debug_t;
258
d6a26c6a 259spinlock_t kmem_lock;
260struct hlist_head kmem_table[KMEM_TABLE_SIZE];
261struct list_head kmem_list;
262
13cdca65 263spinlock_t vmem_lock;
264struct hlist_head vmem_table[VMEM_TABLE_SIZE];
265struct list_head vmem_list;
266
d6a26c6a 267EXPORT_SYMBOL(kmem_lock);
268EXPORT_SYMBOL(kmem_table);
269EXPORT_SYMBOL(kmem_list);
270
13cdca65 271EXPORT_SYMBOL(vmem_lock);
272EXPORT_SYMBOL(vmem_table);
273EXPORT_SYMBOL(vmem_list);
a0f6da3d 274# endif
13cdca65 275
c19c06f3 276int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); }
277#else
278int kmem_set_warning(int flag) { return 0; }
f1ca4da6 279#endif
c19c06f3 280EXPORT_SYMBOL(kmem_set_warning);
f1ca4da6 281
282/*
283 * Slab allocation interfaces
284 *
2fb9b26a 285 * While the Linux slab implementation was inspired by the Solaris
286 * implemenation I cannot use it to emulate the Solaris APIs. I
287 * require two features which are not provided by the Linux slab.
288 *
289 * 1) Constructors AND destructors. Recent versions of the Linux
290 * kernel have removed support for destructors. This is a deal
291 * breaker for the SPL which contains particularly expensive
292 * initializers for mutex's, condition variables, etc. We also
a0f6da3d 293 * require a minimal level of cleanup for these data types unlike
 294 * many Linux data types which do not need to be explicitly destroyed.
2fb9b26a 295 *
a0f6da3d 296 * 2) Virtual address space backed slab. Callers of the Solaris slab
2fb9b26a 297 * expect it to work well for both small and very large allocations.
298 * Because of memory fragmentation the Linux slab which is backed
299 * by kmalloc'ed memory performs very badly when confronted with
300 * large numbers of large allocations. Basing the slab on the
 301 * virtual address space removes the need for contiguous pages
 302 * and greatly improves performance for large allocations.
303 *
304 * For these reasons, the SPL has its own slab implementation with
305 * the needed features. It is not as highly optimized as either the
306 * Solaris or Linux slabs, but it should get me most of what is
307 * needed until it can be optimized or obsoleted by another approach.
308 *
309 * One serious concern I do have about this method is the relatively
310 * small virtual address space on 32bit arches. This will seriously
311 * constrain the size of the slab caches and their performance.
312 *
2fb9b26a 313 * XXX: Improve the partial slab list by carefully maintaining a
314 * strict ordering of fullest to emptiest slabs based on
 315 * the slab reference count. This guarantees that when freeing
316 * slabs back to the system we need only linearly traverse the
317 * last N slabs in the list to discover all the freeable slabs.
318 *
319 * XXX: NUMA awareness for optionally allocating memory close to a
 320 * particular core. This can be advantageous if you know the slab
321 * object will be short lived and primarily accessed from one core.
322 *
323 * XXX: Slab coloring may also yield performance improvements and would
324 * be desirable to implement.
f1ca4da6 325 */
2fb9b26a 326
a0f6da3d 327struct list_head spl_kmem_cache_list; /* List of caches */
328struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
c30df9c8 329
4afaaefa 330static int spl_cache_flush(spl_kmem_cache_t *skc,
a0f6da3d 331 spl_kmem_magazine_t *skm, int flush);
4afaaefa 332
57d86234 333#ifdef HAVE_SET_SHRINKER
2fb9b26a 334static struct shrinker *spl_kmem_cache_shrinker;
57d86234 335#else
4afaaefa 336static int spl_kmem_cache_generic_shrinker(int nr_to_scan,
a0f6da3d 337 unsigned int gfp_mask);
2fb9b26a 338static struct shrinker spl_kmem_cache_shrinker = {
4afaaefa 339 .shrink = spl_kmem_cache_generic_shrinker,
57d86234 340 .seeks = KMC_DEFAULT_SEEKS,
341};
342#endif
f1ca4da6 343
a0f6da3d 344#ifdef DEBUG_KMEM
345# ifdef DEBUG_KMEM_TRACKING
346
347static kmem_debug_t *
348kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits,
349 void *addr)
350{
351 struct hlist_head *head;
352 struct hlist_node *node;
353 struct kmem_debug *p;
354 unsigned long flags;
355 ENTRY;
356
357 spin_lock_irqsave(lock, flags);
358
359 head = &table[hash_ptr(addr, bits)];
360 hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
361 if (p->kd_addr == addr) {
362 hlist_del_init(&p->kd_hlist);
363 list_del_init(&p->kd_list);
364 spin_unlock_irqrestore(lock, flags);
365 return p;
366 }
367 }
368
369 spin_unlock_irqrestore(lock, flags);
370
371 RETURN(NULL);
372}
373
374void *
375kmem_alloc_track(size_t size, int flags, const char *func, int line,
376 int node_alloc, int node)
377{
378 void *ptr = NULL;
379 kmem_debug_t *dptr;
380 unsigned long irq_flags;
381 ENTRY;
382
383 dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t),
384 flags & ~__GFP_ZERO);
385
386 if (dptr == NULL) {
387 CWARN("kmem_alloc(%ld, 0x%x) debug failed\n",
388 sizeof(kmem_debug_t), flags);
389 } else {
390 /* Marked unlikely because we should never be doing this,
 391 * we tolerate up to 2 pages but a single page is best. */
392 if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag)
393 CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
394 (unsigned long long) size, flags,
395 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
396
c8e60837 397 /* We use kstrdup() below because the string pointed to by
398 * __FUNCTION__ might not be available by the time we want
399 * to print it since the module might have been unloaded. */
400 dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
401 if (unlikely(dptr->kd_func == NULL)) {
402 kfree(dptr);
403 CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) "
404 "(%lld/%llu)\n", (unsigned long long) size, flags,
405 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
406 goto out;
407 }
408
a0f6da3d 409 /* Use the correct allocator */
410 if (node_alloc) {
411 ASSERT(!(flags & __GFP_ZERO));
412 ptr = kmalloc_node(size, flags, node);
413 } else if (flags & __GFP_ZERO) {
414 ptr = kzalloc(size, flags & ~__GFP_ZERO);
415 } else {
416 ptr = kmalloc(size, flags);
417 }
418
419 if (unlikely(ptr == NULL)) {
c8e60837 420 kfree(dptr->kd_func);
a0f6da3d 421 kfree(dptr);
422 CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
423 (unsigned long long) size, flags,
424 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
425 goto out;
426 }
427
428 atomic64_add(size, &kmem_alloc_used);
429 if (unlikely(atomic64_read(&kmem_alloc_used) >
430 kmem_alloc_max))
431 kmem_alloc_max =
432 atomic64_read(&kmem_alloc_used);
433
434 INIT_HLIST_NODE(&dptr->kd_hlist);
435 INIT_LIST_HEAD(&dptr->kd_list);
436
437 dptr->kd_addr = ptr;
438 dptr->kd_size = size;
a0f6da3d 439 dptr->kd_line = line;
440
441 spin_lock_irqsave(&kmem_lock, irq_flags);
442 hlist_add_head_rcu(&dptr->kd_hlist,
443 &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
444 list_add_tail(&dptr->kd_list, &kmem_list);
445 spin_unlock_irqrestore(&kmem_lock, irq_flags);
446
447 CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
448 "(%lld/%llu)\n", (unsigned long long) size, flags,
449 ptr, atomic64_read(&kmem_alloc_used),
450 kmem_alloc_max);
451 }
452out:
453 RETURN(ptr);
454}
455EXPORT_SYMBOL(kmem_alloc_track);
456
457void
458kmem_free_track(void *ptr, size_t size)
459{
460 kmem_debug_t *dptr;
461 ENTRY;
462
463 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
464 (unsigned long long) size);
465
466 dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
467
468 ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */
469
470 /* Size must match */
471 ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
472 "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
473 (unsigned long long) size, dptr->kd_func, dptr->kd_line);
474
475 atomic64_sub(size, &kmem_alloc_used);
476
477 CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
478 (unsigned long long) size, atomic64_read(&kmem_alloc_used),
479 kmem_alloc_max);
480
c8e60837 481 kfree(dptr->kd_func);
482
a0f6da3d 483 memset(dptr, 0x5a, sizeof(kmem_debug_t));
484 kfree(dptr);
485
486 memset(ptr, 0x5a, size);
487 kfree(ptr);
488
489 EXIT;
490}
491EXPORT_SYMBOL(kmem_free_track);
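/*
 * Example (editor's sketch): with DEBUG_KMEM_TRACKING every allocation
 * is recorded in kmem_table keyed by address, and kmem_free_track()
 * looks the record up and ASSERTs that the size passed back matches the
 * size originally requested.  Callers normally reach these functions
 * through the kmem_alloc()/kmem_free() wrappers (assumed to be defined
 * in sys/kmem.h); the direct calls below are illustration only.
 *
 *	buf = kmem_alloc_track(128, KM_SLEEP, __FUNCTION__, __LINE__, 0, 0);
 *	...
 *	kmem_free_track(buf, 128);	(size must match kd_size)
 */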
492
493void *
494vmem_alloc_track(size_t size, int flags, const char *func, int line)
495{
496 void *ptr = NULL;
497 kmem_debug_t *dptr;
498 unsigned long irq_flags;
499 ENTRY;
500
501 ASSERT(flags & KM_SLEEP);
502
503 dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags);
504 if (dptr == NULL) {
505 CWARN("vmem_alloc(%ld, 0x%x) debug failed\n",
506 sizeof(kmem_debug_t), flags);
507 } else {
c8e60837 508 /* We use kstrdup() below because the string pointed to by
509 * __FUNCTION__ might not be available by the time we want
510 * to print it, since the module might have been unloaded. */
511 dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO);
512 if (unlikely(dptr->kd_func == NULL)) {
513 kfree(dptr);
514 CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) "
515 "(%lld/%llu)\n", (unsigned long long) size, flags,
516 atomic64_read(&vmem_alloc_used), vmem_alloc_max);
517 goto out;
518 }
519
a0f6da3d 520 ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
521 PAGE_KERNEL);
522
523 if (unlikely(ptr == NULL)) {
c8e60837 524 kfree(dptr->kd_func);
a0f6da3d 525 kfree(dptr);
526 CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
527 (unsigned long long) size, flags,
528 atomic64_read(&vmem_alloc_used), vmem_alloc_max);
529 goto out;
530 }
531
532 if (flags & __GFP_ZERO)
533 memset(ptr, 0, size);
534
535 atomic64_add(size, &vmem_alloc_used);
536 if (unlikely(atomic64_read(&vmem_alloc_used) >
537 vmem_alloc_max))
538 vmem_alloc_max =
539 atomic64_read(&vmem_alloc_used);
540
541 INIT_HLIST_NODE(&dptr->kd_hlist);
542 INIT_LIST_HEAD(&dptr->kd_list);
543
544 dptr->kd_addr = ptr;
545 dptr->kd_size = size;
a0f6da3d 546 dptr->kd_line = line;
547
548 spin_lock_irqsave(&vmem_lock, irq_flags);
549 hlist_add_head_rcu(&dptr->kd_hlist,
550 &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
551 list_add_tail(&dptr->kd_list, &vmem_list);
552 spin_unlock_irqrestore(&vmem_lock, irq_flags);
553
554 CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
555 "(%lld/%llu)\n", (unsigned long long) size, flags,
556 ptr, atomic64_read(&vmem_alloc_used),
557 vmem_alloc_max);
558 }
559out:
560 RETURN(ptr);
561}
562EXPORT_SYMBOL(vmem_alloc_track);
563
564void
565vmem_free_track(void *ptr, size_t size)
566{
567 kmem_debug_t *dptr;
568 ENTRY;
569
570 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
571 (unsigned long long) size);
572
573 dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
574 ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */
575
576 /* Size must match */
577 ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
578 "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
579 (unsigned long long) size, dptr->kd_func, dptr->kd_line);
580
581 atomic64_sub(size, &vmem_alloc_used);
582 CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
583 (unsigned long long) size, atomic64_read(&vmem_alloc_used),
584 vmem_alloc_max);
585
c8e60837 586 kfree(dptr->kd_func);
587
a0f6da3d 588 memset(dptr, 0x5a, sizeof(kmem_debug_t));
589 kfree(dptr);
590
591 memset(ptr, 0x5a, size);
592 vfree(ptr);
593
594 EXIT;
595}
596EXPORT_SYMBOL(vmem_free_track);
597
598# else /* DEBUG_KMEM_TRACKING */
599
600void *
601kmem_alloc_debug(size_t size, int flags, const char *func, int line,
602 int node_alloc, int node)
603{
604 void *ptr;
605 ENTRY;
606
607 /* Marked unlikely because we should never be doing this,
 608 * we tolerate up to 2 pages but a single page is best. */
609 if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag)
610 CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n",
611 (unsigned long long) size, flags,
612 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
613
614 /* Use the correct allocator */
615 if (node_alloc) {
616 ASSERT(!(flags & __GFP_ZERO));
617 ptr = kmalloc_node(size, flags, node);
618 } else if (flags & __GFP_ZERO) {
619 ptr = kzalloc(size, flags & (~__GFP_ZERO));
620 } else {
621 ptr = kmalloc(size, flags);
622 }
623
624 if (ptr == NULL) {
625 CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
626 (unsigned long long) size, flags,
627 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
628 } else {
629 atomic64_add(size, &kmem_alloc_used);
630 if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max))
631 kmem_alloc_max = atomic64_read(&kmem_alloc_used);
632
633 CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p "
634 "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
635 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
636 }
637 RETURN(ptr);
638}
639EXPORT_SYMBOL(kmem_alloc_debug);
640
641void
642kmem_free_debug(void *ptr, size_t size)
643{
644 ENTRY;
645
646 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
647 (unsigned long long) size);
648
649 atomic64_sub(size, &kmem_alloc_used);
650
651 CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
652 (unsigned long long) size, atomic64_read(&kmem_alloc_used),
653 kmem_alloc_max);
654
655 memset(ptr, 0x5a, size);
656 kfree(ptr);
657
658 EXIT;
659}
660EXPORT_SYMBOL(kmem_free_debug);
661
662void *
663vmem_alloc_debug(size_t size, int flags, const char *func, int line)
664{
665 void *ptr;
666 ENTRY;
667
668 ASSERT(flags & KM_SLEEP);
669
670 ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO,
671 PAGE_KERNEL);
672 if (ptr == NULL) {
673 CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n",
674 (unsigned long long) size, flags,
675 atomic64_read(&vmem_alloc_used), vmem_alloc_max);
676 } else {
677 if (flags & __GFP_ZERO)
678 memset(ptr, 0, size);
679
680 atomic64_add(size, &vmem_alloc_used);
681
682 if (unlikely(atomic64_read(&vmem_alloc_used) > vmem_alloc_max))
683 vmem_alloc_max = atomic64_read(&vmem_alloc_used);
684
685 CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p "
686 "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
687 atomic64_read(&vmem_alloc_used), vmem_alloc_max);
688 }
689
690 RETURN(ptr);
691}
692EXPORT_SYMBOL(vmem_alloc_debug);
693
694void
695vmem_free_debug(void *ptr, size_t size)
696{
697 ENTRY;
698
699 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
700 (unsigned long long) size);
701
702 atomic64_sub(size, &vmem_alloc_used);
703
704 CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
705 (unsigned long long) size, atomic64_read(&vmem_alloc_used),
706 vmem_alloc_max);
707
708 memset(ptr, 0x5a, size);
709 vfree(ptr);
710
711 EXIT;
712}
713EXPORT_SYMBOL(vmem_free_debug);
714
715# endif /* DEBUG_KMEM_TRACKING */
716#endif /* DEBUG_KMEM */
717
a1502d76 718static void *
719kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
fece7c99 720{
a1502d76 721 void *ptr;
f1ca4da6 722
a1502d76 723 if (skc->skc_flags & KMC_KMEM) {
724 if (size > (2 * PAGE_SIZE)) {
725 ptr = (void *)__get_free_pages(flags, get_order(size));
726 } else
727 ptr = kmem_alloc(size, flags);
728 } else {
729 ptr = vmem_alloc(size, flags);
d6a26c6a 730 }
fece7c99 731
a1502d76 732 return ptr;
733}
fece7c99 734
a1502d76 735static void
736kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
737{
738 if (skc->skc_flags & KMC_KMEM) {
739 if (size > (2 * PAGE_SIZE))
740 free_pages((unsigned long)ptr, get_order(size));
741 else
742 kmem_free(ptr, size);
743 } else {
744 vmem_free(ptr, size);
745 }
fece7c99 746}
747
ea3e6ca9
BB
748/*
749 * It's important that we pack the spl_kmem_obj_t structure and the
48e0606a
BB
 750 * actual objects into one large address space to minimize the number
751 * of calls to the allocator. It is far better to do a few large
752 * allocations and then subdivide it ourselves. Now which allocator
 753 * we use requires balancing a few trade-offs.
754 *
755 * For small objects we use kmem_alloc() because as long as you are
756 * only requesting a small number of pages (ideally just one) its cheap.
757 * However, when you start requesting multiple pages with kmem_alloc()
 758 * it gets increasingly expensive since it requires contiguous pages.
 759 * For this reason we shift to vmem_alloc() for slabs of large objects
 760 * which removes the need for contiguous pages. We do not use
 761 * vmem_alloc() in all cases because there is significant locking
 762 * overhead in __get_vm_area_node(). This function takes a single
 763 * global lock when acquiring an available virtual address range which
764 * serializes all vmem_alloc()'s for all slab caches. Using slightly
765 * different allocation functions for small and large objects should
766 * give us the best of both worlds.
767 *
768 * KMC_ONSLAB KMC_OFFSLAB
769 *
770 * +------------------------+ +-----------------+
771 * | spl_kmem_slab_t --+-+ | | spl_kmem_slab_t |---+-+
772 * | skc_obj_size <-+ | | +-----------------+ | |
773 * | spl_kmem_obj_t | | | |
774 * | skc_obj_size <---+ | +-----------------+ | |
775 * | spl_kmem_obj_t | | | skc_obj_size | <-+ |
776 * | ... v | | spl_kmem_obj_t | |
777 * +------------------------+ +-----------------+ v
778 */
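/*
 * Example (editor's sketch): for the KMC_ONSLAB case above, the
 * tracking structure for an object lives at a fixed offset past the
 * aligned object, which is exactly how spl_slab_alloc() and
 * spl_cache_shrink() below locate it:
 *
 *	sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
 *
 * so each slot in the slab consumes P2ROUNDUP(obj_size, align) +
 * P2ROUNDUP(sizeof(spl_kmem_obj_t), align) bytes.
 */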
fece7c99 779static spl_kmem_slab_t *
a1502d76 780spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
fece7c99 781{
782 spl_kmem_slab_t *sks;
a1502d76 783 spl_kmem_obj_t *sko, *n;
784 void *base, *obj;
48e0606a
BB
785 int i, align, size, rc = 0;
786
a1502d76 787 base = kv_alloc(skc, skc->skc_slab_size, flags);
788 if (base == NULL)
fece7c99 789 RETURN(NULL);
790
a1502d76 791 sks = (spl_kmem_slab_t *)base;
792 sks->sks_magic = SKS_MAGIC;
793 sks->sks_objs = skc->skc_slab_objs;
794 sks->sks_age = jiffies;
795 sks->sks_cache = skc;
796 INIT_LIST_HEAD(&sks->sks_list);
797 INIT_LIST_HEAD(&sks->sks_free_list);
798 sks->sks_ref = 0;
48e0606a
BB
799
800 align = skc->skc_obj_align;
801 size = P2ROUNDUP(skc->skc_obj_size, align) +
802 P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
fece7c99 803
804 for (i = 0; i < sks->sks_objs; i++) {
a1502d76 805 if (skc->skc_flags & KMC_OFFSLAB) {
806 obj = kv_alloc(skc, size, flags);
807 if (!obj)
808 GOTO(out, rc = -ENOMEM);
809 } else {
48e0606a
BB
810 obj = base +
811 P2ROUNDUP(sizeof(spl_kmem_slab_t), align) +
812 (i * size);
a1502d76 813 }
814
48e0606a 815 sko = obj + P2ROUNDUP(skc->skc_obj_size, align);
fece7c99 816 sko->sko_addr = obj;
817 sko->sko_magic = SKO_MAGIC;
818 sko->sko_slab = sks;
819 INIT_LIST_HEAD(&sko->sko_list);
fece7c99 820 list_add_tail(&sko->sko_list, &sks->sks_free_list);
821 }
822
fece7c99 823 list_for_each_entry(sko, &sks->sks_free_list, sko_list)
824 if (skc->skc_ctor)
825 skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
2fb9b26a 826out:
a1502d76 827 if (rc) {
828 if (skc->skc_flags & KMC_OFFSLAB)
48e0606a
BB
829 list_for_each_entry_safe(sko, n, &sks->sks_free_list,
830 sko_list)
a1502d76 831 kv_free(skc, sko->sko_addr, size);
fece7c99 832
a1502d76 833 kv_free(skc, base, skc->skc_slab_size);
834 sks = NULL;
fece7c99 835 }
836
a1502d76 837 RETURN(sks);
fece7c99 838}
839
ea3e6ca9
BB
840/*
 841 * Remove a slab from the complete or partial list; it must be called with
842 * the 'skc->skc_lock' held but the actual free must be performed
843 * outside the lock to prevent deadlocking on vmem addresses.
fece7c99 844 */
f1ca4da6 845static void
ea3e6ca9
BB
846spl_slab_free(spl_kmem_slab_t *sks,
847 struct list_head *sks_list, struct list_head *sko_list)
848{
2fb9b26a 849 spl_kmem_cache_t *skc;
2fb9b26a 850 ENTRY;
57d86234 851
2fb9b26a 852 ASSERT(sks->sks_magic == SKS_MAGIC);
4afaaefa 853 ASSERT(sks->sks_ref == 0);
d6a26c6a 854
fece7c99 855 skc = sks->sks_cache;
856 ASSERT(skc->skc_magic == SKC_MAGIC);
d46630e0 857 ASSERT(spin_is_locked(&skc->skc_lock));
f1ca4da6 858
1a944a7d
BB
859 /*
860 * Update slab/objects counters in the cache, then remove the
861 * slab from the skc->skc_partial_list. Finally add the slab
862 * and all its objects in to the private work lists where the
863 * destructors will be called and the memory freed to the system.
864 */
fece7c99 865 skc->skc_obj_total -= sks->sks_objs;
866 skc->skc_slab_total--;
867 list_del(&sks->sks_list);
ea3e6ca9 868 list_add(&sks->sks_list, sks_list);
1a944a7d
BB
869 list_splice_init(&sks->sks_free_list, sko_list);
870
2fb9b26a 871 EXIT;
872}
d6a26c6a 873
ea3e6ca9
BB
874/*
 875 * Traverses all the partial slabs attached to a cache and frees those
 876 * which are currently empty, and have not been touched for
37db7d8c
BB
877 * skc_delay seconds to avoid thrashing. The count argument is
 878 * passed to optionally cap the number of slabs reclaimed; a count
 879 * of zero means try to reclaim everything. When the flag is set we
880 * always free an available slab regardless of age.
ea3e6ca9
BB
881 */
882static void
37db7d8c 883spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
2fb9b26a 884{
885 spl_kmem_slab_t *sks, *m;
ea3e6ca9
BB
886 spl_kmem_obj_t *sko, *n;
887 LIST_HEAD(sks_list);
888 LIST_HEAD(sko_list);
1a944a7d 889 int size = 0, i = 0;
2fb9b26a 890 ENTRY;
891
2fb9b26a 892 /*
ea3e6ca9
BB
893 * Move empty slabs and objects which have not been touched in
894 * skc_delay seconds on to private lists to be freed outside
1a944a7d
BB
 895 * the spin lock. This delay time is important to avoid thrashing;
 896 * however, when the flag is set the delay will not be used.
2fb9b26a 897 */
ea3e6ca9 898 spin_lock(&skc->skc_lock);
1a944a7d
BB
899 list_for_each_entry_safe_reverse(sks,m,&skc->skc_partial_list,sks_list){
900 /*
901 * All empty slabs are at the end of skc->skc_partial_list,
902 * therefore once a non-empty slab is found we can stop
903 * scanning. Additionally, stop when reaching the target
 904 * reclaim 'count' if a non-zero threshold is given.
905 */
906 if ((sks->sks_ref > 0) || (count && i > count))
37db7d8c
BB
907 break;
908
37db7d8c 909 if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) {
ea3e6ca9 910 spl_slab_free(sks, &sks_list, &sko_list);
37db7d8c
BB
911 i++;
912 }
ea3e6ca9
BB
913 }
914 spin_unlock(&skc->skc_lock);
915
916 /*
1a944a7d
BB
917 * The following two loops ensure all the object destructors are
918 * run, any offslab objects are freed, and the slabs themselves
919 * are freed. This is all done outside the skc->skc_lock since
920 * this allows the destructor to sleep, and allows us to perform
 921 * a conditional reschedule when freeing a large number of
922 * objects and slabs back to the system.
ea3e6ca9 923 */
1a944a7d 924 if (skc->skc_flags & KMC_OFFSLAB)
ea3e6ca9
BB
925 size = P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) +
926 P2ROUNDUP(sizeof(spl_kmem_obj_t), skc->skc_obj_align);
927
1a944a7d
BB
928 list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
929 ASSERT(sko->sko_magic == SKO_MAGIC);
930
931 if (skc->skc_dtor)
932 skc->skc_dtor(sko->sko_addr, skc->skc_private);
933
934 if (skc->skc_flags & KMC_OFFSLAB)
ea3e6ca9 935 kv_free(skc, sko->sko_addr, size);
1a944a7d
BB
936
937 cond_resched();
2fb9b26a 938 }
939
37db7d8c 940 list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
1a944a7d 941 ASSERT(sks->sks_magic == SKS_MAGIC);
ea3e6ca9 942 kv_free(skc, sks, skc->skc_slab_size);
37db7d8c
BB
943 cond_resched();
944 }
ea3e6ca9
BB
945
946 EXIT;
f1ca4da6 947}
948
ea3e6ca9
BB
949/*
950 * Called regularly on all caches to age objects out of the magazines
 951 * which have not been accessed in skc->skc_delay seconds. This prevents
952 * idle magazines from holding memory which might be better used by
953 * other caches or parts of the system. The delay is present to
954 * prevent thrashing the magazine.
955 */
956static void
957spl_magazine_age(void *data)
f1ca4da6 958{
9b1b8e4c
BB
959 spl_kmem_magazine_t *skm =
960 spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
961 spl_kmem_cache_t *skc = skm->skm_cache;
962 int i = smp_processor_id();
963
964 ASSERT(skm->skm_magic == SKM_MAGIC);
965 ASSERT(skc->skc_magic == SKC_MAGIC);
966 ASSERT(skc->skc_mag[i] == skm);
f1ca4da6 967
ea3e6ca9
BB
968 if (skm->skm_avail > 0 &&
969 time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
970 (void)spl_cache_flush(skc, skm, skm->skm_refill);
9b1b8e4c
BB
971
972 if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
973 schedule_delayed_work_on(i, &skm->skm_work,
974 skc->skc_delay / 3 * HZ);
ea3e6ca9 975}
4efd4118 976
ea3e6ca9
BB
977/*
978 * Called regularly to keep a downward pressure on the size of idle
979 * magazines and to release free slabs from the cache. This function
 980 * never calls the registered reclaim function; that only occurs
981 * under memory pressure or with a direct call to spl_kmem_reap().
982 */
983static void
984spl_cache_age(void *data)
985{
9b1b8e4c 986 spl_kmem_cache_t *skc =
ea3e6ca9
BB
987 spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);
988
989 ASSERT(skc->skc_magic == SKC_MAGIC);
37db7d8c 990 spl_slab_reclaim(skc, skc->skc_reap, 0);
ea3e6ca9
BB
991
992 if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
37db7d8c 993 schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
2fb9b26a 994}
f1ca4da6 995
ea3e6ca9
BB
996/*
 997 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
998 * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB. However,
999 * for very small objects we may end up with more than this so as not
1000 * to waste space in the minimal allocation of a single page. Also for
 1001 * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
 1002 * any lower than this and we will fail.
1003 */
48e0606a
BB
1004static int
1005spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
1006{
ea3e6ca9 1007 int sks_size, obj_size, max_size, align;
48e0606a
BB
1008
1009 if (skc->skc_flags & KMC_OFFSLAB) {
ea3e6ca9 1010 *objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
48e0606a
BB
1011 *size = sizeof(spl_kmem_slab_t);
1012 } else {
ea3e6ca9
BB
1013 align = skc->skc_obj_align;
1014 sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align);
1015 obj_size = P2ROUNDUP(skc->skc_obj_size, align) +
1016 P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
1017
1018 if (skc->skc_flags & KMC_KMEM)
1019 max_size = ((uint64_t)1 << (MAX_ORDER-1)) * PAGE_SIZE;
1020 else
1021 max_size = (32 * 1024 * 1024);
48e0606a 1022
ea3e6ca9
BB
1023 for (*size = PAGE_SIZE; *size <= max_size; *size += PAGE_SIZE) {
1024 *objs = (*size - sks_size) / obj_size;
1025 if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
1026 RETURN(0);
1027 }
48e0606a 1028
ea3e6ca9
BB
1029 /*
 1030 * Unable to satisfy the target objects per slab, fall back to
 1031 * allocating a maximally sized slab and, assuming it can
 1032 * contain the minimum object count, use it. If not, fail.
1033 */
1034 *size = max_size;
1035 *objs = (*size - sks_size) / obj_size;
1036 if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
1037 RETURN(0);
48e0606a
BB
1038 }
1039
ea3e6ca9 1040 RETURN(-ENOSPC);
48e0606a
BB
1041}
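/*
 * Worked example (editor's sketch, assuming SPL_KMEM_CACHE_OBJ_PER_SLAB
 * is 32, spl_kmem_obj_t rounds up to 32 bytes, and the spl_kmem_slab_t
 * header rounds up to 64 bytes): a kmem backed cache with 512 byte
 * objects and 8 byte alignment needs 544 bytes per slot.  Four 4 KiB
 * pages hold only (16384 - 64) / 544 = 30 objects, so the loop keeps
 * growing the slab and settles on five pages (20480 bytes), which
 * holds 37 objects.
 */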
1042
ea3e6ca9
BB
1043/*
1044 * Make a guess at reasonable per-cpu magazine size based on the size of
1045 * each object and the cost of caching N of them in each magazine. Long
1046 * term this should really adapt based on an observed usage heuristic.
1047 */
4afaaefa 1048static int
1049spl_magazine_size(spl_kmem_cache_t *skc)
1050{
48e0606a 1051 int size, align = skc->skc_obj_align;
4afaaefa 1052 ENTRY;
1053
ea3e6ca9 1054	/* Per-magazine sizes below assume a 4 KiB page size */
48e0606a 1055	if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 256))
ea3e6ca9 1056		size = 4;  /* Minimum 4 MiB per-magazine */
48e0606a 1057	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 32))
ea3e6ca9 1058		size = 16; /* Minimum 2 MiB per-magazine */
48e0606a 1059	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE))
ea3e6ca9 1060		size = 64; /* Minimum 256 KiB per-magazine */
48e0606a 1061	else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE / 4))
ea3e6ca9 1062		size = 128; /* Minimum 128 KiB per-magazine */
4afaaefa 1063 else
ea3e6ca9 1064 size = 256;
4afaaefa 1065
1066 RETURN(size);
1067}
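/*
 * Example (editor's sketch): with 4 KiB pages the thresholds above give
 * 256 magazine entries for a 512 byte object (no larger than a quarter
 * page), 64 entries for an 8 KiB object (larger than one page), and
 * only 4 entries for a 2 MiB object (larger than 256 pages).
 */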
1068
ea3e6ca9
BB
1069/*
 1070 * Allocate a per-cpu magazine to associate with a specific core.
1071 */
4afaaefa 1072static spl_kmem_magazine_t *
1073spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
1074{
1075 spl_kmem_magazine_t *skm;
1076 int size = sizeof(spl_kmem_magazine_t) +
1077 sizeof(void *) * skc->skc_mag_size;
1078 ENTRY;
1079
ea3e6ca9 1080 skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node);
4afaaefa 1081 if (skm) {
1082 skm->skm_magic = SKM_MAGIC;
1083 skm->skm_avail = 0;
1084 skm->skm_size = skc->skc_mag_size;
1085 skm->skm_refill = skc->skc_mag_refill;
9b1b8e4c
BB
1086 skm->skm_cache = skc;
1087 spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
ea3e6ca9 1088 skm->skm_age = jiffies;
4afaaefa 1089 }
1090
1091 RETURN(skm);
1092}
1093
ea3e6ca9
BB
1094/*
 1095 * Free a per-cpu magazine associated with a specific core.
1096 */
4afaaefa 1097static void
1098spl_magazine_free(spl_kmem_magazine_t *skm)
1099{
a0f6da3d 1100 int size = sizeof(spl_kmem_magazine_t) +
1101 sizeof(void *) * skm->skm_size;
1102
4afaaefa 1103 ENTRY;
1104 ASSERT(skm->skm_magic == SKM_MAGIC);
1105 ASSERT(skm->skm_avail == 0);
a0f6da3d 1106
1107 kmem_free(skm, size);
4afaaefa 1108 EXIT;
1109}
1110
ea3e6ca9
BB
1111/*
 1112 * Create all per-cpu magazines of reasonable sizes.
1113 */
4afaaefa 1114static int
1115spl_magazine_create(spl_kmem_cache_t *skc)
1116{
37db7d8c 1117 int i;
4afaaefa 1118 ENTRY;
1119
1120 skc->skc_mag_size = spl_magazine_size(skc);
ea3e6ca9 1121 skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
4afaaefa 1122
37db7d8c
BB
1123 for_each_online_cpu(i) {
1124 skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i));
1125 if (!skc->skc_mag[i]) {
1126 for (i--; i >= 0; i--)
1127 spl_magazine_free(skc->skc_mag[i]);
4afaaefa 1128
37db7d8c
BB
1129 RETURN(-ENOMEM);
1130 }
1131 }
4afaaefa 1132
9b1b8e4c
BB
1133 /* Only after everything is allocated schedule magazine work */
1134 for_each_online_cpu(i)
1135 schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
1136 skc->skc_delay / 3 * HZ);
1137
37db7d8c 1138 RETURN(0);
4afaaefa 1139}
1140
ea3e6ca9
BB
1141/*
 1142 * Destroy all per-cpu magazines.
1143 */
4afaaefa 1144static void
1145spl_magazine_destroy(spl_kmem_cache_t *skc)
1146{
37db7d8c
BB
1147 spl_kmem_magazine_t *skm;
1148 int i;
4afaaefa 1149 ENTRY;
37db7d8c
BB
1150
1151 for_each_online_cpu(i) {
1152 skm = skc->skc_mag[i];
1153 (void)spl_cache_flush(skc, skm, skm->skm_avail);
1154 spl_magazine_free(skm);
1155 }
1156
4afaaefa 1157 EXIT;
1158}
1159
ea3e6ca9
BB
1160/*
 1161 * Create an object cache based on the following arguments:
1162 * name cache name
1163 * size cache object size
1164 * align cache object alignment
1165 * ctor cache object constructor
1166 * dtor cache object destructor
1167 * reclaim cache object reclaim
1168 * priv cache private data for ctor/dtor/reclaim
1169 * vmp unused must be NULL
1170 * flags
1171 * KMC_NOTOUCH Disable cache object aging (unsupported)
1172 * KMC_NODEBUG Disable debugging (unsupported)
1173 * KMC_NOMAGAZINE Disable magazine (unsupported)
1174 * KMC_NOHASH Disable hashing (unsupported)
1175 * KMC_QCACHE Disable qcache (unsupported)
1176 * KMC_KMEM Force kmem backed cache
1177 * KMC_VMEM Force vmem backed cache
1178 * KMC_OFFSLAB Locate objects off the slab
1179 */
2fb9b26a 1180spl_kmem_cache_t *
1181spl_kmem_cache_create(char *name, size_t size, size_t align,
1182 spl_kmem_ctor_t ctor,
1183 spl_kmem_dtor_t dtor,
1184 spl_kmem_reclaim_t reclaim,
1185 void *priv, void *vmp, int flags)
1186{
1187 spl_kmem_cache_t *skc;
a1502d76 1188 int rc, kmem_flags = KM_SLEEP;
2fb9b26a 1189 ENTRY;
937879f1 1190
a1502d76 1191 ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
1192 ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
1193 ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
48e0606a 1194 ASSERT(vmp == NULL);
a1502d76 1195
2fb9b26a 1196 /* We may be called when there is a non-zero preempt_count or
 1197 * interrupts are disabled in which case we must not sleep.
1198 */
e9d7a2be 1199 if (current_thread_info()->preempt_count || irqs_disabled())
2fb9b26a 1200 kmem_flags = KM_NOSLEEP;
0a6fd143 1201
2fb9b26a 1202 /* Allocate new cache memory and initialize. */
ff449ac4 1203 skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags);
e9d7a2be 1204 if (skc == NULL)
2fb9b26a 1205 RETURN(NULL);
d61e12af 1206
2fb9b26a 1207 skc->skc_magic = SKC_MAGIC;
2fb9b26a 1208 skc->skc_name_size = strlen(name) + 1;
1209 skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags);
1210 if (skc->skc_name == NULL) {
1211 kmem_free(skc, sizeof(*skc));
1212 RETURN(NULL);
1213 }
1214 strncpy(skc->skc_name, name, skc->skc_name_size);
1215
e9d7a2be 1216 skc->skc_ctor = ctor;
1217 skc->skc_dtor = dtor;
1218 skc->skc_reclaim = reclaim;
2fb9b26a 1219 skc->skc_private = priv;
1220 skc->skc_vmp = vmp;
1221 skc->skc_flags = flags;
1222 skc->skc_obj_size = size;
48e0606a 1223 skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
2fb9b26a 1224 skc->skc_delay = SPL_KMEM_CACHE_DELAY;
37db7d8c 1225 skc->skc_reap = SPL_KMEM_CACHE_REAP;
ea3e6ca9 1226 atomic_set(&skc->skc_ref, 0);
2fb9b26a 1227
2fb9b26a 1228 INIT_LIST_HEAD(&skc->skc_list);
1229 INIT_LIST_HEAD(&skc->skc_complete_list);
1230 INIT_LIST_HEAD(&skc->skc_partial_list);
d46630e0 1231 spin_lock_init(&skc->skc_lock);
e9d7a2be 1232 skc->skc_slab_fail = 0;
1233 skc->skc_slab_create = 0;
1234 skc->skc_slab_destroy = 0;
2fb9b26a 1235 skc->skc_slab_total = 0;
1236 skc->skc_slab_alloc = 0;
1237 skc->skc_slab_max = 0;
1238 skc->skc_obj_total = 0;
1239 skc->skc_obj_alloc = 0;
1240 skc->skc_obj_max = 0;
a1502d76 1241
48e0606a
BB
1242 if (align) {
1243 ASSERT((align & (align - 1)) == 0); /* Power of two */
1244 ASSERT(align >= SPL_KMEM_CACHE_ALIGN); /* Minimum size */
1245 skc->skc_obj_align = align;
1246 }
1247
a1502d76 1248 /* If none passed select a cache type based on object size */
1249 if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) {
48e0606a
BB
1250 if (P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) <
1251 (PAGE_SIZE / 8)) {
a1502d76 1252 skc->skc_flags |= KMC_KMEM;
1253 } else {
1254 skc->skc_flags |= KMC_VMEM;
1255 }
1256 }
1257
48e0606a
BB
1258 rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size);
1259 if (rc)
1260 GOTO(out, rc);
4afaaefa 1261
1262 rc = spl_magazine_create(skc);
48e0606a
BB
1263 if (rc)
1264 GOTO(out, rc);
2fb9b26a 1265
ea3e6ca9 1266 spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc);
37db7d8c 1267 schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
ea3e6ca9 1268
2fb9b26a 1269 down_write(&spl_kmem_cache_sem);
e9d7a2be 1270 list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
2fb9b26a 1271 up_write(&spl_kmem_cache_sem);
1272
e9d7a2be 1273 RETURN(skc);
48e0606a
BB
1274out:
1275 kmem_free(skc->skc_name, skc->skc_name_size);
1276 kmem_free(skc, sizeof(*skc));
1277 RETURN(NULL);
f1ca4da6 1278}
2fb9b26a 1279EXPORT_SYMBOL(spl_kmem_cache_create);
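/*
 * Example (editor's sketch): a minimal caller of the interface
 * documented above.  The object type 'my_obj_t' is hypothetical; the
 * constructor, destructor and reclaim callbacks may all be NULL, the
 * vmp argument must be NULL, and KMC_VMEM forces a virtual address
 * space backed cache.
 *
 *	spl_kmem_cache_t *cache;
 *
 *	cache = spl_kmem_cache_create("my_obj_cache", sizeof(my_obj_t), 0,
 *	    NULL, NULL, NULL, NULL, NULL, KMC_VMEM);
 *	...
 *	spl_kmem_cache_destroy(cache);
 */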
f1ca4da6 1280
ea3e6ca9
BB
1281/*
 1282 * Destroy a cache and all objects associated with the cache.
1283 */
2fb9b26a 1284void
1285spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
f1ca4da6 1286{
ea3e6ca9 1287 DECLARE_WAIT_QUEUE_HEAD(wq);
9b1b8e4c 1288 int i;
2fb9b26a 1289 ENTRY;
f1ca4da6 1290
e9d7a2be 1291 ASSERT(skc->skc_magic == SKC_MAGIC);
1292
1293 down_write(&spl_kmem_cache_sem);
1294 list_del_init(&skc->skc_list);
1295 up_write(&spl_kmem_cache_sem);
2fb9b26a 1296
ea3e6ca9
BB
 1297	/* Cancel and wait for any pending delayed work */
1298 ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1299 cancel_delayed_work(&skc->skc_work);
9b1b8e4c
BB
1300 for_each_online_cpu(i)
1301 cancel_delayed_work(&skc->skc_mag[i]->skm_work);
1302
ea3e6ca9
BB
1303 flush_scheduled_work();
1304
1305 /* Wait until all current callers complete, this is mainly
1306 * to catch the case where a low memory situation triggers a
1307 * cache reaping action which races with this destroy. */
1308 wait_event(wq, atomic_read(&skc->skc_ref) == 0);
1309
4afaaefa 1310 spl_magazine_destroy(skc);
37db7d8c 1311 spl_slab_reclaim(skc, 0, 1);
d46630e0 1312 spin_lock(&skc->skc_lock);
d6a26c6a 1313
2fb9b26a 1314 /* Validate there are no objects in use and free all the
4afaaefa 1315 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
ea3e6ca9
BB
1316 ASSERT3U(skc->skc_slab_alloc, ==, 0);
1317 ASSERT3U(skc->skc_obj_alloc, ==, 0);
1318 ASSERT3U(skc->skc_slab_total, ==, 0);
1319 ASSERT3U(skc->skc_obj_total, ==, 0);
2fb9b26a 1320 ASSERT(list_empty(&skc->skc_complete_list));
a1502d76 1321
2fb9b26a 1322 kmem_free(skc->skc_name, skc->skc_name_size);
d46630e0 1323 spin_unlock(&skc->skc_lock);
ff449ac4 1324
4afaaefa 1325 kmem_free(skc, sizeof(*skc));
2fb9b26a 1326
1327 EXIT;
f1ca4da6 1328}
2fb9b26a 1329EXPORT_SYMBOL(spl_kmem_cache_destroy);
f1ca4da6 1330
ea3e6ca9
BB
1331/*
1332 * Allocate an object from a slab attached to the cache. This is used to
1333 * repopulate the per-cpu magazine caches in batches when they run low.
1334 */
4afaaefa 1335static void *
1336spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
f1ca4da6 1337{
2fb9b26a 1338 spl_kmem_obj_t *sko;
f1ca4da6 1339
e9d7a2be 1340 ASSERT(skc->skc_magic == SKC_MAGIC);
1341 ASSERT(sks->sks_magic == SKS_MAGIC);
4afaaefa 1342 ASSERT(spin_is_locked(&skc->skc_lock));
2fb9b26a 1343
a1502d76 1344 sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
4afaaefa 1345 ASSERT(sko->sko_magic == SKO_MAGIC);
1346 ASSERT(sko->sko_addr != NULL);
2fb9b26a 1347
a1502d76 1348 /* Remove from sks_free_list */
4afaaefa 1349 list_del_init(&sko->sko_list);
2fb9b26a 1350
4afaaefa 1351 sks->sks_age = jiffies;
1352 sks->sks_ref++;
1353 skc->skc_obj_alloc++;
2fb9b26a 1354
4afaaefa 1355 /* Track max obj usage statistics */
1356 if (skc->skc_obj_alloc > skc->skc_obj_max)
1357 skc->skc_obj_max = skc->skc_obj_alloc;
2fb9b26a 1358
4afaaefa 1359 /* Track max slab usage statistics */
1360 if (sks->sks_ref == 1) {
1361 skc->skc_slab_alloc++;
f1ca4da6 1362
4afaaefa 1363 if (skc->skc_slab_alloc > skc->skc_slab_max)
1364 skc->skc_slab_max = skc->skc_slab_alloc;
2fb9b26a 1365 }
1366
4afaaefa 1367 return sko->sko_addr;
1368}
c30df9c8 1369
ea3e6ca9
BB
1370/*
 1371 * No available objects on any slabs, create a new slab. Since this
 1372 * is an expensive operation we do it without holding the spinlock and
 1373 * only briefly acquire it when we link in the fully allocated and
1374 * constructed slab.
4afaaefa 1375 */
1376static spl_kmem_slab_t *
1377spl_cache_grow(spl_kmem_cache_t *skc, int flags)
1378{
e9d7a2be 1379 spl_kmem_slab_t *sks;
4afaaefa 1380 ENTRY;
f1ca4da6 1381
e9d7a2be 1382 ASSERT(skc->skc_magic == SKC_MAGIC);
ea3e6ca9
BB
1383 local_irq_enable();
1384 might_sleep();
e9d7a2be 1385
ea3e6ca9
BB
1386 /*
1387 * Before allocating a new slab check if the slab is being reaped.
1388 * If it is there is a good chance we can wait until it finishes
1389 * and then use one of the newly freed but not aged-out slabs.
1390 */
1391 if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
1392 schedule();
1393 GOTO(out, sks= NULL);
4afaaefa 1394 }
2fb9b26a 1395
ea3e6ca9
BB
1396 /* Allocate a new slab for the cache */
1397 sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN);
1398 if (sks == NULL)
1399 GOTO(out, sks = NULL);
4afaaefa 1400
ea3e6ca9 1401 /* Link the new empty slab in to the end of skc_partial_list. */
d46630e0 1402 spin_lock(&skc->skc_lock);
2fb9b26a 1403 skc->skc_slab_total++;
1404 skc->skc_obj_total += sks->sks_objs;
1405 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
d46630e0 1406 spin_unlock(&skc->skc_lock);
ea3e6ca9
BB
1407out:
1408 local_irq_disable();
4afaaefa 1409
1410 RETURN(sks);
f1ca4da6 1411}
1412
ea3e6ca9
BB
1413/*
1414 * Refill a per-cpu magazine with objects from the slabs for this
1415 * cache. Ideally the magazine can be repopulated using existing
 1416 * objects which have been released; however, if we are unable to
1417 * locate enough free objects new slabs of objects will be created.
1418 */
4afaaefa 1419static int
1420spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
f1ca4da6 1421{
e9d7a2be 1422 spl_kmem_slab_t *sks;
1423 int rc = 0, refill;
937879f1 1424 ENTRY;
f1ca4da6 1425
e9d7a2be 1426 ASSERT(skc->skc_magic == SKC_MAGIC);
1427 ASSERT(skm->skm_magic == SKM_MAGIC);
1428
e9d7a2be 1429 refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
d46630e0 1430 spin_lock(&skc->skc_lock);
ff449ac4 1431
4afaaefa 1432 while (refill > 0) {
ea3e6ca9 1433 /* No slabs available we may need to grow the cache */
4afaaefa 1434 if (list_empty(&skc->skc_partial_list)) {
1435 spin_unlock(&skc->skc_lock);
ff449ac4 1436
4afaaefa 1437 sks = spl_cache_grow(skc, flags);
1438 if (!sks)
e9d7a2be 1439 GOTO(out, rc);
4afaaefa 1440
1441 /* Rescheduled to different CPU skm is not local */
1442 if (skm != skc->skc_mag[smp_processor_id()])
e9d7a2be 1443 GOTO(out, rc);
1444
1445 /* Potentially rescheduled to the same CPU but
 1446 * allocations may have occurred from this CPU while
1447 * we were sleeping so recalculate max refill. */
1448 refill = MIN(refill, skm->skm_size - skm->skm_avail);
4afaaefa 1449
1450 spin_lock(&skc->skc_lock);
1451 continue;
1452 }
d46630e0 1453
4afaaefa 1454 /* Grab the next available slab */
1455 sks = list_entry((&skc->skc_partial_list)->next,
1456 spl_kmem_slab_t, sks_list);
1457 ASSERT(sks->sks_magic == SKS_MAGIC);
1458 ASSERT(sks->sks_ref < sks->sks_objs);
1459 ASSERT(!list_empty(&sks->sks_free_list));
d46630e0 1460
4afaaefa 1461 /* Consume as many objects as needed to refill the requested
e9d7a2be 1462 * cache. We must also be careful not to overfill it. */
1463 while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) {
1464 ASSERT(skm->skm_avail < skm->skm_size);
1465 ASSERT(rc < skm->skm_size);
4afaaefa 1466 skm->skm_objs[skm->skm_avail++]=spl_cache_obj(skc,sks);
e9d7a2be 1467 }
f1ca4da6 1468
4afaaefa 1469 /* Move slab to skc_complete_list when full */
1470 if (sks->sks_ref == sks->sks_objs) {
1471 list_del(&sks->sks_list);
1472 list_add(&sks->sks_list, &skc->skc_complete_list);
2fb9b26a 1473 }
1474 }
57d86234 1475
4afaaefa 1476 spin_unlock(&skc->skc_lock);
1477out:
1478 /* Returns the number of entries added to cache */
e9d7a2be 1479 RETURN(rc);
4afaaefa 1480}
1481
ea3e6ca9
BB
1482/*
1483 * Release an object back to the slab from which it came.
1484 */
4afaaefa 1485static void
1486spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
1487{
e9d7a2be 1488 spl_kmem_slab_t *sks = NULL;
4afaaefa 1489 spl_kmem_obj_t *sko = NULL;
1490 ENTRY;
1491
e9d7a2be 1492 ASSERT(skc->skc_magic == SKC_MAGIC);
4afaaefa 1493 ASSERT(spin_is_locked(&skc->skc_lock));
1494
48e0606a 1495 sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
a1502d76 1496 ASSERT(sko->sko_magic == SKO_MAGIC);
4afaaefa 1497
1498 sks = sko->sko_slab;
a1502d76 1499 ASSERT(sks->sks_magic == SKS_MAGIC);
2fb9b26a 1500 ASSERT(sks->sks_cache == skc);
2fb9b26a 1501 list_add(&sko->sko_list, &sks->sks_free_list);
d6a26c6a 1502
2fb9b26a 1503 sks->sks_age = jiffies;
4afaaefa 1504 sks->sks_ref--;
2fb9b26a 1505 skc->skc_obj_alloc--;
f1ca4da6 1506
2fb9b26a 1507 /* Move slab to skc_partial_list when no longer full. Slabs
4afaaefa 1508	 * are added to the head to keep the partial list in quasi-full
1509 * sorted order. Fuller at the head, emptier at the tail. */
1510 if (sks->sks_ref == (sks->sks_objs - 1)) {
2fb9b26a 1511 list_del(&sks->sks_list);
1512 list_add(&sks->sks_list, &skc->skc_partial_list);
1513 }
f1ca4da6 1514
2fb9b26a 1515	/* Move empty slabs to the end of the partial list so
4afaaefa 1516 * they can be easily found and freed during reclamation. */
1517 if (sks->sks_ref == 0) {
2fb9b26a 1518 list_del(&sks->sks_list);
1519 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
1520 skc->skc_slab_alloc--;
1521 }
1522
4afaaefa 1523 EXIT;
1524}
1525
ea3e6ca9
BB
1526/*
1527 * Release a batch of objects from a per-cpu magazine back to their
1528 * respective slabs. This occurs when we exceed the magazine size,
1529 * are under memory pressure, when the cache is idle, or during
1530 * cache cleanup. The flush argument contains the number of entries
1531 * to remove from the magazine.
1532 */
4afaaefa 1533static int
1534spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
1535{
1536 int i, count = MIN(flush, skm->skm_avail);
1537 ENTRY;
1538
e9d7a2be 1539 ASSERT(skc->skc_magic == SKC_MAGIC);
1540 ASSERT(skm->skm_magic == SKM_MAGIC);
4afaaefa 1541
ea3e6ca9
BB
1542 /*
1543 * XXX: Currently we simply return objects from the magazine to
1544 * the slabs in fifo order. The ideal thing to do from a memory
1545 * fragmentation standpoint is to cheaply determine the set of
1546 * objects in the magazine which will result in the largest
1547 * number of free slabs if released from the magazine.
1548 */
4afaaefa 1549 spin_lock(&skc->skc_lock);
1550 for (i = 0; i < count; i++)
1551 spl_cache_shrink(skc, skm->skm_objs[i]);
1552
e9d7a2be 1553 skm->skm_avail -= count;
1554 memmove(skm->skm_objs, &(skm->skm_objs[count]),
4afaaefa 1555 sizeof(void *) * skm->skm_avail);
1556
d46630e0 1557 spin_unlock(&skc->skc_lock);
4afaaefa 1558
1559 RETURN(count);
1560}
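/*
 * Example (editor's sketch): flushing skm_refill entries from a
 * magazine holding skm_avail == 10 objects with skm_refill == 8 returns
 * the 8 oldest entries (skm_objs[0..7]) to their slabs and memmove()s
 * the remaining 2 objects to the front of skm_objs, leaving
 * skm_avail == 2.
 */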
1561
ea3e6ca9
BB
1562/*
1563 * Allocate an object from the per-cpu magazine, or if the magazine
1564 * is empty directly allocate from a slab and repopulate the magazine.
1565 */
4afaaefa 1566void *
1567spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
1568{
1569 spl_kmem_magazine_t *skm;
1570 unsigned long irq_flags;
1571 void *obj = NULL;
1572 ENTRY;
1573
e9d7a2be 1574 ASSERT(skc->skc_magic == SKC_MAGIC);
ea3e6ca9
BB
1575 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1576 ASSERT(flags & KM_SLEEP);
1577 atomic_inc(&skc->skc_ref);
4afaaefa 1578 local_irq_save(irq_flags);
1579
1580restart:
1581 /* Safe to update per-cpu structure without lock, but
 1582 * in the restart case we must be careful to reacquire
1583 * the local magazine since this may have changed
1584 * when we need to grow the cache. */
1585 skm = skc->skc_mag[smp_processor_id()];
e9d7a2be 1586 ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
1587 skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
1588 skm->skm_size, skm->skm_refill, skm->skm_avail);
4afaaefa 1589
1590 if (likely(skm->skm_avail)) {
1591 /* Object available in CPU cache, use it */
1592 obj = skm->skm_objs[--skm->skm_avail];
ea3e6ca9 1593 skm->skm_age = jiffies;
4afaaefa 1594 } else {
1595 /* Per-CPU cache empty, directly allocate from
1596 * the slab and refill the per-CPU cache. */
1597 (void)spl_cache_refill(skc, skm, flags);
1598 GOTO(restart, obj = NULL);
1599 }
1600
1601 local_irq_restore(irq_flags);
fece7c99 1602 ASSERT(obj);
48e0606a 1603 ASSERT(((unsigned long)(obj) % skc->skc_obj_align) == 0);
4afaaefa 1604
1605 /* Pre-emptively migrate object to CPU L1 cache */
1606 prefetchw(obj);
ea3e6ca9 1607 atomic_dec(&skc->skc_ref);
4afaaefa 1608
1609 RETURN(obj);
1610}
1611EXPORT_SYMBOL(spl_kmem_cache_alloc);
1612
ea3e6ca9
BB
1613/*
 1614 * Free an object back to the local per-cpu magazine; there is no
 1615 * guarantee that this is the same magazine the object was originally
 1616 * allocated from. We may need to flush entries from the magazine
1617 * back to the slabs to make space.
1618 */
4afaaefa 1619void
1620spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
1621{
1622 spl_kmem_magazine_t *skm;
1623 unsigned long flags;
1624 ENTRY;
1625
e9d7a2be 1626 ASSERT(skc->skc_magic == SKC_MAGIC);
ea3e6ca9
BB
1627 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1628 atomic_inc(&skc->skc_ref);
4afaaefa 1629 local_irq_save(flags);
1630
1631 /* Safe to update per-cpu structure without lock, but
 1632	 * since no remote memory allocation tracking is being performed
1633 * it is entirely possible to allocate an object from one
1634 * CPU cache and return it to another. */
1635 skm = skc->skc_mag[smp_processor_id()];
e9d7a2be 1636 ASSERT(skm->skm_magic == SKM_MAGIC);
4afaaefa 1637
1638 /* Per-CPU cache full, flush it to make space */
1639 if (unlikely(skm->skm_avail >= skm->skm_size))
1640 (void)spl_cache_flush(skc, skm, skm->skm_refill);
1641
1642 /* Available space in cache, use it */
1643 skm->skm_objs[skm->skm_avail++] = obj;
1644
1645 local_irq_restore(flags);
ea3e6ca9 1646 atomic_dec(&skc->skc_ref);
4afaaefa 1647
1648 EXIT;
f1ca4da6 1649}
2fb9b26a 1650EXPORT_SYMBOL(spl_kmem_cache_free);
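/*
 * Example (editor's sketch): allocating and releasing an object from a
 * cache created by spl_kmem_cache_create() above.  KM_SLEEP is required
 * by the ASSERT in spl_kmem_cache_alloc(), and the object must be
 * returned to the cache it was allocated from.
 *
 *	void *obj;
 *
 *	obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, obj);
 */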
5c2bb9b2 1651
ea3e6ca9
BB
1652/*
1653 * The generic shrinker function for all caches.  Under Linux a shrinker
1654 * need not be tightly coupled with a slab cache.  In fact Linux
1655 * systematically tries calling all registered shrinker callbacks which
1656 * report that they contain unused objects.  Because of this we only
1657 * register one shrinker function in the shim layer for all slab caches.
1658 * We always attempt to shrink all caches when this generic shrinker
1659 * is called.  The shrinker should return the number of free objects
1660 * in the cache when called with nr_to_scan == 0, but not attempt to
1661 * free any objects.  When nr_to_scan > 0 it is a request that nr_to_scan
1662 * objects be freed; because Solaris semantics are to free all
1663 * available objects, we may free more objects than requested.
1664 */
2fb9b26a 1665static int
4afaaefa 1666spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask)
2fb9b26a 1667{
e9d7a2be 1668 spl_kmem_cache_t *skc;
ea3e6ca9 1669 int unused = 0;
5c2bb9b2 1670
e9d7a2be 1671 down_read(&spl_kmem_cache_sem);
ea3e6ca9
BB
1672 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
1673 if (nr_to_scan)
1674 spl_kmem_cache_reap_now(skc);
1675
1676 /*
1677		 * Presume everything alloc'ed is reclaimable; this ensures
1678		 * we are called again with nr_to_scan > 0 so we can try to
1679		 * reclaim.  The exact number is not important either, so
1680		 * we forgo taking the already highly contended cache lock.
1681 */
1682 unused += skc->skc_obj_alloc;
1683 }
e9d7a2be 1684 up_read(&spl_kmem_cache_sem);
2fb9b26a 1685
ea3e6ca9 1686 return (unused * sysctl_vfs_cache_pressure) / 100;
5c2bb9b2 1687}
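/*
 * A sketch of how the VM is expected to drive this shrinker; the caller
 * below is hypothetical, the real calls come from the kernel's shrinker
 * machinery.  A pass with nr_to_scan == 0 only reports the reclaimable
 * count scaled by sysctl_vfs_cache_pressure, while a pass with
 * nr_to_scan > 0 actually reaps the caches and, per Solaris semantics,
 * may free more objects than requested.
 */
static void
example_shrink_pass(unsigned int gfp_mask)
{
	int reclaimable;

	/* Query only, no objects are freed by this call. */
	reclaimable = spl_kmem_cache_generic_shrinker(0, gfp_mask);

	/* Reap all registered caches. */
	if (reclaimable > 0)
		(void)spl_kmem_cache_generic_shrinker(reclaimable, gfp_mask);
}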
5c2bb9b2 1688
ea3e6ca9
BB
1689/*
1690 * Call the registered reclaim function for a cache. Depending on how
1691 * many and which objects are released, it may simply repopulate the
1692 * local magazine which will then need to age out.  Objects which cannot
1693 * fit in the magazine will be released back to their slabs, which will
1694 * also need to age out before being released.  This is all just best
1695 * effort and we do not want to thrash creating and destroying slabs.
1696 */
57d86234 1697void
2fb9b26a 1698spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
57d86234 1699{
2fb9b26a 1700 ENTRY;
e9d7a2be 1701
1702 ASSERT(skc->skc_magic == SKC_MAGIC);
ea3e6ca9 1703 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
2fb9b26a 1704
ea3e6ca9
BB
1705 /* Prevent concurrent cache reaping when contended */
1706 if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
1707 EXIT;
1708 return;
1709 }
2fb9b26a 1710
ea3e6ca9 1711 atomic_inc(&skc->skc_ref);
4afaaefa 1712
ea3e6ca9
BB
1713 if (skc->skc_reclaim)
1714 skc->skc_reclaim(skc->skc_private);
4afaaefa 1715
37db7d8c 1716 spl_slab_reclaim(skc, skc->skc_reap, 0);
ea3e6ca9
BB
1717 clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
1718 atomic_dec(&skc->skc_ref);
4afaaefa 1719
2fb9b26a 1720 EXIT;
57d86234 1721}
2fb9b26a 1722EXPORT_SYMBOL(spl_kmem_cache_reap_now);
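/*
 * A sketch of the consumer hook this path exercises; example_reclaim()
 * is illustrative only and not part of this file.  A callback of this
 * form is registered as the reclaim argument to spl_kmem_cache_create().
 * When spl_kmem_cache_reap_now() runs, the callback is invoked first and
 * any slabs it empties are then trimmed by spl_slab_reclaim().  Since
 * this is all best effort, the callback should simply release whatever
 * it can easily give back.
 */
static void
example_reclaim(void *priv)
{
	/* Drop consumer-held references so cached objects become freeable. */
}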
57d86234 1723
ea3e6ca9
BB
1724/*
1725 * Reap all free slabs from all registered caches.
1726 */
f1b59d26 1727void
2fb9b26a 1728spl_kmem_reap(void)
937879f1 1729{
4afaaefa 1730 spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL);
f1ca4da6 1731}
2fb9b26a 1732EXPORT_SYMBOL(spl_kmem_reap);
5d86345d 1733
ff449ac4 1734#if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
c6dc93d6 1735static char *
4afaaefa 1736spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
d6a26c6a 1737{
e9d7a2be 1738 int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
d6a26c6a 1739 int i, flag = 1;
1740
1741 ASSERT(str != NULL && len >= 17);
e9d7a2be 1742 memset(str, 0, len);
d6a26c6a 1743
1744 /* Check for a fully printable string, and while we are at
1745	 * it, place the printable characters in the passed buffer. */
1746 for (i = 0; i < size; i++) {
e9d7a2be 1747 str[i] = ((char *)(kd->kd_addr))[i];
1748 if (isprint(str[i])) {
1749 continue;
1750 } else {
1751 /* Minimum number of printable characters found
1752			 * to make it worthwhile to print this as ASCII. */
1753 if (i > min)
1754 break;
1755
1756 flag = 0;
1757 break;
1758 }
d6a26c6a 1759 }
1760
1761 if (!flag) {
1762 sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
1763 *((uint8_t *)kd->kd_addr),
1764 *((uint8_t *)kd->kd_addr + 2),
1765 *((uint8_t *)kd->kd_addr + 4),
1766 *((uint8_t *)kd->kd_addr + 6),
1767 *((uint8_t *)kd->kd_addr + 8),
1768 *((uint8_t *)kd->kd_addr + 10),
1769 *((uint8_t *)kd->kd_addr + 12),
1770 *((uint8_t *)kd->kd_addr + 14));
1771 }
1772
1773 return str;
1774}
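/*
 * Illustrative behaviour of the helper above (example values only): an
 * allocation beginning with printable text such as "znode_cache" is
 * copied into str verbatim, while binary data falls back to a 16
 * character hex string built from bytes 0, 2, 4, ..., 14 of the
 * allocation.
 */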
1775
a1502d76 1776static int
1777spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
1778{
1779 int i;
1780 ENTRY;
1781
1782 spin_lock_init(lock);
1783 INIT_LIST_HEAD(list);
1784
1785 for (i = 0; i < size; i++)
1786 INIT_HLIST_HEAD(&kmem_table[i]);
1787
1788 RETURN(0);
1789}
1790
ff449ac4 1791static void
1792spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
5d86345d 1793{
2fb9b26a 1794 unsigned long flags;
1795 kmem_debug_t *kd;
1796 char str[17];
a1502d76 1797 ENTRY;
2fb9b26a 1798
ff449ac4 1799 spin_lock_irqsave(lock, flags);
1800 if (!list_empty(list))
a0f6da3d 1801 printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
1802 "size", "data", "func", "line");
2fb9b26a 1803
ff449ac4 1804 list_for_each_entry(kd, list, kd_list)
a0f6da3d 1805 printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
b6b2acc6 1806 (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
2fb9b26a 1807 kd->kd_func, kd->kd_line);
1808
ff449ac4 1809 spin_unlock_irqrestore(lock, flags);
a1502d76 1810 EXIT;
ff449ac4 1811}
1812#else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
a1502d76 1813#define spl_kmem_init_tracking(list, lock, size)
ff449ac4 1814#define spl_kmem_fini_tracking(list, lock)
1815#endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
1816
36b313da
BB
1817static void
1818spl_kmem_init_globals(void)
1819{
1820 struct zone *zone;
1821
1822	/* For now all zones are included; it may be wise to restrict
1823 * this to normal and highmem zones if we see problems. */
1824 for_each_zone(zone) {
1825
1826 if (!populated_zone(zone))
1827 continue;
1828
1829 minfree += zone->pages_min;
1830 desfree += zone->pages_low;
1831 lotsfree += zone->pages_high;
1832 }
4ab13d3b
BB
1833
1834 /* Solaris default values */
96dded38
BB
1835 swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
1836 swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
36b313da
BB
1837}
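/*
 * Worked example of the sizing above (values are illustrative): with
 * 4 KiB pages and 8 GiB of physical memory (physmem = 2097152 pages):
 *   swapfs_minfree = MAX(2 MiB >> PAGE_SHIFT, physmem >> 3)
 *                  = MAX(512, 262144)  = 262144 pages (1 GiB)
 *   swapfs_reserve = MIN(4 MiB >> PAGE_SHIFT, physmem >> 4)
 *                  = MIN(1024, 131072) = 1024 pages (4 MiB)
 */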
1838
d1ff2312
BB
1839/*
1840 * Called at module init when it is safe to use spl_kallsyms_lookup_name()
1841 */
1842int
1843spl_kmem_init_kallsyms_lookup(void)
1844{
1845#ifndef HAVE_GET_VMALLOC_INFO
1846 get_vmalloc_info_fn = (get_vmalloc_info_t)
1847 spl_kallsyms_lookup_name("get_vmalloc_info");
e11d6c5f
BB
1848 if (!get_vmalloc_info_fn) {
1849 printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n");
d1ff2312 1850 return -EFAULT;
e11d6c5f 1851 }
d1ff2312
BB
1852#endif /* HAVE_GET_VMALLOC_INFO */
1853
5232d256
BB
1854#ifdef HAVE_PGDAT_HELPERS
1855# ifndef HAVE_FIRST_ONLINE_PGDAT
d1ff2312
BB
1856 first_online_pgdat_fn = (first_online_pgdat_t)
1857 spl_kallsyms_lookup_name("first_online_pgdat");
e11d6c5f
BB
1858 if (!first_online_pgdat_fn) {
1859 printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
d1ff2312 1860 return -EFAULT;
e11d6c5f 1861 }
5232d256 1862# endif /* HAVE_FIRST_ONLINE_PGDAT */
d1ff2312 1863
5232d256 1864# ifndef HAVE_NEXT_ONLINE_PGDAT
d1ff2312
BB
1865 next_online_pgdat_fn = (next_online_pgdat_t)
1866 spl_kallsyms_lookup_name("next_online_pgdat");
e11d6c5f
BB
1867 if (!next_online_pgdat_fn) {
1868 printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
d1ff2312 1869 return -EFAULT;
e11d6c5f 1870 }
5232d256 1871# endif /* HAVE_NEXT_ONLINE_PGDAT */
d1ff2312 1872
5232d256 1873# ifndef HAVE_NEXT_ZONE
d1ff2312
BB
1874 next_zone_fn = (next_zone_t)
1875 spl_kallsyms_lookup_name("next_zone");
e11d6c5f
BB
1876 if (!next_zone_fn) {
1877 printk(KERN_ERR "Error: Unknown symbol next_zone\n");
d1ff2312 1878 return -EFAULT;
e11d6c5f 1879 }
5232d256
BB
1880# endif /* HAVE_NEXT_ZONE */
1881
1882#else /* HAVE_PGDAT_HELPERS */
1883
1884# ifndef HAVE_PGDAT_LIST
124ca8a5 1885 pgdat_list_addr = *(struct pglist_data **)
5232d256
BB
1886 spl_kallsyms_lookup_name("pgdat_list");
1887 if (!pgdat_list_addr) {
1888 printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
1889 return -EFAULT;
1890 }
1891# endif /* HAVE_PGDAT_LIST */
1892#endif /* HAVE_PGDAT_HELPERS */
d1ff2312 1893
6ae7fef5 1894#if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
d1ff2312
BB
1895 get_zone_counts_fn = (get_zone_counts_t)
1896 spl_kallsyms_lookup_name("get_zone_counts");
e11d6c5f
BB
1897 if (!get_zone_counts_fn) {
1898 printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
d1ff2312 1899 return -EFAULT;
e11d6c5f 1900 }
6ae7fef5 1901#endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
d1ff2312
BB
1902
1903 /*
1904 * It is now safe to initialize the global tunings which rely on
1905	 * the use of the for_each_zone() macro.  This macro in turn
1906 * depends on the *_pgdat symbols which are now available.
1907 */
1908 spl_kmem_init_globals();
1909
1910 return 0;
1911}
1912
a1502d76 1913int
1914spl_kmem_init(void)
1915{
1916 int rc = 0;
1917 ENTRY;
1918
1919 init_rwsem(&spl_kmem_cache_sem);
1920 INIT_LIST_HEAD(&spl_kmem_cache_list);
1921
1922#ifdef HAVE_SET_SHRINKER
1923 spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS,
1924 spl_kmem_cache_generic_shrinker);
1925 if (spl_kmem_cache_shrinker == NULL)
f78a933f 1926 RETURN(rc = -ENOMEM);
a1502d76 1927#else
1928 register_shrinker(&spl_kmem_cache_shrinker);
1929#endif
1930
1931#ifdef DEBUG_KMEM
1932 atomic64_set(&kmem_alloc_used, 0);
1933 atomic64_set(&vmem_alloc_used, 0);
1934
1935 spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
1936 spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
1937#endif
a1502d76 1938 RETURN(rc);
1939}
1940
ff449ac4 1941void
1942spl_kmem_fini(void)
1943{
1944#ifdef DEBUG_KMEM
1945 /* Display all unreclaimed memory addresses, including the
1946 * allocation size and the first few bytes of what's located
1947 * at that address to aid in debugging. Performance is not
1948 * a serious concern here since it is module unload time. */
1949 if (atomic64_read(&kmem_alloc_used) != 0)
1950 CWARN("kmem leaked %ld/%ld bytes\n",
550f1705 1951 atomic64_read(&kmem_alloc_used), kmem_alloc_max);
ff449ac4 1952
2fb9b26a 1953
1954 if (atomic64_read(&vmem_alloc_used) != 0)
1955 CWARN("vmem leaked %ld/%ld bytes\n",
550f1705 1956 atomic64_read(&vmem_alloc_used), vmem_alloc_max);
2fb9b26a 1957
ff449ac4 1958 spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
1959 spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
1960#endif /* DEBUG_KMEM */
2fb9b26a 1961 ENTRY;
1962
1963#ifdef HAVE_SET_SHRINKER
1964 remove_shrinker(spl_kmem_cache_shrinker);
1965#else
1966 unregister_shrinker(&spl_kmem_cache_shrinker);
5d86345d 1967#endif
2fb9b26a 1968
937879f1 1969 EXIT;
5d86345d 1970}