1 /*****************************************************************************\
2 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
3 * Copyright (C) 2007 The Regents of the University of California.
4 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
5 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
6 * UCRL-CODE-235197
7 *
8 * This file is part of the SPL, Solaris Porting Layer.
9 * For details, see <http://zfsonlinux.org/>.
10 *
11 * The SPL is free software; you can redistribute it and/or modify it
12 * under the terms of the GNU General Public License as published by the
13 * Free Software Foundation; either version 2 of the License, or (at your
14 * option) any later version.
15 *
16 * The SPL is distributed in the hope that it will be useful, but WITHOUT
17 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
18 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
19 * for more details.
20 *
21 * You should have received a copy of the GNU General Public License along
22 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
23 *****************************************************************************
24 * Solaris Porting Layer (SPL) Kmem Implementation.
25 \*****************************************************************************/
26
27 #include <sys/kmem.h>
28 #include <spl-debug.h>
29
30 #ifdef SS_DEBUG_SUBSYS
31 #undef SS_DEBUG_SUBSYS
32 #endif
33
34 #define SS_DEBUG_SUBSYS SS_KMEM
35
36 /*
37  * Within the scope of the spl-kmem.c file the kmem_cache_* definitions
38 * are removed to allow access to the real Linux slab allocator.
39 */
40 #undef kmem_cache_destroy
41 #undef kmem_cache_create
42 #undef kmem_cache_alloc
43 #undef kmem_cache_free
44
45
46 /*
47 * Cache expiration was implemented because it was part of the default Solaris
48 * kmem_cache behavior. The idea is that per-cpu objects which haven't been
49 * accessed in several seconds should be returned to the cache. On the other
50 * hand Linux slabs never move objects back to the slabs unless there is
51 * memory pressure on the system. By default the Linux method is enabled
52 * because it has been shown to improve responsiveness on low memory systems.
53 * This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM.
54 */
55 unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM;
56 EXPORT_SYMBOL(spl_kmem_cache_expire);
57 module_param(spl_kmem_cache_expire, uint, 0644);
58 MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)");
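/*
 * Illustrative sketch (assumes the module loads as "spl"): because the
 * parameter above is registered with mode 0644 it can normally be changed
 * at runtime, e.g. via /sys/module/spl/parameters/spl_kmem_cache_expire,
 * and in-kernel consumers simply test the bits, as spl_cache_age() does:
 *
 *	if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
 *		return;
 */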
59
60 /*
61 * The default behavior is to report the number of objects remaining in the
62 * cache. This allows the Linux VM to repeatedly reclaim objects from the
63  * cache when memory is low to satisfy other memory allocations. Alternately,
64 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
65 * is reclaimed. This may increase the likelihood of out of memory events.
66 */
67 unsigned int spl_kmem_cache_reclaim = 0;
68 module_param(spl_kmem_cache_reclaim, uint, 0644);
69 MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");
70
71 unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
72 module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
73 MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");
74
75 unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
76 module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644);
77 MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min,
78 "Minimal number of objects per slab");
79
80 unsigned int spl_kmem_cache_max_size = 32;
81 module_param(spl_kmem_cache_max_size, uint, 0644);
82 MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");
83
84 /*
85 * For small objects the Linux slab allocator should be used to make the most
86 * efficient use of the memory. However, large objects are not supported by
87 * the Linux slab and therefore the SPL implementation is preferred. A cutoff
88 * of 16K was determined to be optimal for architectures using 4K pages.
89 */
90 #if PAGE_SIZE == 4096
91 unsigned int spl_kmem_cache_slab_limit = 16384;
92 #else
93 unsigned int spl_kmem_cache_slab_limit = 0;
94 #endif
95 module_param(spl_kmem_cache_slab_limit, uint, 0644);
96 MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
97 "Objects less than N bytes use the Linux slab");
98
99 unsigned int spl_kmem_cache_kmem_limit = (PAGE_SIZE / 4);
100 module_param(spl_kmem_cache_kmem_limit, uint, 0644);
101 MODULE_PARM_DESC(spl_kmem_cache_kmem_limit,
102  "Objects less than N bytes use kmalloc");
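/*
 * Worked example (illustrative, assumes the 4KiB page defaults above):
 * spl_kmem_cache_slab_limit is 16384 and spl_kmem_cache_kmem_limit is
 * PAGE_SIZE / 4 = 1024. When the caller does not force a backend,
 * spl_kmem_cache_create() checks the limits in this order:
 *
 *	size <= spl_kmem_cache_slab_limit           -> KMC_SLAB (Linux slab)
 *	spl_obj_size() <= spl_kmem_cache_kmem_limit -> KMC_KMEM (kmalloc)
 *	otherwise                                   -> KMC_VMEM (vmalloc)
 *
 * So a 4096 byte object lands on the Linux slab, while a 128KiB object
 * falls through to a vmem backed cache.
 */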
103
104 /*
105 * The minimum amount of memory measured in pages to be free at all
106 * times on the system. This is similar to Linux's zone->pages_min
107 * multiplied by the number of zones and is sized based on that.
108 */
109 pgcnt_t minfree = 0;
110 EXPORT_SYMBOL(minfree);
111
112 /*
113 * The desired amount of memory measured in pages to be free at all
114 * times on the system. This is similar to Linux's zone->pages_low
115 * multiplied by the number of zones and is sized based on that.
116 * Assuming all zones are being used roughly equally, when we drop
117 * below this threshold asynchronous page reclamation is triggered.
118 */
119 pgcnt_t desfree = 0;
120 EXPORT_SYMBOL(desfree);
121
122 /*
123  * When above this amount of memory measured in pages the system is
124 * determined to have enough free memory. This is similar to Linux's
125 * zone->pages_high multiplied by the number of zones and is sized based
126 * on that. Assuming all zones are being used roughly equally, when
127 * asynchronous page reclamation reaches this threshold it stops.
128 */
129 pgcnt_t lotsfree = 0;
130 EXPORT_SYMBOL(lotsfree);
131
132 /* Unused, always 0 in this implementation */
133 pgcnt_t needfree = 0;
134 EXPORT_SYMBOL(needfree);
135
136 pgcnt_t swapfs_minfree = 0;
137 EXPORT_SYMBOL(swapfs_minfree);
138
139 pgcnt_t swapfs_reserve = 0;
140 EXPORT_SYMBOL(swapfs_reserve);
141
142 vmem_t *heap_arena = NULL;
143 EXPORT_SYMBOL(heap_arena);
144
145 vmem_t *zio_alloc_arena = NULL;
146 EXPORT_SYMBOL(zio_alloc_arena);
147
148 vmem_t *zio_arena = NULL;
149 EXPORT_SYMBOL(zio_arena);
150
151 #ifdef HAVE_PGDAT_HELPERS
152 # ifndef HAVE_FIRST_ONLINE_PGDAT
153 first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
154 EXPORT_SYMBOL(first_online_pgdat_fn);
155 # endif /* HAVE_FIRST_ONLINE_PGDAT */
156
157 # ifndef HAVE_NEXT_ONLINE_PGDAT
158 next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
159 EXPORT_SYMBOL(next_online_pgdat_fn);
160 # endif /* HAVE_NEXT_ONLINE_PGDAT */
161
162 # ifndef HAVE_NEXT_ZONE
163 next_zone_t next_zone_fn = SYMBOL_POISON;
164 EXPORT_SYMBOL(next_zone_fn);
165 # endif /* HAVE_NEXT_ZONE */
166
167 #else /* HAVE_PGDAT_HELPERS */
168
169 # ifndef HAVE_PGDAT_LIST
170 struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
171 EXPORT_SYMBOL(pgdat_list_addr);
172 # endif /* HAVE_PGDAT_LIST */
173
174 #endif /* HAVE_PGDAT_HELPERS */
175
176 #ifdef NEED_GET_ZONE_COUNTS
177 # ifndef HAVE_GET_ZONE_COUNTS
178 get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
179 EXPORT_SYMBOL(get_zone_counts_fn);
180 # endif /* HAVE_GET_ZONE_COUNTS */
181
182 unsigned long
183 spl_global_page_state(spl_zone_stat_item_t item)
184 {
185 unsigned long active;
186 unsigned long inactive;
187 unsigned long free;
188
189 get_zone_counts(&active, &inactive, &free);
190 switch (item) {
191 case SPL_NR_FREE_PAGES: return free;
192 case SPL_NR_INACTIVE: return inactive;
193 case SPL_NR_ACTIVE: return active;
194 default: ASSERT(0); /* Unsupported */
195 }
196
197 return 0;
198 }
199 #else
200 # ifdef HAVE_GLOBAL_PAGE_STATE
201 unsigned long
202 spl_global_page_state(spl_zone_stat_item_t item)
203 {
204 unsigned long pages = 0;
205
206 switch (item) {
207 case SPL_NR_FREE_PAGES:
208 # ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
209 pages += global_page_state(NR_FREE_PAGES);
210 # endif
211 break;
212 case SPL_NR_INACTIVE:
213 # ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
214 pages += global_page_state(NR_INACTIVE);
215 # endif
216 # ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
217 pages += global_page_state(NR_INACTIVE_ANON);
218 # endif
219 # ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
220 pages += global_page_state(NR_INACTIVE_FILE);
221 # endif
222 break;
223 case SPL_NR_ACTIVE:
224 # ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
225 pages += global_page_state(NR_ACTIVE);
226 # endif
227 # ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
228 pages += global_page_state(NR_ACTIVE_ANON);
229 # endif
230 # ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
231 pages += global_page_state(NR_ACTIVE_FILE);
232 # endif
233 break;
234 default:
235 ASSERT(0); /* Unsupported */
236 }
237
238 return pages;
239 }
240 # else
241 # error "Both global_page_state() and get_zone_counts() unavailable"
242 # endif /* HAVE_GLOBAL_PAGE_STATE */
243 #endif /* NEED_GET_ZONE_COUNTS */
244 EXPORT_SYMBOL(spl_global_page_state);
245
246 #ifndef HAVE_SHRINK_DCACHE_MEMORY
247 shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
248 EXPORT_SYMBOL(shrink_dcache_memory_fn);
249 #endif /* HAVE_SHRINK_DCACHE_MEMORY */
250
251 #ifndef HAVE_SHRINK_ICACHE_MEMORY
252 shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
253 EXPORT_SYMBOL(shrink_icache_memory_fn);
254 #endif /* HAVE_SHRINK_ICACHE_MEMORY */
255
256 pgcnt_t
257 spl_kmem_availrmem(void)
258 {
259 /* The amount of easily available memory */
260 return (spl_global_page_state(SPL_NR_FREE_PAGES) +
261 spl_global_page_state(SPL_NR_INACTIVE));
262 }
263 EXPORT_SYMBOL(spl_kmem_availrmem);
264
265 size_t
266 vmem_size(vmem_t *vmp, int typemask)
267 {
268 ASSERT3P(vmp, ==, NULL);
269 ASSERT3S(typemask & VMEM_ALLOC, ==, VMEM_ALLOC);
270 ASSERT3S(typemask & VMEM_FREE, ==, VMEM_FREE);
271
272 return (VMALLOC_TOTAL);
273 }
274 EXPORT_SYMBOL(vmem_size);
275
276 int
277 kmem_debugging(void)
278 {
279 return 0;
280 }
281 EXPORT_SYMBOL(kmem_debugging);
282
283 #ifndef HAVE_KVASPRINTF
284 /* Simplified asprintf. */
285 char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
286 {
287 unsigned int len;
288 char *p;
289 va_list aq;
290
291 va_copy(aq, ap);
292 len = vsnprintf(NULL, 0, fmt, aq);
293 va_end(aq);
294
295 p = kmalloc(len+1, gfp);
296 if (!p)
297 return NULL;
298
299 vsnprintf(p, len+1, fmt, ap);
300
301 return p;
302 }
303 EXPORT_SYMBOL(kvasprintf);
304 #endif /* HAVE_KVASPRINTF */
305
306 char *
307 kmem_vasprintf(const char *fmt, va_list ap)
308 {
309 va_list aq;
310 char *ptr;
311
312 do {
313 va_copy(aq, ap);
314 ptr = kvasprintf(GFP_KERNEL, fmt, aq);
315 va_end(aq);
316 } while (ptr == NULL);
317
318 return ptr;
319 }
320 EXPORT_SYMBOL(kmem_vasprintf);
321
322 char *
323 kmem_asprintf(const char *fmt, ...)
324 {
325 va_list ap;
326 char *ptr;
327
328 do {
329 va_start(ap, fmt);
330 ptr = kvasprintf(GFP_KERNEL, fmt, ap);
331 va_end(ap);
332 } while (ptr == NULL);
333
334 return ptr;
335 }
336 EXPORT_SYMBOL(kmem_asprintf);
337
338 static char *
339 __strdup(const char *str, int flags)
340 {
341 char *ptr;
342 int n;
343
344 n = strlen(str);
345 ptr = kmalloc_nofail(n + 1, flags);
346 if (ptr)
347 memcpy(ptr, str, n + 1);
348
349 return ptr;
350 }
351
352 char *
353 strdup(const char *str)
354 {
355 return __strdup(str, KM_SLEEP);
356 }
357 EXPORT_SYMBOL(strdup);
358
359 void
360 strfree(char *str)
361 {
362 kfree(str);
363 }
364 EXPORT_SYMBOL(strfree);
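/*
 * Usage sketch (hypothetical format string): strings built with
 * kmem_asprintf() come from kvasprintf(GFP_KERNEL, ...) and are released
 * with strfree():
 *
 *	char *name = kmem_asprintf("cache-%s-%d", "data", 42);
 *	...
 *	strfree(name);
 */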
365
366 /*
367 * Memory allocation interfaces and debugging for basic kmem_*
368 * and vmem_* style memory allocation. When DEBUG_KMEM is enabled
369 * the SPL will keep track of the total memory allocated, and
370 * report any memory leaked when the module is unloaded.
371 */
372 #ifdef DEBUG_KMEM
373
374 /* Shim layer memory accounting */
375 # ifdef HAVE_ATOMIC64_T
376 atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
377 unsigned long long kmem_alloc_max = 0;
378 atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
379 unsigned long long vmem_alloc_max = 0;
380 # else /* HAVE_ATOMIC64_T */
381 atomic_t kmem_alloc_used = ATOMIC_INIT(0);
382 unsigned long long kmem_alloc_max = 0;
383 atomic_t vmem_alloc_used = ATOMIC_INIT(0);
384 unsigned long long vmem_alloc_max = 0;
385 # endif /* HAVE_ATOMIC64_T */
386
387 EXPORT_SYMBOL(kmem_alloc_used);
388 EXPORT_SYMBOL(kmem_alloc_max);
389 EXPORT_SYMBOL(vmem_alloc_used);
390 EXPORT_SYMBOL(vmem_alloc_max);
391
392 /* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
393 * but also the location of every alloc and free. When the SPL module is
394 * unloaded a list of all leaked addresses and where they were allocated
395 * will be dumped to the console. Enabling this feature has a significant
396  * impact on performance but it makes finding memory leaks straightforward.
397  *
398  * Not surprisingly, with debugging enabled the xmem_locks are very highly
399  * contended, particularly on xfree(). If we want to run with this detailed
400  * debugging enabled for anything other than debugging, we need to minimize
401 * the contention by moving to a lock per xmem_table entry model.
402 */
403 # ifdef DEBUG_KMEM_TRACKING
404
405 # define KMEM_HASH_BITS 10
406 # define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS)
407
408 # define VMEM_HASH_BITS 10
409 # define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS)
410
411 typedef struct kmem_debug {
412 struct hlist_node kd_hlist; /* Hash node linkage */
413 struct list_head kd_list; /* List of all allocations */
414 void *kd_addr; /* Allocation pointer */
415 size_t kd_size; /* Allocation size */
416 const char *kd_func; /* Allocation function */
417 int kd_line; /* Allocation line */
418 } kmem_debug_t;
419
420 spinlock_t kmem_lock;
421 struct hlist_head kmem_table[KMEM_TABLE_SIZE];
422 struct list_head kmem_list;
423
424 spinlock_t vmem_lock;
425 struct hlist_head vmem_table[VMEM_TABLE_SIZE];
426 struct list_head vmem_list;
427
428 EXPORT_SYMBOL(kmem_lock);
429 EXPORT_SYMBOL(kmem_table);
430 EXPORT_SYMBOL(kmem_list);
431
432 EXPORT_SYMBOL(vmem_lock);
433 EXPORT_SYMBOL(vmem_table);
434 EXPORT_SYMBOL(vmem_list);
435
436 static kmem_debug_t *
437 kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, const void *addr)
438 {
439 struct hlist_head *head;
440 struct hlist_node *node;
441 struct kmem_debug *p;
442 unsigned long flags;
443 SENTRY;
444
445 spin_lock_irqsave(lock, flags);
446
447 head = &table[hash_ptr((void *)addr, bits)];
448 hlist_for_each(node, head) {
449 p = list_entry(node, struct kmem_debug, kd_hlist);
450 if (p->kd_addr == addr) {
451 hlist_del_init(&p->kd_hlist);
452 list_del_init(&p->kd_list);
453 spin_unlock_irqrestore(lock, flags);
454 return p;
455 }
456 }
457
458 spin_unlock_irqrestore(lock, flags);
459
460 SRETURN(NULL);
461 }
462
463 void *
464 kmem_alloc_track(size_t size, int flags, const char *func, int line,
465 int node_alloc, int node)
466 {
467 void *ptr = NULL;
468 kmem_debug_t *dptr;
469 unsigned long irq_flags;
470 SENTRY;
471
472 /* Function may be called with KM_NOSLEEP so failure is possible */
473 dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
474 flags & ~__GFP_ZERO);
475
476 if (unlikely(dptr == NULL)) {
477 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
478 "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
479 sizeof(kmem_debug_t), flags, func, line,
480 kmem_alloc_used_read(), kmem_alloc_max);
481 } else {
482 /*
483  * Marked unlikely because we should never be doing this;
484  * we tolerate up to 2 pages but a single page is best.
485 */
486 if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
487 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
488 "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
489 (unsigned long long) size, flags, func, line,
490 kmem_alloc_used_read(), kmem_alloc_max);
491 spl_debug_dumpstack(NULL);
492 }
493
494 /*
495 * We use __strdup() below because the string pointed to by
496 * __FUNCTION__ might not be available by the time we want
497 * to print it since the module might have been unloaded.
498 * This can only fail in the KM_NOSLEEP case.
499 */
500 dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
501 if (unlikely(dptr->kd_func == NULL)) {
502 kfree(dptr);
503 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
504 "debug __strdup() at %s:%d failed (%lld/%llu)\n",
505 func, line, kmem_alloc_used_read(), kmem_alloc_max);
506 goto out;
507 }
508
509 /* Use the correct allocator */
510 if (node_alloc) {
511 ASSERT(!(flags & __GFP_ZERO));
512 ptr = kmalloc_node_nofail(size, flags, node);
513 } else if (flags & __GFP_ZERO) {
514 ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
515 } else {
516 ptr = kmalloc_nofail(size, flags);
517 }
518
519 if (unlikely(ptr == NULL)) {
520 kfree(dptr->kd_func);
521 kfree(dptr);
522 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
523 "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
524 (unsigned long long) size, flags, func, line,
525 kmem_alloc_used_read(), kmem_alloc_max);
526 goto out;
527 }
528
529 kmem_alloc_used_add(size);
530 if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
531 kmem_alloc_max = kmem_alloc_used_read();
532
533 INIT_HLIST_NODE(&dptr->kd_hlist);
534 INIT_LIST_HEAD(&dptr->kd_list);
535
536 dptr->kd_addr = ptr;
537 dptr->kd_size = size;
538 dptr->kd_line = line;
539
540 spin_lock_irqsave(&kmem_lock, irq_flags);
541 hlist_add_head(&dptr->kd_hlist,
542 &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
543 list_add_tail(&dptr->kd_list, &kmem_list);
544 spin_unlock_irqrestore(&kmem_lock, irq_flags);
545
546 SDEBUG_LIMIT(SD_INFO,
547 "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
548 (unsigned long long) size, flags, func, line, ptr,
549 kmem_alloc_used_read(), kmem_alloc_max);
550 }
551 out:
552 SRETURN(ptr);
553 }
554 EXPORT_SYMBOL(kmem_alloc_track);
555
556 void
557 kmem_free_track(const void *ptr, size_t size)
558 {
559 kmem_debug_t *dptr;
560 SENTRY;
561
562 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
563 (unsigned long long) size);
564
565 dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);
566
567 /* Must exist in hash due to kmem_alloc() */
568 ASSERT(dptr);
569
570 /* Size must match */
571 ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
572 "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
573 (unsigned long long) size, dptr->kd_func, dptr->kd_line);
574
575 kmem_alloc_used_sub(size);
576 SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
577 (unsigned long long) size, kmem_alloc_used_read(),
578 kmem_alloc_max);
579
580 kfree(dptr->kd_func);
581
582 memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
583 kfree(dptr);
584
585 memset((void *)ptr, 0x5a, size);
586 kfree(ptr);
587
588 SEXIT;
589 }
590 EXPORT_SYMBOL(kmem_free_track);
591
592 void *
593 vmem_alloc_track(size_t size, int flags, const char *func, int line)
594 {
595 void *ptr = NULL;
596 kmem_debug_t *dptr;
597 unsigned long irq_flags;
598 SENTRY;
599
600 ASSERT(flags & KM_SLEEP);
601
602 /* Function may be called with KM_NOSLEEP so failure is possible */
603 dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
604 flags & ~__GFP_ZERO);
605 if (unlikely(dptr == NULL)) {
606 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
607 "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
608 sizeof(kmem_debug_t), flags, func, line,
609 vmem_alloc_used_read(), vmem_alloc_max);
610 } else {
611 /*
612 * We use __strdup() below because the string pointed to by
613 * __FUNCTION__ might not be available by the time we want
614 * to print it, since the module might have been unloaded.
615 * This can never fail because we have already asserted
616 * that flags is KM_SLEEP.
617 */
618 dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
619 if (unlikely(dptr->kd_func == NULL)) {
620 kfree(dptr);
621 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
622 "debug __strdup() at %s:%d failed (%lld/%llu)\n",
623 func, line, vmem_alloc_used_read(), vmem_alloc_max);
624 goto out;
625 }
626
627 /* Use the correct allocator */
628 if (flags & __GFP_ZERO) {
629 ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
630 } else {
631 ptr = vmalloc_nofail(size, flags);
632 }
633
634 if (unlikely(ptr == NULL)) {
635 kfree(dptr->kd_func);
636 kfree(dptr);
637 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
638 "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
639 (unsigned long long) size, flags, func, line,
640 vmem_alloc_used_read(), vmem_alloc_max);
641 goto out;
642 }
643
644 vmem_alloc_used_add(size);
645 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
646 vmem_alloc_max = vmem_alloc_used_read();
647
648 INIT_HLIST_NODE(&dptr->kd_hlist);
649 INIT_LIST_HEAD(&dptr->kd_list);
650
651 dptr->kd_addr = ptr;
652 dptr->kd_size = size;
653 dptr->kd_line = line;
654
655 spin_lock_irqsave(&vmem_lock, irq_flags);
656 hlist_add_head(&dptr->kd_hlist,
657 &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
658 list_add_tail(&dptr->kd_list, &vmem_list);
659 spin_unlock_irqrestore(&vmem_lock, irq_flags);
660
661 SDEBUG_LIMIT(SD_INFO,
662 "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
663 (unsigned long long) size, flags, func, line,
664 ptr, vmem_alloc_used_read(), vmem_alloc_max);
665 }
666 out:
667 SRETURN(ptr);
668 }
669 EXPORT_SYMBOL(vmem_alloc_track);
670
671 void
672 vmem_free_track(const void *ptr, size_t size)
673 {
674 kmem_debug_t *dptr;
675 SENTRY;
676
677 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
678 (unsigned long long) size);
679
680 dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);
681
682 /* Must exist in hash due to vmem_alloc() */
683 ASSERT(dptr);
684
685 /* Size must match */
686 ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
687 "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
688 (unsigned long long) size, dptr->kd_func, dptr->kd_line);
689
690 vmem_alloc_used_sub(size);
691 SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
692 (unsigned long long) size, vmem_alloc_used_read(),
693 vmem_alloc_max);
694
695 kfree(dptr->kd_func);
696
697 memset((void *)dptr, 0x5a, sizeof(kmem_debug_t));
698 kfree(dptr);
699
700 memset((void *)ptr, 0x5a, size);
701 vfree(ptr);
702
703 SEXIT;
704 }
705 EXPORT_SYMBOL(vmem_free_track);
706
707 # else /* DEBUG_KMEM_TRACKING */
708
709 void *
710 kmem_alloc_debug(size_t size, int flags, const char *func, int line,
711 int node_alloc, int node)
712 {
713 void *ptr;
714 SENTRY;
715
716 /*
717  * Marked unlikely because we should never be doing this;
718  * we tolerate up to 2 pages but a single page is best.
719 */
720 if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
721 SDEBUG(SD_CONSOLE | SD_WARNING,
722 "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
723 (unsigned long long) size, flags, func, line,
724 kmem_alloc_used_read(), kmem_alloc_max);
725 spl_debug_dumpstack(NULL);
726 }
727
728 /* Use the correct allocator */
729 if (node_alloc) {
730 ASSERT(!(flags & __GFP_ZERO));
731 ptr = kmalloc_node_nofail(size, flags, node);
732 } else if (flags & __GFP_ZERO) {
733 ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
734 } else {
735 ptr = kmalloc_nofail(size, flags);
736 }
737
738 if (unlikely(ptr == NULL)) {
739 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
740 "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
741 (unsigned long long) size, flags, func, line,
742 kmem_alloc_used_read(), kmem_alloc_max);
743 } else {
744 kmem_alloc_used_add(size);
745 if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
746 kmem_alloc_max = kmem_alloc_used_read();
747
748 SDEBUG_LIMIT(SD_INFO,
749 "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
750 (unsigned long long) size, flags, func, line, ptr,
751 kmem_alloc_used_read(), kmem_alloc_max);
752 }
753
754 SRETURN(ptr);
755 }
756 EXPORT_SYMBOL(kmem_alloc_debug);
757
758 void
759 kmem_free_debug(const void *ptr, size_t size)
760 {
761 SENTRY;
762
763 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
764 (unsigned long long) size);
765
766 kmem_alloc_used_sub(size);
767 SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
768 (unsigned long long) size, kmem_alloc_used_read(),
769 kmem_alloc_max);
770 kfree(ptr);
771
772 SEXIT;
773 }
774 EXPORT_SYMBOL(kmem_free_debug);
775
776 void *
777 vmem_alloc_debug(size_t size, int flags, const char *func, int line)
778 {
779 void *ptr;
780 SENTRY;
781
782 ASSERT(flags & KM_SLEEP);
783
784 /* Use the correct allocator */
785 if (flags & __GFP_ZERO) {
786 ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
787 } else {
788 ptr = vmalloc_nofail(size, flags);
789 }
790
791 if (unlikely(ptr == NULL)) {
792 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
793 "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
794 (unsigned long long) size, flags, func, line,
795 vmem_alloc_used_read(), vmem_alloc_max);
796 } else {
797 vmem_alloc_used_add(size);
798 if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
799 vmem_alloc_max = vmem_alloc_used_read();
800
801 SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
802 "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
803 vmem_alloc_used_read(), vmem_alloc_max);
804 }
805
806 SRETURN(ptr);
807 }
808 EXPORT_SYMBOL(vmem_alloc_debug);
809
810 void
811 vmem_free_debug(const void *ptr, size_t size)
812 {
813 SENTRY;
814
815 ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
816 (unsigned long long) size);
817
818 vmem_alloc_used_sub(size);
819 SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
820 (unsigned long long) size, vmem_alloc_used_read(),
821 vmem_alloc_max);
822 vfree(ptr);
823
824 SEXIT;
825 }
826 EXPORT_SYMBOL(vmem_free_debug);
827
828 # endif /* DEBUG_KMEM_TRACKING */
829 #endif /* DEBUG_KMEM */
830
831 /*
832 * Slab allocation interfaces
833 *
834 * While the Linux slab implementation was inspired by the Solaris
835 * implementation I cannot use it to emulate the Solaris APIs. I
836 * require two features which are not provided by the Linux slab.
837 *
838 * 1) Constructors AND destructors. Recent versions of the Linux
839 * kernel have removed support for destructors. This is a deal
840 * breaker for the SPL which contains particularly expensive
841  * initializers for mutexes, condition variables, etc. We also
842  * require a minimal level of cleanup for these data types, unlike
843  * many Linux data types which do not need to be explicitly destroyed.
844 *
845 * 2) Virtual address space backed slab. Callers of the Solaris slab
846  * expect it to work well for both small and very large allocations.
847 * Because of memory fragmentation the Linux slab which is backed
848 * by kmalloc'ed memory performs very badly when confronted with
849 * large numbers of large allocations. Basing the slab on the
850 * virtual address space removes the need for contiguous pages
851  * and greatly improves performance for large allocations.
852 *
853 * For these reasons, the SPL has its own slab implementation with
854 * the needed features. It is not as highly optimized as either the
855 * Solaris or Linux slabs, but it should get me most of what is
856 * needed until it can be optimized or obsoleted by another approach.
857 *
858 * One serious concern I do have about this method is the relatively
859 * small virtual address space on 32bit arches. This will seriously
860 * constrain the size of the slab caches and their performance.
861 *
862 * XXX: Improve the partial slab list by carefully maintaining a
863 * strict ordering of fullest to emptiest slabs based on
864  * the slab reference count. This guarantees that when freeing
865 * slabs back to the system we need only linearly traverse the
866 * last N slabs in the list to discover all the freeable slabs.
867 *
868 * XXX: NUMA awareness for optionally allocating memory close to a
869 * particular core. This can be advantageous if you know the slab
870 * object will be short lived and primarily accessed from one core.
871 *
872 * XXX: Slab coloring may also yield performance improvements and would
873 * be desirable to implement.
874 */
875
876 struct list_head spl_kmem_cache_list; /* List of caches */
877 struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */
878 taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */
879
880 static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);
881
882 SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
883 SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
884 spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);
885
886 static void *
887 kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
888 {
889 void *ptr;
890
891 ASSERT(ISP2(size));
892
893 if (skc->skc_flags & KMC_KMEM)
894 ptr = (void *)__get_free_pages(flags | __GFP_COMP,
895 get_order(size));
896 else
897 ptr = __vmalloc(size, flags | __GFP_HIGHMEM, PAGE_KERNEL);
898
899 /* Resulting allocated memory will be page aligned */
900 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
901
902 return ptr;
903 }
904
905 static void
906 kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
907 {
908 ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
909 ASSERT(ISP2(size));
910
911 /*
912 * The Linux direct reclaim path uses this out of band value to
913 * determine if forward progress is being made. Normally this is
914 * incremented by kmem_freepages() which is part of the various
915 * Linux slab implementations. However, since we are using none
916 * of that infrastructure we are responsible for incrementing it.
917 */
918 if (current->reclaim_state)
919 current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;
920
921 if (skc->skc_flags & KMC_KMEM)
922 free_pages((unsigned long)ptr, get_order(size));
923 else
924 vfree(ptr);
925 }
926
927 /*
928 * Required space for each aligned sks.
929 */
930 static inline uint32_t
931 spl_sks_size(spl_kmem_cache_t *skc)
932 {
933 return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
934 skc->skc_obj_align, uint32_t);
935 }
936
937 /*
938 * Required space for each aligned object.
939 */
940 static inline uint32_t
941 spl_obj_size(spl_kmem_cache_t *skc)
942 {
943 uint32_t align = skc->skc_obj_align;
944
945 return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
946 P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);
947 }
948
949 /*
950  * Look up the spl_kmem_obj_t for a given object.
951 */
952 static inline spl_kmem_obj_t *
953 spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
954 {
955 return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
956 skc->skc_obj_align, uint32_t);
957 }
958
959 /*
960  * Required space for each offslab object taking into account alignment
961 * restrictions and the power-of-two requirement of kv_alloc().
962 */
963 static inline uint32_t
964 spl_offslab_size(spl_kmem_cache_t *skc)
965 {
966 return 1UL << (fls64(spl_obj_size(skc)) + 1);
967 }
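/*
 * Worked example (illustrative figures): with skc_obj_size = 4096,
 * skc_obj_align = 8, and sizeof(spl_kmem_obj_t) assumed to round up to 64
 * bytes, spl_obj_size() is 4096 + 64 = 4160. fls64(4160) is 13, so
 * spl_offslab_size() returns 1UL << 14 = 16384; each offslab object is
 * then backed by its own 16KiB kv_alloc() region.
 */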
968
969 /*
970 * It's important that we pack the spl_kmem_obj_t structure and the
971  * actual objects into one large address space to minimize the number
972  * of calls to the allocator. It is far better to do a few large
973  * allocations and then subdivide them ourselves. Now which allocator
974  * we use requires balancing a few trade-offs.
975 *
976 * For small objects we use kmem_alloc() because as long as you are
977  * only requesting a small number of pages (ideally just one) it's cheap.
978 * However, when you start requesting multiple pages with kmem_alloc()
979 * it gets increasingly expensive since it requires contiguous pages.
980 * For this reason we shift to vmem_alloc() for slabs of large objects
981 * which removes the need for contiguous pages. We do not use
982 * vmem_alloc() in all cases because there is significant locking
983 * overhead in __get_vm_area_node(). This function takes a single
984 * global lock when acquiring an available virtual address range which
985 * serializes all vmem_alloc()'s for all slab caches. Using slightly
986 * different allocation functions for small and large objects should
987 * give us the best of both worlds.
988 *
989 * KMC_ONSLAB KMC_OFFSLAB
990 *
991 * +------------------------+ +-----------------+
992 * | spl_kmem_slab_t --+-+ | | spl_kmem_slab_t |---+-+
993 * | skc_obj_size <-+ | | +-----------------+ | |
994 * | spl_kmem_obj_t | | | |
995 * | skc_obj_size <---+ | +-----------------+ | |
996 * | spl_kmem_obj_t | | | skc_obj_size | <-+ |
997 * | ... v | | spl_kmem_obj_t | |
998 * +------------------------+ +-----------------+ v
999 */
1000 static spl_kmem_slab_t *
1001 spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
1002 {
1003 spl_kmem_slab_t *sks;
1004 spl_kmem_obj_t *sko, *n;
1005 void *base, *obj;
1006 uint32_t obj_size, offslab_size = 0;
1007 int i, rc = 0;
1008
1009 base = kv_alloc(skc, skc->skc_slab_size, flags);
1010 if (base == NULL)
1011 SRETURN(NULL);
1012
1013 sks = (spl_kmem_slab_t *)base;
1014 sks->sks_magic = SKS_MAGIC;
1015 sks->sks_objs = skc->skc_slab_objs;
1016 sks->sks_age = jiffies;
1017 sks->sks_cache = skc;
1018 INIT_LIST_HEAD(&sks->sks_list);
1019 INIT_LIST_HEAD(&sks->sks_free_list);
1020 sks->sks_ref = 0;
1021 obj_size = spl_obj_size(skc);
1022
1023 if (skc->skc_flags & KMC_OFFSLAB)
1024 offslab_size = spl_offslab_size(skc);
1025
1026 for (i = 0; i < sks->sks_objs; i++) {
1027 if (skc->skc_flags & KMC_OFFSLAB) {
1028 obj = kv_alloc(skc, offslab_size, flags);
1029 if (!obj)
1030 SGOTO(out, rc = -ENOMEM);
1031 } else {
1032 obj = base + spl_sks_size(skc) + (i * obj_size);
1033 }
1034
1035 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
1036 sko = spl_sko_from_obj(skc, obj);
1037 sko->sko_addr = obj;
1038 sko->sko_magic = SKO_MAGIC;
1039 sko->sko_slab = sks;
1040 INIT_LIST_HEAD(&sko->sko_list);
1041 list_add_tail(&sko->sko_list, &sks->sks_free_list);
1042 }
1043
1044 list_for_each_entry(sko, &sks->sks_free_list, sko_list)
1045 if (skc->skc_ctor)
1046 skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
1047 out:
1048 if (rc) {
1049 if (skc->skc_flags & KMC_OFFSLAB)
1050 list_for_each_entry_safe(sko, n, &sks->sks_free_list,
1051 sko_list)
1052 kv_free(skc, sko->sko_addr, offslab_size);
1053
1054 kv_free(skc, base, skc->skc_slab_size);
1055 sks = NULL;
1056 }
1057
1058 SRETURN(sks);
1059 }
1060
1061 /*
1062  * Remove a slab from the complete or partial list. It must be called with
1063  * the 'skc->skc_lock' held, but the actual free must be performed
1064 * outside the lock to prevent deadlocking on vmem addresses.
1065 */
1066 static void
1067 spl_slab_free(spl_kmem_slab_t *sks,
1068 struct list_head *sks_list, struct list_head *sko_list)
1069 {
1070 spl_kmem_cache_t *skc;
1071 SENTRY;
1072
1073 ASSERT(sks->sks_magic == SKS_MAGIC);
1074 ASSERT(sks->sks_ref == 0);
1075
1076 skc = sks->sks_cache;
1077 ASSERT(skc->skc_magic == SKC_MAGIC);
1078 ASSERT(spin_is_locked(&skc->skc_lock));
1079
1080 /*
1081 * Update slab/objects counters in the cache, then remove the
1082 * slab from the skc->skc_partial_list. Finally add the slab
1083  * and all its objects into the private work lists where the
1084 * destructors will be called and the memory freed to the system.
1085 */
1086 skc->skc_obj_total -= sks->sks_objs;
1087 skc->skc_slab_total--;
1088 list_del(&sks->sks_list);
1089 list_add(&sks->sks_list, sks_list);
1090 list_splice_init(&sks->sks_free_list, sko_list);
1091
1092 SEXIT;
1093 }
1094
1095 /*
1096  * Traverses all the partial slabs attached to a cache and frees those
1097  * which are currently empty and have not been touched for
1098  * skc_delay seconds to avoid thrashing. The count argument is
1099  * passed to optionally cap the number of slabs reclaimed; a count
1100  * of zero means try to reclaim everything. When the flag is set we
1101  * always free an available slab regardless of age.
1102 */
1103 static void
1104 spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
1105 {
1106 spl_kmem_slab_t *sks, *m;
1107 spl_kmem_obj_t *sko, *n;
1108 LIST_HEAD(sks_list);
1109 LIST_HEAD(sko_list);
1110 uint32_t size = 0;
1111 int i = 0;
1112 SENTRY;
1113
1114 /*
1115 * Move empty slabs and objects which have not been touched in
1116 * skc_delay seconds on to private lists to be freed outside
1117  * the spin lock. This delay time is important to avoid thrashing;
1118  * however, when the flag is set the delay will not be used.
1119 */
1120 spin_lock(&skc->skc_lock);
1121 list_for_each_entry_safe_reverse(sks,m,&skc->skc_partial_list,sks_list){
1122 /*
1123 * All empty slabs are at the end of skc->skc_partial_list,
1124 * therefore once a non-empty slab is found we can stop
1125 * scanning. Additionally, stop when reaching the target
1126 * reclaim 'count' if a non-zero threshold is given.
1127 */
1128 if ((sks->sks_ref > 0) || (count && i >= count))
1129 break;
1130
1131 if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) {
1132 spl_slab_free(sks, &sks_list, &sko_list);
1133 i++;
1134 }
1135 }
1136 spin_unlock(&skc->skc_lock);
1137
1138 /*
1139 * The following two loops ensure all the object destructors are
1140 * run, any offslab objects are freed, and the slabs themselves
1141 * are freed. This is all done outside the skc->skc_lock since
1142 * this allows the destructor to sleep, and allows us to perform
1143  * a conditional reschedule when freeing a large number of
1144 * objects and slabs back to the system.
1145 */
1146 if (skc->skc_flags & KMC_OFFSLAB)
1147 size = spl_offslab_size(skc);
1148
1149 list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
1150 ASSERT(sko->sko_magic == SKO_MAGIC);
1151
1152 if (skc->skc_dtor)
1153 skc->skc_dtor(sko->sko_addr, skc->skc_private);
1154
1155 if (skc->skc_flags & KMC_OFFSLAB)
1156 kv_free(skc, sko->sko_addr, size);
1157 }
1158
1159 list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
1160 ASSERT(sks->sks_magic == SKS_MAGIC);
1161 kv_free(skc, sks, skc->skc_slab_size);
1162 }
1163
1164 SEXIT;
1165 }
1166
1167 static spl_kmem_emergency_t *
1168 spl_emergency_search(struct rb_root *root, void *obj)
1169 {
1170 struct rb_node *node = root->rb_node;
1171 spl_kmem_emergency_t *ske;
1172 unsigned long address = (unsigned long)obj;
1173
1174 while (node) {
1175 ske = container_of(node, spl_kmem_emergency_t, ske_node);
1176
1177 if (address < (unsigned long)ske->ske_obj)
1178 node = node->rb_left;
1179 else if (address > (unsigned long)ske->ske_obj)
1180 node = node->rb_right;
1181 else
1182 return ske;
1183 }
1184
1185 return NULL;
1186 }
1187
1188 static int
1189 spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
1190 {
1191 struct rb_node **new = &(root->rb_node), *parent = NULL;
1192 spl_kmem_emergency_t *ske_tmp;
1193 unsigned long address = (unsigned long)ske->ske_obj;
1194
1195 while (*new) {
1196 ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);
1197
1198 parent = *new;
1199 if (address < (unsigned long)ske_tmp->ske_obj)
1200 new = &((*new)->rb_left);
1201 else if (address > (unsigned long)ske_tmp->ske_obj)
1202 new = &((*new)->rb_right);
1203 else
1204 return 0;
1205 }
1206
1207 rb_link_node(&ske->ske_node, parent, new);
1208 rb_insert_color(&ske->ske_node, root);
1209
1210 return 1;
1211 }
1212
1213 /*
1214 * Allocate a single emergency object and track it in a red black tree.
1215 */
1216 static int
1217 spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
1218 {
1219 spl_kmem_emergency_t *ske;
1220 int empty;
1221 SENTRY;
1222
1223  /* Last chance, use a partial slab if one now exists */
1224 spin_lock(&skc->skc_lock);
1225 empty = list_empty(&skc->skc_partial_list);
1226 spin_unlock(&skc->skc_lock);
1227 if (!empty)
1228 SRETURN(-EEXIST);
1229
1230 ske = kmalloc(sizeof(*ske), flags);
1231 if (ske == NULL)
1232 SRETURN(-ENOMEM);
1233
1234 ske->ske_obj = kmalloc(skc->skc_obj_size, flags);
1235 if (ske->ske_obj == NULL) {
1236 kfree(ske);
1237 SRETURN(-ENOMEM);
1238 }
1239
1240 spin_lock(&skc->skc_lock);
1241 empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
1242 if (likely(empty)) {
1243 skc->skc_obj_total++;
1244 skc->skc_obj_emergency++;
1245 if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
1246 skc->skc_obj_emergency_max = skc->skc_obj_emergency;
1247 }
1248 spin_unlock(&skc->skc_lock);
1249
1250 if (unlikely(!empty)) {
1251 kfree(ske->ske_obj);
1252 kfree(ske);
1253 SRETURN(-EINVAL);
1254 }
1255
1256 if (skc->skc_ctor)
1257 skc->skc_ctor(ske->ske_obj, skc->skc_private, flags);
1258
1259 *obj = ske->ske_obj;
1260
1261 SRETURN(0);
1262 }
1263
1264 /*
1265 * Locate the passed object in the red black tree and free it.
1266 */
1267 static int
1268 spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
1269 {
1270 spl_kmem_emergency_t *ske;
1271 SENTRY;
1272
1273 spin_lock(&skc->skc_lock);
1274 ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
1275 if (likely(ske)) {
1276 rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
1277 skc->skc_obj_emergency--;
1278 skc->skc_obj_total--;
1279 }
1280 spin_unlock(&skc->skc_lock);
1281
1282 if (unlikely(ske == NULL))
1283 SRETURN(-ENOENT);
1284
1285 if (skc->skc_dtor)
1286 skc->skc_dtor(ske->ske_obj, skc->skc_private);
1287
1288 kfree(ske->ske_obj);
1289 kfree(ske);
1290
1291 SRETURN(0);
1292 }
1293
1294 /*
1295 * Release objects from the per-cpu magazine back to their slab. The flush
1296 * argument contains the max number of entries to remove from the magazine.
1297 */
1298 static void
1299 __spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
1300 {
1301 int i, count = MIN(flush, skm->skm_avail);
1302 SENTRY;
1303
1304 ASSERT(skc->skc_magic == SKC_MAGIC);
1305 ASSERT(skm->skm_magic == SKM_MAGIC);
1306 ASSERT(spin_is_locked(&skc->skc_lock));
1307
1308 for (i = 0; i < count; i++)
1309 spl_cache_shrink(skc, skm->skm_objs[i]);
1310
1311 skm->skm_avail -= count;
1312 memmove(skm->skm_objs, &(skm->skm_objs[count]),
1313 sizeof(void *) * skm->skm_avail);
1314
1315 SEXIT;
1316 }
1317
1318 static void
1319 spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
1320 {
1321 spin_lock(&skc->skc_lock);
1322 __spl_cache_flush(skc, skm, flush);
1323 spin_unlock(&skc->skc_lock);
1324 }
1325
1326 static void
1327 spl_magazine_age(void *data)
1328 {
1329 spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
1330 spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
1331
1332 ASSERT(skm->skm_magic == SKM_MAGIC);
1333 ASSERT(skm->skm_cpu == smp_processor_id());
1334 ASSERT(irqs_disabled());
1335
1336 /* There are no available objects or they are too young to age out */
1337 if ((skm->skm_avail == 0) ||
1338 time_before(jiffies, skm->skm_age + skc->skc_delay * HZ))
1339 return;
1340
1341 /*
1342 * Because we're executing in interrupt context we may have
1343 * interrupted the holder of this lock. To avoid a potential
1344 * deadlock return if the lock is contended.
1345 */
1346 if (!spin_trylock(&skc->skc_lock))
1347 return;
1348
1349 __spl_cache_flush(skc, skm, skm->skm_refill);
1350 spin_unlock(&skc->skc_lock);
1351 }
1352
1353 /*
1354 * Called regularly to keep a downward pressure on the cache.
1355 *
1356 * Objects older than skc->skc_delay seconds in the per-cpu magazines will
1357 * be returned to the caches. This is done to prevent idle magazines from
1358 * holding memory which could be better used elsewhere. The delay is
1359 * present to prevent thrashing the magazine.
1360 *
1361 * The newly released objects may result in empty partial slabs. Those
1362 * slabs should be released to the system. Otherwise moving the objects
1363 * out of the magazines is just wasted work.
1364 */
1365 static void
1366 spl_cache_age(void *data)
1367 {
1368 spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data;
1369 taskqid_t id = 0;
1370
1371 ASSERT(skc->skc_magic == SKC_MAGIC);
1372
1373 /* Dynamically disabled at run time */
1374 if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE))
1375 return;
1376
1377 atomic_inc(&skc->skc_ref);
1378
1379 if (!(skc->skc_flags & KMC_NOMAGAZINE))
1380 on_each_cpu(spl_magazine_age, skc, 1);
1381
1382 spl_slab_reclaim(skc, skc->skc_reap, 0);
1383
1384 while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) {
1385 id = taskq_dispatch_delay(
1386 spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP,
1387 ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
1388
1389 /* Destroy issued after dispatch immediately cancel it */
1390 if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id)
1391 taskq_cancel_id(spl_kmem_cache_taskq, id);
1392 }
1393
1394 spin_lock(&skc->skc_lock);
1395 skc->skc_taskqid = id;
1396 spin_unlock(&skc->skc_lock);
1397
1398 atomic_dec(&skc->skc_ref);
1399 }
1400
1401 /*
1402 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
1403 * When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
1404 * for very small objects we may end up with more than this so as not
1405  * to waste space in the minimal allocation of a single page. Also, for
1406  * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min;
1407  * any lower than this and we will fail.
1408 */
1409 static int
1410 spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
1411 {
1412 uint32_t sks_size, obj_size, max_size;
1413
1414 if (skc->skc_flags & KMC_OFFSLAB) {
1415 *objs = spl_kmem_cache_obj_per_slab;
1416 *size = P2ROUNDUP(sizeof(spl_kmem_slab_t), PAGE_SIZE);
1417 SRETURN(0);
1418 } else {
1419 sks_size = spl_sks_size(skc);
1420 obj_size = spl_obj_size(skc);
1421
1422 if (skc->skc_flags & KMC_KMEM)
1423 max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
1424 else
1425 max_size = (spl_kmem_cache_max_size * 1024 * 1024);
1426
1427 /* Power of two sized slab */
1428 for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
1429 *objs = (*size - sks_size) / obj_size;
1430 if (*objs >= spl_kmem_cache_obj_per_slab)
1431 SRETURN(0);
1432 }
1433
1434 /*
1435  * Unable to satisfy the target objects per slab, fall back to
1436  * allocating a maximally sized slab and, assuming it can
1437  * contain the minimum object count, use it. If not, fail.
1438 */
1439 *size = max_size;
1440 *objs = (*size - sks_size) / obj_size;
1441 if (*objs >= (spl_kmem_cache_obj_per_slab_min))
1442 SRETURN(0);
1443 }
1444
1445 SRETURN(-ENOSPC);
1446 }
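/*
 * Worked example (illustrative numbers): for an on-slab cache suppose
 * spl_sks_size() is 256 bytes and spl_obj_size() is 1024 bytes. The
 * power-of-two search above starts at PAGE_SIZE, so a 16KiB candidate
 * slab holds (16384 - 256) / 1024 = 15 objects; if the target
 * spl_kmem_cache_obj_per_slab is larger than 15 the slab size is simply
 * doubled and the check repeated.
 */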
1447
1448 /*
1449 * Make a guess at reasonable per-cpu magazine size based on the size of
1450 * each object and the cost of caching N of them in each magazine. Long
1451 * term this should really adapt based on an observed usage heuristic.
1452 */
1453 static int
1454 spl_magazine_size(spl_kmem_cache_t *skc)
1455 {
1456 uint32_t obj_size = spl_obj_size(skc);
1457 int size;
1458 SENTRY;
1459
1460  /* Per-magazine sizes below assume a 4KiB page size */
1461  if (obj_size > (PAGE_SIZE * 256))
1462  size = 4; /* Minimum 4MiB per-magazine */
1463  else if (obj_size > (PAGE_SIZE * 32))
1464  size = 16; /* Minimum 2MiB per-magazine */
1465  else if (obj_size > (PAGE_SIZE))
1466  size = 64; /* Minimum 256KiB per-magazine */
1467  else if (obj_size > (PAGE_SIZE / 4))
1468  size = 128; /* Minimum 128KiB per-magazine */
1469 else
1470 size = 256;
1471
1472 SRETURN(size);
1473 }
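/*
 * Worked example (assumes 4KiB pages): an 8KiB object falls in the
 * "obj_size > PAGE_SIZE" bucket above, so each per-cpu magazine caches up
 * to 64 objects (roughly 512KiB per CPU), while a 256 byte object falls
 * through to the default of 256 cached objects (about 64KiB per CPU).
 */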
1474
1475 /*
1476 * Allocate a per-cpu magazine to associate with a specific core.
1477 */
1478 static spl_kmem_magazine_t *
1479 spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
1480 {
1481 spl_kmem_magazine_t *skm;
1482 int size = sizeof(spl_kmem_magazine_t) +
1483 sizeof(void *) * skc->skc_mag_size;
1484 SENTRY;
1485
1486 skm = kmem_alloc_node(size, KM_SLEEP, cpu_to_node(cpu));
1487 if (skm) {
1488 skm->skm_magic = SKM_MAGIC;
1489 skm->skm_avail = 0;
1490 skm->skm_size = skc->skc_mag_size;
1491 skm->skm_refill = skc->skc_mag_refill;
1492 skm->skm_cache = skc;
1493 skm->skm_age = jiffies;
1494 skm->skm_cpu = cpu;
1495 }
1496
1497 SRETURN(skm);
1498 }
1499
1500 /*
1501 * Free a per-cpu magazine associated with a specific core.
1502 */
1503 static void
1504 spl_magazine_free(spl_kmem_magazine_t *skm)
1505 {
1506 int size = sizeof(spl_kmem_magazine_t) +
1507 sizeof(void *) * skm->skm_size;
1508
1509 SENTRY;
1510 ASSERT(skm->skm_magic == SKM_MAGIC);
1511 ASSERT(skm->skm_avail == 0);
1512
1513 kmem_free(skm, size);
1514 SEXIT;
1515 }
1516
1517 /*
1518  * Create all per-cpu magazines of reasonable sizes.
1519 */
1520 static int
1521 spl_magazine_create(spl_kmem_cache_t *skc)
1522 {
1523 int i;
1524 SENTRY;
1525
1526 if (skc->skc_flags & KMC_NOMAGAZINE)
1527 SRETURN(0);
1528
1529 skc->skc_mag_size = spl_magazine_size(skc);
1530 skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;
1531
1532 for_each_online_cpu(i) {
1533 skc->skc_mag[i] = spl_magazine_alloc(skc, i);
1534 if (!skc->skc_mag[i]) {
1535 for (i--; i >= 0; i--)
1536 spl_magazine_free(skc->skc_mag[i]);
1537
1538 SRETURN(-ENOMEM);
1539 }
1540 }
1541
1542 SRETURN(0);
1543 }
1544
1545 /*
1546  * Destroy all per-cpu magazines.
1547 */
1548 static void
1549 spl_magazine_destroy(spl_kmem_cache_t *skc)
1550 {
1551 spl_kmem_magazine_t *skm;
1552 int i;
1553 SENTRY;
1554
1555 if (skc->skc_flags & KMC_NOMAGAZINE) {
1556 SEXIT;
1557 return;
1558 }
1559
1560 for_each_online_cpu(i) {
1561 skm = skc->skc_mag[i];
1562 spl_cache_flush(skc, skm, skm->skm_avail);
1563 spl_magazine_free(skm);
1564 }
1565
1566 SEXIT;
1567 }
1568
1569 /*
1570  * Create an object cache based on the following arguments:
1571 * name cache name
1572 * size cache object size
1573 * align cache object alignment
1574 * ctor cache object constructor
1575 * dtor cache object destructor
1576 * reclaim cache object reclaim
1577 * priv cache private data for ctor/dtor/reclaim
1578 * vmp unused must be NULL
1579 * flags
1580 * KMC_NOTOUCH Disable cache object aging (unsupported)
1581 * KMC_NODEBUG Disable debugging (unsupported)
1582 * KMC_NOHASH Disable hashing (unsupported)
1583 * KMC_QCACHE Disable qcache (unsupported)
1584 * KMC_NOMAGAZINE Enabled for kmem/vmem, Disabled for Linux slab
1585 * KMC_KMEM Force kmem backed cache
1586 * KMC_VMEM Force vmem backed cache
1587 * KMC_SLAB Force Linux slab backed cache
1588 * KMC_OFFSLAB Locate objects off the slab
1589 */
1590 spl_kmem_cache_t *
1591 spl_kmem_cache_create(char *name, size_t size, size_t align,
1592 spl_kmem_ctor_t ctor,
1593 spl_kmem_dtor_t dtor,
1594 spl_kmem_reclaim_t reclaim,
1595 void *priv, void *vmp, int flags)
1596 {
1597 spl_kmem_cache_t *skc;
1598 int rc;
1599 SENTRY;
1600
1601 ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags);
1602 ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags);
1603 ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags);
1604 ASSERT(vmp == NULL);
1605
1606 might_sleep();
1607
1608 /*
1609  * Allocate memory for a new cache and initialize it. Unfortunately,
1610  * this usually ends up being a large allocation of ~32k because
1611  * we need to allocate enough memory for the worst case number of
1612  * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we
1613  * explicitly pass KM_NODEBUG to suppress the kmem warning.
1614 */
1615  skc = kmem_zalloc(sizeof(*skc), KM_SLEEP | KM_NODEBUG);
1616 if (skc == NULL)
1617 SRETURN(NULL);
1618
1619 skc->skc_magic = SKC_MAGIC;
1620 skc->skc_name_size = strlen(name) + 1;
1621 skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, KM_SLEEP);
1622 if (skc->skc_name == NULL) {
1623 kmem_free(skc, sizeof(*skc));
1624 SRETURN(NULL);
1625 }
1626 strncpy(skc->skc_name, name, skc->skc_name_size);
1627
1628 skc->skc_ctor = ctor;
1629 skc->skc_dtor = dtor;
1630 skc->skc_reclaim = reclaim;
1631 skc->skc_private = priv;
1632 skc->skc_vmp = vmp;
1633 skc->skc_linux_cache = NULL;
1634 skc->skc_flags = flags;
1635 skc->skc_obj_size = size;
1636 skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
1637 skc->skc_delay = SPL_KMEM_CACHE_DELAY;
1638 skc->skc_reap = SPL_KMEM_CACHE_REAP;
1639 atomic_set(&skc->skc_ref, 0);
1640
1641 INIT_LIST_HEAD(&skc->skc_list);
1642 INIT_LIST_HEAD(&skc->skc_complete_list);
1643 INIT_LIST_HEAD(&skc->skc_partial_list);
1644 skc->skc_emergency_tree = RB_ROOT;
1645 spin_lock_init(&skc->skc_lock);
1646 init_waitqueue_head(&skc->skc_waitq);
1647 skc->skc_slab_fail = 0;
1648 skc->skc_slab_create = 0;
1649 skc->skc_slab_destroy = 0;
1650 skc->skc_slab_total = 0;
1651 skc->skc_slab_alloc = 0;
1652 skc->skc_slab_max = 0;
1653 skc->skc_obj_total = 0;
1654 skc->skc_obj_alloc = 0;
1655 skc->skc_obj_max = 0;
1656 skc->skc_obj_deadlock = 0;
1657 skc->skc_obj_emergency = 0;
1658 skc->skc_obj_emergency_max = 0;
1659
1660 /*
1661 * Verify the requested alignment restriction is sane.
1662 */
1663 if (align) {
1664 VERIFY(ISP2(align));
1665 VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
1666 VERIFY3U(align, <=, PAGE_SIZE);
1667 skc->skc_obj_align = align;
1668 }
1669
1670 /*
1671 * When no specific type of slab is requested (kmem, vmem, or
1672 * linuxslab) then select a cache type based on the object size
1673 * and default tunables.
1674 */
1675 if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) {
1676
1677 /*
1678 * Objects smaller than spl_kmem_cache_slab_limit can
1679 * use the Linux slab for better space-efficiency. By
1680 * default this functionality is disabled until its
1681  * performance characteristics are fully understood.
1682 */
1683 if (spl_kmem_cache_slab_limit &&
1684 size <= (size_t)spl_kmem_cache_slab_limit)
1685 skc->skc_flags |= KMC_SLAB;
1686
1687 /*
1688 * Small objects, less than spl_kmem_cache_kmem_limit per
1689  * object, should use kmem because their slabs are small.
1690 */
1691 else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit)
1692 skc->skc_flags |= KMC_KMEM;
1693
1694 /*
1695 * All other objects are considered large and are placed
1696 * on vmem backed slabs.
1697 */
1698 else
1699 skc->skc_flags |= KMC_VMEM;
1700 }
1701
1702 /*
1703 * Given the type of slab allocate the required resources.
1704 */
1705 if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
1706 rc = spl_slab_size(skc,
1707 &skc->skc_slab_objs, &skc->skc_slab_size);
1708 if (rc)
1709 SGOTO(out, rc);
1710
1711 rc = spl_magazine_create(skc);
1712 if (rc)
1713 SGOTO(out, rc);
1714 } else {
1715 skc->skc_linux_cache = kmem_cache_create(
1716 skc->skc_name, size, align, 0, NULL);
1717 if (skc->skc_linux_cache == NULL)
1718 SGOTO(out, rc = ENOMEM);
1719
1720 kmem_cache_set_allocflags(skc, __GFP_COMP);
1721 skc->skc_flags |= KMC_NOMAGAZINE;
1722 }
1723
1724 if (spl_kmem_cache_expire & KMC_EXPIRE_AGE)
1725 skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq,
1726 spl_cache_age, skc, TQ_SLEEP,
1727 ddi_get_lbolt() + skc->skc_delay / 3 * HZ);
1728
1729 down_write(&spl_kmem_cache_sem);
1730 list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
1731 up_write(&spl_kmem_cache_sem);
1732
1733 SRETURN(skc);
1734 out:
1735 kmem_free(skc->skc_name, skc->skc_name_size);
1736 kmem_free(skc, sizeof(*skc));
1737 SRETURN(NULL);
1738 }
1739 EXPORT_SYMBOL(spl_kmem_cache_create);
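/*
 * Usage sketch ("my_buf_cache" is a made-up name; the optional ctor, dtor,
 * and reclaim callbacks are omitted): a caller wanting a vmem backed cache
 * of 64KiB buffers could do
 *
 *	spl_kmem_cache_t *my_buf_cache;
 *
 *	my_buf_cache = spl_kmem_cache_create("my_buf_cache", 64 * 1024, 0,
 *	    NULL, NULL, NULL, NULL, NULL, KMC_VMEM);
 *	...
 *	spl_kmem_cache_destroy(my_buf_cache);
 *
 * Passing 0 for the alignment keeps the SPL_KMEM_CACHE_ALIGN default, and
 * the ctor/dtor call sites in this file check for NULL before invoking
 * them.
 */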
1740
1741 /*
1742  * Register a move callback for cache defragmentation.
1743 * XXX: Unimplemented but harmless to stub out for now.
1744 */
1745 void
1746 spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
1747 kmem_cbrc_t (move)(void *, void *, size_t, void *))
1748 {
1749 ASSERT(move != NULL);
1750 }
1751 EXPORT_SYMBOL(spl_kmem_cache_set_move);
1752
1753 /*
1754 * Destroy a cache and all objects associated with the cache.
1755 */
1756 void
1757 spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
1758 {
1759 DECLARE_WAIT_QUEUE_HEAD(wq);
1760 taskqid_t id;
1761 SENTRY;
1762
1763 ASSERT(skc->skc_magic == SKC_MAGIC);
1764 ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB));
1765
1766 down_write(&spl_kmem_cache_sem);
1767 list_del_init(&skc->skc_list);
1768 up_write(&spl_kmem_cache_sem);
1769
1770  /* Cancel and wait for any pending delayed tasks */
1771 VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));
1772
1773 spin_lock(&skc->skc_lock);
1774 id = skc->skc_taskqid;
1775 spin_unlock(&skc->skc_lock);
1776
1777 taskq_cancel_id(spl_kmem_cache_taskq, id);
1778
1779  /* Wait until all current callers complete; this is mainly
1780 * to catch the case where a low memory situation triggers a
1781 * cache reaping action which races with this destroy. */
1782 wait_event(wq, atomic_read(&skc->skc_ref) == 0);
1783
1784 if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) {
1785 spl_magazine_destroy(skc);
1786 spl_slab_reclaim(skc, 0, 1);
1787 } else {
1788 ASSERT(skc->skc_flags & KMC_SLAB);
1789 kmem_cache_destroy(skc->skc_linux_cache);
1790 }
1791
1792 spin_lock(&skc->skc_lock);
1793
1794 /* Validate there are no objects in use and free all the
1795 * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */
1796 ASSERT3U(skc->skc_slab_alloc, ==, 0);
1797 ASSERT3U(skc->skc_obj_alloc, ==, 0);
1798 ASSERT3U(skc->skc_slab_total, ==, 0);
1799 ASSERT3U(skc->skc_obj_total, ==, 0);
1800 ASSERT3U(skc->skc_obj_emergency, ==, 0);
1801 ASSERT(list_empty(&skc->skc_complete_list));
1802
1803 kmem_free(skc->skc_name, skc->skc_name_size);
1804 spin_unlock(&skc->skc_lock);
1805
1806 kmem_free(skc, sizeof(*skc));
1807
1808 SEXIT;
1809 }
1810 EXPORT_SYMBOL(spl_kmem_cache_destroy);
1811
1812 /*
1813 * Allocate an object from a slab attached to the cache. This is used to
1814 * repopulate the per-cpu magazine caches in batches when they run low.
1815 */
1816 static void *
1817 spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
1818 {
1819 spl_kmem_obj_t *sko;
1820
1821 ASSERT(skc->skc_magic == SKC_MAGIC);
1822 ASSERT(sks->sks_magic == SKS_MAGIC);
1823 ASSERT(spin_is_locked(&skc->skc_lock));
1824
1825 sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
1826 ASSERT(sko->sko_magic == SKO_MAGIC);
1827 ASSERT(sko->sko_addr != NULL);
1828
1829 /* Remove from sks_free_list */
1830 list_del_init(&sko->sko_list);
1831
1832 sks->sks_age = jiffies;
1833 sks->sks_ref++;
1834 skc->skc_obj_alloc++;
1835
1836 /* Track max obj usage statistics */
1837 if (skc->skc_obj_alloc > skc->skc_obj_max)
1838 skc->skc_obj_max = skc->skc_obj_alloc;
1839
1840 /* Track max slab usage statistics */
1841 if (sks->sks_ref == 1) {
1842 skc->skc_slab_alloc++;
1843
1844 if (skc->skc_slab_alloc > skc->skc_slab_max)
1845 skc->skc_slab_max = skc->skc_slab_alloc;
1846 }
1847
1848 return sko->sko_addr;
1849 }
1850
1851 /*
1852 * Generic slab allocation function run by the global work queues.
1853 * It is responsible for allocating a new slab, linking it into the list
1854 * of partial slabs, and then waking any waiters.
1855 */
1856 static void
1857 spl_cache_grow_work(void *data)
1858 {
1859 spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
1860 spl_kmem_cache_t *skc = ska->ska_cache;
1861 spl_kmem_slab_t *sks;
1862
1863 sks = spl_slab_alloc(skc, ska->ska_flags | __GFP_NORETRY | KM_NODEBUG);
1864 spin_lock(&skc->skc_lock);
1865 if (sks) {
1866 skc->skc_slab_total++;
1867 skc->skc_obj_total += sks->sks_objs;
1868 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
1869 }
1870
1871 atomic_dec(&skc->skc_ref);
1872 clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
1873 clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
1874 wake_up_all(&skc->skc_waitq);
1875 spin_unlock(&skc->skc_lock);
1876
1877 kfree(ska);
1878 }
1879
1880 /*
1881 * Returns non-zero when a new slab should be available.
1882 */
1883 static int
1884 spl_cache_grow_wait(spl_kmem_cache_t *skc)
1885 {
1886 return !test_bit(KMC_BIT_GROWING, &skc->skc_flags);
1887 }
1888
1889 /*
1890 * No objects are available on any slab, so create a new one. Note that this
1891 * functionality is disabled for KMC_SLAB caches which are backed by the
1892 * Linux slab.
1893 */
1894 static int
1895 spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
1896 {
1897 int remaining, rc;
1898 SENTRY;
1899
1900 ASSERT(skc->skc_magic == SKC_MAGIC);
1901 ASSERT((skc->skc_flags & KMC_SLAB) == 0);
1902 might_sleep();
1903 *obj = NULL;
1904
1905 /*
1906 * Before allocating a new slab wait for any reaping to complete and
1907 * then return so the local magazine can be rechecked for new objects.
1908 */
1909 if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
1910 rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
1911 TASK_UNINTERRUPTIBLE);
1912 SRETURN(rc ? rc : -EAGAIN);
1913 }
1914
1915 /*
1916 * This is handled by dispatching a work request to the global work
1917 * queue. This allows us to asynchronously allocate a new slab while
1918 * retaining the ability to safely fall back to smaller synchronous
1919 * allocations to ensure forward progress is always maintained.
1920 */
1921 if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
1922 spl_kmem_alloc_t *ska;
1923
1924 ska = kmalloc(sizeof(*ska), flags);
1925 if (ska == NULL) {
1926 clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
1927 wake_up_all(&skc->skc_waitq);
1928 SRETURN(-ENOMEM);
1929 }
1930
1931 atomic_inc(&skc->skc_ref);
1932 ska->ska_cache = skc;
1933 ska->ska_flags = flags & ~__GFP_FS;
1934 taskq_init_ent(&ska->ska_tqe);
1935 taskq_dispatch_ent(spl_kmem_cache_taskq,
1936 spl_cache_grow_work, ska, 0, &ska->ska_tqe);
1937 }
1938
1939 /*
1940 * The goal here is to only detect the rare case where a virtual slab
1941 * allocation has deadlocked. We must be careful to minimize the use
1942 * of emergency objects which are more expensive to track. Therefore,
1943 * we set a very long timeout for the asynchronous allocation and if
1944 * the timeout is reached the cache is flagged as deadlocked. From
1945 * this point only new emergency objects will be allocated until the
1946 * asynchronous allocation completes and clears the deadlocked flag.
1947 */
1948 if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
1949 rc = spl_emergency_alloc(skc, flags, obj);
1950 } else {
1951 remaining = wait_event_timeout(skc->skc_waitq,
1952 spl_cache_grow_wait(skc), HZ);
1953
1954 if (!remaining && test_bit(KMC_BIT_VMEM, &skc->skc_flags)) {
1955 spin_lock(&skc->skc_lock);
1956 if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
1957 set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
1958 skc->skc_obj_deadlock++;
1959 }
1960 spin_unlock(&skc->skc_lock);
1961 }
1962
1963 rc = -ENOMEM;
1964 }
1965
1966 SRETURN(rc);
1967 }
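/*
 * Timing sketch for the deadlock detection above (illustrative only):
 * a caller waits up to HZ jiffies (one second) for the asynchronous
 * grow dispatched to spl_kmem_cache_taskq to finish.  If that timeout
 * expires while KMC_BIT_GROWING is still set and the cache is
 * KMC_VMEM backed, KMC_BIT_DEADLOCKED is raised and subsequent callers
 * are served emergency objects until spl_cache_grow_work() completes
 * and clears both flags.
 */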
1968
1969 /*
1970 * Refill a per-cpu magazine with objects from the slabs for this cache.
1971 * Ideally the magazine can be repopulated using existing objects which have
1972 * been released; however, if we are unable to locate enough free objects new
1973 * slabs of objects will be created. On success NULL is returned, otherwise
1974 * the address of a single emergency object is returned for use by the caller.
1975 */
1976 static void *
1977 spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
1978 {
1979 spl_kmem_slab_t *sks;
1980 int count = 0, rc, refill;
1981 void *obj = NULL;
1982 SENTRY;
1983
1984 ASSERT(skc->skc_magic == SKC_MAGIC);
1985 ASSERT(skm->skm_magic == SKM_MAGIC);
1986
1987 refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
1988 spin_lock(&skc->skc_lock);
1989
1990 while (refill > 0) {
1991 /* No slabs available, we may need to grow the cache */
1992 if (list_empty(&skc->skc_partial_list)) {
1993 spin_unlock(&skc->skc_lock);
1994
1995 local_irq_enable();
1996 rc = spl_cache_grow(skc, flags, &obj);
1997 local_irq_disable();
1998
1999 /* Emergency object for immediate use by caller */
2000 if (rc == 0 && obj != NULL)
2001 SRETURN(obj);
2002
2003 if (rc)
2004 SGOTO(out, rc);
2005
2006 /* Rescheduled to a different CPU, skm is no longer local */
2007 if (skm != skc->skc_mag[smp_processor_id()])
2008 SGOTO(out, rc);
2009
2010 /* Potentially rescheduled to the same CPU but
2011 * allocations may have occurred from this CPU while
2012 * we were sleeping, so recalculate max refill. */
2013 refill = MIN(refill, skm->skm_size - skm->skm_avail);
2014
2015 spin_lock(&skc->skc_lock);
2016 continue;
2017 }
2018
2019 /* Grab the next available slab */
2020 sks = list_entry((&skc->skc_partial_list)->next,
2021 spl_kmem_slab_t, sks_list);
2022 ASSERT(sks->sks_magic == SKS_MAGIC);
2023 ASSERT(sks->sks_ref < sks->sks_objs);
2024 ASSERT(!list_empty(&sks->sks_free_list));
2025
2026 /* Consume as many objects as needed to refill the requested
2027 * cache. We must also be careful not to overfill it. */
2028 while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++count) {
2029 ASSERT(skm->skm_avail < skm->skm_size);
2030 ASSERT(count < skm->skm_size);
2031 skm->skm_objs[skm->skm_avail++] = spl_cache_obj(skc, sks);
2032 }
2033
2034 /* Move slab to skc_complete_list when full */
2035 if (sks->sks_ref == sks->sks_objs) {
2036 list_del(&sks->sks_list);
2037 list_add(&sks->sks_list, &skc->skc_complete_list);
2038 }
2039 }
2040
2041 spin_unlock(&skc->skc_lock);
2042 out:
2043 SRETURN(NULL);
2044 }
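/*
 * Worked example of the refill bound above (numbers are illustrative):
 * with skm_size = 32, skm_avail = 3, and skm_refill = 16 the initial
 * refill is MIN(16, 32 - 3) = 16 objects.  If the caller sleeps in
 * spl_cache_grow() and other allocations raise skm_avail to 30 before
 * it resumes on the same CPU, the bound is recomputed as
 * MIN(16, 32 - 30) = 2 so the magazine is never overfilled.
 */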
2045
2046 /*
2047 * Release an object back to the slab from which it came.
2048 */
2049 static void
2050 spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
2051 {
2052 spl_kmem_slab_t *sks = NULL;
2053 spl_kmem_obj_t *sko = NULL;
2054 SENTRY;
2055
2056 ASSERT(skc->skc_magic == SKC_MAGIC);
2057 ASSERT(spin_is_locked(&skc->skc_lock));
2058
2059 sko = spl_sko_from_obj(skc, obj);
2060 ASSERT(sko->sko_magic == SKO_MAGIC);
2061 sks = sko->sko_slab;
2062 ASSERT(sks->sks_magic == SKS_MAGIC);
2063 ASSERT(sks->sks_cache == skc);
2064 list_add(&sko->sko_list, &sks->sks_free_list);
2065
2066 sks->sks_age = jiffies;
2067 sks->sks_ref--;
2068 skc->skc_obj_alloc--;
2069
2070 /* Move slab to skc_partial_list when no longer full. Slabs
2071 * are added to the head to keep the partial list in quasi-full
2072 * sorted order. Fuller at the head, emptier at the tail. */
2073 if (sks->sks_ref == (sks->sks_objs - 1)) {
2074 list_del(&sks->sks_list);
2075 list_add(&sks->sks_list, &skc->skc_partial_list);
2076 }
2077
2078 /* Move empty slabs to the end of the partial list so
2079 * they can be easily found and freed during reclamation. */
2080 if (sks->sks_ref == 0) {
2081 list_del(&sks->sks_list);
2082 list_add_tail(&sks->sks_list, &skc->skc_partial_list);
2083 skc->skc_slab_alloc--;
2084 }
2085
2086 SEXIT;
2087 }
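/*
 * Walk-through of the list ordering above (illustrative): for a slab
 * with sks_objs = 8, the first free drops sks_ref from 8 to 7 and the
 * slab moves from skc_complete_list to the head of skc_partial_list.
 * When the last object is freed (sks_ref == 0) the slab moves to the
 * tail of the partial list, where spl_slab_reclaim() can find and free
 * empty slabs cheaply.
 */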
2088
2089 /*
2090 * Allocate an object from the per-cpu magazine, or, if the magazine
2091 * is empty, allocate directly from a slab and repopulate the magazine.
2092 */
2093 void *
2094 spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
2095 {
2096 spl_kmem_magazine_t *skm;
2097 void *obj = NULL;
2098 SENTRY;
2099
2100 ASSERT(skc->skc_magic == SKC_MAGIC);
2101 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
2102 ASSERT(flags & KM_SLEEP);
2103
2104 atomic_inc(&skc->skc_ref);
2105
2106 /*
2107 * Allocate directly from a Linux slab. All optimizations are left
2108 * to the underlying cache; we only need to guarantee that KM_SLEEP
2109 * callers will never fail.
2110 */
2111 if (skc->skc_flags & KMC_SLAB) {
2112 struct kmem_cache *slc = skc->skc_linux_cache;
2113
2114 do {
2115 obj = kmem_cache_alloc(slc, flags | __GFP_COMP);
2116 if (obj && skc->skc_ctor)
2117 skc->skc_ctor(obj, skc->skc_private, flags);
2118
2119 } while ((obj == NULL) && !(flags & KM_NOSLEEP));
2120
2121 atomic_dec(&skc->skc_ref);
2122 SRETURN(obj);
2123 }
2124
2125 local_irq_disable();
2126
2127 restart:
2128 /* Safe to update the per-cpu structure without a lock, but
2129 * in the restart case we must be careful to reacquire the
2130 * local magazine since it may have changed while we were
2131 * growing the cache. */
2132 skm = skc->skc_mag[smp_processor_id()];
2133 ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n",
2134 skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm,
2135 skm->skm_size, skm->skm_refill, skm->skm_avail);
2136
2137 if (likely(skm->skm_avail)) {
2138 /* Object available in CPU cache, use it */
2139 obj = skm->skm_objs[--skm->skm_avail];
2140 skm->skm_age = jiffies;
2141 } else {
2142 obj = spl_cache_refill(skc, skm, flags);
2143 if (obj == NULL)
2144 SGOTO(restart, obj = NULL);
2145 }
2146
2147 local_irq_enable();
2148 ASSERT(obj);
2149 ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
2150
2151 /* Pre-emptively migrate object to CPU L1 cache */
2152 prefetchw(obj);
2153 atomic_dec(&skc->skc_ref);
2154
2155 SRETURN(obj);
2156 }
2157 EXPORT_SYMBOL(spl_kmem_cache_alloc);
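/*
 * Illustrative call site (a sketch, not part of the build): KM_SLEEP is
 * asserted above, so callers must be able to block, and in exchange the
 * allocation is guaranteed not to fail.  'skc' is assumed to be a cache
 * handle obtained from spl_kmem_cache_create().
 *
 *	void *obj;
 *
 *	obj = spl_kmem_cache_alloc(skc, KM_SLEEP);
 *	ASSERT(obj != NULL);
 *	...
 *	spl_kmem_cache_free(skc, obj);
 */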
2158
2159 /*
2160 * Free an object back to the local per-cpu magazine; there is no
2161 * guarantee that this is the same magazine the object was originally
2162 * allocated from. We may need to flush the entire magazine back
2163 * to the slabs to make space.
2164 */
2165 void
2166 spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
2167 {
2168 spl_kmem_magazine_t *skm;
2169 unsigned long flags;
2170 SENTRY;
2171
2172 ASSERT(skc->skc_magic == SKC_MAGIC);
2173 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
2174 atomic_inc(&skc->skc_ref);
2175
2176 /*
2177 * Free the object back to the underlying Linux slab.
2178 */
2179 if (skc->skc_flags & KMC_SLAB) {
2180 if (skc->skc_dtor)
2181 skc->skc_dtor(obj, skc->skc_private);
2182
2183 kmem_cache_free(skc->skc_linux_cache, obj);
2184 goto out;
2185 }
2186
2187 /*
2188 * Only virtual slabs may have emergency objects and these objects
2189 * are guaranteed to have physical addresses. They must be removed
2190 * from the tree of emergency objects and then freed.
2191 */
2192 if ((skc->skc_flags & KMC_VMEM) && !kmem_virt(obj))
2193 SGOTO(out, spl_emergency_free(skc, obj));
2194
2195 local_irq_save(flags);
2196
2197 /* Safe to update the per-cpu structure without a lock, but
2198 * since no remote memory allocation tracking is performed it
2199 * is entirely possible to allocate an object from one CPU
2200 * cache and return it to another. */
2201 skm = skc->skc_mag[smp_processor_id()];
2202 ASSERT(skm->skm_magic == SKM_MAGIC);
2203
2204 /* Per-CPU cache full, flush it to make space */
2205 if (unlikely(skm->skm_avail >= skm->skm_size))
2206 spl_cache_flush(skc, skm, skm->skm_refill);
2207
2208 /* Available space in cache, use it */
2209 skm->skm_objs[skm->skm_avail++] = obj;
2210
2211 local_irq_restore(flags);
2212 out:
2213 atomic_dec(&skc->skc_ref);
2214
2215 SEXIT;
2216 }
2217 EXPORT_SYMBOL(spl_kmem_cache_free);
2218
2219 /*
2220 * The generic shrinker function for all caches. Under Linux a shrinker
2221 * may not be tightly coupled with a slab cache. In fact Linux always
2222 * systematically tries calling all registered shrinker callbacks which
2223 * report that they contain unused objects. Because of this we only
2224 * register one shrinker function in the shim layer for all slab caches.
2225 * We always attempt to shrink all caches when this generic shrinker
2226 * is called. The shrinker should return the number of free objects
2227 * in the cache when called with nr_to_scan == 0 but not attempt to
2228 * free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
2229 * objects should be freed, which differs from Solaris semantics.
2230 * Solaris semantics are to free all available objects which may (and
2231 * probably will) be more objects than the requested nr_to_scan.
2232 */
2233 static int
2234 __spl_kmem_cache_generic_shrinker(struct shrinker *shrink,
2235 struct shrink_control *sc)
2236 {
2237 spl_kmem_cache_t *skc;
2238 int alloc = 0;
2239
2240 down_read(&spl_kmem_cache_sem);
2241 list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
2242 if (sc->nr_to_scan)
2243 spl_kmem_cache_reap_now(skc,
2244 MAX(sc->nr_to_scan >> fls64(skc->skc_slab_objs), 1));
2245
2246 /*
2247 * Presume everything alloc'ed is reclaimable; this ensures
2248 * we are called again with nr_to_scan > 0 so we can try to
2249 * reclaim. The exact number is not important either, so
2250 * we forgo taking this already highly contended lock.
2251 */
2252 alloc += skc->skc_obj_alloc;
2253 }
2254 up_read(&spl_kmem_cache_sem);
2255
2256 /*
2257 * When KMC_RECLAIM_ONCE is set allow only a single reclaim pass.
2258 * This functionality only exists to work around a rare issue where
2259 * shrink_slabs() is repeatedly invoked by many cores causing the
2260 * system to thrash.
2261 */
2262 if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan)
2263 return (-1);
2264
2265 return (MAX(alloc, 0));
2266 }
2267
2268 SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker);
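/*
 * Worked example of the per-cache scan count above (numbers are
 * illustrative): with sc->nr_to_scan = 128 and skc_slab_objs = 8,
 * fls64(8) = 4 and 128 >> 4 = 8, so spl_kmem_cache_reap_now() is asked
 * to reclaim up to 8 slabs from that cache.  The MAX(..., 1) ensures at
 * least one slab is requested even when skc_slab_objs is very large.
 */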
2269
2270 /*
2271 * Call the registered reclaim function for a cache. Depending on how
2272 * many and which objects are released it may simply repopulate the
2273 * local magazine which will then need to age out. Objects which cannot
2274 * fit in the magazine will be released back to their slabs, which will
2275 * also need to age out before being released. This is all just best
2276 * effort and we do not want to thrash creating and destroying slabs.
2277 */
2278 void
2279 spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count)
2280 {
2281 SENTRY;
2282
2283 ASSERT(skc->skc_magic == SKC_MAGIC);
2284 ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));
2285
2286 atomic_inc(&skc->skc_ref);
2287
2288 /*
2289 * Execute the registered reclaim callback if it exists. The
2290 * per-cpu caches will be drained when KMC_EXPIRE_MEM is set.
2291 */
2292 if (skc->skc_flags & KMC_SLAB) {
2293 if (skc->skc_reclaim)
2294 skc->skc_reclaim(skc->skc_private);
2295
2296 if (spl_kmem_cache_expire & KMC_EXPIRE_MEM)
2297 kmem_cache_shrink(skc->skc_linux_cache);
2298
2299 SGOTO(out, 0);
2300 }
2301
2302 /*
2303 * Prevent concurrent cache reaping when contended.
2304 */
2305 if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
2306 SGOTO(out, 0);
2307
2308 /*
2309 * When a reclaim function is available it may be invoked repeatedly
2310 * until at least a single slab can be freed. This ensures that we
2311 * do free memory back to the system. This helps minimize the chance
2312 * of an OOM event when the bulk of memory is used by the slab.
2313 *
2314 * When free slabs are already available the reclaim callback will be
2315 * skipped. Additionally, if no forward progress is detected despite
2316 * a reclaim function the cache will be skipped to avoid deadlock.
2317 *
2318 * Longer term this would be the correct place to add the code which
2319 * repacks the slabs in order to minimize fragmentation.
2320 */
2321 if (skc->skc_reclaim) {
2322 uint64_t objects = UINT64_MAX;
2323 int do_reclaim;
2324
2325 do {
2326 spin_lock(&skc->skc_lock);
2327 do_reclaim =
2328 (skc->skc_slab_total > 0) &&
2329 ((skc->skc_slab_total - skc->skc_slab_alloc) == 0) &&
2330 (skc->skc_obj_alloc < objects);
2331
2332 objects = skc->skc_obj_alloc;
2333 spin_unlock(&skc->skc_lock);
2334
2335 if (do_reclaim)
2336 skc->skc_reclaim(skc->skc_private);
2337
2338 } while (do_reclaim);
2339 }
2340
2341 /* Reclaim from the magazine, then the slabs, ignoring age and delay. */
2342 if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) {
2343 spl_kmem_magazine_t *skm;
2344 unsigned long irq_flags;
2345
2346 local_irq_save(irq_flags);
2347 skm = skc->skc_mag[smp_processor_id()];
2348 spl_cache_flush(skc, skm, skm->skm_avail);
2349 local_irq_restore(irq_flags);
2350 }
2351
2352 spl_slab_reclaim(skc, count, 1);
2353 clear_bit(KMC_BIT_REAPING, &skc->skc_flags);
2354 smp_wmb();
2355 wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
2356 out:
2357 atomic_dec(&skc->skc_ref);
2358
2359 SEXIT;
2360 }
2361 EXPORT_SYMBOL(spl_kmem_cache_reap_now);
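/*
 * Sketch of a consumer reclaim callback (hypothetical names, not part
 * of the build).  As shown above the callback is invoked as
 * skc->skc_reclaim(skc->skc_private), so it receives only the private
 * pointer supplied when the cache was created and should release any
 * cached-but-unused objects so whole slabs can be freed.
 *
 *	static void
 *	my_cache_reclaim(void *priv)
 *	{
 *		my_state_t *ms = priv;
 *
 *		my_state_trim(ms);	/* hypothetical helper returning
 *					 * idle objects to the cache */
 *	}
 */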
2362
2363 /*
2364 * Reap all free slabs from all registered caches.
2365 */
2366 void
2367 spl_kmem_reap(void)
2368 {
2369 struct shrink_control sc;
2370
2371 sc.nr_to_scan = KMC_REAP_CHUNK;
2372 sc.gfp_mask = GFP_KERNEL;
2373
2374 __spl_kmem_cache_generic_shrinker(NULL, &sc);
2375 }
2376 EXPORT_SYMBOL(spl_kmem_reap);
2377
2378 #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING)
2379 static char *
2380 spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min)
2381 {
2382 int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size;
2383 int i, flag = 1;
2384
2385 ASSERT(str != NULL && len >= 17);
2386 memset(str, 0, len);
2387
2388 /* Check for a fully printable string, and while we are at
2389 * it place the printable characters in the passed buffer. */
2390 for (i = 0; i < size; i++) {
2391 str[i] = ((char *)(kd->kd_addr))[i];
2392 if (isprint(str[i])) {
2393 continue;
2394 } else {
2395 /* Minimum number of printable characters found
2396 * to make it worthwhile to print this as ascii. */
2397 if (i > min)
2398 break;
2399
2400 flag = 0;
2401 break;
2402 }
2403 }
2404
2405 if (!flag) {
2406 sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x",
2407 *((uint8_t *)kd->kd_addr),
2408 *((uint8_t *)kd->kd_addr + 2),
2409 *((uint8_t *)kd->kd_addr + 4),
2410 *((uint8_t *)kd->kd_addr + 6),
2411 *((uint8_t *)kd->kd_addr + 8),
2412 *((uint8_t *)kd->kd_addr + 10),
2413 *((uint8_t *)kd->kd_addr + 12),
2414 *((uint8_t *)kd->kd_addr + 14));
2415 }
2416
2417 return str;
2418 }
2419
2420 static int
2421 spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size)
2422 {
2423 int i;
2424 SENTRY;
2425
2426 spin_lock_init(lock);
2427 INIT_LIST_HEAD(list);
2428
2429 for (i = 0; i < size; i++)
2430 INIT_HLIST_HEAD(&kmem_table[i]);
2431
2432 SRETURN(0);
2433 }
2434
2435 static void
2436 spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock)
2437 {
2438 unsigned long flags;
2439 kmem_debug_t *kd;
2440 char str[17];
2441 SENTRY;
2442
2443 spin_lock_irqsave(lock, flags);
2444 if (!list_empty(list))
2445 printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address",
2446 "size", "data", "func", "line");
2447
2448 list_for_each_entry(kd, list, kd_list)
2449 printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr,
2450 (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8),
2451 kd->kd_func, kd->kd_line);
2452
2453 spin_unlock_irqrestore(lock, flags);
2454 SEXIT;
2455 }
2456 #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
2457 #define spl_kmem_init_tracking(list, lock, size)
2458 #define spl_kmem_fini_tracking(list, lock)
2459 #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */
2460
2461 static void
2462 spl_kmem_init_globals(void)
2463 {
2464 struct zone *zone;
2465
2466 /* For now all zones are included; it may be wise to restrict
2467 * this to normal and highmem zones if we see problems. */
2468 for_each_zone(zone) {
2469
2470 if (!populated_zone(zone))
2471 continue;
2472
2473 minfree += min_wmark_pages(zone);
2474 desfree += low_wmark_pages(zone);
2475 lotsfree += high_wmark_pages(zone);
2476 }
2477
2478 /* Solaris default values */
2479 swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3);
2480 swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4);
2481 }
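/*
 * Worked example of the defaults above (illustrative, assuming 4 KiB
 * pages so PAGE_SHIFT = 12): 2*1024*1024 >> 12 = 512 pages and
 * 4*1024*1024 >> 12 = 1024 pages.  On a 1 GiB system physmem is
 * 262144 pages, so swapfs_minfree = MAX(512, 32768) = 32768 pages
 * (128 MiB) and swapfs_reserve = MIN(1024, 16384) = 1024 pages (4 MiB).
 */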
2482
2483 /*
2484 * Called at module init when it is safe to use spl_kallsyms_lookup_name()
2485 */
2486 int
2487 spl_kmem_init_kallsyms_lookup(void)
2488 {
2489 #ifdef HAVE_PGDAT_HELPERS
2490 # ifndef HAVE_FIRST_ONLINE_PGDAT
2491 first_online_pgdat_fn = (first_online_pgdat_t)
2492 spl_kallsyms_lookup_name("first_online_pgdat");
2493 if (!first_online_pgdat_fn) {
2494 printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n");
2495 return -EFAULT;
2496 }
2497 # endif /* HAVE_FIRST_ONLINE_PGDAT */
2498
2499 # ifndef HAVE_NEXT_ONLINE_PGDAT
2500 next_online_pgdat_fn = (next_online_pgdat_t)
2501 spl_kallsyms_lookup_name("next_online_pgdat");
2502 if (!next_online_pgdat_fn) {
2503 printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n");
2504 return -EFAULT;
2505 }
2506 # endif /* HAVE_NEXT_ONLINE_PGDAT */
2507
2508 # ifndef HAVE_NEXT_ZONE
2509 next_zone_fn = (next_zone_t)
2510 spl_kallsyms_lookup_name("next_zone");
2511 if (!next_zone_fn) {
2512 printk(KERN_ERR "Error: Unknown symbol next_zone\n");
2513 return -EFAULT;
2514 }
2515 # endif /* HAVE_NEXT_ZONE */
2516
2517 #else /* HAVE_PGDAT_HELPERS */
2518
2519 # ifndef HAVE_PGDAT_LIST
2520 pgdat_list_addr = *(struct pglist_data **)
2521 spl_kallsyms_lookup_name("pgdat_list");
2522 if (!pgdat_list_addr) {
2523 printk(KERN_ERR "Error: Unknown symbol pgdat_list\n");
2524 return -EFAULT;
2525 }
2526 # endif /* HAVE_PGDAT_LIST */
2527 #endif /* HAVE_PGDAT_HELPERS */
2528
2529 #if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS)
2530 get_zone_counts_fn = (get_zone_counts_t)
2531 spl_kallsyms_lookup_name("get_zone_counts");
2532 if (!get_zone_counts_fn) {
2533 printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n");
2534 return -EFAULT;
2535 }
2536 #endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */
2537
2538 /*
2539 * It is now safe to initialize the global tunings which rely on
2540 * the use of the for_each_zone() macro. This macro in turn
2541 * depends on the *_pgdat symbols which are now available.
2542 */
2543 spl_kmem_init_globals();
2544
2545 #ifndef HAVE_SHRINK_DCACHE_MEMORY
2546 /* When shrink_dcache_memory_fn == NULL support is disabled */
2547 shrink_dcache_memory_fn = (shrink_dcache_memory_t)
2548 spl_kallsyms_lookup_name("shrink_dcache_memory");
2549 #endif /* HAVE_SHRINK_DCACHE_MEMORY */
2550
2551 #ifndef HAVE_SHRINK_ICACHE_MEMORY
2552 /* When shrink_icache_memory_fn == NULL support is disabled */
2553 shrink_icache_memory_fn = (shrink_icache_memory_t)
2554 spl_kallsyms_lookup_name("shrink_icache_memory");
2555 #endif /* HAVE_SHRINK_ICACHE_MEMORY */
2556
2557 return 0;
2558 }
2559
2560 int
2561 spl_kmem_init(void)
2562 {
2563 int rc = 0;
2564 SENTRY;
2565
2566 #ifdef DEBUG_KMEM
2567 kmem_alloc_used_set(0);
2568 vmem_alloc_used_set(0);
2569
2570 spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE);
2571 spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE);
2572 #endif
2573
2574 init_rwsem(&spl_kmem_cache_sem);
2575 INIT_LIST_HEAD(&spl_kmem_cache_list);
2576 spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
2577 1, maxclsyspri, 1, 32, TASKQ_PREPOPULATE);
2578
2579 spl_register_shrinker(&spl_kmem_cache_shrinker);
2580
2581 SRETURN(rc);
2582 }
2583
2584 void
2585 spl_kmem_fini(void)
2586 {
2587 SENTRY;
2588
2589 spl_unregister_shrinker(&spl_kmem_cache_shrinker);
2590 taskq_destroy(spl_kmem_cache_taskq);
2591
2592 #ifdef DEBUG_KMEM
2593 /* Display all unreclaimed memory addresses, including the
2594 * allocation size and the first few bytes of what's located
2595 * at that address to aid in debugging. Performance is not
2596 * a serious concern here since it is module unload time. */
2597 if (kmem_alloc_used_read() != 0)
2598 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
2599 "kmem leaked %ld/%ld bytes\n",
2600 kmem_alloc_used_read(), kmem_alloc_max);
2601
2602
2603 if (vmem_alloc_used_read() != 0)
2604 SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
2605 "vmem leaked %ld/%ld bytes\n",
2606 vmem_alloc_used_read(), vmem_alloc_max);
2607
2608 spl_kmem_fini_tracking(&kmem_list, &kmem_lock);
2609 spl_kmem_fini_tracking(&vmem_list, &vmem_lock);
2610 #endif /* DEBUG_KMEM */
2611
2612 SEXIT;
2613 }