/*****************************************************************************\
 *  Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 *  Copyright (C) 2007 The Regents of the University of California.
 *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 *  Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 *  UCRL-CODE-235197
 *
 *  This file is part of the SPL, Solaris Porting Layer.
 *  For details, see <http://github.com/behlendorf/spl/>.
 *
 *  The SPL is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  The SPL is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 *  for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with the SPL.  If not, see <http://www.gnu.org/licenses/>.
 *****************************************************************************
 *  Solaris Porting Layer (SPL) Kmem Implementation.
\*****************************************************************************/

#include <sys/kmem.h>
#include <spl-debug.h>

#ifdef SS_DEBUG_SUBSYS
#undef SS_DEBUG_SUBSYS
#endif

#define SS_DEBUG_SUBSYS SS_KMEM

/*
 * The minimum amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_min
 * multiplied by the number of zones and is sized based on that.
 */
pgcnt_t minfree = 0;
EXPORT_SYMBOL(minfree);

/*
 * The desired amount of memory measured in pages to be free at all
 * times on the system.  This is similar to Linux's zone->pages_low
 * multiplied by the number of zones and is sized based on that.
 * Assuming all zones are being used roughly equally, when we drop
 * below this threshold asynchronous page reclamation is triggered.
 */
pgcnt_t desfree = 0;
EXPORT_SYMBOL(desfree);

/*
 * When above this amount of memory measured in pages the system is
 * determined to have enough free memory.  This is similar to Linux's
 * zone->pages_high multiplied by the number of zones and is sized based
 * on that.  Assuming all zones are being used roughly equally, when
 * asynchronous page reclamation reaches this threshold it stops.
 */
pgcnt_t lotsfree = 0;
EXPORT_SYMBOL(lotsfree);

/* Unused, always 0 in this implementation. */
pgcnt_t needfree = 0;
EXPORT_SYMBOL(needfree);

pgcnt_t swapfs_minfree = 0;
EXPORT_SYMBOL(swapfs_minfree);

pgcnt_t swapfs_reserve = 0;
EXPORT_SYMBOL(swapfs_reserve);

vmem_t *heap_arena = NULL;
EXPORT_SYMBOL(heap_arena);

vmem_t *zio_alloc_arena = NULL;
EXPORT_SYMBOL(zio_alloc_arena);

vmem_t *zio_arena = NULL;
EXPORT_SYMBOL(zio_arena);

#ifndef HAVE_GET_VMALLOC_INFO
get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_vmalloc_info_fn);
#endif /* HAVE_GET_VMALLOC_INFO */

#ifdef HAVE_PGDAT_HELPERS
# ifndef HAVE_FIRST_ONLINE_PGDAT
first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(first_online_pgdat_fn);
# endif /* HAVE_FIRST_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ONLINE_PGDAT
next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_online_pgdat_fn);
# endif /* HAVE_NEXT_ONLINE_PGDAT */

# ifndef HAVE_NEXT_ZONE
next_zone_t next_zone_fn = SYMBOL_POISON;
EXPORT_SYMBOL(next_zone_fn);
# endif /* HAVE_NEXT_ZONE */

#else /* HAVE_PGDAT_HELPERS */

# ifndef HAVE_PGDAT_LIST
struct pglist_data *pgdat_list_addr = SYMBOL_POISON;
EXPORT_SYMBOL(pgdat_list_addr);
# endif /* HAVE_PGDAT_LIST */

#endif /* HAVE_PGDAT_HELPERS */

#ifdef NEED_GET_ZONE_COUNTS
# ifndef HAVE_GET_ZONE_COUNTS
get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON;
EXPORT_SYMBOL(get_zone_counts_fn);
# endif /* HAVE_GET_ZONE_COUNTS */

unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
	unsigned long active;
	unsigned long inactive;
	unsigned long free;

	get_zone_counts(&active, &inactive, &free);
	switch (item) {
	case SPL_NR_FREE_PAGES: return free;
	case SPL_NR_INACTIVE:   return inactive;
	case SPL_NR_ACTIVE:     return active;
	default:                ASSERT(0); /* Unsupported */
	}

	return 0;
}
#else
# ifdef HAVE_GLOBAL_PAGE_STATE
unsigned long
spl_global_page_state(spl_zone_stat_item_t item)
{
	unsigned long pages = 0;

	switch (item) {
	case SPL_NR_FREE_PAGES:
# ifdef HAVE_ZONE_STAT_ITEM_NR_FREE_PAGES
		pages += global_page_state(NR_FREE_PAGES);
# endif
		break;
	case SPL_NR_INACTIVE:
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE
		pages += global_page_state(NR_INACTIVE);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_ANON
		pages += global_page_state(NR_INACTIVE_ANON);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_INACTIVE_FILE
		pages += global_page_state(NR_INACTIVE_FILE);
# endif
		break;
	case SPL_NR_ACTIVE:
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE
		pages += global_page_state(NR_ACTIVE);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_ANON
		pages += global_page_state(NR_ACTIVE_ANON);
# endif
# ifdef HAVE_ZONE_STAT_ITEM_NR_ACTIVE_FILE
		pages += global_page_state(NR_ACTIVE_FILE);
# endif
		break;
	default:
		ASSERT(0); /* Unsupported */
	}

	return pages;
}
# else
#  error "Both global_page_state() and get_zone_counts() unavailable"
# endif /* HAVE_GLOBAL_PAGE_STATE */
#endif /* NEED_GET_ZONE_COUNTS */
EXPORT_SYMBOL(spl_global_page_state);

#if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK)
invalidate_inodes_t invalidate_inodes_fn = SYMBOL_POISON;
EXPORT_SYMBOL(invalidate_inodes_fn);
#endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */

#ifndef HAVE_SHRINK_DCACHE_MEMORY
shrink_dcache_memory_t shrink_dcache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_dcache_memory_fn);
#endif /* HAVE_SHRINK_DCACHE_MEMORY */

#ifndef HAVE_SHRINK_ICACHE_MEMORY
shrink_icache_memory_t shrink_icache_memory_fn = SYMBOL_POISON;
EXPORT_SYMBOL(shrink_icache_memory_fn);
#endif /* HAVE_SHRINK_ICACHE_MEMORY */

pgcnt_t
spl_kmem_availrmem(void)
{
	/* The amount of easily available memory */
	return (spl_global_page_state(SPL_NR_FREE_PAGES) +
	    spl_global_page_state(SPL_NR_INACTIVE));
}
EXPORT_SYMBOL(spl_kmem_availrmem);

size_t
vmem_size(vmem_t *vmp, int typemask)
{
	struct vmalloc_info vmi;
	size_t size = 0;

	ASSERT(vmp == NULL);
	ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE));

	get_vmalloc_info(&vmi);
	if (typemask & VMEM_ALLOC)
		size += (size_t)vmi.used;

	if (typemask & VMEM_FREE)
		size += (size_t)(VMALLOC_TOTAL - vmi.used);

	return size;
}
EXPORT_SYMBOL(vmem_size);

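/*
 * Illustrative usage sketch (not part of the original source): since
 * vmem_size() only reports on the single kernel virtual address space,
 * the vmem_t pointer must be NULL and the typemask selects what is
 * summed.
 *
 *	size_t used  = vmem_size(NULL, VMEM_ALLOC);
 *	size_t avail = vmem_size(NULL, VMEM_FREE);
 *	size_t total = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE);
 *
 * By construction total always equals VMALLOC_TOTAL.
 */
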
int
kmem_debugging(void)
{
	return 0;
}
EXPORT_SYMBOL(kmem_debugging);

#ifndef HAVE_KVASPRINTF
/* Simplified asprintf. */
char *kvasprintf(gfp_t gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p;
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(NULL, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len+1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len+1, fmt, ap);

	return p;
}
EXPORT_SYMBOL(kvasprintf);
#endif /* HAVE_KVASPRINTF */

char *
kmem_vasprintf(const char *fmt, va_list ap)
{
	va_list aq;
	char *ptr;

	do {
		va_copy(aq, ap);
		ptr = kvasprintf(GFP_KERNEL, fmt, aq);
		va_end(aq);
	} while (ptr == NULL);

	return ptr;
}
EXPORT_SYMBOL(kmem_vasprintf);

char *
kmem_asprintf(const char *fmt, ...)
{
	va_list ap;
	char *ptr;

	do {
		va_start(ap, fmt);
		ptr = kvasprintf(GFP_KERNEL, fmt, ap);
		va_end(ap);
	} while (ptr == NULL);

	return ptr;
}
EXPORT_SYMBOL(kmem_asprintf);

static char *
__strdup(const char *str, int flags)
{
	char *ptr;
	int n;

	n = strlen(str);
	ptr = kmalloc_nofail(n + 1, flags);
	if (ptr)
		memcpy(ptr, str, n + 1);

	return ptr;
}

char *
strdup(const char *str)
{
	return __strdup(str, KM_SLEEP);
}
EXPORT_SYMBOL(strdup);

void
strfree(char *str)
{
	kfree(str);
}
EXPORT_SYMBOL(strfree);

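/*
 * Illustrative usage sketch (not part of the original source): strings
 * built with kmem_asprintf() above are owned by the caller and should
 * be returned with strfree().
 *
 *	char *name = kmem_asprintf("spl-cache-%d", 0);
 *	...
 *	strfree(name);
 *
 * Because kmem_asprintf() loops until kvasprintf() succeeds, it never
 * returns NULL, unlike kvasprintf() itself.
 */
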
/*
 * Memory allocation interfaces and debugging for basic kmem_*
 * and vmem_* style memory allocation.  When DEBUG_KMEM is enabled
 * the SPL will keep track of the total memory allocated, and
 * report any memory leaked when the module is unloaded.
 */
#ifdef DEBUG_KMEM

/* Shim layer memory accounting */
# ifdef HAVE_ATOMIC64_T
atomic64_t kmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic64_t vmem_alloc_used = ATOMIC64_INIT(0);
unsigned long long vmem_alloc_max = 0;
# else  /* HAVE_ATOMIC64_T */
atomic_t kmem_alloc_used = ATOMIC_INIT(0);
unsigned long long kmem_alloc_max = 0;
atomic_t vmem_alloc_used = ATOMIC_INIT(0);
unsigned long long vmem_alloc_max = 0;
# endif /* HAVE_ATOMIC64_T */

EXPORT_SYMBOL(kmem_alloc_used);
EXPORT_SYMBOL(kmem_alloc_max);
EXPORT_SYMBOL(vmem_alloc_used);
EXPORT_SYMBOL(vmem_alloc_max);

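/*
 * The kmem_alloc_used_{read,add,sub}() and vmem_alloc_used_*()
 * accessors used throughout this file are provided by sys/kmem.h.
 * As a sketch (assumed here, not shown in this file) they reduce to
 * the matching atomic operations, e.g. with HAVE_ATOMIC64_T:
 *
 *	#define kmem_alloc_used_add(size) atomic64_add(size, &kmem_alloc_used)
 *	#define kmem_alloc_used_sub(size) atomic64_sub(size, &kmem_alloc_used)
 *	#define kmem_alloc_used_read()    atomic64_read(&kmem_alloc_used)
 */
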
/* When DEBUG_KMEM_TRACKING is enabled not only will total bytes be tracked
 * but also the location of every alloc and free.  When the SPL module is
 * unloaded a list of all leaked addresses and where they were allocated
 * will be dumped to the console.  Enabling this feature has a significant
 * impact on performance but it makes finding memory leaks straightforward.
 *
 * Not surprisingly with debugging enabled the xmem_locks are very highly
 * contended particularly on xfree().  If we want to run with this detailed
 * debugging enabled for anything other than debugging we need to minimize
 * the contention by moving to a lock per xmem_table entry model.
 */
# ifdef DEBUG_KMEM_TRACKING

# define KMEM_HASH_BITS		10
# define KMEM_TABLE_SIZE	(1 << KMEM_HASH_BITS)

# define VMEM_HASH_BITS		10
# define VMEM_TABLE_SIZE	(1 << VMEM_HASH_BITS)

typedef struct kmem_debug {
	struct hlist_node kd_hlist;	/* Hash node linkage */
	struct list_head kd_list;	/* List of all allocations */
	void *kd_addr;			/* Allocation pointer */
	size_t kd_size;			/* Allocation size */
	const char *kd_func;		/* Allocation function */
	int kd_line;			/* Allocation line */
} kmem_debug_t;

spinlock_t kmem_lock;
struct hlist_head kmem_table[KMEM_TABLE_SIZE];
struct list_head kmem_list;

spinlock_t vmem_lock;
struct hlist_head vmem_table[VMEM_TABLE_SIZE];
struct list_head vmem_list;

EXPORT_SYMBOL(kmem_lock);
EXPORT_SYMBOL(kmem_table);
EXPORT_SYMBOL(kmem_list);

EXPORT_SYMBOL(vmem_lock);
EXPORT_SYMBOL(vmem_table);
EXPORT_SYMBOL(vmem_list);

static kmem_debug_t *
kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kmem_debug *p;
	unsigned long flags;
	SENTRY;

	spin_lock_irqsave(lock, flags);

	head = &table[hash_ptr(addr, bits)];
	hlist_for_each_entry_rcu(p, node, head, kd_hlist) {
		if (p->kd_addr == addr) {
			hlist_del_init(&p->kd_hlist);
			list_del_init(&p->kd_list);
			spin_unlock_irqrestore(lock, flags);
			return p;
		}
	}

	spin_unlock_irqrestore(lock, flags);

	SRETURN(NULL);
}

void *
kmem_alloc_track(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;
	SENTRY;

	/* Function may be called with KM_NOSLEEP so failure is possible */
	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
	    flags & ~__GFP_ZERO);

	if (unlikely(dptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
		    "kmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    sizeof(kmem_debug_t), flags, func, line,
		    kmem_alloc_used_read(), kmem_alloc_max);
	} else {
		/*
		 * Marked unlikely because we should never be doing this;
		 * we tolerate up to 2 pages but a single page is best.
		 */
		if (unlikely((size > PAGE_SIZE*2) && !(flags & KM_NODEBUG))) {
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "large "
			    "kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
			    (unsigned long long) size, flags, func, line,
			    kmem_alloc_used_read(), kmem_alloc_max);
			spl_debug_dumpstack(NULL);
		}

		/*
		 * We use __strdup() below because the string pointed to by
		 * __FUNCTION__ might not be available by the time we want
		 * to print it since the module might have been unloaded.
		 * This can only fail in the KM_NOSLEEP case.
		 */
		dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
		if (unlikely(dptr->kd_func == NULL)) {
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
			    "debug __strdup() at %s:%d failed (%lld/%llu)\n",
			    func, line, kmem_alloc_used_read(), kmem_alloc_max);
			goto out;
		}

		/* Use the correct allocator */
		if (node_alloc) {
			ASSERT(!(flags & __GFP_ZERO));
			ptr = kmalloc_node_nofail(size, flags, node);
		} else if (flags & __GFP_ZERO) {
			ptr = kzalloc_nofail(size, flags & ~__GFP_ZERO);
		} else {
			ptr = kmalloc_nofail(size, flags);
		}

		if (unlikely(ptr == NULL)) {
			kfree(dptr->kd_func);
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "kmem_alloc"
			    "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
			    (unsigned long long) size, flags, func, line,
			    kmem_alloc_used_read(), kmem_alloc_max);
			goto out;
		}

		kmem_alloc_used_add(size);
		if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
			kmem_alloc_max = kmem_alloc_used_read();

		INIT_HLIST_NODE(&dptr->kd_hlist);
		INIT_LIST_HEAD(&dptr->kd_list);

		dptr->kd_addr = ptr;
		dptr->kd_size = size;
		dptr->kd_line = line;

		spin_lock_irqsave(&kmem_lock, irq_flags);
		hlist_add_head_rcu(&dptr->kd_hlist,
		    &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]);
		list_add_tail(&dptr->kd_list, &kmem_list);
		spin_unlock_irqrestore(&kmem_lock, irq_flags);

		SDEBUG_LIMIT(SD_INFO,
		    "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line, ptr,
		    kmem_alloc_used_read(), kmem_alloc_max);
	}
out:
	SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_track);

void
kmem_free_track(void *ptr, size_t size)
{
	kmem_debug_t *dptr;
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr);

	/* Must exist in hash due to kmem_alloc() */
	ASSERT(dptr);

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	kmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, kmem_alloc_used_read(),
	    kmem_alloc_max);

	kfree(dptr->kd_func);

	memset(dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset(ptr, 0x5a, size);
	kfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(kmem_free_track);

void *
vmem_alloc_track(size_t size, int flags, const char *func, int line)
{
	void *ptr = NULL;
	kmem_debug_t *dptr;
	unsigned long irq_flags;
	SENTRY;

	ASSERT(flags & KM_SLEEP);

	/* Failure is unlikely given the KM_SLEEP assert above, but
	 * remain defensive in case the debug record cannot be allocated */
	dptr = (kmem_debug_t *) kmalloc_nofail(sizeof(kmem_debug_t),
	    flags & ~__GFP_ZERO);
	if (unlikely(dptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "debug "
		    "vmem_alloc(%ld, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    sizeof(kmem_debug_t), flags, func, line,
		    vmem_alloc_used_read(), vmem_alloc_max);
	} else {
		/*
		 * We use __strdup() below because the string pointed to by
		 * __FUNCTION__ might not be available by the time we want
		 * to print it, since the module might have been unloaded.
		 * This can never fail because we have already asserted
		 * that flags is KM_SLEEP.
		 */
		dptr->kd_func = __strdup(func, flags & ~__GFP_ZERO);
		if (unlikely(dptr->kd_func == NULL)) {
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
			    "debug __strdup() at %s:%d failed (%lld/%llu)\n",
			    func, line, vmem_alloc_used_read(), vmem_alloc_max);
			goto out;
		}

		/* Use the correct allocator */
		if (flags & __GFP_ZERO) {
			ptr = vzalloc_nofail(size, flags & ~__GFP_ZERO);
		} else {
			ptr = vmalloc_nofail(size, flags);
		}

		if (unlikely(ptr == NULL)) {
			kfree(dptr->kd_func);
			kfree(dptr);
			SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, "vmem_alloc"
			    "(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
			    (unsigned long long) size, flags, func, line,
			    vmem_alloc_used_read(), vmem_alloc_max);
			goto out;
		}

		vmem_alloc_used_add(size);
		if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
			vmem_alloc_max = vmem_alloc_used_read();

		INIT_HLIST_NODE(&dptr->kd_hlist);
		INIT_LIST_HEAD(&dptr->kd_list);

		dptr->kd_addr = ptr;
		dptr->kd_size = size;
		dptr->kd_line = line;

		spin_lock_irqsave(&vmem_lock, irq_flags);
		hlist_add_head_rcu(&dptr->kd_hlist,
		    &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]);
		list_add_tail(&dptr->kd_list, &vmem_list);
		spin_unlock_irqrestore(&vmem_lock, irq_flags);

		SDEBUG_LIMIT(SD_INFO,
		    "vmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    ptr, vmem_alloc_used_read(), vmem_alloc_max);
	}
out:
	SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_track);

void
vmem_free_track(void *ptr, size_t size)
{
	kmem_debug_t *dptr;
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr);

	/* Must exist in hash due to vmem_alloc() */
	ASSERT(dptr);

	/* Size must match */
	ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), "
	    "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size,
	    (unsigned long long) size, dptr->kd_func, dptr->kd_line);

	vmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, vmem_alloc_used_read(),
	    vmem_alloc_max);

	kfree(dptr->kd_func);

	memset(dptr, 0x5a, sizeof(kmem_debug_t));
	kfree(dptr);

	memset(ptr, 0x5a, size);
	vfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(vmem_free_track);

# else /* DEBUG_KMEM_TRACKING */

void *
kmem_alloc_debug(size_t size, int flags, const char *func, int line,
    int node_alloc, int node)
{
	void *ptr;
	SENTRY;

	/*
	 * Marked unlikely because we should never be doing this;
	 * we tolerate up to 2 pages but a single page is best.
	 */
	if (unlikely((size > PAGE_SIZE * 2) && !(flags & KM_NODEBUG))) {
		SDEBUG(SD_CONSOLE | SD_WARNING,
		    "large kmem_alloc(%llu, 0x%x) at %s:%d (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    kmem_alloc_used_read(), kmem_alloc_max);
		dump_stack();
	}

	/* Use the correct allocator */
	if (node_alloc) {
		ASSERT(!(flags & __GFP_ZERO));
		ptr = kmalloc_node_nofail(size, flags, node);
	} else if (flags & __GFP_ZERO) {
		ptr = kzalloc_nofail(size, flags & (~__GFP_ZERO));
	} else {
		ptr = kmalloc_nofail(size, flags);
	}

	if (unlikely(ptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "kmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    kmem_alloc_used_read(), kmem_alloc_max);
	} else {
		kmem_alloc_used_add(size);
		if (unlikely(kmem_alloc_used_read() > kmem_alloc_max))
			kmem_alloc_max = kmem_alloc_used_read();

		SDEBUG_LIMIT(SD_INFO,
		    "kmem_alloc(%llu, 0x%x) at %s:%d = %p (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line, ptr,
		    kmem_alloc_used_read(), kmem_alloc_max);
	}

	SRETURN(ptr);
}
EXPORT_SYMBOL(kmem_alloc_debug);

void
kmem_free_debug(void *ptr, size_t size)
{
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	kmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, kmem_alloc_used_read(),
	    kmem_alloc_max);
	kfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(kmem_free_debug);

void *
vmem_alloc_debug(size_t size, int flags, const char *func, int line)
{
	void *ptr;
	SENTRY;

	ASSERT(flags & KM_SLEEP);

	/* Use the correct allocator */
	if (flags & __GFP_ZERO) {
		ptr = vzalloc_nofail(size, flags & (~__GFP_ZERO));
	} else {
		ptr = vmalloc_nofail(size, flags);
	}

	if (unlikely(ptr == NULL)) {
		SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING,
		    "vmem_alloc(%llu, 0x%x) at %s:%d failed (%lld/%llu)\n",
		    (unsigned long long) size, flags, func, line,
		    vmem_alloc_used_read(), vmem_alloc_max);
	} else {
		vmem_alloc_used_add(size);
		if (unlikely(vmem_alloc_used_read() > vmem_alloc_max))
			vmem_alloc_max = vmem_alloc_used_read();

		SDEBUG_LIMIT(SD_INFO, "vmem_alloc(%llu, 0x%x) = %p "
		    "(%lld/%llu)\n", (unsigned long long) size, flags, ptr,
		    vmem_alloc_used_read(), vmem_alloc_max);
	}

	SRETURN(ptr);
}
EXPORT_SYMBOL(vmem_alloc_debug);

void
vmem_free_debug(void *ptr, size_t size)
{
	SENTRY;

	ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr,
	    (unsigned long long) size);

	vmem_alloc_used_sub(size);
	SDEBUG_LIMIT(SD_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr,
	    (unsigned long long) size, vmem_alloc_used_read(),
	    vmem_alloc_max);
	vfree(ptr);

	SEXIT;
}
EXPORT_SYMBOL(vmem_free_debug);

# endif /* DEBUG_KMEM_TRACKING */
#endif /* DEBUG_KMEM */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs.  I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors.  Recent versions of the Linux
 *    kernel have removed support for destructors.  This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutexes, condition variables, etc.  We also
 *    require a minimal level of cleanup for these data types which,
 *    unlike many Linux data types, do need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab.  Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab which is backed
 *    by kmalloc'ed memory performs very badly when confronted with
 *    large numbers of large allocations.  Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features.  It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches.  This will seriously
 * constrain the size of the slab caches and their performance.
 *
 * XXX: Improve the partial slab list by carefully maintaining a
 *      strict ordering of fullest to emptiest slabs based on
 *      the slab reference count.  This guarantees that when freeing
 *      slabs back to the system we need only linearly traverse the
 *      last N slabs in the list to discover all the freeable slabs.
 *
 * XXX: NUMA awareness for optionally allocating memory close to a
 *      particular core.  This can be advantageous if you know the slab
 *      object will be short lived and primarily accessed from one core.
 *
 * XXX: Slab coloring may also yield performance improvements and would
 *      be desirable to implement.
 */

struct list_head spl_kmem_cache_list;   /* List of caches */
struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */

static int spl_cache_flush(spl_kmem_cache_t *skc,
    spl_kmem_magazine_t *skm, int flush);

SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker);
SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker,
    spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
	void *ptr;

	ASSERT(ISP2(size));

	if (skc->skc_flags & KMC_KMEM) {
		ptr = (void *)__get_free_pages(flags, get_order(size));
	} else {
		/*
		 * As part of vmalloc() an __pte_alloc_kernel() allocation
		 * may occur.  This internal allocation does not honor the
		 * gfp flags passed to vmalloc().  This means even when
		 * vmalloc(GFP_NOFS) is called it is possible synchronous
		 * reclaim will occur.  This reclaim can trigger file IO
		 * which can result in a deadlock.  This issue can be avoided
		 * by explicitly setting PF_MEMALLOC on the process to
		 * subvert synchronous reclaim.  The following bug has
		 * been filed at kernel.org to track the issue.
		 *
		 * https://bugzilla.kernel.org/show_bug.cgi?id=30702
		 *
		 * NOTE: Only set PF_MEMALLOC if it's not already set, and
		 * then only clear it when we were the one who set it.
		 */
		if (!(flags & __GFP_FS) && !(current->flags & PF_MEMALLOC)) {
			current->flags |= PF_MEMALLOC;
			ptr = __vmalloc(size, flags|__GFP_HIGHMEM, PAGE_KERNEL);
			current->flags &= ~PF_MEMALLOC;
		} else {
			ptr = __vmalloc(size, flags|__GFP_HIGHMEM, PAGE_KERNEL);
		}
	}

	/* Resulting allocated memory will be page aligned */
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

	return ptr;
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
	ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));
	ASSERT(ISP2(size));

	if (skc->skc_flags & KMC_KMEM)
		free_pages((unsigned long)ptr, get_order(size));
	else
		vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
	return P2ROUNDUP_TYPED(sizeof(spl_kmem_slab_t),
	    skc->skc_obj_align, uint32_t);
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
	uint32_t align = skc->skc_obj_align;

	return P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
	    P2ROUNDUP_TYPED(sizeof(spl_kmem_obj_t), align, uint32_t);
}

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
	return obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
	    skc->skc_obj_align, uint32_t);
}

/*
 * Required space for each offslab object taking into account alignment
 * restrictions and the power-of-two requirement of kv_alloc().
 */
static inline uint32_t
spl_offslab_size(spl_kmem_cache_t *skc)
{
	return 1UL << (highbit(spl_obj_size(skc)) + 1);
}

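/*
 * Worked example (illustrative; assumes an 8 byte skc_obj_align and a
 * hypothetical 40 byte spl_kmem_obj_t): for skc_obj_size = 100,
 *
 *	spl_obj_size()     = P2ROUNDUP(100, 8) + P2ROUNDUP(40, 8)
 *	                   = 104 + 40 = 144 bytes
 *	spl_offslab_size() = 1 << (highbit(144) + 1) = 512 bytes
 *
 * so each offslab object is padded up to a power of two at least twice
 * its size, satisfying the ISP2() assertion in kv_alloc().
 */
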
/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator.  It is far better to do a few large
 * allocations and then subdivide them ourselves.  Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages.  We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node().  This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches.  Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 *   KMC_ONSLAB                       KMC_OFFSLAB
 *
 *   +------------------------+       +-----------------+
 *   | spl_kmem_slab_t --+-+  |       | spl_kmem_slab_t |---+-+
 *   | skc_obj_size    <-+ |  |       +-----------------+   | |
 *   | spl_kmem_obj_t      |  |                             | |
 *   | skc_obj_size    <---+  |       +-----------------+   | |
 *   | spl_kmem_obj_t      |  |       | skc_obj_size    | <-+ |
 *   | ...                 v  |       | spl_kmem_obj_t  |     |
 *   +------------------------+       +-----------------+     v
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
	spl_kmem_slab_t *sks;
	spl_kmem_obj_t *sko, *n;
	void *base, *obj;
	uint32_t obj_size, offslab_size = 0;
	int i, rc = 0;

	base = kv_alloc(skc, skc->skc_slab_size, flags);
	if (base == NULL)
		SRETURN(NULL);

	sks = (spl_kmem_slab_t *)base;
	sks->sks_magic = SKS_MAGIC;
	sks->sks_objs = skc->skc_slab_objs;
	sks->sks_age = jiffies;
	sks->sks_cache = skc;
	INIT_LIST_HEAD(&sks->sks_list);
	INIT_LIST_HEAD(&sks->sks_free_list);
	sks->sks_ref = 0;
	obj_size = spl_obj_size(skc);

	if (skc->skc_flags & KMC_OFFSLAB)
		offslab_size = spl_offslab_size(skc);

	for (i = 0; i < sks->sks_objs; i++) {
		if (skc->skc_flags & KMC_OFFSLAB) {
			obj = kv_alloc(skc, offslab_size, flags);
			if (!obj)
				SGOTO(out, rc = -ENOMEM);
		} else {
			obj = base + spl_sks_size(skc) + (i * obj_size);
		}

		ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
		sko = spl_sko_from_obj(skc, obj);
		sko->sko_addr = obj;
		sko->sko_magic = SKO_MAGIC;
		sko->sko_slab = sks;
		INIT_LIST_HEAD(&sko->sko_list);
		list_add_tail(&sko->sko_list, &sks->sks_free_list);
	}

	list_for_each_entry(sko, &sks->sks_free_list, sko_list)
		if (skc->skc_ctor)
			skc->skc_ctor(sko->sko_addr, skc->skc_private, flags);
out:
	if (rc) {
		if (skc->skc_flags & KMC_OFFSLAB)
			list_for_each_entry_safe(sko, n, &sks->sks_free_list,
			    sko_list)
				kv_free(skc, sko->sko_addr, offslab_size);

		kv_free(skc, base, skc->skc_slab_size);
		sks = NULL;
	}

	SRETURN(sks);
}

/*
 * Remove a slab from the complete or partial list.  This must be called
 * with the 'skc->skc_lock' held, but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
	spl_kmem_cache_t *skc;
	SENTRY;

	ASSERT(sks->sks_magic == SKS_MAGIC);
	ASSERT(sks->sks_ref == 0);

	skc = sks->sks_cache;
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(spin_is_locked(&skc->skc_lock));

	/*
	 * Update slab/objects counters in the cache, then remove the
	 * slab from the skc->skc_partial_list.  Finally add the slab
	 * and all its objects into the private work lists where the
	 * destructors will be called and the memory freed to the system.
	 */
	skc->skc_obj_total -= sks->sks_objs;
	skc->skc_slab_total--;
	list_del(&sks->sks_list);
	list_add(&sks->sks_list, sks_list);
	list_splice_init(&sks->sks_free_list, sko_list);

	SEXIT;
}

/*
 * Traverses all the partial slabs attached to a cache and frees those
 * which are currently empty and have not been touched for
 * skc_delay seconds, to avoid thrashing.  The count argument is
 * passed to optionally cap the number of slabs reclaimed; a count
 * of zero means try and reclaim everything.  When flag is set we
 * always free an available slab regardless of age.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag)
{
	spl_kmem_slab_t *sks, *m;
	spl_kmem_obj_t *sko, *n;
	LIST_HEAD(sks_list);
	LIST_HEAD(sko_list);
	uint32_t size = 0;
	int i = 0;
	SENTRY;

	/*
	 * Move empty slabs and objects which have not been touched in
	 * skc_delay seconds on to private lists to be freed outside
	 * the spin lock.  This delay time is important to avoid thrashing,
	 * however when flag is set the delay will not be used.
	 */
	spin_lock(&skc->skc_lock);
	list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list,
	    sks_list) {
		/*
		 * All empty slabs are at the end of skc->skc_partial_list,
		 * therefore once a non-empty slab is found we can stop
		 * scanning.  Additionally, stop when reaching the target
		 * reclaim 'count' if a non-zero threshold is given.
		 */
		if ((sks->sks_ref > 0) || (count && i > count))
			break;

		if (time_after(jiffies, sks->sks_age + skc->skc_delay * HZ) ||
		    flag) {
			spl_slab_free(sks, &sks_list, &sko_list);
			i++;
		}
	}
	spin_unlock(&skc->skc_lock);

	/*
	 * The following two loops ensure all the object destructors are
	 * run, any offslab objects are freed, and the slabs themselves
	 * are freed.  This is all done outside the skc->skc_lock since
	 * this allows the destructor to sleep, and allows us to perform
	 * a conditional reschedule when freeing a large number of
	 * objects and slabs back to the system.
	 */
	if (skc->skc_flags & KMC_OFFSLAB)
		size = spl_offslab_size(skc);

	list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
		ASSERT(sko->sko_magic == SKO_MAGIC);

		if (skc->skc_dtor)
			skc->skc_dtor(sko->sko_addr, skc->skc_private);

		if (skc->skc_flags & KMC_OFFSLAB)
			kv_free(skc, sko->sko_addr, size);

		cond_resched();
	}

	list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
		ASSERT(sks->sks_magic == SKS_MAGIC);
		kv_free(skc, sks, skc->skc_slab_size);
		cond_resched();
	}

	SEXIT;
}

/*
 * Called regularly on all caches to age objects out of the magazines
 * which have not been accessed in skc->skc_delay seconds.  This prevents
 * idle magazines from holding memory which might be better used by
 * other caches or parts of the system.  The delay is present to
 * prevent thrashing the magazine.
 */
static void
spl_magazine_age(void *data)
{
	spl_kmem_magazine_t *skm =
	    spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work);
	spl_kmem_cache_t *skc = skm->skm_cache;
	int i = smp_processor_id();

	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skc->skc_magic == SKC_MAGIC);
	ASSERT(skc->skc_mag[i] == skm);

	if (skm->skm_avail > 0 &&
	    time_after(jiffies, skm->skm_age + skc->skc_delay * HZ))
		(void)spl_cache_flush(skc, skm, skm->skm_refill);

	if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
		schedule_delayed_work_on(i, &skm->skm_work,
		    skc->skc_delay / 3 * HZ);
}

/*
 * Called regularly to keep a downward pressure on the size of idle
 * magazines and to release free slabs from the cache.  This function
 * never calls the registered reclaim function, that only occurs
 * under memory pressure or with a direct call to spl_kmem_reap().
 */
static void
spl_cache_age(void *data)
{
	spl_kmem_cache_t *skc =
	    spl_get_work_data(data, spl_kmem_cache_t, skc_work.work);

	ASSERT(skc->skc_magic == SKC_MAGIC);
	spl_slab_reclaim(skc, skc->skc_reap, 0);

	if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags))
		schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ);
}

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB.  However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page.  Also for
 * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN;
 * lower than this and we will fail.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
	uint32_t sks_size, obj_size, max_size;

	if (skc->skc_flags & KMC_OFFSLAB) {
		*objs = SPL_KMEM_CACHE_OBJ_PER_SLAB;
		*size = sizeof(spl_kmem_slab_t);
	} else {
		sks_size = spl_sks_size(skc);
		obj_size = spl_obj_size(skc);

		if (skc->skc_flags & KMC_KMEM)
			max_size = ((uint32_t)1 << (MAX_ORDER-3)) * PAGE_SIZE;
		else
			max_size = (32 * 1024 * 1024);

		/* Power of two sized slab */
		for (*size = PAGE_SIZE; *size <= max_size; *size *= 2) {
			*objs = (*size - sks_size) / obj_size;
			if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB)
				SRETURN(0);
		}

		/*
		 * Unable to satisfy target objects per slab, fall back to
		 * allocating a maximally sized slab and assuming it can
		 * contain the minimum objects count use it.  If not fail.
		 */
		*size = max_size;
		*objs = (*size - sks_size) / obj_size;
		if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN)
			SRETURN(0);
	}

	SRETURN(-ENOSPC);
}

/*
 * Make a guess at a reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine.  Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
	uint32_t obj_size = spl_obj_size(skc);
	int size;
	SENTRY;

	/* Per-magazine sizes below assume a 4KiB page size */
	if (obj_size > (PAGE_SIZE * 256))
		size = 4;    /* Minimum 4MiB per-magazine */
	else if (obj_size > (PAGE_SIZE * 32))
		size = 16;   /* Minimum 2MiB per-magazine */
	else if (obj_size > (PAGE_SIZE))
		size = 64;   /* Minimum 256KiB per-magazine */
	else if (obj_size > (PAGE_SIZE / 4))
		size = 128;  /* Minimum 128KiB per-magazine */
	else
		size = 256;

	SRETURN(size);
}

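/*
 * Illustrative arithmetic (not part of the original source): with 4KiB
 * pages, an object just over one page lands in the 64 entry bucket,
 * bounding the idle per-cpu cache at roughly 64 * 4KiB = 256KiB, which
 * is where the "Minimum 256KiB per-magazine" figure above comes from.
 * The other buckets follow the same pattern, e.g. 16 * (32 * 4KiB) = 2MiB.
 */
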
/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int node)
{
	spl_kmem_magazine_t *skm;
	int size = sizeof(spl_kmem_magazine_t) +
	    sizeof(void *) * skc->skc_mag_size;
	SENTRY;

	skm = kmem_alloc_node(size, KM_SLEEP, node);
	if (skm) {
		skm->skm_magic = SKM_MAGIC;
		skm->skm_avail = 0;
		skm->skm_size = skc->skc_mag_size;
		skm->skm_refill = skc->skc_mag_refill;
		skm->skm_cache = skc;
		spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm);
		skm->skm_age = jiffies;
	}

	SRETURN(skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
	int size = sizeof(spl_kmem_magazine_t) +
	    sizeof(void *) * skm->skm_size;

	SENTRY;
	ASSERT(skm->skm_magic == SKM_MAGIC);
	ASSERT(skm->skm_avail == 0);

	kmem_free(skm, size);
	SEXIT;
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
	int i;
	SENTRY;

	skc->skc_mag_size = spl_magazine_size(skc);
	skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

	for_each_online_cpu(i) {
		skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i));
		if (!skc->skc_mag[i]) {
			for (i--; i >= 0; i--)
				spl_magazine_free(skc->skc_mag[i]);

			SRETURN(-ENOMEM);
		}
	}

	/* Only after everything is allocated schedule magazine work */
	for_each_online_cpu(i)
		schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work,
		    skc->skc_delay / 3 * HZ);

	SRETURN(0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
	spl_kmem_magazine_t *skm;
	int i;
	SENTRY;

	for_each_online_cpu(i) {
		skm = skc->skc_mag[i];
		(void)spl_cache_flush(skc, skm, skm->skm_avail);
		spl_magazine_free(skm);
	}

	SEXIT;
}

ea3e6ca9 BB |
1343 | /* |
1344 | * Create a object cache based on the following arguments: | |
1345 | * name cache name | |
1346 | * size cache object size | |
1347 | * align cache object alignment | |
1348 | * ctor cache object constructor | |
1349 | * dtor cache object destructor | |
1350 | * reclaim cache object reclaim | |
1351 | * priv cache private data for ctor/dtor/reclaim | |
1352 | * vmp unused must be NULL | |
1353 | * flags | |
1354 | * KMC_NOTOUCH Disable cache object aging (unsupported) | |
1355 | * KMC_NODEBUG Disable debugging (unsupported) | |
1356 | * KMC_NOMAGAZINE Disable magazine (unsupported) | |
1357 | * KMC_NOHASH Disable hashing (unsupported) | |
1358 | * KMC_QCACHE Disable qcache (unsupported) | |
1359 | * KMC_KMEM Force kmem backed cache | |
1360 | * KMC_VMEM Force vmem backed cache | |
1361 | * KMC_OFFSLAB Locate objects off the slab | |
1362 | */ | |
2fb9b26a | 1363 | spl_kmem_cache_t * |
1364 | spl_kmem_cache_create(char *name, size_t size, size_t align, | |
1365 | spl_kmem_ctor_t ctor, | |
1366 | spl_kmem_dtor_t dtor, | |
1367 | spl_kmem_reclaim_t reclaim, | |
1368 | void *priv, void *vmp, int flags) | |
1369 | { | |
1370 | spl_kmem_cache_t *skc; | |
a1502d76 | 1371 | int rc, kmem_flags = KM_SLEEP; |
b17edc10 | 1372 | SENTRY; |
937879f1 | 1373 | |
a1502d76 | 1374 | ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags); |
1375 | ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags); | |
1376 | ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags); | |
48e0606a | 1377 | ASSERT(vmp == NULL); |
a1502d76 | 1378 | |
2fb9b26a | 1379 | /* We may be called when there is a non-zero preempt_count or |
1380 | * interrupts are disabled is which case we must not sleep. | |
1381 | */ | |
e9d7a2be | 1382 | if (current_thread_info()->preempt_count || irqs_disabled()) |
2fb9b26a | 1383 | kmem_flags = KM_NOSLEEP; |
0a6fd143 | 1384 | |
ecc39810 | 1385 | /* Allocate memory for a new cache an initialize it. Unfortunately, |
5198ea0e BB |
1386 | * this usually ends up being a large allocation of ~32k because |
1387 | * we need to allocate enough memory for the worst case number of | |
1388 | * cpus in the magazine, skc_mag[NR_CPUS]. Because of this we | |
23d91792 | 1389 | * explicitly pass KM_NODEBUG to suppress the kmem warning */ |
5198ea0e | 1390 | skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), |
23d91792 | 1391 | kmem_flags | KM_NODEBUG); |
e9d7a2be | 1392 | if (skc == NULL) |
b17edc10 | 1393 | SRETURN(NULL); |
d61e12af | 1394 | |
2fb9b26a | 1395 | skc->skc_magic = SKC_MAGIC; |
2fb9b26a | 1396 | skc->skc_name_size = strlen(name) + 1; |
1397 | skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags); | |
1398 | if (skc->skc_name == NULL) { | |
1399 | kmem_free(skc, sizeof(*skc)); | |
b17edc10 | 1400 | SRETURN(NULL); |
2fb9b26a | 1401 | } |
1402 | strncpy(skc->skc_name, name, skc->skc_name_size); | |
1403 | ||
e9d7a2be | 1404 | skc->skc_ctor = ctor; |
1405 | skc->skc_dtor = dtor; | |
1406 | skc->skc_reclaim = reclaim; | |
2fb9b26a | 1407 | skc->skc_private = priv; |
1408 | skc->skc_vmp = vmp; | |
1409 | skc->skc_flags = flags; | |
1410 | skc->skc_obj_size = size; | |
48e0606a | 1411 | skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN; |
2fb9b26a | 1412 | skc->skc_delay = SPL_KMEM_CACHE_DELAY; |
37db7d8c | 1413 | skc->skc_reap = SPL_KMEM_CACHE_REAP; |
ea3e6ca9 | 1414 | atomic_set(&skc->skc_ref, 0); |
2fb9b26a | 1415 | |
2fb9b26a | 1416 | INIT_LIST_HEAD(&skc->skc_list); |
1417 | INIT_LIST_HEAD(&skc->skc_complete_list); | |
1418 | INIT_LIST_HEAD(&skc->skc_partial_list); | |
d46630e0 | 1419 | spin_lock_init(&skc->skc_lock); |
e9d7a2be | 1420 | skc->skc_slab_fail = 0; |
1421 | skc->skc_slab_create = 0; | |
1422 | skc->skc_slab_destroy = 0; | |
2fb9b26a | 1423 | skc->skc_slab_total = 0; |
1424 | skc->skc_slab_alloc = 0; | |
1425 | skc->skc_slab_max = 0; | |
1426 | skc->skc_obj_total = 0; | |
1427 | skc->skc_obj_alloc = 0; | |
1428 | skc->skc_obj_max = 0; | |
a1502d76 | 1429 | |
48e0606a | 1430 | if (align) { |
8b45dda2 BB |
1431 | VERIFY(ISP2(align)); |
1432 | VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); /* Min alignment */ | |
1433 | VERIFY3U(align, <=, PAGE_SIZE); /* Max alignment */ | |
48e0606a BB |
1434 | skc->skc_obj_align = align; |
1435 | } | |
1436 | ||
a1502d76 | 1437 | /* If no type was passed, select a cache type based on object size */
1438 | if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) { | |
8b45dda2 | 1439 | if (spl_obj_size(skc) < (PAGE_SIZE / 8)) |
a1502d76 | 1440 | skc->skc_flags |= KMC_KMEM; |
8b45dda2 | 1441 | else |
a1502d76 | 1442 | skc->skc_flags |= KMC_VMEM; |
a1502d76 | 1443 | } |
1444 | ||
48e0606a BB |
1445 | rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size); |
1446 | if (rc) | |
b17edc10 | 1447 | SGOTO(out, rc); |
4afaaefa | 1448 | |
1449 | rc = spl_magazine_create(skc); | |
48e0606a | 1450 | if (rc) |
b17edc10 | 1451 | SGOTO(out, rc); |
2fb9b26a | 1452 | |
ea3e6ca9 | 1453 | spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc); |
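	/* The cache aging work runs at skc_delay / 3 intervals; with the
	 * default 15 second SPL_KMEM_CACHE_DELAY it fires every 5 seconds. */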
37db7d8c | 1454 | schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ); |
ea3e6ca9 | 1455 | |
2fb9b26a | 1456 | down_write(&spl_kmem_cache_sem); |
e9d7a2be | 1457 | list_add_tail(&skc->skc_list, &spl_kmem_cache_list); |
2fb9b26a | 1458 | up_write(&spl_kmem_cache_sem); |
1459 | ||
b17edc10 | 1460 | SRETURN(skc); |
48e0606a BB |
1461 | out: |
1462 | kmem_free(skc->skc_name, skc->skc_name_size); | |
1463 | kmem_free(skc, sizeof(*skc)); | |
b17edc10 | 1464 | SRETURN(NULL); |
f1ca4da6 | 1465 | } |
2fb9b26a | 1466 | EXPORT_SYMBOL(spl_kmem_cache_create); |
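/*
 * Illustrative sketch, not part of the SPL itself: how a consumer might
 * create and destroy a cache for a fixed-size object.  The my_* names
 * below are hypothetical; only the spl_kmem_* calls and types are real,
 * and the snippet is compiled out with #if 0.
 */
#if 0
typedef struct my_obj {
	int	mo_id;
	char	mo_data[128];
} my_obj_t;

/* Object constructor invoked by the cache; initialize to a known state */
static int
my_obj_ctor(void *obj, void *priv, int kmflags)
{
	my_obj_t *mop = obj;

	mop->mo_id = -1;
	return (0);
}

/* Object destructor invoked by the cache before the memory is freed */
static void
my_obj_dtor(void *obj, void *priv)
{
}

static void
my_cache_example(void)
{
	spl_kmem_cache_t *cache;

	/* Default alignment; no reclaim callback, private data, or flags */
	cache = spl_kmem_cache_create("my_obj_cache", sizeof(my_obj_t), 0,
	    my_obj_ctor, my_obj_dtor, NULL, NULL, NULL, 0);
	if (cache == NULL)
		return;

	/* ... allocate and free objects, see spl_kmem_cache_alloc() ... */

	spl_kmem_cache_destroy(cache);
}
#endif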
f1ca4da6 | 1467 | |
2b354302 BB |
1468 | /* |
1469 | * Register a move callback for cache defragmentation. | |
1470 | * XXX: Unimplemented but harmless to stub out for now. | |
1471 | */ | |
1472 | void | |
1473 | spl_kmem_cache_set_move(kmem_cache_t *skc, | |
1474 | kmem_cbrc_t (move)(void *, void *, size_t, void *)) | |
1475 | { | |
1476 | ASSERT(move != NULL); | |
1477 | } | |
1478 | EXPORT_SYMBOL(spl_kmem_cache_set_move); | |
1479 | ||
ea3e6ca9 | 1480 | /* |
ecc39810 | 1481 | * Destroy a cache and all objects associated with the cache. |
ea3e6ca9 | 1482 | */ |
2fb9b26a | 1483 | void |
1484 | spl_kmem_cache_destroy(spl_kmem_cache_t *skc) | |
f1ca4da6 | 1485 | { |
ea3e6ca9 | 1486 | DECLARE_WAIT_QUEUE_HEAD(wq); |
9b1b8e4c | 1487 | int i; |
b17edc10 | 1488 | SENTRY; |
f1ca4da6 | 1489 | |
e9d7a2be | 1490 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1491 | ||
1492 | down_write(&spl_kmem_cache_sem); | |
1493 | list_del_init(&skc->skc_list); | |
1494 | up_write(&spl_kmem_cache_sem); | |
2fb9b26a | 1495 | |
ea3e6ca9 | 1496 | /* Cancel and wait for any pending delayed work */
64c075c3 GB |
1497 | VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
1498 | cancel_delayed_work_sync(&skc->skc_work); | |
9b1b8e4c | 1499 | for_each_online_cpu(i) |
64c075c3 | 1500 | cancel_delayed_work_sync(&skc->skc_mag[i]->skm_work); |
9b1b8e4c | 1501 | |
ea3e6ca9 BB |
1502 | flush_scheduled_work(); |
1503 | ||
1504 | /* Wait until all current callers complete; this is mainly | |
1505 | * to catch the case where a low memory situation triggers a | |
1506 | * cache reaping action which races with this destroy. */ | |
1507 | wait_event(wq, atomic_read(&skc->skc_ref) == 0); | |
1508 | ||
4afaaefa | 1509 | spl_magazine_destroy(skc); |
37db7d8c | 1510 | spl_slab_reclaim(skc, 0, 1); |
d46630e0 | 1511 | spin_lock(&skc->skc_lock); |
d6a26c6a | 1512 | |
2fb9b26a | 1513 | /* Validate there are no objects in use and free all the |
4afaaefa | 1514 | * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */ |
ea3e6ca9 BB |
1515 | ASSERT3U(skc->skc_slab_alloc, ==, 0); |
1516 | ASSERT3U(skc->skc_obj_alloc, ==, 0); | |
1517 | ASSERT3U(skc->skc_slab_total, ==, 0); | |
1518 | ASSERT3U(skc->skc_obj_total, ==, 0); | |
2fb9b26a | 1519 | ASSERT(list_empty(&skc->skc_complete_list)); |
a1502d76 | 1520 | |
2fb9b26a | 1521 | kmem_free(skc->skc_name, skc->skc_name_size); |
d46630e0 | 1522 | spin_unlock(&skc->skc_lock); |
ff449ac4 | 1523 | |
4afaaefa | 1524 | kmem_free(skc, sizeof(*skc)); |
2fb9b26a | 1525 | |
b17edc10 | 1526 | SEXIT; |
f1ca4da6 | 1527 | } |
2fb9b26a | 1528 | EXPORT_SYMBOL(spl_kmem_cache_destroy); |
f1ca4da6 | 1529 | |
ea3e6ca9 BB |
1530 | /* |
1531 | * Allocate an object from a slab attached to the cache. This is used to | |
1532 | * repopulate the per-cpu magazine caches in batches when they run low. | |
1533 | */ | |
4afaaefa | 1534 | static void * |
1535 | spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) | |
f1ca4da6 | 1536 | { |
2fb9b26a | 1537 | spl_kmem_obj_t *sko; |
f1ca4da6 | 1538 | |
e9d7a2be | 1539 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1540 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
4afaaefa | 1541 | ASSERT(spin_is_locked(&skc->skc_lock)); |
2fb9b26a | 1542 | |
a1502d76 | 1543 | sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list); |
4afaaefa | 1544 | ASSERT(sko->sko_magic == SKO_MAGIC); |
1545 | ASSERT(sko->sko_addr != NULL); | |
2fb9b26a | 1546 | |
a1502d76 | 1547 | /* Remove from sks_free_list */ |
4afaaefa | 1548 | list_del_init(&sko->sko_list); |
2fb9b26a | 1549 | |
4afaaefa | 1550 | sks->sks_age = jiffies; |
1551 | sks->sks_ref++; | |
1552 | skc->skc_obj_alloc++; | |
2fb9b26a | 1553 | |
4afaaefa | 1554 | /* Track max obj usage statistics */ |
1555 | if (skc->skc_obj_alloc > skc->skc_obj_max) | |
1556 | skc->skc_obj_max = skc->skc_obj_alloc; | |
2fb9b26a | 1557 | |
4afaaefa | 1558 | /* Track max slab usage statistics */ |
1559 | if (sks->sks_ref == 1) { | |
1560 | skc->skc_slab_alloc++; | |
f1ca4da6 | 1561 | |
4afaaefa | 1562 | if (skc->skc_slab_alloc > skc->skc_slab_max) |
1563 | skc->skc_slab_max = skc->skc_slab_alloc; | |
2fb9b26a | 1564 | } |
1565 | ||
4afaaefa | 1566 | return sko->sko_addr; |
1567 | } | |
c30df9c8 | 1568 | |
ea3e6ca9 | 1569 | /* |
ecc39810 BB |
1570 | * No objects are available on any slabs, so create a new slab. Since this | |
1571 | * is an expensive operation we do it without holding the spin lock and | |
1572 | * only briefly acquire it when we link in the fully allocated and | |
ea3e6ca9 | 1573 | * constructed slab. |
4afaaefa | 1574 | */ |
1575 | static spl_kmem_slab_t * | |
1576 | spl_cache_grow(spl_kmem_cache_t *skc, int flags) | |
1577 | { | |
e9d7a2be | 1578 | spl_kmem_slab_t *sks; |
b17edc10 | 1579 | SENTRY; |
f1ca4da6 | 1580 | |
e9d7a2be | 1581 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1582 | local_irq_enable(); |
1583 | might_sleep(); | |
e9d7a2be | 1584 | |
ea3e6ca9 BB |
1585 | /* |
1586 | * Before allocating a new slab check if the slab is being reaped. | |
1587 | * If it is, there is a good chance we can wait until it finishes | |
1588 | * and then use one of the newly freed but not aged-out slabs. | |
1589 | */ | |
1590 | if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) { | |
1591 | schedule(); | |
b17edc10 | 1592 | SGOTO(out, sks = NULL);
4afaaefa | 1593 | } |
2fb9b26a | 1594 | |
ea3e6ca9 | 1595 | /* Allocate a new slab for the cache */ |
23d91792 | 1596 | sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | KM_NODEBUG); |
ea3e6ca9 | 1597 | if (sks == NULL) |
b17edc10 | 1598 | SGOTO(out, sks = NULL); |
4afaaefa | 1599 | |
ea3e6ca9 | 1600 | /* Link the new empty slab into the end of skc_partial_list. */
d46630e0 | 1601 | spin_lock(&skc->skc_lock); |
2fb9b26a | 1602 | skc->skc_slab_total++; |
1603 | skc->skc_obj_total += sks->sks_objs; | |
1604 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
d46630e0 | 1605 | spin_unlock(&skc->skc_lock); |
ea3e6ca9 BB |
1606 | out: |
1607 | local_irq_disable(); | |
4afaaefa | 1608 | |
b17edc10 | 1609 | SRETURN(sks); |
f1ca4da6 | 1610 | } |
1611 | ||
ea3e6ca9 BB |
1612 | /* |
1613 | * Refill a per-cpu magazine with objects from the slabs for this | |
1614 | * cache. Ideally the magazine can be repopulated using existing | |
1615 | * objects which have been released; however, if we are unable to | |
1616 | * locate enough free objects new slabs of objects will be created. | |
1617 | */ | |
4afaaefa | 1618 | static int |
1619 | spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) | |
f1ca4da6 | 1620 | { |
e9d7a2be | 1621 | spl_kmem_slab_t *sks; |
1622 | int rc = 0, refill; | |
b17edc10 | 1623 | SENTRY; |
f1ca4da6 | 1624 | |
e9d7a2be | 1625 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1626 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
1627 | ||
e9d7a2be | 1628 | refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail); |
d46630e0 | 1629 | spin_lock(&skc->skc_lock); |
ff449ac4 | 1630 | |
4afaaefa | 1631 | while (refill > 0) { |
ea3e6ca9 | 1632 | /* No slabs available, we may need to grow the cache */
4afaaefa | 1633 | if (list_empty(&skc->skc_partial_list)) { |
1634 | spin_unlock(&skc->skc_lock); | |
ff449ac4 | 1635 | |
4afaaefa | 1636 | sks = spl_cache_grow(skc, flags); |
1637 | if (!sks) | |
b17edc10 | 1638 | SGOTO(out, rc); |
4afaaefa | 1639 | |
1640 | /* Rescheduled to a different CPU, skm is not local */ | |
1641 | if (skm != skc->skc_mag[smp_processor_id()]) | |
b17edc10 | 1642 | SGOTO(out, rc); |
e9d7a2be | 1643 | |
1644 | /* Potentially rescheduled to the same CPU but | |
ecc39810 | 1645 | * allocations may have occurred from this CPU while |
e9d7a2be | 1646 | * we were sleeping, so recalculate the max refill. */
1647 | refill = MIN(refill, skm->skm_size - skm->skm_avail); | |
4afaaefa | 1648 | |
1649 | spin_lock(&skc->skc_lock); | |
1650 | continue; | |
1651 | } | |
d46630e0 | 1652 | |
4afaaefa | 1653 | /* Grab the next available slab */ |
1654 | sks = list_entry((&skc->skc_partial_list)->next, | |
1655 | spl_kmem_slab_t, sks_list); | |
1656 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
1657 | ASSERT(sks->sks_ref < sks->sks_objs); | |
1658 | ASSERT(!list_empty(&sks->sks_free_list)); | |
d46630e0 | 1659 | |
4afaaefa | 1660 | /* Consume as many objects as needed to refill the requested |
e9d7a2be | 1661 | * cache. We must also be careful not to overfill it. */ |
1662 | while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) { | |
1663 | ASSERT(skm->skm_avail < skm->skm_size); | |
1664 | ASSERT(rc < skm->skm_size); | |
4afaaefa | 1665 | skm->skm_objs[skm->skm_avail++] = spl_cache_obj(skc, sks);
e9d7a2be | 1666 | } |
f1ca4da6 | 1667 | |
4afaaefa | 1668 | /* Move slab to skc_complete_list when full */ |
1669 | if (sks->sks_ref == sks->sks_objs) { | |
1670 | list_del(&sks->sks_list); | |
1671 | list_add(&sks->sks_list, &skc->skc_complete_list); | |
2fb9b26a | 1672 | } |
1673 | } | |
57d86234 | 1674 | |
4afaaefa | 1675 | spin_unlock(&skc->skc_lock); |
1676 | out: | |
1677 | /* Returns the number of entries added to the cache */ | |
b17edc10 | 1678 | SRETURN(rc); |
4afaaefa | 1679 | } |
1680 | ||
ea3e6ca9 BB |
1681 | /* |
1682 | * Release an object back to the slab from which it came. | |
1683 | */ | |
4afaaefa | 1684 | static void |
1685 | spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) | |
1686 | { | |
e9d7a2be | 1687 | spl_kmem_slab_t *sks = NULL; |
4afaaefa | 1688 | spl_kmem_obj_t *sko = NULL; |
b17edc10 | 1689 | SENTRY; |
4afaaefa | 1690 | |
e9d7a2be | 1691 | ASSERT(skc->skc_magic == SKC_MAGIC); |
4afaaefa | 1692 | ASSERT(spin_is_locked(&skc->skc_lock)); |
1693 | ||
8b45dda2 | 1694 | sko = spl_sko_from_obj(skc, obj); |
a1502d76 | 1695 | ASSERT(sko->sko_magic == SKO_MAGIC); |
4afaaefa | 1696 | sks = sko->sko_slab; |
a1502d76 | 1697 | ASSERT(sks->sks_magic == SKS_MAGIC); |
2fb9b26a | 1698 | ASSERT(sks->sks_cache == skc); |
2fb9b26a | 1699 | list_add(&sko->sko_list, &sks->sks_free_list); |
d6a26c6a | 1700 | |
2fb9b26a | 1701 | sks->sks_age = jiffies; |
4afaaefa | 1702 | sks->sks_ref--; |
2fb9b26a | 1703 | skc->skc_obj_alloc--; |
f1ca4da6 | 1704 | |
2fb9b26a | 1705 | /* Move slab to skc_partial_list when no longer full. Slabs |
4afaaefa | 1706 | * are added to the head to keep the partial list in quasi-full
1707 | * sorted order. Fuller at the head, emptier at the tail. */ | |
1708 | if (sks->sks_ref == (sks->sks_objs - 1)) { | |
2fb9b26a | 1709 | list_del(&sks->sks_list); |
1710 | list_add(&sks->sks_list, &skc->skc_partial_list); | |
1711 | } | |
f1ca4da6 | 1712 | |
ecc39810 | 1713 | /* Move empty slabs to the end of the partial list so |
4afaaefa | 1714 | * they can be easily found and freed during reclamation. */ |
1715 | if (sks->sks_ref == 0) { | |
2fb9b26a | 1716 | list_del(&sks->sks_list); |
1717 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
1718 | skc->skc_slab_alloc--; | |
1719 | } | |
1720 | ||
b17edc10 | 1721 | SEXIT; |
4afaaefa | 1722 | } |
1723 | ||
ea3e6ca9 BB |
1724 | /* |
1725 | * Release a batch of objects from a per-cpu magazine back to their | |
1726 | * respective slabs. This occurs when we exceed the magazine size, | |
1727 | * when we are under memory pressure, when the cache is idle, or during | |
1728 | * cache cleanup. The flush argument contains the number of entries | |
1729 | * to remove from the magazine. | |
1730 | */ | |
4afaaefa | 1731 | static int |
1732 | spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) | |
1733 | { | |
1734 | int i, count = MIN(flush, skm->skm_avail); | |
b17edc10 | 1735 | SENTRY; |
4afaaefa | 1736 | |
e9d7a2be | 1737 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1738 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
4afaaefa | 1739 | |
ea3e6ca9 BB |
1740 | /* |
1741 | * XXX: Currently we simply return objects from the magazine to | |
1742 | * the slabs in fifo order. The ideal thing to do from a memory | |
1743 | * fragmentation standpoint is to cheaply determine the set of | |
1744 | * objects in the magazine which will result in the largest | |
1745 | * number of free slabs if released from the magazine. | |
1746 | */ | |
4afaaefa | 1747 | spin_lock(&skc->skc_lock); |
1748 | for (i = 0; i < count; i++) | |
1749 | spl_cache_shrink(skc, skm->skm_objs[i]); | |
1750 | ||
e9d7a2be | 1751 | skm->skm_avail -= count; |
1752 | memmove(skm->skm_objs, &(skm->skm_objs[count]), | |
4afaaefa | 1753 | sizeof(void *) * skm->skm_avail); |
1754 | ||
d46630e0 | 1755 | spin_unlock(&skc->skc_lock); |
4afaaefa | 1756 | |
b17edc10 | 1757 | SRETURN(count); |
4afaaefa | 1758 | } |
1759 | ||
ea3e6ca9 BB |
1760 | /* |
1761 | * Allocate an object from the per-cpu magazine, or, if the magazine | |
1762 | * is empty, allocate directly from a slab and repopulate the magazine. | |
1763 | */ | |
4afaaefa | 1764 | void * |
1765 | spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) | |
1766 | { | |
1767 | spl_kmem_magazine_t *skm; | |
1768 | unsigned long irq_flags; | |
1769 | void *obj = NULL; | |
b17edc10 | 1770 | SENTRY; |
4afaaefa | 1771 | |
e9d7a2be | 1772 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1773 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
1774 | ASSERT(flags & KM_SLEEP); | |
1775 | atomic_inc(&skc->skc_ref); | |
4afaaefa | 1776 | local_irq_save(irq_flags); |
1777 | ||
1778 | restart: | |
1779 | /* Safe to update per-cpu structure without lock, but | |
ecc39810 | 1780 | * in the restart case we must be careful to reacquire |
4afaaefa | 1781 | * the local magazine since it may have changed
1782 | * while we were growing the cache. */ | |
1783 | skm = skc->skc_mag[smp_processor_id()]; | |
e9d7a2be | 1784 | ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n", |
1785 | skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm, | |
1786 | skm->skm_size, skm->skm_refill, skm->skm_avail); | |
4afaaefa | 1787 | |
1788 | if (likely(skm->skm_avail)) { | |
1789 | /* Object available in CPU cache, use it */ | |
1790 | obj = skm->skm_objs[--skm->skm_avail]; | |
ea3e6ca9 | 1791 | skm->skm_age = jiffies; |
4afaaefa | 1792 | } else { |
1793 | /* Per-CPU cache empty, directly allocate from | |
1794 | * the slab and refill the per-CPU cache. */ | |
1795 | (void)spl_cache_refill(skc, skm, flags); | |
b17edc10 | 1796 | SGOTO(restart, obj = NULL); |
4afaaefa | 1797 | } |
1798 | ||
1799 | local_irq_restore(irq_flags); | |
fece7c99 | 1800 | ASSERT(obj); |
8b45dda2 | 1801 | ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); |
4afaaefa | 1802 | |
1803 | /* Pre-emptively migrate object to CPU L1 cache */ | |
1804 | prefetchw(obj); | |
ea3e6ca9 | 1805 | atomic_dec(&skc->skc_ref); |
4afaaefa | 1806 | |
b17edc10 | 1807 | SRETURN(obj); |
4afaaefa | 1808 | } |
1809 | EXPORT_SYMBOL(spl_kmem_cache_alloc); | |
1810 | ||
ea3e6ca9 BB |
1811 | /* |
1812 | * Free an object back to the local per-cpu magazine, there is no | |
1813 | * guarantee that this is the same magazine the object was originally | |
1814 | * allocated from. We may need to flush entire from the magazine | |
1815 | * back to the slabs to make space. | |
1816 | */ | |
4afaaefa | 1817 | void |
1818 | spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) | |
1819 | { | |
1820 | spl_kmem_magazine_t *skm; | |
1821 | unsigned long flags; | |
b17edc10 | 1822 | SENTRY; |
4afaaefa | 1823 | |
e9d7a2be | 1824 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1825 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
1826 | atomic_inc(&skc->skc_ref); | |
4afaaefa | 1827 | local_irq_save(flags); |
1828 | ||
1829 | /* Safe to update per-cpu structure without lock, but | |
1830 | * since no remote memory allocation tracking is performed | |
1831 | * it is entirely possible to allocate an object from one | |
1832 | * CPU cache and return it to another. */ | |
1833 | skm = skc->skc_mag[smp_processor_id()]; | |
e9d7a2be | 1834 | ASSERT(skm->skm_magic == SKM_MAGIC); |
4afaaefa | 1835 | |
1836 | /* Per-CPU cache full, flush it to make space */ | |
1837 | if (unlikely(skm->skm_avail >= skm->skm_size)) | |
1838 | (void)spl_cache_flush(skc, skm, skm->skm_refill); | |
1839 | ||
1840 | /* Available space in cache, use it */ | |
1841 | skm->skm_objs[skm->skm_avail++] = obj; | |
1842 | ||
1843 | local_irq_restore(flags); | |
ea3e6ca9 | 1844 | atomic_dec(&skc->skc_ref); |
4afaaefa | 1845 | |
b17edc10 | 1846 | SEXIT; |
f1ca4da6 | 1847 | } |
2fb9b26a | 1848 | EXPORT_SYMBOL(spl_kmem_cache_free); |
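/*
 * Illustrative sketch, not part of the SPL itself: the allocation round
 * trip against a cache created with spl_kmem_cache_create().  KM_SLEEP
 * is required because spl_kmem_cache_alloc() asserts sleeping
 * allocations; the snippet is compiled out with #if 0.
 */
#if 0
static void
my_alloc_free_example(spl_kmem_cache_t *cache)
{
	void *obj;

	/* Served from the per-cpu magazine when possible */
	obj = spl_kmem_cache_alloc(cache, KM_SLEEP);

	/* ... use the object ... */

	/* Returned to the local per-cpu magazine, which is not
	 * necessarily the one it was allocated from */
	spl_kmem_cache_free(cache, obj);
}
#endif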
5c2bb9b2 | 1849 | |
ea3e6ca9 | 1850 | /* |
ecc39810 BB |
1851 | * The generic shrinker function for all caches. Under Linux a shrinker |
1852 | * may not be tightly coupled with a slab cache. In fact Linux | |
1853 | * systematically tries calling all registered shrinker callbacks which | |
ea3e6ca9 BB |
1854 | * report that they contain unused objects. Because of this we only |
1855 | * register one shrinker function in the shim layer for all slab caches. | |
1856 | * We always attempt to shrink all caches when this generic shrinker | |
1857 | * is called. The shrinker should return the number of free objects | |
1858 | * in the cache when called with nr_to_scan == 0 but not attempt to | |
1859 | * free any objects. When nr_to_scan > 0 it is a request that nr_to_scan | |
1860 | * objects should be freed; because Solaris semantics are to free | |
1861 | * all available objects, we may free more objects than requested. | |
1862 | */ | |
a55bcaad BB |
1863 | static int |
1864 | __spl_kmem_cache_generic_shrinker(struct shrinker *shrink, | |
1865 | struct shrink_control *sc) | |
2fb9b26a | 1866 | { |
e9d7a2be | 1867 | spl_kmem_cache_t *skc; |
ea3e6ca9 | 1868 | int unused = 0; |
5c2bb9b2 | 1869 | |
e9d7a2be | 1870 | down_read(&spl_kmem_cache_sem); |
ea3e6ca9 | 1871 | list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) { |
a55bcaad | 1872 | if (sc->nr_to_scan) |
ea3e6ca9 BB |
1873 | spl_kmem_cache_reap_now(skc); |
1874 | ||
1875 | /* | |
1876 | * Presume everything alloc'ed is reclaimable; this ensures | |
1877 | * we are called again with nr_to_scan > 0 so we can try to | |
1878 | * reclaim. The exact number is not important either, so | |
1879 | * we forgo taking this already highly contended lock. | |
1880 | */ | |
1881 | unused += skc->skc_obj_alloc; | |
1882 | } | |
e9d7a2be | 1883 | up_read(&spl_kmem_cache_sem); |
2fb9b26a | 1884 | |
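	/* For example, 1000 presumed-unused objects with the default
	 * vfs_cache_pressure of 100 reports 1000 freeable entries. */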
ea3e6ca9 | 1885 | return (unused * sysctl_vfs_cache_pressure) / 100; |
5c2bb9b2 | 1886 | } |
5c2bb9b2 | 1887 | |
a55bcaad BB |
1888 | SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker); |
1889 | ||
ea3e6ca9 BB |
1890 | /* |
1891 | * Call the registered reclaim function for a cache. Depending on how | |
1892 | * many and which objects are released it may simply repopulate the | |
1893 | * local magazine which will then need to age-out. Objects which cannot | |
1894 | * fit in the magazine will be released back to their slabs, which will | |
1895 | * also need to age out before being released. This is all just best | |
1896 | * effort and we do not want to thrash creating and destroying slabs. | |
1897 | */ | |
57d86234 | 1898 | void |
2fb9b26a | 1899 | spl_kmem_cache_reap_now(spl_kmem_cache_t *skc) |
57d86234 | 1900 | { |
b17edc10 | 1901 | SENTRY; |
e9d7a2be | 1902 | |
1903 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
ea3e6ca9 | 1904 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
2fb9b26a | 1905 | |
ea3e6ca9 BB |
1906 | /* Prevent concurrent cache reaping when contended */ |
1907 | if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) { | |
b17edc10 | 1908 | SEXIT; |
ea3e6ca9 BB |
1909 | return; |
1910 | } | |
2fb9b26a | 1911 | |
ea3e6ca9 | 1912 | atomic_inc(&skc->skc_ref); |
4afaaefa | 1913 | |
ea3e6ca9 BB |
1914 | if (skc->skc_reclaim) |
1915 | skc->skc_reclaim(skc->skc_private); | |
4afaaefa | 1916 | |
37db7d8c | 1917 | spl_slab_reclaim(skc, skc->skc_reap, 0); |
ea3e6ca9 BB |
1918 | clear_bit(KMC_BIT_REAPING, &skc->skc_flags); |
1919 | atomic_dec(&skc->skc_ref); | |
4afaaefa | 1920 | |
b17edc10 | 1921 | SEXIT; |
57d86234 | 1922 | } |
2fb9b26a | 1923 | EXPORT_SYMBOL(spl_kmem_cache_reap_now); |
57d86234 | 1924 | |
ea3e6ca9 BB |
1925 | /* |
1926 | * Reap all free slabs from all registered caches. | |
1927 | */ | |
f1b59d26 | 1928 | void |
2fb9b26a | 1929 | spl_kmem_reap(void) |
937879f1 | 1930 | { |
a55bcaad BB |
1931 | struct shrink_control sc; |
1932 | ||
1933 | sc.nr_to_scan = KMC_REAP_CHUNK; | |
1934 | sc.gfp_mask = GFP_KERNEL; | |
1935 | ||
1936 | __spl_kmem_cache_generic_shrinker(NULL, &sc); | |
f1ca4da6 | 1937 | } |
2fb9b26a | 1938 | EXPORT_SYMBOL(spl_kmem_reap); |
5d86345d | 1939 | |
ff449ac4 | 1940 | #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING) |
c6dc93d6 | 1941 | static char * |
4afaaefa | 1942 | spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) |
d6a26c6a | 1943 | { |
e9d7a2be | 1944 | int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size; |
d6a26c6a | 1945 | int i, flag = 1; |
1946 | ||
1947 | ASSERT(str != NULL && len >= 17); | |
e9d7a2be | 1948 | memset(str, 0, len); |
d6a26c6a | 1949 | |
1950 | /* Check for a fully printable string, and while we are at | |
1951 | * it, place the printable characters in the passed buffer. */ | |
1952 | for (i = 0; i < size; i++) { | |
e9d7a2be | 1953 | str[i] = ((char *)(kd->kd_addr))[i]; |
1954 | if (isprint(str[i])) { | |
1955 | continue; | |
1956 | } else { | |
1957 | /* Minimum number of printable characters found | |
1958 | * to make it worthwhile to print this as ascii. */ | |
1959 | if (i > min) | |
1960 | break; | |
1961 | ||
1962 | flag = 0; | |
1963 | break; | |
1964 | } | |
d6a26c6a | 1965 | } |
1966 | ||
1967 | if (!flag) { | |
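		/* Not printable; rather than dump all 16 leading bytes,
		 * sample every other one (offsets 0, 2, ..., 14) as hex. */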
1968 | sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x", | |
1969 | *((uint8_t *)kd->kd_addr), | |
1970 | *((uint8_t *)kd->kd_addr + 2), | |
1971 | *((uint8_t *)kd->kd_addr + 4), | |
1972 | *((uint8_t *)kd->kd_addr + 6), | |
1973 | *((uint8_t *)kd->kd_addr + 8), | |
1974 | *((uint8_t *)kd->kd_addr + 10), | |
1975 | *((uint8_t *)kd->kd_addr + 12), | |
1976 | *((uint8_t *)kd->kd_addr + 14)); | |
1977 | } | |
1978 | ||
1979 | return str; | |
1980 | } | |
1981 | ||
a1502d76 | 1982 | static int |
1983 | spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size) | |
1984 | { | |
1985 | int i; | |
b17edc10 | 1986 | SENTRY; |
a1502d76 | 1987 | |
1988 | spin_lock_init(lock); | |
1989 | INIT_LIST_HEAD(list); | |
1990 | ||
1991 | for (i = 0; i < size; i++) | |
1992 | INIT_HLIST_HEAD(&kmem_table[i]); | |
1993 | ||
b17edc10 | 1994 | SRETURN(0); |
a1502d76 | 1995 | } |
1996 | ||
ff449ac4 | 1997 | static void |
1998 | spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock) | |
5d86345d | 1999 | { |
2fb9b26a | 2000 | unsigned long flags; |
2001 | kmem_debug_t *kd; | |
2002 | char str[17]; | |
b17edc10 | 2003 | SENTRY; |
2fb9b26a | 2004 | |
ff449ac4 | 2005 | spin_lock_irqsave(lock, flags); |
2006 | if (!list_empty(list)) | |
a0f6da3d | 2007 | printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address", |
2008 | "size", "data", "func", "line"); | |
2fb9b26a | 2009 | |
ff449ac4 | 2010 | list_for_each_entry(kd, list, kd_list) |
a0f6da3d | 2011 | printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr, |
b6b2acc6 | 2012 | (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), |
2fb9b26a | 2013 | kd->kd_func, kd->kd_line); |
2014 | ||
ff449ac4 | 2015 | spin_unlock_irqrestore(lock, flags); |
b17edc10 | 2016 | SEXIT; |
ff449ac4 | 2017 | } |
2018 | #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ | |
a1502d76 | 2019 | #define spl_kmem_init_tracking(list, lock, size) |
ff449ac4 | 2020 | #define spl_kmem_fini_tracking(list, lock) |
2021 | #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ | |
2022 | ||
36b313da BB |
2023 | static void |
2024 | spl_kmem_init_globals(void) | |
2025 | { | |
2026 | struct zone *zone; | |
2027 | ||
2028 | /* For now all zones are included; it may be wise to restrict | |
2029 | * this to normal and highmem zones if we see problems. */ | |
2030 | for_each_zone(zone) { | |
2031 | ||
2032 | if (!populated_zone(zone)) | |
2033 | continue; | |
2034 | ||
baf2979e BB |
2035 | minfree += min_wmark_pages(zone); |
2036 | desfree += low_wmark_pages(zone); | |
2037 | lotsfree += high_wmark_pages(zone); | |
36b313da | 2038 | } |
4ab13d3b BB |
2039 | |
2040 | /* Solaris default values */ | |
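	/* swapfs_minfree is the larger of 2MiB (512 pages with 4KiB
	 * pages) and physmem/8; swapfs_reserve is the smaller of 4MiB
	 * and physmem/16. */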
96dded38 BB |
2041 | swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3); |
2042 | swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4); | |
36b313da BB |
2043 | } |
2044 | ||
d1ff2312 BB |
2045 | /* |
2046 | * Called at module init when it is safe to use spl_kallsyms_lookup_name() | |
2047 | */ | |
2048 | int | |
2049 | spl_kmem_init_kallsyms_lookup(void) | |
2050 | { | |
2051 | #ifndef HAVE_GET_VMALLOC_INFO | |
2052 | get_vmalloc_info_fn = (get_vmalloc_info_t) | |
2053 | spl_kallsyms_lookup_name("get_vmalloc_info"); | |
e11d6c5f BB |
2054 | if (!get_vmalloc_info_fn) { |
2055 | printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n"); | |
d1ff2312 | 2056 | return -EFAULT; |
e11d6c5f | 2057 | } |
d1ff2312 BB |
2058 | #endif /* HAVE_GET_VMALLOC_INFO */ |
2059 | ||
5232d256 BB |
2060 | #ifdef HAVE_PGDAT_HELPERS |
2061 | # ifndef HAVE_FIRST_ONLINE_PGDAT | |
d1ff2312 BB |
2062 | first_online_pgdat_fn = (first_online_pgdat_t) |
2063 | spl_kallsyms_lookup_name("first_online_pgdat"); | |
e11d6c5f BB |
2064 | if (!first_online_pgdat_fn) { |
2065 | printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n"); | |
d1ff2312 | 2066 | return -EFAULT; |
e11d6c5f | 2067 | } |
5232d256 | 2068 | # endif /* HAVE_FIRST_ONLINE_PGDAT */ |
d1ff2312 | 2069 | |
5232d256 | 2070 | # ifndef HAVE_NEXT_ONLINE_PGDAT |
d1ff2312 BB |
2071 | next_online_pgdat_fn = (next_online_pgdat_t) |
2072 | spl_kallsyms_lookup_name("next_online_pgdat"); | |
e11d6c5f BB |
2073 | if (!next_online_pgdat_fn) { |
2074 | printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n"); | |
d1ff2312 | 2075 | return -EFAULT; |
e11d6c5f | 2076 | } |
5232d256 | 2077 | # endif /* HAVE_NEXT_ONLINE_PGDAT */ |
d1ff2312 | 2078 | |
5232d256 | 2079 | # ifndef HAVE_NEXT_ZONE |
d1ff2312 BB |
2080 | next_zone_fn = (next_zone_t) |
2081 | spl_kallsyms_lookup_name("next_zone"); | |
e11d6c5f BB |
2082 | if (!next_zone_fn) { |
2083 | printk(KERN_ERR "Error: Unknown symbol next_zone\n"); | |
d1ff2312 | 2084 | return -EFAULT; |
e11d6c5f | 2085 | } |
5232d256 BB |
2086 | # endif /* HAVE_NEXT_ZONE */ |
2087 | ||
2088 | #else /* HAVE_PGDAT_HELPERS */ | |
2089 | ||
2090 | # ifndef HAVE_PGDAT_LIST | |
124ca8a5 | 2091 | pgdat_list_addr = *(struct pglist_data **) |
5232d256 BB |
2092 | spl_kallsyms_lookup_name("pgdat_list"); |
2093 | if (!pgdat_list_addr) { | |
2094 | printk(KERN_ERR "Error: Unknown symbol pgdat_list\n"); | |
2095 | return -EFAULT; | |
2096 | } | |
2097 | # endif /* HAVE_PGDAT_LIST */ | |
2098 | #endif /* HAVE_PGDAT_HELPERS */ | |
d1ff2312 | 2099 | |
6ae7fef5 | 2100 | #if defined(NEED_GET_ZONE_COUNTS) && !defined(HAVE_GET_ZONE_COUNTS) |
d1ff2312 BB |
2101 | get_zone_counts_fn = (get_zone_counts_t) |
2102 | spl_kallsyms_lookup_name("get_zone_counts"); | |
e11d6c5f BB |
2103 | if (!get_zone_counts_fn) { |
2104 | printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n"); | |
d1ff2312 | 2105 | return -EFAULT; |
e11d6c5f | 2106 | } |
6ae7fef5 | 2107 | #endif /* NEED_GET_ZONE_COUNTS && !HAVE_GET_ZONE_COUNTS */ |
d1ff2312 BB |
2108 | |
2109 | /* | |
2110 | * It is now safe to initialize the global tunings which rely on | |
2111 | * the use of the for_each_zone() macro. This macro in turns | |
2112 | * depends on the *_pgdat symbols which are now available. | |
2113 | */ | |
2114 | spl_kmem_init_globals(); | |
2115 | ||
5f6c14b1 | 2116 | #if !defined(HAVE_INVALIDATE_INODES) && !defined(HAVE_INVALIDATE_INODES_CHECK) |
914b0631 | 2117 | invalidate_inodes_fn = (invalidate_inodes_t) |
9b0f9079 | 2118 | spl_kallsyms_lookup_name("invalidate_inodes"); |
914b0631 BB |
2119 | if (!invalidate_inodes_fn) { |
2120 | printk(KERN_ERR "Error: Unknown symbol invalidate_inodes\n"); | |
2121 | return -EFAULT; | |
2122 | } | |
5f6c14b1 | 2123 | #endif /* !HAVE_INVALIDATE_INODES && !HAVE_INVALIDATE_INODES_CHECK */ |
914b0631 | 2124 | |
e76f4bf1 | 2125 | #ifndef HAVE_SHRINK_DCACHE_MEMORY |
fe71c0e5 | 2126 | /* When shrink_dcache_memory_fn == NULL support is disabled */ |
e76f4bf1 | 2127 | shrink_dcache_memory_fn = (shrink_dcache_memory_t) |
fe71c0e5 | 2128 | spl_kallsyms_lookup_name("shrink_dcache_memory"); |
e76f4bf1 BB |
2129 | #endif /* HAVE_SHRINK_DCACHE_MEMORY */ |
2130 | ||
2131 | #ifndef HAVE_SHRINK_ICACHE_MEMORY | |
fe71c0e5 | 2132 | /* When shrink_icache_memory_fn == NULL support is disabled */ |
e76f4bf1 | 2133 | shrink_icache_memory_fn = (shrink_icache_memory_t) |
fe71c0e5 | 2134 | spl_kallsyms_lookup_name("shrink_icache_memory"); |
e76f4bf1 BB |
2135 | #endif /* HAVE_SHRINK_ICACHE_MEMORY */ |
2136 | ||
d1ff2312 BB |
2137 | return 0; |
2138 | } | |
2139 | ||
a1502d76 | 2140 | int |
2141 | spl_kmem_init(void) | |
2142 | { | |
2143 | int rc = 0; | |
b17edc10 | 2144 | SENTRY; |
a1502d76 | 2145 | |
2146 | init_rwsem(&spl_kmem_cache_sem); | |
2147 | INIT_LIST_HEAD(&spl_kmem_cache_list); | |
2148 | ||
495bd532 | 2149 | spl_register_shrinker(&spl_kmem_cache_shrinker); |
a1502d76 | 2150 | |
2151 | #ifdef DEBUG_KMEM | |
d04c8a56 BB |
2152 | kmem_alloc_used_set(0); |
2153 | vmem_alloc_used_set(0); | |
a1502d76 | 2154 | |
2155 | spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE); | |
2156 | spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE); | |
2157 | #endif | |
b17edc10 | 2158 | SRETURN(rc); |
a1502d76 | 2159 | } |
2160 | ||
ff449ac4 | 2161 | void |
2162 | spl_kmem_fini(void) | |
2163 | { | |
2164 | #ifdef DEBUG_KMEM | |
2165 | /* Display all unreclaimed memory addresses, including the | |
2166 | * allocation size and the first few bytes of what's located | |
2167 | * at that address to aid in debugging. Performance is not | |
2168 | * a serious concern here since it is module unload time. */ | |
d04c8a56 | 2169 | if (kmem_alloc_used_read() != 0) |
b17edc10 | 2170 | SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, |
3cb77549 BB |
2171 | "kmem leaked %ld/%ld bytes\n", |
2172 | kmem_alloc_used_read(), kmem_alloc_max); | |
ff449ac4 | 2173 | |
2fb9b26a | 2174 | |
d04c8a56 | 2175 | if (vmem_alloc_used_read() != 0) |
b17edc10 | 2176 | SDEBUG_LIMIT(SD_CONSOLE | SD_WARNING, |
3cb77549 BB |
2177 | "vmem leaked %ld/%ld bytes\n", |
2178 | vmem_alloc_used_read(), vmem_alloc_max); | |
2fb9b26a | 2179 | |
ff449ac4 | 2180 | spl_kmem_fini_tracking(&kmem_list, &kmem_lock); |
2181 | spl_kmem_fini_tracking(&vmem_list, &vmem_lock); | |
2182 | #endif /* DEBUG_KMEM */ | |
b17edc10 | 2183 | SENTRY; |
2fb9b26a | 2184 | |
495bd532 | 2185 | spl_unregister_shrinker(&spl_kmem_cache_shrinker); |
2fb9b26a | 2186 | |
b17edc10 | 2187 | SEXIT; |
5d86345d | 2188 | } |