Commit | Line | Data |
---|---|---|
715f6251 | 1 | /* |
2 | * This file is part of the SPL: Solaris Porting Layer. | |
3 | * | |
4 | * Copyright (c) 2008 Lawrence Livermore National Security, LLC. | |
5 | * Produced at Lawrence Livermore National Laboratory | |
6 | * Written by: | |
7 | * Brian Behlendorf <behlendorf1@llnl.gov>, | |
8 | * Herb Wartens <wartens2@llnl.gov>, | |
9 | * Jim Garlick <garlick@llnl.gov> | |
10 | * UCRL-CODE-235197 | |
11 | * | |
12 | * This is free software; you can redistribute it and/or modify it | |
13 | * under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2 of the License, or | |
15 | * (at your option) any later version. | |
16 | * | |
17 | * This is distributed in the hope that it will be useful, but WITHOUT | |
18 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
20 | * for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License along | |
23 | * with this program; if not, write to the Free Software Foundation, Inc., | |
24 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
25 | */ | |
26 | ||
f4b37741 | 27 | #include <sys/kmem.h> |
f1ca4da6 | 28 | |
937879f1 | 29 | #ifdef DEBUG_SUBSYSTEM |
a0f6da3d | 30 | # undef DEBUG_SUBSYSTEM |
937879f1 | 31 | #endif |
32 | ||
33 | #define DEBUG_SUBSYSTEM S_KMEM | |
34 | ||
f1ca4da6 | 35 | /* |
2fb9b26a | 36 | * Memory allocation interfaces and debugging for basic kmem_* |
37 | * and vmem_* style memory allocation. When DEBUG_KMEM is enabled | |
38 | * all allocations will be tracked when they are allocated and | |
39 | * freed. When the SPL module is unloaded a list of all leaked | |
40 | * addresses and where they were allocated will be dumped to the | |
41 | * console. Enabling this feature has a significant impact on | |
42 | * performance but it makes finding memory leaks straightforward. | |
f1ca4da6 | 43 | */ |
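/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * when DEBUG_KMEM and DEBUG_KMEM_TRACKING are both defined the public
 * kmem_alloc()/kmem_free() style macros are expected to route into the
 * *_track() functions defined below, passing __FUNCTION__ and __LINE__ so a
 * leak can be attributed to its call site. The real macros live in
 * sys/kmem.h and may differ in detail; this only shows the intent:
 *
 *   #define kmem_alloc(sz, fl)   kmem_alloc_track((sz), (fl), \
 *                                    __FUNCTION__, __LINE__, 0, 0)
 *   #define kmem_free(ptr, sz)   kmem_free_track((ptr), (sz))
 */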
44 | #ifdef DEBUG_KMEM | |
45 | /* Shim layer memory accounting */ | |
550f1705 | 46 | atomic64_t kmem_alloc_used = ATOMIC64_INIT(0); |
a0f6da3d | 47 | unsigned long long kmem_alloc_max = 0; |
550f1705 | 48 | atomic64_t vmem_alloc_used = ATOMIC64_INIT(0); |
a0f6da3d | 49 | unsigned long long vmem_alloc_max = 0; |
c19c06f3 | 50 | int kmem_warning_flag = 1; |
79b31f36 | 51 | |
ff449ac4 | 52 | EXPORT_SYMBOL(kmem_alloc_used); |
53 | EXPORT_SYMBOL(kmem_alloc_max); | |
54 | EXPORT_SYMBOL(vmem_alloc_used); | |
55 | EXPORT_SYMBOL(vmem_alloc_max); | |
56 | EXPORT_SYMBOL(kmem_warning_flag); | |
57 | ||
a0f6da3d | 58 | # ifdef DEBUG_KMEM_TRACKING |
59 | ||
60 | /* XXX - Not too surprisingly, with debugging enabled the xmem_locks are very | |
61 | * highly contended, particularly on xfree(). If we want to run with this | |
62 | * detailed debugging enabled for anything other than debugging we need to | |
63 | * minimize the contention by moving to a lock per xmem_table entry model. | |
64 | */ | |
65 | ||
66 | # define KMEM_HASH_BITS 10 | |
67 | # define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS) | |
68 | ||
69 | # define VMEM_HASH_BITS 10 | |
70 | # define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS) | |
71 | ||
72 | typedef struct kmem_debug { | |
73 | struct hlist_node kd_hlist; /* Hash node linkage */ | |
74 | struct list_head kd_list; /* List of all allocations */ | |
75 | void *kd_addr; /* Allocation pointer */ | |
76 | size_t kd_size; /* Allocation size */ | |
77 | const char *kd_func; /* Allocation function */ | |
78 | int kd_line; /* Allocation line */ | |
79 | } kmem_debug_t; | |
80 | ||
d6a26c6a | 81 | spinlock_t kmem_lock; |
82 | struct hlist_head kmem_table[KMEM_TABLE_SIZE]; | |
83 | struct list_head kmem_list; | |
84 | ||
13cdca65 | 85 | spinlock_t vmem_lock; |
86 | struct hlist_head vmem_table[VMEM_TABLE_SIZE]; | |
87 | struct list_head vmem_list; | |
88 | ||
d6a26c6a | 89 | EXPORT_SYMBOL(kmem_lock); |
90 | EXPORT_SYMBOL(kmem_table); | |
91 | EXPORT_SYMBOL(kmem_list); | |
92 | ||
13cdca65 | 93 | EXPORT_SYMBOL(vmem_lock); |
94 | EXPORT_SYMBOL(vmem_table); | |
95 | EXPORT_SYMBOL(vmem_list); | |
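/*
 * Illustrative sketch (editor's addition): with DEBUG_KMEM_TRACKING enabled
 * every live allocation has a kmem_debug_t linked on kmem_list/vmem_list, so
 * the leak report produced at module unload reduces to walking those lists.
 * The actual dump is performed in the SPL module exit path; a minimal
 * version might look like:
 *
 *   kmem_debug_t *kd;
 *
 *   list_for_each_entry(kd, &kmem_list, kd_list)
 *           printk(KERN_WARNING "leaked %zu bytes at %p (%s:%d)\n",
 *                  kd->kd_size, kd->kd_addr, kd->kd_func, kd->kd_line);
 */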
a0f6da3d | 96 | # endif |
13cdca65 | 97 | |
c19c06f3 | 98 | int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); } |
99 | #else | |
100 | int kmem_set_warning(int flag) { return 0; } | |
f1ca4da6 | 101 | #endif |
c19c06f3 | 102 | EXPORT_SYMBOL(kmem_set_warning); |
f1ca4da6 | 103 | |
104 | /* | |
105 | * Slab allocation interfaces | |
106 | * | |
2fb9b26a | 107 | * While the Linux slab implementation was inspired by the Solaris |
108 | * implementation, I cannot use it to emulate the Solaris APIs. I | |
109 | * require two features which are not provided by the Linux slab. | |
110 | * | |
111 | * 1) Constructors AND destructors. Recent versions of the Linux | |
112 | * kernel have removed support for destructors. This is a deal | |
113 | * breaker for the SPL which contains particularly expensive | |
114 | * initializers for mutex's, condition variables, etc. We also | |
a0f6da3d | 115 | * require a minimal level of cleanup for these data types unlike |
116 | * many Linux data types which do not need to be explicitly destroyed. | |
2fb9b26a | 117 | * |
a0f6da3d | 118 | * 2) Virtual address space backed slab. Callers of the Solaris slab |
2fb9b26a | 119 | * expect it to work well for both small and very large allocations. |
120 | * Because of memory fragmentation the Linux slab which is backed | |
121 | * by kmalloc'ed memory performs very badly when confronted with | |
122 | * large numbers of large allocations. Basing the slab on the | |
123 | * virtual address space removes the need for contiguous pages | |
124 | * and greatly improves performance for large allocations. | |
125 | * | |
126 | * For these reasons, the SPL has its own slab implementation with | |
127 | * the needed features. It is not as highly optimized as either the | |
128 | * Solaris or Linux slabs, but it should get me most of what is | |
129 | * needed until it can be optimized or obsoleted by another approach. | |
130 | * | |
131 | * One serious concern I do have about this method is the relatively | |
132 | * small virtual address space on 32bit arches. This will seriously | |
133 | * constrain the size of the slab caches and their performance. | |
134 | * | |
2fb9b26a | 135 | * XXX: Improve the partial slab list by carefully maintaining a |
136 | * strict ordering of fullest to emptiest slabs based on | |
137 | * the slab reference count. This guarantees that when freeing | |
138 | * slabs back to the system we need only linearly traverse the | |
139 | * last N slabs in the list to discover all the freeable slabs. | |
140 | * | |
141 | * XXX: NUMA awareness for optionally allocating memory close to a | |
142 | * particular core. This can be advantageous if you know the slab | |
143 | * object will be short lived and primarily accessed from one core. | |
144 | * | |
145 | * XXX: Slab coloring may also yield performance improvements and would | |
146 | * be desirable to implement. | |
f1ca4da6 | 147 | */ |
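/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): the constructor/destructor support argued for above is what lets
 * a caller embed initialized state in every cached object. All names below
 * are hypothetical and the Solaris-style hook signatures used by this file
 * (ctor(obj, priv, kmflags), dtor(obj, priv)) are assumed:
 *
 *   typedef struct my_obj {
 *           spinlock_t              mo_lock;
 *           struct list_head        mo_list;
 *           char                    mo_data[512];
 *   } my_obj_t;
 *
 *   static int
 *   my_obj_ctor(void *buf, void *priv, int kmflags)
 *   {
 *           my_obj_t *mo = buf;
 *
 *           spin_lock_init(&mo->mo_lock);   /* runs once per slab object, */
 *           INIT_LIST_HEAD(&mo->mo_list);   /* not on every allocation    */
 *           return (0);
 *   }
 *
 *   static void
 *   my_obj_dtor(void *buf, void *priv)
 *   {
 *           /* minimal cleanup hook, paired with the constructor */
 *   }
 *
 *   cache = spl_kmem_cache_create("my_obj_cache", sizeof (my_obj_t), 0,
 *                                 my_obj_ctor, my_obj_dtor, NULL,
 *                                 NULL, NULL, 0);
 */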
2fb9b26a | 148 | |
a0f6da3d | 149 | struct list_head spl_kmem_cache_list; /* List of caches */ |
150 | struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */ | |
c30df9c8 | 151 | |
4afaaefa | 152 | static int spl_cache_flush(spl_kmem_cache_t *skc, |
a0f6da3d | 153 | spl_kmem_magazine_t *skm, int flush); |
4afaaefa | 154 | |
57d86234 | 155 | #ifdef HAVE_SET_SHRINKER |
2fb9b26a | 156 | static struct shrinker *spl_kmem_cache_shrinker; |
57d86234 | 157 | #else |
4afaaefa | 158 | static int spl_kmem_cache_generic_shrinker(int nr_to_scan, |
a0f6da3d | 159 | unsigned int gfp_mask); |
2fb9b26a | 160 | static struct shrinker spl_kmem_cache_shrinker = { |
4afaaefa | 161 | .shrink = spl_kmem_cache_generic_shrinker, |
57d86234 | 162 | .seeks = KMC_DEFAULT_SEEKS, |
163 | }; | |
164 | #endif | |
f1ca4da6 | 165 | |
a0f6da3d | 166 | #ifdef DEBUG_KMEM |
167 | # ifdef DEBUG_KMEM_TRACKING | |
168 | ||
169 | static kmem_debug_t * | |
170 | kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, | |
171 | void *addr) | |
172 | { | |
173 | struct hlist_head *head; | |
174 | struct hlist_node *node; | |
175 | struct kmem_debug *p; | |
176 | unsigned long flags; | |
177 | ENTRY; | |
178 | ||
179 | spin_lock_irqsave(lock, flags); | |
180 | ||
181 | head = &table[hash_ptr(addr, bits)]; | |
182 | hlist_for_each_entry_rcu(p, node, head, kd_hlist) { | |
183 | if (p->kd_addr == addr) { | |
184 | hlist_del_init(&p->kd_hlist); | |
185 | list_del_init(&p->kd_list); | |
186 | spin_unlock_irqrestore(lock, flags); | |
187 | return p; | |
188 | } | |
189 | } | |
190 | ||
191 | spin_unlock_irqrestore(lock, flags); | |
192 | ||
193 | RETURN(NULL); | |
194 | } | |
195 | ||
196 | void * | |
197 | kmem_alloc_track(size_t size, int flags, const char *func, int line, | |
198 | int node_alloc, int node) | |
199 | { | |
200 | void *ptr = NULL; | |
201 | kmem_debug_t *dptr; | |
202 | unsigned long irq_flags; | |
203 | ENTRY; | |
204 | ||
205 | dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), | |
206 | flags & ~__GFP_ZERO); | |
207 | ||
208 | if (dptr == NULL) { | |
209 | CWARN("kmem_alloc(%ld, 0x%x) debug failed\n", | |
210 | sizeof(kmem_debug_t), flags); | |
211 | } else { | |
212 | /* Marked unlikely because we should never be doing this, | |
213 | * we tolerate up to 2 pages but a single page is best. */ | |
214 | if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) | |
215 | CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n", | |
216 | (unsigned long long) size, flags, | |
217 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
218 | ||
c8e60837 | 219 | /* We use kstrdup() below because the string pointed to by |
220 | * __FUNCTION__ might not be available by the time we want | |
221 | * to print it since the module might have been unloaded. */ | |
222 | dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO); | |
223 | if (unlikely(dptr->kd_func == NULL)) { | |
224 | kfree(dptr); | |
225 | CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) " | |
226 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
227 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
228 | goto out; | |
229 | } | |
230 | ||
a0f6da3d | 231 | /* Use the correct allocator */ |
232 | if (node_alloc) { | |
233 | ASSERT(!(flags & __GFP_ZERO)); | |
234 | ptr = kmalloc_node(size, flags, node); | |
235 | } else if (flags & __GFP_ZERO) { | |
236 | ptr = kzalloc(size, flags & ~__GFP_ZERO); | |
237 | } else { | |
238 | ptr = kmalloc(size, flags); | |
239 | } | |
240 | ||
241 | if (unlikely(ptr == NULL)) { | |
c8e60837 | 242 | kfree(dptr->kd_func); |
a0f6da3d | 243 | kfree(dptr); |
244 | CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
245 | (unsigned long long) size, flags, | |
246 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
247 | goto out; | |
248 | } | |
249 | ||
250 | atomic64_add(size, &kmem_alloc_used); | |
251 | if (unlikely(atomic64_read(&kmem_alloc_used) > | |
252 | kmem_alloc_max)) | |
253 | kmem_alloc_max = | |
254 | atomic64_read(&kmem_alloc_used); | |
255 | ||
256 | INIT_HLIST_NODE(&dptr->kd_hlist); | |
257 | INIT_LIST_HEAD(&dptr->kd_list); | |
258 | ||
259 | dptr->kd_addr = ptr; | |
260 | dptr->kd_size = size; | |
a0f6da3d | 261 | dptr->kd_line = line; |
262 | ||
263 | spin_lock_irqsave(&kmem_lock, irq_flags); | |
264 | hlist_add_head_rcu(&dptr->kd_hlist, | |
265 | &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]); | |
266 | list_add_tail(&dptr->kd_list, &kmem_list); | |
267 | spin_unlock_irqrestore(&kmem_lock, irq_flags); | |
268 | ||
269 | CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p " | |
270 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
271 | ptr, atomic64_read(&kmem_alloc_used), | |
272 | kmem_alloc_max); | |
273 | } | |
274 | out: | |
275 | RETURN(ptr); | |
276 | } | |
277 | EXPORT_SYMBOL(kmem_alloc_track); | |
278 | ||
279 | void | |
280 | kmem_free_track(void *ptr, size_t size) | |
281 | { | |
282 | kmem_debug_t *dptr; | |
283 | ENTRY; | |
284 | ||
285 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
286 | (unsigned long long) size); | |
287 | ||
288 | dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); | |
289 | ||
290 | ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */ | |
291 | ||
292 | /* Size must match */ | |
293 | ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), " | |
294 | "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size, | |
295 | (unsigned long long) size, dptr->kd_func, dptr->kd_line); | |
296 | ||
297 | atomic64_sub(size, &kmem_alloc_used); | |
298 | ||
299 | CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
300 | (unsigned long long) size, atomic64_read(&kmem_alloc_used), | |
301 | kmem_alloc_max); | |
302 | ||
c8e60837 | 303 | kfree(dptr->kd_func); |
304 | ||
a0f6da3d | 305 | memset(dptr, 0x5a, sizeof(kmem_debug_t)); |
306 | kfree(dptr); | |
307 | ||
308 | memset(ptr, 0x5a, size); | |
309 | kfree(ptr); | |
310 | ||
311 | EXIT; | |
312 | } | |
313 | EXPORT_SYMBOL(kmem_free_track); | |
314 | ||
315 | void * | |
316 | vmem_alloc_track(size_t size, int flags, const char *func, int line) | |
317 | { | |
318 | void *ptr = NULL; | |
319 | kmem_debug_t *dptr; | |
320 | unsigned long irq_flags; | |
321 | ENTRY; | |
322 | ||
323 | ASSERT(flags & KM_SLEEP); | |
324 | ||
325 | dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags); | |
326 | if (dptr == NULL) { | |
327 | CWARN("vmem_alloc(%ld, 0x%x) debug failed\n", | |
328 | sizeof(kmem_debug_t), flags); | |
329 | } else { | |
c8e60837 | 330 | /* We use kstrdup() below because the string pointed to by |
331 | * __FUNCTION__ might not be available by the time we want | |
332 | * to print it, since the module might have been unloaded. */ | |
333 | dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO); | |
334 | if (unlikely(dptr->kd_func == NULL)) { | |
335 | kfree(dptr); | |
336 | CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) " | |
337 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
338 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
339 | goto out; | |
340 | } | |
341 | ||
a0f6da3d | 342 | ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO, |
343 | PAGE_KERNEL); | |
344 | ||
345 | if (unlikely(ptr == NULL)) { | |
c8e60837 | 346 | kfree(dptr->kd_func); |
a0f6da3d | 347 | kfree(dptr); |
348 | CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
349 | (unsigned long long) size, flags, | |
350 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
351 | goto out; | |
352 | } | |
353 | ||
354 | if (flags & __GFP_ZERO) | |
355 | memset(ptr, 0, size); | |
356 | ||
357 | atomic64_add(size, &vmem_alloc_used); | |
358 | if (unlikely(atomic64_read(&vmem_alloc_used) > | |
359 | vmem_alloc_max)) | |
360 | vmem_alloc_max = | |
361 | atomic64_read(&vmem_alloc_used); | |
362 | ||
363 | INIT_HLIST_NODE(&dptr->kd_hlist); | |
364 | INIT_LIST_HEAD(&dptr->kd_list); | |
365 | ||
366 | dptr->kd_addr = ptr; | |
367 | dptr->kd_size = size; | |
a0f6da3d | 368 | dptr->kd_line = line; |
369 | ||
370 | spin_lock_irqsave(&vmem_lock, irq_flags); | |
371 | hlist_add_head_rcu(&dptr->kd_hlist, | |
372 | &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]); | |
373 | list_add_tail(&dptr->kd_list, &vmem_list); | |
374 | spin_unlock_irqrestore(&vmem_lock, irq_flags); | |
375 | ||
376 | CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p " | |
377 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
378 | ptr, atomic64_read(&vmem_alloc_used), | |
379 | vmem_alloc_max); | |
380 | } | |
381 | out: | |
382 | RETURN(ptr); | |
383 | } | |
384 | EXPORT_SYMBOL(vmem_alloc_track); | |
385 | ||
386 | void | |
387 | vmem_free_track(void *ptr, size_t size) | |
388 | { | |
389 | kmem_debug_t *dptr; | |
390 | ENTRY; | |
391 | ||
392 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
393 | (unsigned long long) size); | |
394 | ||
395 | dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr); | |
396 | ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */ | |
397 | ||
398 | /* Size must match */ | |
399 | ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), " | |
400 | "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size, | |
401 | (unsigned long long) size, dptr->kd_func, dptr->kd_line); | |
402 | ||
403 | atomic64_sub(size, &vmem_alloc_used); | |
404 | CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
405 | (unsigned long long) size, atomic64_read(&vmem_alloc_used), | |
406 | vmem_alloc_max); | |
407 | ||
c8e60837 | 408 | kfree(dptr->kd_func); |
409 | ||
a0f6da3d | 410 | memset(dptr, 0x5a, sizeof(kmem_debug_t)); |
411 | kfree(dptr); | |
412 | ||
413 | memset(ptr, 0x5a, size); | |
414 | vfree(ptr); | |
415 | ||
416 | EXIT; | |
417 | } | |
418 | EXPORT_SYMBOL(vmem_free_track); | |
419 | ||
420 | # else /* DEBUG_KMEM_TRACKING */ | |
421 | ||
422 | void * | |
423 | kmem_alloc_debug(size_t size, int flags, const char *func, int line, | |
424 | int node_alloc, int node) | |
425 | { | |
426 | void *ptr; | |
427 | ENTRY; | |
428 | ||
429 | /* Marked unlikely because we should never be doing this, | |
430 | * we tolerate up to 2 pages but a single page is best. */ | |
431 | if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag) | |
432 | CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n", | |
433 | (unsigned long long) size, flags, | |
434 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
435 | ||
436 | /* Use the correct allocator */ | |
437 | if (node_alloc) { | |
438 | ASSERT(!(flags & __GFP_ZERO)); | |
439 | ptr = kmalloc_node(size, flags, node); | |
440 | } else if (flags & __GFP_ZERO) { | |
441 | ptr = kzalloc(size, flags & (~__GFP_ZERO)); | |
442 | } else { | |
443 | ptr = kmalloc(size, flags); | |
444 | } | |
445 | ||
446 | if (ptr == NULL) { | |
447 | CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
448 | (unsigned long long) size, flags, | |
449 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
450 | } else { | |
451 | atomic64_add(size, &kmem_alloc_used); | |
452 | if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max)) | |
453 | kmem_alloc_max = atomic64_read(&kmem_alloc_used); | |
454 | ||
455 | CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p " | |
456 | "(%lld/%llu)\n", (unsigned long long) size, flags, ptr, | |
457 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
458 | } | |
459 | RETURN(ptr); | |
460 | } | |
461 | EXPORT_SYMBOL(kmem_alloc_debug); | |
462 | ||
463 | void | |
464 | kmem_free_debug(void *ptr, size_t size) | |
465 | { | |
466 | ENTRY; | |
467 | ||
468 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
469 | (unsigned long long) size); | |
470 | ||
471 | atomic64_sub(size, &kmem_alloc_used); | |
472 | ||
473 | CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
474 | (unsigned long long) size, atomic64_read(&kmem_alloc_used), | |
475 | kmem_alloc_max); | |
476 | ||
477 | memset(ptr, 0x5a, size); | |
478 | kfree(ptr); | |
479 | ||
480 | EXIT; | |
481 | } | |
482 | EXPORT_SYMBOL(kmem_free_debug); | |
483 | ||
484 | void * | |
485 | vmem_alloc_debug(size_t size, int flags, const char *func, int line) | |
486 | { | |
487 | void *ptr; | |
488 | ENTRY; | |
489 | ||
490 | ASSERT(flags & KM_SLEEP); | |
491 | ||
492 | ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO, | |
493 | PAGE_KERNEL); | |
494 | if (ptr == NULL) { | |
495 | CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
496 | (unsigned long long) size, flags, | |
497 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
498 | } else { | |
499 | if (flags & __GFP_ZERO) | |
500 | memset(ptr, 0, size); | |
501 | ||
502 | atomic64_add(size, &vmem_alloc_used); | |
503 | ||
504 | if (unlikely(atomic64_read(&vmem_alloc_used) > vmem_alloc_max)) | |
505 | vmem_alloc_max = atomic64_read(&vmem_alloc_used); | |
506 | ||
507 | CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p " | |
508 | "(%lld/%llu)\n", (unsigned long long) size, flags, ptr, | |
509 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
510 | } | |
511 | ||
512 | RETURN(ptr); | |
513 | } | |
514 | EXPORT_SYMBOL(vmem_alloc_debug); | |
515 | ||
516 | void | |
517 | vmem_free_debug(void *ptr, size_t size) | |
518 | { | |
519 | ENTRY; | |
520 | ||
521 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
522 | (unsigned long long) size); | |
523 | ||
524 | atomic64_sub(size, &vmem_alloc_used); | |
525 | ||
526 | CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
527 | (unsigned long long) size, atomic64_read(&vmem_alloc_used), | |
528 | vmem_alloc_max); | |
529 | ||
530 | memset(ptr, 0x5a, size); | |
531 | vfree(ptr); | |
532 | ||
533 | EXIT; | |
534 | } | |
535 | EXPORT_SYMBOL(vmem_free_debug); | |
536 | ||
537 | # endif /* DEBUG_KMEM_TRACKING */ | |
538 | #endif /* DEBUG_KMEM */ | |
539 | ||
a1502d76 | 540 | static void * |
541 | kv_alloc(spl_kmem_cache_t *skc, int size, int flags) | |
fece7c99 | 542 | { |
a1502d76 | 543 | void *ptr; |
f1ca4da6 | 544 | |
a1502d76 | 545 | if (skc->skc_flags & KMC_KMEM) { |
546 | if (size > (2 * PAGE_SIZE)) { | |
547 | ptr = (void *)__get_free_pages(flags, get_order(size)); | |
548 | } else | |
549 | ptr = kmem_alloc(size, flags); | |
550 | } else { | |
551 | ptr = vmem_alloc(size, flags); | |
d6a26c6a | 552 | } |
fece7c99 | 553 | |
a1502d76 | 554 | return ptr; |
555 | } | |
fece7c99 | 556 | |
a1502d76 | 557 | static void |
558 | kv_free(spl_kmem_cache_t *skc, void *ptr, int size) | |
559 | { | |
560 | if (skc->skc_flags & KMC_KMEM) { | |
561 | if (size > (2 * PAGE_SIZE)) | |
562 | free_pages((unsigned long)ptr, get_order(size)); | |
563 | else | |
564 | kmem_free(ptr, size); | |
565 | } else { | |
566 | vmem_free(ptr, size); | |
567 | } | |
fece7c99 | 568 | } |
569 | ||
ea3e6ca9 BB |
570 | /* |
571 | * It's important that we pack the spl_kmem_obj_t structure and the | |
48e0606a BB |
572 | * actual objects into one large address space to minimize the number | |
573 | * of calls to the allocator. It is far better to do a few large | |
574 | * allocations and then subdivide it ourselves. Now which allocator | |
575 | * we use requires balancing a few trade offs. | |
576 | * | |
577 | * For small objects we use kmem_alloc() because as long as you are | |
578 | * only requesting a small number of pages (ideally just one) it's cheap. | |
579 | * However, when you start requesting multiple pages with kmem_alloc() | |
580 | * it gets increasingly expensive since it requires contiguous pages. | |
581 | * For this reason we shift to vmem_alloc() for slabs of large objects | |
582 | * which removes the need for contiguous pages. We do not use | |
583 | * vmem_alloc() in all cases because there is significant locking | |
584 | * overhead in __get_vm_area_node(). This function takes a single | |
585 | * global lock when acquiring an available virtual address range which | |
586 | * serializes all vmem_alloc()'s for all slab caches. Using slightly | |
587 | * different allocation functions for small and large objects should | |
588 | * give us the best of both worlds. | |
589 | * | |
590 | * KMC_ONSLAB KMC_OFFSLAB | |
591 | * | |
592 | * +------------------------+ +-----------------+ | |
593 | * | spl_kmem_slab_t --+-+ | | spl_kmem_slab_t |---+-+ | |
594 | * | skc_obj_size <-+ | | +-----------------+ | | | |
595 | * | spl_kmem_obj_t | | | | | |
596 | * | skc_obj_size <---+ | +-----------------+ | | | |
597 | * | spl_kmem_obj_t | | | skc_obj_size | <-+ | | |
598 | * | ... v | | spl_kmem_obj_t | | | |
599 | * +------------------------+ +-----------------+ v | |
600 | */ | |
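/*
 * Illustrative sketch (editor's addition): for the KMC_ONSLAB layout drawn
 * above, objects and their trailing spl_kmem_obj_t headers are located with
 * the same arithmetic used by spl_slab_alloc() and spl_cache_shrink() below:
 *
 *   size  = P2ROUNDUP(skc->skc_obj_size, align) +
 *           P2ROUNDUP(sizeof(spl_kmem_obj_t), align);
 *   obj_i = base + P2ROUNDUP(sizeof(spl_kmem_slab_t), align) + (i * size);
 *   sko_i = obj_i + P2ROUNDUP(skc->skc_obj_size, align);
 *
 * so given only an object pointer the cache can recover its spl_kmem_obj_t,
 * and from that the owning slab, without any lookup table.
 */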
fece7c99 | 601 | static spl_kmem_slab_t * |
a1502d76 | 602 | spl_slab_alloc(spl_kmem_cache_t *skc, int flags) |
fece7c99 | 603 | { |
604 | spl_kmem_slab_t *sks; | |
a1502d76 | 605 | spl_kmem_obj_t *sko, *n; |
606 | void *base, *obj; | |
48e0606a BB |
607 | int i, align, size, rc = 0; |
608 | ||
a1502d76 | 609 | base = kv_alloc(skc, skc->skc_slab_size, flags); |
610 | if (base == NULL) | |
fece7c99 | 611 | RETURN(NULL); |
612 | ||
a1502d76 | 613 | sks = (spl_kmem_slab_t *)base; |
614 | sks->sks_magic = SKS_MAGIC; | |
615 | sks->sks_objs = skc->skc_slab_objs; | |
616 | sks->sks_age = jiffies; | |
617 | sks->sks_cache = skc; | |
618 | INIT_LIST_HEAD(&sks->sks_list); | |
619 | INIT_LIST_HEAD(&sks->sks_free_list); | |
620 | sks->sks_ref = 0; | |
48e0606a BB |
621 | |
622 | align = skc->skc_obj_align; | |
623 | size = P2ROUNDUP(skc->skc_obj_size, align) + | |
624 | P2ROUNDUP(sizeof(spl_kmem_obj_t), align); | |
fece7c99 | 625 | |
626 | for (i = 0; i < sks->sks_objs; i++) { | |
a1502d76 | 627 | if (skc->skc_flags & KMC_OFFSLAB) { |
628 | obj = kv_alloc(skc, size, flags); | |
629 | if (!obj) | |
630 | GOTO(out, rc = -ENOMEM); | |
631 | } else { | |
48e0606a BB |
632 | obj = base + |
633 | P2ROUNDUP(sizeof(spl_kmem_slab_t), align) + | |
634 | (i * size); | |
a1502d76 | 635 | } |
636 | ||
48e0606a | 637 | sko = obj + P2ROUNDUP(skc->skc_obj_size, align); |
fece7c99 | 638 | sko->sko_addr = obj; |
639 | sko->sko_magic = SKO_MAGIC; | |
640 | sko->sko_slab = sks; | |
641 | INIT_LIST_HEAD(&sko->sko_list); | |
fece7c99 | 642 | list_add_tail(&sko->sko_list, &sks->sks_free_list); |
643 | } | |
644 | ||
fece7c99 | 645 | list_for_each_entry(sko, &sks->sks_free_list, sko_list) |
646 | if (skc->skc_ctor) | |
647 | skc->skc_ctor(sko->sko_addr, skc->skc_private, flags); | |
2fb9b26a | 648 | out: |
a1502d76 | 649 | if (rc) { |
650 | if (skc->skc_flags & KMC_OFFSLAB) | |
48e0606a BB |
651 | list_for_each_entry_safe(sko, n, &sks->sks_free_list, |
652 | sko_list) | |
a1502d76 | 653 | kv_free(skc, sko->sko_addr, size); |
fece7c99 | 654 | |
a1502d76 | 655 | kv_free(skc, base, skc->skc_slab_size); |
656 | sks = NULL; | |
fece7c99 | 657 | } |
658 | ||
a1502d76 | 659 | RETURN(sks); |
fece7c99 | 660 | } |
661 | ||
ea3e6ca9 BB |
662 | /* |
663 | * Remove a slab from the complete or partial list. It must be called with | |
664 | * the 'skc->skc_lock' held but the actual free must be performed | |
665 | * outside the lock to prevent deadlocking on vmem addresses. | |
fece7c99 | 666 | */ |
f1ca4da6 | 667 | static void |
ea3e6ca9 BB |
668 | spl_slab_free(spl_kmem_slab_t *sks, |
669 | struct list_head *sks_list, struct list_head *sko_list) | |
670 | { | |
2fb9b26a | 671 | spl_kmem_cache_t *skc; |
672 | spl_kmem_obj_t *sko, *n; | |
2fb9b26a | 673 | ENTRY; |
57d86234 | 674 | |
2fb9b26a | 675 | ASSERT(sks->sks_magic == SKS_MAGIC); |
4afaaefa | 676 | ASSERT(sks->sks_ref == 0); |
d6a26c6a | 677 | |
fece7c99 | 678 | skc = sks->sks_cache; |
679 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
d46630e0 | 680 | ASSERT(spin_is_locked(&skc->skc_lock)); |
f1ca4da6 | 681 | |
fece7c99 | 682 | skc->skc_obj_total -= sks->sks_objs; |
683 | skc->skc_slab_total--; | |
684 | list_del(&sks->sks_list); | |
937879f1 | 685 | |
fece7c99 | 686 | /* Run destructors as the slab is being released */ |
a1502d76 | 687 | list_for_each_entry_safe(sko, n, &sks->sks_free_list, sko_list) { |
688 | ASSERT(sko->sko_magic == SKO_MAGIC); | |
ea3e6ca9 | 689 | list_del(&sko->sko_list); |
a1502d76 | 690 | |
2fb9b26a | 691 | if (skc->skc_dtor) |
692 | skc->skc_dtor(sko->sko_addr, skc->skc_private); | |
0a6fd143 | 693 | |
a1502d76 | 694 | if (skc->skc_flags & KMC_OFFSLAB) |
ea3e6ca9 | 695 | list_add(&sko->sko_list, sko_list); |
a1502d76 | 696 | } |
d61e12af | 697 | |
ea3e6ca9 | 698 | list_add(&sks->sks_list, sks_list); |
2fb9b26a | 699 | EXIT; |
700 | } | |
d6a26c6a | 701 | |
ea3e6ca9 BB |
702 | /* |
703 | * Traverses all the partial slabs attached to a cache and frees those | |
704 | * which are currently empty and have not been touched for | |
705 | * skc_delay seconds. This is to avoid thrashing. | |
706 | */ | |
707 | static void | |
708 | spl_slab_reclaim(spl_kmem_cache_t *skc, int flag) | |
2fb9b26a | 709 | { |
710 | spl_kmem_slab_t *sks, *m; | |
ea3e6ca9 BB |
711 | spl_kmem_obj_t *sko, *n; |
712 | LIST_HEAD(sks_list); | |
713 | LIST_HEAD(sko_list); | |
714 | int size; | |
2fb9b26a | 715 | ENTRY; |
716 | ||
2fb9b26a | 717 | /* |
ea3e6ca9 BB |
718 | * Move empty slabs and objects which have not been touched in |
719 | * skc_delay seconds on to private lists to be freed outside | |
720 | * the spin lock. This delay time is important to avoid | |
721 | * thrashing however when flag is set the delay will not be | |
722 | * used. Empty slabs will be at the end of the skc_partial_list. | |
2fb9b26a | 723 | */ |
ea3e6ca9 | 724 | spin_lock(&skc->skc_lock); |
2fb9b26a | 725 | list_for_each_entry_safe_reverse(sks, m, &skc->skc_partial_list, |
726 | sks_list) { | |
4afaaefa | 727 | if (sks->sks_ref > 0) |
2fb9b26a | 728 | break; |
729 | ||
ea3e6ca9 BB |
730 | if (flag || time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)) |
731 | spl_slab_free(sks, &sks_list, &sko_list); | |
732 | } | |
733 | spin_unlock(&skc->skc_lock); | |
734 | ||
735 | /* | |
736 | * We only have a list of spl_kmem_obj_t's if they are located off | |
737 | * the slab, otherwise they get freed with the spl_kmem_slab_t. | |
738 | */ | |
739 | if (!list_empty(&sko_list)) { | |
740 | ASSERT(skc->skc_flags & KMC_OFFSLAB); | |
741 | ||
742 | size = P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) + | |
743 | P2ROUNDUP(sizeof(spl_kmem_obj_t), skc->skc_obj_align); | |
744 | ||
745 | list_for_each_entry_safe(sko, n, &sko_list, sko_list) | |
746 | kv_free(skc, sko->sko_addr, size); | |
2fb9b26a | 747 | } |
748 | ||
ea3e6ca9 BB |
749 | list_for_each_entry_safe(sks, m, &sks_list, sks_list) |
750 | kv_free(skc, sks, skc->skc_slab_size); | |
751 | ||
752 | EXIT; | |
f1ca4da6 | 753 | } |
754 | ||
ea3e6ca9 BB |
755 | /* |
756 | * Called regularly on all caches to age objects out of the magazines | |
757 | * which have not been accessed in skc->skc_delay seconds. This prevents | |
758 | * idle magazines from holding memory which might be better used by | |
759 | * other caches or parts of the system. The delay is present to | |
760 | * prevent thrashing the magazine. | |
761 | */ | |
762 | static void | |
763 | spl_magazine_age(void *data) | |
f1ca4da6 | 764 | { |
ea3e6ca9 BB |
765 | spl_kmem_cache_t *skc = data; |
766 | spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()]; | |
f1ca4da6 | 767 | |
ea3e6ca9 BB |
768 | if (skm->skm_avail > 0 && |
769 | time_after(jiffies, skm->skm_age + skc->skc_delay * HZ)) | |
770 | (void)spl_cache_flush(skc, skm, skm->skm_refill); | |
771 | } | |
4efd4118 | 772 | |
ea3e6ca9 BB |
773 | /* |
774 | * Called regularly to keep a downward pressure on the size of idle | |
775 | * magazines and to release free slabs from the cache. This function | |
776 | * never calls the registered reclaim function; that only occurs | |
777 | * under memory pressure or with a direct call to spl_kmem_reap(). | |
778 | */ | |
779 | static void | |
780 | spl_cache_age(void *data) | |
781 | { | |
782 | spl_kmem_cache_t *skc = | |
783 | spl_get_work_data(data, spl_kmem_cache_t, skc_work.work); | |
784 | ||
785 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
31a033ec | 786 | spl_on_each_cpu(spl_magazine_age, skc, 1); |
ea3e6ca9 BB |
787 | spl_slab_reclaim(skc, 0); |
788 | ||
789 | if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)) | |
790 | schedule_delayed_work(&skc->skc_work, 2 * skc->skc_delay * HZ); | |
2fb9b26a | 791 | } |
f1ca4da6 | 792 | |
ea3e6ca9 BB |
793 | /* |
794 | * Size a slab based on the size of each aligned object plus spl_kmem_obj_t. | |
795 | * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB. However, | |
796 | * for very small objects we may end up with more than this so as not | |
797 | * to waste space in the minimal allocation of a single page. Also for | |
798 | * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN, | |
799 | * lower than this and we will fail. | |
800 | */ | |
48e0606a BB |
801 | static int |
802 | spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size) | |
803 | { | |
ea3e6ca9 | 804 | int sks_size, obj_size, max_size, align; |
48e0606a BB |
805 | |
806 | if (skc->skc_flags & KMC_OFFSLAB) { | |
ea3e6ca9 | 807 | *objs = SPL_KMEM_CACHE_OBJ_PER_SLAB; |
48e0606a BB |
808 | *size = sizeof(spl_kmem_slab_t); |
809 | } else { | |
ea3e6ca9 BB |
810 | align = skc->skc_obj_align; |
811 | sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align); | |
812 | obj_size = P2ROUNDUP(skc->skc_obj_size, align) + | |
813 | P2ROUNDUP(sizeof(spl_kmem_obj_t), align); | |
814 | ||
815 | if (skc->skc_flags & KMC_KMEM) | |
816 | max_size = ((uint64_t)1 << (MAX_ORDER-1)) * PAGE_SIZE; | |
817 | else | |
818 | max_size = (32 * 1024 * 1024); | |
48e0606a | 819 | |
ea3e6ca9 BB |
820 | for (*size = PAGE_SIZE; *size <= max_size; *size += PAGE_SIZE) { |
821 | *objs = (*size - sks_size) / obj_size; | |
822 | if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB) | |
823 | RETURN(0); | |
824 | } | |
48e0606a | 825 | |
ea3e6ca9 BB |
826 | /* |
827 | * Unable to satisfy the target objects per slab, fall back to | |
828 | * allocating a maximally sized slab and use it if it can contain | |
829 | * at least the minimum object count. If not, fail. | |
830 | */ | |
831 | *size = max_size; | |
832 | *objs = (*size - sks_size) / obj_size; | |
833 | if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN) | |
834 | RETURN(0); | |
48e0606a BB |
835 | } |
836 | ||
ea3e6ca9 | 837 | RETURN(-ENOSPC); |
48e0606a BB |
838 | } |
839 | ||
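/*
 * Worked example (editor's addition, illustrative numbers only): consider an
 * on-slab KMC_KMEM cache with align = 8 and skc_obj_size = 512, and assume
 * sizeof(spl_kmem_slab_t) rounds up to 64 and sizeof(spl_kmem_obj_t) rounds
 * up to 32. Then obj_size = 512 + 32 = 544 and the first pass of the loop
 * above tries *size = PAGE_SIZE = 4096, giving *objs = (4096 - 64) / 544 = 7.
 * If the SPL_KMEM_CACHE_OBJ_PER_SLAB target is larger than 7, the loop grows
 * *size one page at a time until the target (or max_size) is reached.
 */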
ea3e6ca9 BB |
840 | /* |
841 | * Make a guess at reasonable per-cpu magazine size based on the size of | |
842 | * each object and the cost of caching N of them in each magazine. Long | |
843 | * term this should really adapt based on an observed usage heuristic. | |
844 | */ | |
4afaaefa | 845 | static int |
846 | spl_magazine_size(spl_kmem_cache_t *skc) | |
847 | { | |
48e0606a | 848 | int size, align = skc->skc_obj_align; |
4afaaefa | 849 | ENTRY; |
850 | ||
ea3e6ca9 | 851 | /* Per-magazine sizes below assume a 4Kib page size */ |
48e0606a | 852 | if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 256)) |
ea3e6ca9 | 853 | size = 4; /* Minimum 4Mib per-magazine */ |
48e0606a | 854 | else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 32)) |
ea3e6ca9 | 855 | size = 16; /* Minimum 2Mib per-magazine */ |
48e0606a | 856 | else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE)) |
ea3e6ca9 | 857 | size = 64; /* Minimum 256Kib per-magazine */ |
48e0606a | 858 | else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE / 4)) |
ea3e6ca9 | 859 | size = 128; /* Minimum 128Kib per-magazine */ |
4afaaefa | 860 | else |
ea3e6ca9 | 861 | size = 256; |
4afaaefa | 862 | |
863 | RETURN(size); | |
864 | } | |
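/*
 * Worked example (editor's addition): with 4 KiB pages, a 128 byte object
 * falls through every test above (128 <= PAGE_SIZE / 4) so size = 256. Each
 * per-cpu magazine can then hold up to 256 object pointers (about 2 KiB of
 * pointer storage) covering at most 256 * 128 bytes = 32 KiB of cached
 * objects per CPU, and spl_magazine_create() below sets the refill batch to
 * skm_refill = (256 + 1) / 2 = 128.
 */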
865 | ||
ea3e6ca9 BB |
866 | /* |
867 | * Allocate a per-cpu magazine to associate with a specific core. | |
868 | */ | |
4afaaefa | 869 | static spl_kmem_magazine_t * |
870 | spl_magazine_alloc(spl_kmem_cache_t *skc, int node) | |
871 | { | |
872 | spl_kmem_magazine_t *skm; | |
873 | int size = sizeof(spl_kmem_magazine_t) + | |
874 | sizeof(void *) * skc->skc_mag_size; | |
875 | ENTRY; | |
876 | ||
ea3e6ca9 | 877 | skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node); |
4afaaefa | 878 | if (skm) { |
879 | skm->skm_magic = SKM_MAGIC; | |
880 | skm->skm_avail = 0; | |
881 | skm->skm_size = skc->skc_mag_size; | |
882 | skm->skm_refill = skc->skc_mag_refill; | |
ea3e6ca9 | 883 | skm->skm_age = jiffies; |
4afaaefa | 884 | } |
885 | ||
886 | RETURN(skm); | |
887 | } | |
888 | ||
ea3e6ca9 BB |
889 | /* |
890 | * Free a per-cpu magazine associated with a specific core. | |
891 | */ | |
4afaaefa | 892 | static void |
893 | spl_magazine_free(spl_kmem_magazine_t *skm) | |
894 | { | |
a0f6da3d | 895 | int size = sizeof(spl_kmem_magazine_t) + |
896 | sizeof(void *) * skm->skm_size; | |
897 | ||
4afaaefa | 898 | ENTRY; |
899 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
900 | ASSERT(skm->skm_avail == 0); | |
a0f6da3d | 901 | |
902 | kmem_free(skm, size); | |
4afaaefa | 903 | EXIT; |
904 | } | |
905 | ||
ea3e6ca9 BB |
906 | static void |
907 | __spl_magazine_create(void *data) | |
908 | { | |
909 | spl_kmem_cache_t *skc = data; | |
910 | int id = smp_processor_id(); | |
911 | ||
912 | skc->skc_mag[id] = spl_magazine_alloc(skc, cpu_to_node(id)); | |
913 | ASSERT(skc->skc_mag[id]); | |
914 | } | |
915 | ||
916 | /* | |
917 | * Create all per-cpu magazines of reasonable sizes. | |
918 | */ | |
4afaaefa | 919 | static int |
920 | spl_magazine_create(spl_kmem_cache_t *skc) | |
921 | { | |
4afaaefa | 922 | ENTRY; |
923 | ||
924 | skc->skc_mag_size = spl_magazine_size(skc); | |
ea3e6ca9 | 925 | skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2; |
31a033ec | 926 | spl_on_each_cpu(__spl_magazine_create, skc, 1); |
4afaaefa | 927 | |
ea3e6ca9 BB |
928 | RETURN(0); |
929 | } | |
4afaaefa | 930 | |
ea3e6ca9 BB |
931 | static void |
932 | __spl_magazine_destroy(void *data) | |
933 | { | |
934 | spl_kmem_cache_t *skc = data; | |
935 | spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()]; | |
4afaaefa | 936 | |
ea3e6ca9 BB |
937 | (void)spl_cache_flush(skc, skm, skm->skm_avail); |
938 | spl_magazine_free(skm); | |
4afaaefa | 939 | } |
940 | ||
ea3e6ca9 BB |
941 | /* |
942 | * Destroy all per-cpu magazines. | |
943 | */ | |
4afaaefa | 944 | static void |
945 | spl_magazine_destroy(spl_kmem_cache_t *skc) | |
946 | { | |
4afaaefa | 947 | ENTRY; |
31a033ec | 948 | spl_on_each_cpu(__spl_magazine_destroy, skc, 1); |
4afaaefa | 949 | EXIT; |
950 | } | |
951 | ||
ea3e6ca9 BB |
952 | /* |
953 | * Create an object cache based on the following arguments: | |
954 | * name cache name | |
955 | * size cache object size | |
956 | * align cache object alignment | |
957 | * ctor cache object constructor | |
958 | * dtor cache object destructor | |
959 | * reclaim cache object reclaim | |
960 | * priv cache private data for ctor/dtor/reclaim | |
961 | * vmp unused must be NULL | |
962 | * flags | |
963 | * KMC_NOTOUCH Disable cache object aging (unsupported) | |
964 | * KMC_NODEBUG Disable debugging (unsupported) | |
965 | * KMC_NOMAGAZINE Disable magazine (unsupported) | |
966 | * KMC_NOHASH Disable hashing (unsupported) | |
967 | * KMC_QCACHE Disable qcache (unsupported) | |
968 | * KMC_KMEM Force kmem backed cache | |
969 | * KMC_VMEM Force vmem backed cache | |
970 | * KMC_OFFSLAB Locate objects off the slab | |
971 | */ | |
2fb9b26a | 972 | spl_kmem_cache_t * |
973 | spl_kmem_cache_create(char *name, size_t size, size_t align, | |
974 | spl_kmem_ctor_t ctor, | |
975 | spl_kmem_dtor_t dtor, | |
976 | spl_kmem_reclaim_t reclaim, | |
977 | void *priv, void *vmp, int flags) | |
978 | { | |
979 | spl_kmem_cache_t *skc; | |
a1502d76 | 980 | int rc, kmem_flags = KM_SLEEP; |
2fb9b26a | 981 | ENTRY; |
937879f1 | 982 | |
a1502d76 | 983 | ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags); |
984 | ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags); | |
985 | ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags); | |
48e0606a | 986 | ASSERT(vmp == NULL); |
a1502d76 | 987 | |
2fb9b26a | 988 | /* We may be called when there is a non-zero preempt_count or |
989 | * interrupts are disabled, in which case we must not sleep. | |
990 | */ | |
e9d7a2be | 991 | if (current_thread_info()->preempt_count || irqs_disabled()) |
2fb9b26a | 992 | kmem_flags = KM_NOSLEEP; |
0a6fd143 | 993 | |
2fb9b26a | 994 | /* Allocate new cache memory and initialize. */ |
ff449ac4 | 995 | skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags); |
e9d7a2be | 996 | if (skc == NULL) |
2fb9b26a | 997 | RETURN(NULL); |
d61e12af | 998 | |
2fb9b26a | 999 | skc->skc_magic = SKC_MAGIC; |
2fb9b26a | 1000 | skc->skc_name_size = strlen(name) + 1; |
1001 | skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags); | |
1002 | if (skc->skc_name == NULL) { | |
1003 | kmem_free(skc, sizeof(*skc)); | |
1004 | RETURN(NULL); | |
1005 | } | |
1006 | strncpy(skc->skc_name, name, skc->skc_name_size); | |
1007 | ||
e9d7a2be | 1008 | skc->skc_ctor = ctor; |
1009 | skc->skc_dtor = dtor; | |
1010 | skc->skc_reclaim = reclaim; | |
2fb9b26a | 1011 | skc->skc_private = priv; |
1012 | skc->skc_vmp = vmp; | |
1013 | skc->skc_flags = flags; | |
1014 | skc->skc_obj_size = size; | |
48e0606a | 1015 | skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN; |
2fb9b26a | 1016 | skc->skc_delay = SPL_KMEM_CACHE_DELAY; |
ea3e6ca9 | 1017 | atomic_set(&skc->skc_ref, 0); |
2fb9b26a | 1018 | |
2fb9b26a | 1019 | INIT_LIST_HEAD(&skc->skc_list); |
1020 | INIT_LIST_HEAD(&skc->skc_complete_list); | |
1021 | INIT_LIST_HEAD(&skc->skc_partial_list); | |
d46630e0 | 1022 | spin_lock_init(&skc->skc_lock); |
e9d7a2be | 1023 | skc->skc_slab_fail = 0; |
1024 | skc->skc_slab_create = 0; | |
1025 | skc->skc_slab_destroy = 0; | |
2fb9b26a | 1026 | skc->skc_slab_total = 0; |
1027 | skc->skc_slab_alloc = 0; | |
1028 | skc->skc_slab_max = 0; | |
1029 | skc->skc_obj_total = 0; | |
1030 | skc->skc_obj_alloc = 0; | |
1031 | skc->skc_obj_max = 0; | |
a1502d76 | 1032 | |
48e0606a BB |
1033 | if (align) { |
1034 | ASSERT((align & (align - 1)) == 0); /* Power of two */ | |
1035 | ASSERT(align >= SPL_KMEM_CACHE_ALIGN); /* Minimum size */ | |
1036 | skc->skc_obj_align = align; | |
1037 | } | |
1038 | ||
a1502d76 | 1039 | /* If none passed select a cache type based on object size */ |
1040 | if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) { | |
48e0606a BB |
1041 | if (P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) < |
1042 | (PAGE_SIZE / 8)) { | |
a1502d76 | 1043 | skc->skc_flags |= KMC_KMEM; |
1044 | } else { | |
1045 | skc->skc_flags |= KMC_VMEM; | |
1046 | } | |
1047 | } | |
1048 | ||
48e0606a BB |
1049 | rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size); |
1050 | if (rc) | |
1051 | GOTO(out, rc); | |
4afaaefa | 1052 | |
1053 | rc = spl_magazine_create(skc); | |
48e0606a BB |
1054 | if (rc) |
1055 | GOTO(out, rc); | |
2fb9b26a | 1056 | |
ea3e6ca9 BB |
1057 | spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc); |
1058 | schedule_delayed_work(&skc->skc_work, 2 * skc->skc_delay * HZ); | |
1059 | ||
2fb9b26a | 1060 | down_write(&spl_kmem_cache_sem); |
e9d7a2be | 1061 | list_add_tail(&skc->skc_list, &spl_kmem_cache_list); |
2fb9b26a | 1062 | up_write(&spl_kmem_cache_sem); |
1063 | ||
e9d7a2be | 1064 | RETURN(skc); |
48e0606a BB |
1065 | out: |
1066 | kmem_free(skc->skc_name, skc->skc_name_size); | |
1067 | kmem_free(skc, sizeof(*skc)); | |
1068 | RETURN(NULL); | |
f1ca4da6 | 1069 | } |
2fb9b26a | 1070 | EXPORT_SYMBOL(spl_kmem_cache_create); |
f1ca4da6 | 1071 | |
ea3e6ca9 BB |
1072 | /* |
1073 | * Destroy a cache and all objects associated with the cache. | |
1074 | */ | |
2fb9b26a | 1075 | void |
1076 | spl_kmem_cache_destroy(spl_kmem_cache_t *skc) | |
f1ca4da6 | 1077 | { |
ea3e6ca9 | 1078 | DECLARE_WAIT_QUEUE_HEAD(wq); |
2fb9b26a | 1079 | ENTRY; |
f1ca4da6 | 1080 | |
e9d7a2be | 1081 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1082 | ||
1083 | down_write(&spl_kmem_cache_sem); | |
1084 | list_del_init(&skc->skc_list); | |
1085 | up_write(&spl_kmem_cache_sem); | |
2fb9b26a | 1086 | |
ea3e6ca9 BB |
1087 | /* Cancel and wait for any pending delayed work */ | |
1088 | ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags)); | |
1089 | cancel_delayed_work(&skc->skc_work); | |
1090 | flush_scheduled_work(); | |
1091 | ||
1092 | /* Wait until all current callers complete, this is mainly | |
1093 | * to catch the case where a low memory situation triggers a | |
1094 | * cache reaping action which races with this destroy. */ | |
1095 | wait_event(wq, atomic_read(&skc->skc_ref) == 0); | |
1096 | ||
4afaaefa | 1097 | spl_magazine_destroy(skc); |
ea3e6ca9 | 1098 | spl_slab_reclaim(skc, 1); |
d46630e0 | 1099 | spin_lock(&skc->skc_lock); |
d6a26c6a | 1100 | |
2fb9b26a | 1101 | /* Validate there are no objects in use and free all the |
4afaaefa | 1102 | * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */ |
ea3e6ca9 BB |
1103 | ASSERT3U(skc->skc_slab_alloc, ==, 0); |
1104 | ASSERT3U(skc->skc_obj_alloc, ==, 0); | |
1105 | ASSERT3U(skc->skc_slab_total, ==, 0); | |
1106 | ASSERT3U(skc->skc_obj_total, ==, 0); | |
2fb9b26a | 1107 | ASSERT(list_empty(&skc->skc_complete_list)); |
a1502d76 | 1108 | |
2fb9b26a | 1109 | kmem_free(skc->skc_name, skc->skc_name_size); |
d46630e0 | 1110 | spin_unlock(&skc->skc_lock); |
ff449ac4 | 1111 | |
4afaaefa | 1112 | kmem_free(skc, sizeof(*skc)); |
2fb9b26a | 1113 | |
1114 | EXIT; | |
f1ca4da6 | 1115 | } |
2fb9b26a | 1116 | EXPORT_SYMBOL(spl_kmem_cache_destroy); |
f1ca4da6 | 1117 | |
ea3e6ca9 BB |
1118 | /* |
1119 | * Allocate an object from a slab attached to the cache. This is used to | |
1120 | * repopulate the per-cpu magazine caches in batches when they run low. | |
1121 | */ | |
4afaaefa | 1122 | static void * |
1123 | spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) | |
f1ca4da6 | 1124 | { |
2fb9b26a | 1125 | spl_kmem_obj_t *sko; |
f1ca4da6 | 1126 | |
e9d7a2be | 1127 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1128 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
4afaaefa | 1129 | ASSERT(spin_is_locked(&skc->skc_lock)); |
2fb9b26a | 1130 | |
a1502d76 | 1131 | sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list); |
4afaaefa | 1132 | ASSERT(sko->sko_magic == SKO_MAGIC); |
1133 | ASSERT(sko->sko_addr != NULL); | |
2fb9b26a | 1134 | |
a1502d76 | 1135 | /* Remove from sks_free_list */ |
4afaaefa | 1136 | list_del_init(&sko->sko_list); |
2fb9b26a | 1137 | |
4afaaefa | 1138 | sks->sks_age = jiffies; |
1139 | sks->sks_ref++; | |
1140 | skc->skc_obj_alloc++; | |
2fb9b26a | 1141 | |
4afaaefa | 1142 | /* Track max obj usage statistics */ |
1143 | if (skc->skc_obj_alloc > skc->skc_obj_max) | |
1144 | skc->skc_obj_max = skc->skc_obj_alloc; | |
2fb9b26a | 1145 | |
4afaaefa | 1146 | /* Track max slab usage statistics */ |
1147 | if (sks->sks_ref == 1) { | |
1148 | skc->skc_slab_alloc++; | |
f1ca4da6 | 1149 | |
4afaaefa | 1150 | if (skc->skc_slab_alloc > skc->skc_slab_max) |
1151 | skc->skc_slab_max = skc->skc_slab_alloc; | |
2fb9b26a | 1152 | } |
1153 | ||
4afaaefa | 1154 | return sko->sko_addr; |
1155 | } | |
c30df9c8 | 1156 | |
ea3e6ca9 BB |
1157 | /* |
1158 | * No available objects on any slabs, create a new slab. Since this | |
1159 | * is an expensive operation we do it without holding the spinlock and | |
1160 | * only briefly acquire it when we link in the fully allocated and | |
1161 | * constructed slab. | |
4afaaefa | 1162 | */ |
1163 | static spl_kmem_slab_t * | |
1164 | spl_cache_grow(spl_kmem_cache_t *skc, int flags) | |
1165 | { | |
e9d7a2be | 1166 | spl_kmem_slab_t *sks; |
4afaaefa | 1167 | ENTRY; |
f1ca4da6 | 1168 | |
e9d7a2be | 1169 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1170 | local_irq_enable(); |
1171 | might_sleep(); | |
e9d7a2be | 1172 | |
ea3e6ca9 BB |
1173 | /* |
1174 | * Before allocating a new slab check if the slab is being reaped. | |
1175 | * If it is there is a good chance we can wait until it finishes | |
1176 | * and then use one of the newly freed but not aged-out slabs. | |
1177 | */ | |
1178 | if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) { | |
1179 | schedule(); | |
1180 | GOTO(out, sks= NULL); | |
4afaaefa | 1181 | } |
2fb9b26a | 1182 | |
ea3e6ca9 BB |
1183 | /* Allocate a new slab for the cache */ |
1184 | sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN); | |
1185 | if (sks == NULL) | |
1186 | GOTO(out, sks = NULL); | |
4afaaefa | 1187 | |
ea3e6ca9 | 1188 | /* Link the new empty slab in to the end of skc_partial_list. */ |
d46630e0 | 1189 | spin_lock(&skc->skc_lock); |
2fb9b26a | 1190 | skc->skc_slab_total++; |
1191 | skc->skc_obj_total += sks->sks_objs; | |
1192 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
d46630e0 | 1193 | spin_unlock(&skc->skc_lock); |
ea3e6ca9 BB |
1194 | out: |
1195 | local_irq_disable(); | |
4afaaefa | 1196 | |
1197 | RETURN(sks); | |
f1ca4da6 | 1198 | } |
1199 | ||
ea3e6ca9 BB |
1200 | /* |
1201 | * Refill a per-cpu magazine with objects from the slabs for this | |
1202 | * cache. Ideally the magazine can be repopulated using existing | |
1203 | * objects which have been released, however if we are unable to | |
1204 | * locate enough free objects new slabs of objects will be created. | |
1205 | */ | |
4afaaefa | 1206 | static int |
1207 | spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) | |
f1ca4da6 | 1208 | { |
e9d7a2be | 1209 | spl_kmem_slab_t *sks; |
1210 | int rc = 0, refill; | |
937879f1 | 1211 | ENTRY; |
f1ca4da6 | 1212 | |
e9d7a2be | 1213 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1214 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
1215 | ||
e9d7a2be | 1216 | refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail); |
d46630e0 | 1217 | spin_lock(&skc->skc_lock); |
ff449ac4 | 1218 | |
4afaaefa | 1219 | while (refill > 0) { |
ea3e6ca9 | 1220 | /* No slabs available we may need to grow the cache */ |
4afaaefa | 1221 | if (list_empty(&skc->skc_partial_list)) { |
1222 | spin_unlock(&skc->skc_lock); | |
ff449ac4 | 1223 | |
4afaaefa | 1224 | sks = spl_cache_grow(skc, flags); |
1225 | if (!sks) | |
e9d7a2be | 1226 | GOTO(out, rc); |
4afaaefa | 1227 | |
1228 | /* Rescheduled to different CPU skm is not local */ | |
1229 | if (skm != skc->skc_mag[smp_processor_id()]) | |
e9d7a2be | 1230 | GOTO(out, rc); |
1231 | ||
1232 | /* Potentially rescheduled to the same CPU but | |
1233 | * allocations may have occurred from this CPU while | |
1234 | * we were sleeping so recalculate max refill. */ | |
1235 | refill = MIN(refill, skm->skm_size - skm->skm_avail); | |
4afaaefa | 1236 | |
1237 | spin_lock(&skc->skc_lock); | |
1238 | continue; | |
1239 | } | |
d46630e0 | 1240 | |
4afaaefa | 1241 | /* Grab the next available slab */ |
1242 | sks = list_entry((&skc->skc_partial_list)->next, | |
1243 | spl_kmem_slab_t, sks_list); | |
1244 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
1245 | ASSERT(sks->sks_ref < sks->sks_objs); | |
1246 | ASSERT(!list_empty(&sks->sks_free_list)); | |
d46630e0 | 1247 | |
4afaaefa | 1248 | /* Consume as many objects as needed to refill the requested |
e9d7a2be | 1249 | * cache. We must also be careful not to overfill it. */ |
1250 | while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) { | |
1251 | ASSERT(skm->skm_avail < skm->skm_size); | |
1252 | ASSERT(rc < skm->skm_size); | |
4afaaefa | 1253 | skm->skm_objs[skm->skm_avail++]=spl_cache_obj(skc,sks); |
e9d7a2be | 1254 | } |
f1ca4da6 | 1255 | |
4afaaefa | 1256 | /* Move slab to skc_complete_list when full */ |
1257 | if (sks->sks_ref == sks->sks_objs) { | |
1258 | list_del(&sks->sks_list); | |
1259 | list_add(&sks->sks_list, &skc->skc_complete_list); | |
2fb9b26a | 1260 | } |
1261 | } | |
57d86234 | 1262 | |
4afaaefa | 1263 | spin_unlock(&skc->skc_lock); |
1264 | out: | |
1265 | /* Returns the number of entries added to cache */ | |
e9d7a2be | 1266 | RETURN(rc); |
4afaaefa | 1267 | } |
1268 | ||
ea3e6ca9 BB |
1269 | /* |
1270 | * Release an object back to the slab from which it came. | |
1271 | */ | |
4afaaefa | 1272 | static void |
1273 | spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) | |
1274 | { | |
e9d7a2be | 1275 | spl_kmem_slab_t *sks = NULL; |
4afaaefa | 1276 | spl_kmem_obj_t *sko = NULL; |
1277 | ENTRY; | |
1278 | ||
e9d7a2be | 1279 | ASSERT(skc->skc_magic == SKC_MAGIC); |
4afaaefa | 1280 | ASSERT(spin_is_locked(&skc->skc_lock)); |
1281 | ||
48e0606a | 1282 | sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align); |
a1502d76 | 1283 | ASSERT(sko->sko_magic == SKO_MAGIC); |
4afaaefa | 1284 | |
1285 | sks = sko->sko_slab; | |
a1502d76 | 1286 | ASSERT(sks->sks_magic == SKS_MAGIC); |
2fb9b26a | 1287 | ASSERT(sks->sks_cache == skc); |
2fb9b26a | 1288 | list_add(&sko->sko_list, &sks->sks_free_list); |
d6a26c6a | 1289 | |
2fb9b26a | 1290 | sks->sks_age = jiffies; |
4afaaefa | 1291 | sks->sks_ref--; |
2fb9b26a | 1292 | skc->skc_obj_alloc--; |
f1ca4da6 | 1293 | |
2fb9b26a | 1294 | /* Move slab to skc_partial_list when no longer full. Slabs |
4afaaefa | 1295 | * are added to the head to keep the partial list in quasi-full |
1296 | * sorted order. Fuller at the head, emptier at the tail. */ | |
1297 | if (sks->sks_ref == (sks->sks_objs - 1)) { | |
2fb9b26a | 1298 | list_del(&sks->sks_list); |
1299 | list_add(&sks->sks_list, &skc->skc_partial_list); | |
1300 | } | |
f1ca4da6 | 1301 | |
2fb9b26a | 1302 | /* Move empty slabs to the end of the partial list so |
4afaaefa | 1303 | * they can be easily found and freed during reclamation. */ |
1304 | if (sks->sks_ref == 0) { | |
2fb9b26a | 1305 | list_del(&sks->sks_list); |
1306 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
1307 | skc->skc_slab_alloc--; | |
1308 | } | |
1309 | ||
4afaaefa | 1310 | EXIT; |
1311 | } | |
1312 | ||
ea3e6ca9 BB |
1313 | /* |
1314 | * Release a batch of objects from a per-cpu magazine back to their | |
1315 | * respective slabs. This occurs when we exceed the magazine size, | |
1316 | * are under memory pressure, when the cache is idle, or during | |
1317 | * cache cleanup. The flush argument contains the number of entries | |
1318 | * to remove from the magazine. | |
1319 | */ | |
4afaaefa | 1320 | static int |
1321 | spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) | |
1322 | { | |
1323 | int i, count = MIN(flush, skm->skm_avail); | |
1324 | ENTRY; | |
1325 | ||
e9d7a2be | 1326 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1327 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
4afaaefa | 1328 | |
ea3e6ca9 BB |
1329 | /* |
1330 | * XXX: Currently we simply return objects from the magazine to | |
1331 | * the slabs in fifo order. The ideal thing to do from a memory | |
1332 | * fragmentation standpoint is to cheaply determine the set of | |
1333 | * objects in the magazine which will result in the largest | |
1334 | * number of free slabs if released from the magazine. | |
1335 | */ | |
4afaaefa | 1336 | spin_lock(&skc->skc_lock); |
1337 | for (i = 0; i < count; i++) | |
1338 | spl_cache_shrink(skc, skm->skm_objs[i]); | |
1339 | ||
e9d7a2be | 1340 | skm->skm_avail -= count; |
1341 | memmove(skm->skm_objs, &(skm->skm_objs[count]), | |
4afaaefa | 1342 | sizeof(void *) * skm->skm_avail); |
1343 | ||
d46630e0 | 1344 | spin_unlock(&skc->skc_lock); |
4afaaefa | 1345 | |
1346 | RETURN(count); | |
1347 | } | |
1348 | ||
ea3e6ca9 BB |
1349 | /* |
1350 | * Allocate an object from the per-cpu magazine, or if the magazine | |
1351 | * is empty directly allocate from a slab and repopulate the magazine. | |
1352 | */ | |
4afaaefa | 1353 | void * |
1354 | spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) | |
1355 | { | |
1356 | spl_kmem_magazine_t *skm; | |
1357 | unsigned long irq_flags; | |
1358 | void *obj = NULL; | |
1359 | ENTRY; | |
1360 | ||
e9d7a2be | 1361 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1362 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
1363 | ASSERT(flags & KM_SLEEP); | |
1364 | atomic_inc(&skc->skc_ref); | |
4afaaefa | 1365 | local_irq_save(irq_flags); |
1366 | ||
1367 | restart: | |
1368 | /* Safe to update per-cpu structure without lock, but | |
1369 | * in the restart case we must be careful to reacquire | |
1370 | * the local magazine since this may have changed | |
1371 | * when we need to grow the cache. */ | |
1372 | skm = skc->skc_mag[smp_processor_id()]; | |
e9d7a2be | 1373 | ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n", |
1374 | skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm, | |
1375 | skm->skm_size, skm->skm_refill, skm->skm_avail); | |
4afaaefa | 1376 | |
1377 | if (likely(skm->skm_avail)) { | |
1378 | /* Object available in CPU cache, use it */ | |
1379 | obj = skm->skm_objs[--skm->skm_avail]; | |
ea3e6ca9 | 1380 | skm->skm_age = jiffies; |
4afaaefa | 1381 | } else { |
1382 | /* Per-CPU cache empty, directly allocate from | |
1383 | * the slab and refill the per-CPU cache. */ | |
1384 | (void)spl_cache_refill(skc, skm, flags); | |
1385 | GOTO(restart, obj = NULL); | |
1386 | } | |
1387 | ||
1388 | local_irq_restore(irq_flags); | |
fece7c99 | 1389 | ASSERT(obj); |
48e0606a | 1390 | ASSERT(((unsigned long)(obj) % skc->skc_obj_align) == 0); |
4afaaefa | 1391 | |
1392 | /* Pre-emptively migrate object to CPU L1 cache */ | |
1393 | prefetchw(obj); | |
ea3e6ca9 | 1394 | atomic_dec(&skc->skc_ref); |
4afaaefa | 1395 | |
1396 | RETURN(obj); | |
1397 | } | |
1398 | EXPORT_SYMBOL(spl_kmem_cache_alloc); | |
1399 | ||
ea3e6ca9 BB |
1400 | /* |
1401 | * Free an object back to the local per-cpu magazine; there is no | |
1402 | * guarantee that this is the same magazine the object was originally | |
1403 | * allocated from. We may need to flush entries from the magazine | |
1404 | * back to the slabs to make space. | |
1405 | */ | |
4afaaefa | 1406 | void |
1407 | spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) | |
1408 | { | |
1409 | spl_kmem_magazine_t *skm; | |
1410 | unsigned long flags; | |
1411 | ENTRY; | |
1412 | ||
e9d7a2be | 1413 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1414 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
1415 | atomic_inc(&skc->skc_ref); | |
4afaaefa | 1416 | local_irq_save(flags); |
1417 | ||
1418 | /* Safe to update the per-cpu structure without a lock, but
1419 | * since no remote memory allocation tracking is performed
1420 | * it is entirely possible to allocate an object from one
1421 | * CPU cache and return it to another. */
1422 | skm = skc->skc_mag[smp_processor_id()]; | |
e9d7a2be | 1423 | ASSERT(skm->skm_magic == SKM_MAGIC); |
4afaaefa | 1424 | |
1425 | /* Per-CPU cache full, flush it to make space */ | |
1426 | if (unlikely(skm->skm_avail >= skm->skm_size)) | |
1427 | (void)spl_cache_flush(skc, skm, skm->skm_refill); | |
1428 | ||
1429 | /* Available space in cache, use it */ | |
1430 | skm->skm_objs[skm->skm_avail++] = obj; | |
1431 | ||
1432 | local_irq_restore(flags); | |
ea3e6ca9 | 1433 | atomic_dec(&skc->skc_ref); |
4afaaefa | 1434 | |
1435 | EXIT; | |
f1ca4da6 | 1436 | } |
2fb9b26a | 1437 | EXPORT_SYMBOL(spl_kmem_cache_free); |
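/*
 * Worked example of the flush-on-full path above (illustrative numbers;
 * the real skm_size and skm_refill are computed elsewhere): with
 * skm_size = 32 and skm_refill = 16, a free into a full magazine
 * (skm_avail == 32) first releases 16 objects back to their slabs via
 * spl_cache_flush(), then stores the freed object, leaving
 * skm_avail == 17.
 */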
5c2bb9b2 | 1438 | |
ea3e6ca9 BB |
1439 | /* |
1440 | * The generic shrinker function for all caches. Under Linux a shrinker
1441 | * may not be tightly coupled with a slab cache. In fact Linux always
1442 | * systematically tries calling all registered shrinker callbacks which
1443 | * report that they contain unused objects. Because of this we only
1444 | * register one shrinker function in the shim layer for all slab caches.
1445 | * We always attempt to shrink all caches when this generic shrinker
1446 | * is called. The shrinker should return the number of free objects
1447 | * in the cache when called with nr_to_scan == 0, but not attempt to
1448 | * free any objects. When nr_to_scan > 0 it is a request that nr_to_scan
1449 | * objects be freed; because Solaris semantics are to free all
1450 | * available objects, we may free more objects than requested.
1451 | */ | |
2fb9b26a | 1452 | static int |
4afaaefa | 1453 | spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask) |
2fb9b26a | 1454 | { |
e9d7a2be | 1455 | spl_kmem_cache_t *skc; |
ea3e6ca9 | 1456 | int unused = 0; |
5c2bb9b2 | 1457 | |
e9d7a2be | 1458 | down_read(&spl_kmem_cache_sem); |
ea3e6ca9 BB |
1459 | list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) { |
1460 | if (nr_to_scan) | |
1461 | spl_kmem_cache_reap_now(skc); | |
1462 | ||
1463 | /* | |
1464 | * Presume everything alloc'ed is reclaimable; this ensures
1465 | * we are called again with nr_to_scan > 0 so we can try to
1466 | * reclaim. The exact number is not important either, so
1467 | * we forgo taking this already highly contended lock.
1468 | */ | |
1469 | unused += skc->skc_obj_alloc; | |
1470 | } | |
e9d7a2be | 1471 | up_read(&spl_kmem_cache_sem); |
2fb9b26a | 1472 | |
ea3e6ca9 | 1473 | return (unused * sysctl_vfs_cache_pressure) / 100; |
5c2bb9b2 | 1474 | } |
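/*
 * Worked example of the return value above (illustrative numbers): if
 * the registered caches report a combined skc_obj_alloc of 1000 and
 * sysctl_vfs_cache_pressure is at its default of 100, the shrinker
 * reports 1000 freeable objects; lowering the pressure to 50 would
 * report only 500, making these caches look half as attractive to
 * the VM.
 */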
5c2bb9b2 | 1475 | |
ea3e6ca9 BB |
1476 | /* |
1477 | * Call the registered reclaim function for a cache. Depending on how | |
1478 | * many and which objects are released it may simply repopulate the | |
1479 | * local magazine which will then need to age-out. Objects which cannot | |
1480 | * fit in the magazine will be released back to their slabs, which will
1481 | * also need to age out before being released. This is all just best
1482 | * effort and we do not want to thrash creating and destroying slabs. | |
1483 | */ | |
57d86234 | 1484 | void |
2fb9b26a | 1485 | spl_kmem_cache_reap_now(spl_kmem_cache_t *skc) |
57d86234 | 1486 | { |
2fb9b26a | 1487 | ENTRY; |
e9d7a2be | 1488 | |
1489 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
ea3e6ca9 | 1490 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
2fb9b26a | 1491 | |
ea3e6ca9 BB |
1492 | /* Prevent concurrent cache reaping when contended */ |
1493 | if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) { | |
1494 | EXIT; | |
1495 | return; | |
1496 | } | |
2fb9b26a | 1497 | |
ea3e6ca9 | 1498 | atomic_inc(&skc->skc_ref); |
4afaaefa | 1499 | |
ea3e6ca9 BB |
1500 | if (skc->skc_reclaim) |
1501 | skc->skc_reclaim(skc->skc_private); | |
4afaaefa | 1502 | |
ea3e6ca9 BB |
1503 | spl_slab_reclaim(skc, 0); |
1504 | clear_bit(KMC_BIT_REAPING, &skc->skc_flags); | |
1505 | atomic_dec(&skc->skc_ref); | |
4afaaefa | 1506 | |
2fb9b26a | 1507 | EXIT; |
57d86234 | 1508 | } |
2fb9b26a | 1509 | EXPORT_SYMBOL(spl_kmem_cache_reap_now); |
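/*
 * Usage sketch (hypothetical, not part of this file): a cache owner may
 * reap just its own cache, e.g. from a private low-memory callback,
 * instead of reaping every registered cache via spl_kmem_reap().
 *
 *	spl_kmem_cache_reap_now(my_cache);
 */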
57d86234 | 1510 | |
ea3e6ca9 BB |
1511 | /* |
1512 | * Reap all free slabs from all registered caches. | |
1513 | */ | |
f1b59d26 | 1514 | void |
2fb9b26a | 1515 | spl_kmem_reap(void) |
937879f1 | 1516 | { |
4afaaefa | 1517 | spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL); |
f1ca4da6 | 1518 | } |
2fb9b26a | 1519 | EXPORT_SYMBOL(spl_kmem_reap); |
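/*
 * Usage sketch (hypothetical, not part of this file): a consumer that
 * detects memory pressure can force an immediate reap of all registered
 * caches rather than waiting for the generic shrinker to be invoked.
 *
 *	if (low_on_memory())		(hypothetical predicate)
 *		spl_kmem_reap();
 */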
5d86345d | 1520 | |
ff449ac4 | 1521 | #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING) |
c6dc93d6 | 1522 | static char * |
4afaaefa | 1523 | spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) |
d6a26c6a | 1524 | { |
e9d7a2be | 1525 | int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size; |
d6a26c6a | 1526 | int i, flag = 1; |
1527 | ||
1528 | ASSERT(str != NULL && len >= 17); | |
e9d7a2be | 1529 | memset(str, 0, len); |
d6a26c6a | 1530 | |
1531 | /* Check for a fully printable string, and while we are at | |
1532 | * it place the printable characters in the passed buffer. */ | |
1533 | for (i = 0; i < size; i++) { | |
e9d7a2be | 1534 | str[i] = ((char *)(kd->kd_addr))[i]; |
1535 | if (isprint(str[i])) { | |
1536 | continue; | |
1537 | } else { | |
1538 | /* Minimum number of printable characters found | |
1539 | * to make it worthwhile to print this as ASCII. */
1540 | if (i > min) | |
1541 | break; | |
1542 | ||
1543 | flag = 0; | |
1544 | break; | |
1545 | } | |
d6a26c6a | 1546 | } |
1547 | ||
1548 | if (!flag) { | |
1549 | sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x", | |
1550 | *((uint8_t *)kd->kd_addr), | |
1551 | *((uint8_t *)kd->kd_addr + 2), | |
1552 | *((uint8_t *)kd->kd_addr + 4), | |
1553 | *((uint8_t *)kd->kd_addr + 6), | |
1554 | *((uint8_t *)kd->kd_addr + 8), | |
1555 | *((uint8_t *)kd->kd_addr + 10), | |
1556 | *((uint8_t *)kd->kd_addr + 12), | |
1557 | *((uint8_t *)kd->kd_addr + 14)); | |
1558 | } | |
1559 | ||
1560 | return str; | |
1561 | } | |
1562 | ||
a1502d76 | 1563 | static int |
1564 | spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size) | |
1565 | { | |
1566 | int i; | |
1567 | ENTRY; | |
1568 | ||
1569 | spin_lock_init(lock); | |
1570 | INIT_LIST_HEAD(list); | |
1571 | ||
1572 | for (i = 0; i < size; i++) | |
1573 | INIT_HLIST_HEAD(&kmem_table[i]); | |
1574 | ||
1575 | RETURN(0); | |
1576 | } | |
1577 | ||
ff449ac4 | 1578 | static void |
1579 | spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock) | |
5d86345d | 1580 | { |
2fb9b26a | 1581 | unsigned long flags; |
1582 | kmem_debug_t *kd; | |
1583 | char str[17]; | |
a1502d76 | 1584 | ENTRY; |
2fb9b26a | 1585 | |
ff449ac4 | 1586 | spin_lock_irqsave(lock, flags); |
1587 | if (!list_empty(list)) | |
a0f6da3d | 1588 | printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address", |
1589 | "size", "data", "func", "line"); | |
2fb9b26a | 1590 | |
ff449ac4 | 1591 | list_for_each_entry(kd, list, kd_list) |
a0f6da3d | 1592 | printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr, |
b6b2acc6 | 1593 | (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), |
2fb9b26a | 1594 | kd->kd_func, kd->kd_line); |
1595 | ||
ff449ac4 | 1596 | spin_unlock_irqrestore(lock, flags); |
a1502d76 | 1597 | EXIT; |
ff449ac4 | 1598 | } |
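/*
 * Example of the resulting leak report (illustrative values only; the
 * address, size, data, and caller shown are hypothetical):
 *
 *	address          size  data             func:line
 *	ffff810012345678 128   0a1b2c3d4e5f6071 my_alloc_func:321
 */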
1599 | #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ | |
a1502d76 | 1600 | #define spl_kmem_init_tracking(list, lock, size) |
ff449ac4 | 1601 | #define spl_kmem_fini_tracking(list, lock) |
1602 | #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ | |
1603 | ||
a1502d76 | 1604 | int |
1605 | spl_kmem_init(void) | |
1606 | { | |
1607 | int rc = 0; | |
1608 | ENTRY; | |
1609 | ||
1610 | init_rwsem(&spl_kmem_cache_sem); | |
1611 | INIT_LIST_HEAD(&spl_kmem_cache_list); | |
1612 | ||
1613 | #ifdef HAVE_SET_SHRINKER | |
1614 | spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS, | |
1615 | spl_kmem_cache_generic_shrinker); | |
1616 | if (spl_kmem_cache_shrinker == NULL) | |
f78a933f | 1617 | RETURN(rc = -ENOMEM); |
a1502d76 | 1618 | #else |
1619 | register_shrinker(&spl_kmem_cache_shrinker); | |
1620 | #endif | |
1621 | ||
1622 | #ifdef DEBUG_KMEM | |
1623 | atomic64_set(&kmem_alloc_used, 0); | |
1624 | atomic64_set(&vmem_alloc_used, 0); | |
1625 | ||
1626 | spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE); | |
1627 | spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE); | |
1628 | #endif | |
a1502d76 | 1629 | RETURN(rc); |
1630 | } | |
1631 | ||
ff449ac4 | 1632 | void |
1633 | spl_kmem_fini(void) | |
1634 | { | |
1635 | #ifdef DEBUG_KMEM | |
1636 | /* Display all unreclaimed memory addresses, including the | |
1637 | * allocation size and the first few bytes of what's located | |
1638 | * at that address to aid in debugging. Performance is not | |
1639 | * a serious concern here since it is module unload time. */ | |
1640 | if (atomic64_read(&kmem_alloc_used) != 0) | |
1641 | CWARN("kmem leaked %ld/%ld bytes\n", | |
550f1705 | 1642 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); |
ff449ac4 | 1643 | |
2fb9b26a | 1644 | |
1645 | if (atomic64_read(&vmem_alloc_used) != 0) | |
1646 | CWARN("vmem leaked %ld/%ld bytes\n", | |
550f1705 | 1647 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); |
2fb9b26a | 1648 | |
ff449ac4 | 1649 | spl_kmem_fini_tracking(&kmem_list, &kmem_lock); |
1650 | spl_kmem_fini_tracking(&vmem_list, &vmem_lock); | |
1651 | #endif /* DEBUG_KMEM */ | |
2fb9b26a | 1652 | ENTRY; |
1653 | ||
1654 | #ifdef HAVE_SET_SHRINKER | |
1655 | remove_shrinker(spl_kmem_cache_shrinker); | |
1656 | #else | |
1657 | unregister_shrinker(&spl_kmem_cache_shrinker); | |
5d86345d | 1658 | #endif |
2fb9b26a | 1659 | |
937879f1 | 1660 | EXIT; |
5d86345d | 1661 | } |