Commit | Line | Data |
---|---|---|
715f6251 | 1 | /* |
2 | * This file is part of the SPL: Solaris Porting Layer. | |
3 | * | |
4 | * Copyright (c) 2008 Lawrence Livermore National Security, LLC. | |
5 | * Produced at Lawrence Livermore National Laboratory | |
6 | * Written by: | |
7 | * Brian Behlendorf <behlendorf1@llnl.gov>, | |
8 | * Herb Wartens <wartens2@llnl.gov>, | |
9 | * Jim Garlick <garlick@llnl.gov> | |
10 | * UCRL-CODE-235197 | |
11 | * | |
12 | * This is free software; you can redistribute it and/or modify it | |
13 | * under the terms of the GNU General Public License as published by | |
14 | * the Free Software Foundation; either version 2 of the License, or | |
15 | * (at your option) any later version. | |
16 | * | |
17 | * This is distributed in the hope that it will be useful, but WITHOUT | |
18 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
19 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
20 | * for more details. | |
21 | * | |
22 | * You should have received a copy of the GNU General Public License along | |
23 | * with this program; if not, write to the Free Software Foundation, Inc., | |
24 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | |
25 | */ | |
26 | ||
f4b37741 | 27 | #include <sys/kmem.h> |
f1ca4da6 | 28 | |
937879f1 | 29 | #ifdef DEBUG_SUBSYSTEM |
a0f6da3d | 30 | # undef DEBUG_SUBSYSTEM |
937879f1 | 31 | #endif |
32 | ||
33 | #define DEBUG_SUBSYSTEM S_KMEM | |
34 | ||
36b313da BB |
35 | /* |
36 | * The minimum amount of memory measured in pages to be free at all | |
37 | * times on the system. This is similar to Linux's zone->pages_min | |
38 | * multiplied by the number of zones and is sized based on that. | |
39 | */ | |
40 | pgcnt_t minfree = 0; | |
41 | EXPORT_SYMBOL(minfree); | |
42 | ||
43 | /* | |
44 | * The desired amount of memory measured in pages to be free at all | |
45 | * times on the system. This is similar to Linux's zone->pages_low | |
46 | * multiplied by the number of zones and is sized based on that. | |
47 | * Assuming all zones are being used roughly equally, when we drop | |
48 | * below this threshold async page reclamation is triggered. | |
49 | */ | |
50 | pgcnt_t desfree = 0; | |
51 | EXPORT_SYMBOL(desfree); | |
52 | ||
53 | /* | |
54 | * When above this amount of memory measured in pages the system is | |
55 | * determined to have enough free memory. This is similar to Linux's | |
56 | * zone->pages_high multiplied by the number of zones and is sized based | |
57 | * on that. Assuming all zones are being used roughly equally, when | |
58 | * async page reclamation reaches this threshold it stops. | |
59 | */ | |
60 | pgcnt_t lotsfree = 0; | |
61 | EXPORT_SYMBOL(lotsfree); | |
62 | ||
63 | /* Unused, always 0 in this implementation */ | |
64 | pgcnt_t needfree = 0; | |
65 | EXPORT_SYMBOL(needfree); | |
66 | ||
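The watermarks above are only meaningful relative to a caller's own view of free memory. A minimal, hedged sketch of how a Solaris-style consumer might combine them; freemem here stands for the caller's current count of free pages and is not defined in this file, and needfree is always zero in this implementation:

/*
 * Illustrative only: one way a consumer might interpret the watermarks above.
 */
static int
spl_example_memory_pressure(pgcnt_t freemem)
{
	return (freemem < lotsfree + needfree + desfree);
}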
36b313da BB |
67 | pgcnt_t swapfs_minfree = 0; |
68 | EXPORT_SYMBOL(swapfs_minfree); | |
69 | ||
70 | pgcnt_t swapfs_reserve = 0; | |
71 | EXPORT_SYMBOL(swapfs_reserve); | |
72 | ||
36b313da BB |
73 | vmem_t *heap_arena = NULL; |
74 | EXPORT_SYMBOL(heap_arena); | |
75 | ||
76 | vmem_t *zio_alloc_arena = NULL; | |
77 | EXPORT_SYMBOL(zio_alloc_arena); | |
78 | ||
79 | vmem_t *zio_arena = NULL; | |
80 | EXPORT_SYMBOL(zio_arena); | |
81 | ||
d1ff2312 | 82 | #ifndef HAVE_GET_VMALLOC_INFO |
96dded38 | 83 | get_vmalloc_info_t get_vmalloc_info_fn = SYMBOL_POISON; |
d1ff2312 BB |
84 | EXPORT_SYMBOL(get_vmalloc_info_fn); |
85 | #endif /* HAVE_GET_VMALLOC_INFO */ | |
86 | ||
5232d256 BB |
87 | #ifdef HAVE_PGDAT_HELPERS |
88 | # ifndef HAVE_FIRST_ONLINE_PGDAT | |
96dded38 | 89 | first_online_pgdat_t first_online_pgdat_fn = SYMBOL_POISON; |
d1ff2312 | 90 | EXPORT_SYMBOL(first_online_pgdat_fn); |
5232d256 | 91 | # endif /* HAVE_FIRST_ONLINE_PGDAT */ |
36b313da | 92 | |
5232d256 | 93 | # ifndef HAVE_NEXT_ONLINE_PGDAT |
96dded38 | 94 | next_online_pgdat_t next_online_pgdat_fn = SYMBOL_POISON; |
d1ff2312 | 95 | EXPORT_SYMBOL(next_online_pgdat_fn); |
5232d256 | 96 | # endif /* HAVE_NEXT_ONLINE_PGDAT */ |
36b313da | 97 | |
5232d256 | 98 | # ifndef HAVE_NEXT_ZONE |
96dded38 | 99 | next_zone_t next_zone_fn = SYMBOL_POISON; |
d1ff2312 | 100 | EXPORT_SYMBOL(next_zone_fn); |
5232d256 BB |
101 | # endif /* HAVE_NEXT_ZONE */ |
102 | ||
103 | #else /* HAVE_PGDAT_HELPERS */ | |
104 | ||
105 | # ifndef HAVE_PGDAT_LIST | |
106 | struct pglist_data *pgdat_list_addr = SYMBOL_POISON; | |
107 | EXPORT_SYMBOL(pgdat_list_addr); | |
108 | # endif /* HAVE_PGDAT_LIST */ | |
109 | ||
110 | #endif /* HAVE_PGDAT_HELPERS */ | |
36b313da | 111 | |
e11d6c5f BB |
112 | #ifndef HAVE_ZONE_STAT_ITEM_FIA |
113 | # ifndef HAVE_GET_ZONE_COUNTS | |
96dded38 | 114 | get_zone_counts_t get_zone_counts_fn = SYMBOL_POISON; |
d1ff2312 | 115 | EXPORT_SYMBOL(get_zone_counts_fn); |
96dded38 | 116 | # endif /* HAVE_GET_ZONE_COUNTS */ |
4ab13d3b | 117 | |
e11d6c5f BB |
118 | unsigned long |
119 | spl_global_page_state(int item) | |
4ab13d3b BB |
120 | { |
121 | unsigned long active; | |
122 | unsigned long inactive; | |
123 | unsigned long free; | |
124 | ||
e11d6c5f BB |
125 | if (item == NR_FREE_PAGES) { |
126 | get_zone_counts(&active, &inactive, &free); | |
127 | return free; | |
128 | } | |
129 | ||
130 | if (item == NR_INACTIVE) { | |
131 | get_zone_counts(&active, &inactive, &free); | |
132 | return inactive; | |
133 | } | |
134 | ||
135 | if (item == NR_ACTIVE) { | |
136 | get_zone_counts(&active, &inactive, &free); | |
137 | return active; | |
138 | } | |
139 | ||
96dded38 | 140 | # ifdef HAVE_GLOBAL_PAGE_STATE |
e11d6c5f | 141 | return global_page_state((enum zone_stat_item)item); |
96dded38 BB |
142 | # else |
143 | return 0; /* Unsupported */ | |
144 | # endif /* HAVE_GLOBAL_PAGE_STATE */ | |
e11d6c5f BB |
145 | } |
146 | EXPORT_SYMBOL(spl_global_page_state); | |
e11d6c5f | 147 | #endif /* HAVE_ZONE_STAT_ITEM_FIA */ |
4ab13d3b | 148 | |
e11d6c5f BB |
149 | pgcnt_t |
150 | spl_kmem_availrmem(void) | |
151 | { | |
4ab13d3b | 152 | /* The amount of easily available memory */ |
e11d6c5f BB |
153 | return (spl_global_page_state(NR_FREE_PAGES) + |
154 | spl_global_page_state(NR_INACTIVE)); | |
4ab13d3b BB |
155 | } |
156 | EXPORT_SYMBOL(spl_kmem_availrmem); | |
157 | ||
158 | size_t | |
159 | vmem_size(vmem_t *vmp, int typemask) | |
160 | { | |
d1ff2312 BB |
161 | struct vmalloc_info vmi; |
162 | size_t size = 0; | |
163 | ||
4ab13d3b BB |
164 | ASSERT(vmp == NULL); |
165 | ASSERT(typemask & (VMEM_ALLOC | VMEM_FREE)); | |
166 | ||
d1ff2312 BB |
167 | get_vmalloc_info(&vmi); |
168 | if (typemask & VMEM_ALLOC) | |
169 | size += (size_t)vmi.used; | |
170 | ||
171 | if (typemask & VMEM_FREE) | |
172 | size += (size_t)(VMALLOC_TOTAL - vmi.used); | |
173 | ||
174 | return size; | |
4ab13d3b BB |
175 | } |
176 | EXPORT_SYMBOL(vmem_size); | |
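A minimal usage sketch of the vmem_size() shim above; per its asserts the Solaris arena pointer must be NULL and the typemask must include VMEM_ALLOC and/or VMEM_FREE:

/* Illustrative only: querying virtual address space usage via the shim. */
size_t used  = vmem_size(NULL, VMEM_ALLOC);
size_t avail = vmem_size(NULL, VMEM_FREE);
size_t total = vmem_size(NULL, VMEM_ALLOC | VMEM_FREE); /* == VMALLOC_TOTAL */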
4ab13d3b | 177 | |
f1ca4da6 | 178 | /* |
2fb9b26a | 179 | * Memory allocation interfaces and debugging for basic kmem_* |
180 | * and vmem_* style memory allocation. When DEBUG_KMEM is enabled | |
181 | * all allocations will be tracked when they are allocated and | |
182 | * freed. When the SPL module is unloaded a list of all leaked | |
183 | * addresses and where they were allocated will be dumped to the | |
184 | * console. Enabling this feature has a significant impact on | |
185 | * performance but it makes finding memory leaks straightforward. | |
f1ca4da6 | 186 | */ |
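What the accounting below verifies is that every allocation is returned with the same address and size. A hedged sketch of the expected usage pattern; my_data_t is a hypothetical type:

/* Illustrative only: alloc/free must be symmetric in address and size so
 * that kmem_alloc_used returns to zero before module unload. */
my_data_t *dp = kmem_alloc(sizeof (my_data_t), KM_SLEEP);
/* ... use dp ... */
kmem_free(dp, sizeof (my_data_t));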
187 | #ifdef DEBUG_KMEM | |
188 | /* Shim layer memory accounting */ | |
550f1705 | 189 | atomic64_t kmem_alloc_used = ATOMIC64_INIT(0); |
a0f6da3d | 190 | unsigned long long kmem_alloc_max = 0; |
550f1705 | 191 | atomic64_t vmem_alloc_used = ATOMIC64_INIT(0); |
a0f6da3d | 192 | unsigned long long vmem_alloc_max = 0; |
c19c06f3 | 193 | int kmem_warning_flag = 1; |
79b31f36 | 194 | |
ff449ac4 | 195 | EXPORT_SYMBOL(kmem_alloc_used); |
196 | EXPORT_SYMBOL(kmem_alloc_max); | |
197 | EXPORT_SYMBOL(vmem_alloc_used); | |
198 | EXPORT_SYMBOL(vmem_alloc_max); | |
199 | EXPORT_SYMBOL(kmem_warning_flag); | |
200 | ||
a0f6da3d | 201 | # ifdef DEBUG_KMEM_TRACKING |
202 | ||
203 | /* XXX - Not too surprisingly, with debugging enabled the xmem_locks are very | |
204 | * highly contended particularly on xfree(). If we want to run with this | |
205 | * detailed debugging enabled for anything other than debugging we need to | |
206 | * minimize the contention by moving to a lock per xmem_table entry model. | |
207 | */ | |
208 | ||
209 | # define KMEM_HASH_BITS 10 | |
210 | # define KMEM_TABLE_SIZE (1 << KMEM_HASH_BITS) | |
211 | ||
212 | # define VMEM_HASH_BITS 10 | |
213 | # define VMEM_TABLE_SIZE (1 << VMEM_HASH_BITS) | |
214 | ||
215 | typedef struct kmem_debug { | |
216 | struct hlist_node kd_hlist; /* Hash node linkage */ | |
217 | struct list_head kd_list; /* List of all allocations */ | |
218 | void *kd_addr; /* Allocation pointer */ | |
219 | size_t kd_size; /* Allocation size */ | |
220 | const char *kd_func; /* Allocation function */ | |
221 | int kd_line; /* Allocation line */ | |
222 | } kmem_debug_t; | |
223 | ||
d6a26c6a | 224 | spinlock_t kmem_lock; |
225 | struct hlist_head kmem_table[KMEM_TABLE_SIZE]; | |
226 | struct list_head kmem_list; | |
227 | ||
13cdca65 | 228 | spinlock_t vmem_lock; |
229 | struct hlist_head vmem_table[VMEM_TABLE_SIZE]; | |
230 | struct list_head vmem_list; | |
231 | ||
d6a26c6a | 232 | EXPORT_SYMBOL(kmem_lock); |
233 | EXPORT_SYMBOL(kmem_table); | |
234 | EXPORT_SYMBOL(kmem_list); | |
235 | ||
13cdca65 | 236 | EXPORT_SYMBOL(vmem_lock); |
237 | EXPORT_SYMBOL(vmem_table); | |
238 | EXPORT_SYMBOL(vmem_list); | |
a0f6da3d | 239 | # endif |
13cdca65 | 240 | |
c19c06f3 | 241 | int kmem_set_warning(int flag) { return (kmem_warning_flag = !!flag); } |
242 | #else | |
243 | int kmem_set_warning(int flag) { return 0; } | |
f1ca4da6 | 244 | #endif |
c19c06f3 | 245 | EXPORT_SYMBOL(kmem_set_warning); |
f1ca4da6 | 246 | |
247 | /* | |
248 | * Slab allocation interfaces | |
249 | * | |
2fb9b26a | 250 | * While the Linux slab implementation was inspired by the Solaris |
251 | * implementation, I cannot use it to emulate the Solaris APIs. I | |
252 | * require two features which are not provided by the Linux slab. | |
253 | * | |
254 | * 1) Constructors AND destructors. Recent versions of the Linux | |
255 | * kernel have removed support for destructors. This is a deal | |
256 | * breaker for the SPL which contains particularly expensive | |
257 | * initializers for mutex's, condition variables, etc. We also | |
a0f6da3d | 258 | * require a minimal level of cleanup for these data types unlike |
259 | * many Linux data types which do not need to be explicitly destroyed. | |
2fb9b26a | 260 | * |
a0f6da3d | 261 | * 2) Virtual address space backed slab. Callers of the Solaris slab |
2fb9b26a | 262 | * expect it to work well for both small and very large allocations. |
263 | * Because of memory fragmentation the Linux slab which is backed | |
264 | * by kmalloc'ed memory performs very badly when confronted with | |
265 | * large numbers of large allocations. Basing the slab on the | |
266 | * virtual address space removes the need for contiguous pages | |
267 | * and greatly improves performance for large allocations. | |
268 | * | |
269 | * For these reasons, the SPL has its own slab implementation with | |
270 | * the needed features. It is not as highly optimized as either the | |
271 | * Solaris or Linux slabs, but it should get me most of what is | |
272 | * needed until it can be optimized or obsoleted by another approach. | |
273 | * | |
274 | * One serious concern I do have about this method is the relatively | |
275 | * small virtual address space on 32bit arches. This will seriously | |
276 | * constrain the size of the slab caches and their performance. | |
277 | * | |
2fb9b26a | 278 | * XXX: Improve the partial slab list by carefully maintaining a |
279 | * strict ordering of fullest to emptiest slabs based on | |
280 | * the slab reference count. This guarantees that when freeing | |
281 | * slabs back to the system we need only linearly traverse the | |
282 | * last N slabs in the list to discover all the freeable slabs. | |
283 | * | |
284 | * XXX: NUMA awareness for optionally allocating memory close to a | |
285 | * particular core. This can be advantageous if you know the slab | |
286 | * object will be short lived and primarily accessed from one core. | |
287 | * | |
288 | * XXX: Slab coloring may also yield performance improvements and would | |
289 | * be desirable to implement. | |
f1ca4da6 | 290 | */ |
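To make requirement (1) concrete, here is a hedged sketch of the kind of constructor/destructor pair the SPL slab has to support; the object type and function names are hypothetical and the exact ctor/dtor prototypes are the ones declared in sys/kmem.h:

/* Illustrative only: an expensive-to-initialize object with a real dtor. */
typedef struct my_node {
	kmutex_t   mn_lock;
	kcondvar_t mn_cv;
} my_node_t;

static int
my_node_ctor(void *buf, void *priv, int flags)
{
	my_node_t *mn = buf;
	mutex_init(&mn->mn_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&mn->mn_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

static void
my_node_dtor(void *buf, void *priv)
{
	my_node_t *mn = buf;
	cv_destroy(&mn->mn_cv);
	mutex_destroy(&mn->mn_lock);
}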
2fb9b26a | 291 | |
a0f6da3d | 292 | struct list_head spl_kmem_cache_list; /* List of caches */ |
293 | struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */ | |
c30df9c8 | 294 | |
4afaaefa | 295 | static int spl_cache_flush(spl_kmem_cache_t *skc, |
a0f6da3d | 296 | spl_kmem_magazine_t *skm, int flush); |
4afaaefa | 297 | |
57d86234 | 298 | #ifdef HAVE_SET_SHRINKER |
2fb9b26a | 299 | static struct shrinker *spl_kmem_cache_shrinker; |
57d86234 | 300 | #else |
4afaaefa | 301 | static int spl_kmem_cache_generic_shrinker(int nr_to_scan, |
a0f6da3d | 302 | unsigned int gfp_mask); |
2fb9b26a | 303 | static struct shrinker spl_kmem_cache_shrinker = { |
4afaaefa | 304 | .shrink = spl_kmem_cache_generic_shrinker, |
57d86234 | 305 | .seeks = KMC_DEFAULT_SEEKS, |
306 | }; | |
307 | #endif | |
f1ca4da6 | 308 | |
a0f6da3d | 309 | #ifdef DEBUG_KMEM |
310 | # ifdef DEBUG_KMEM_TRACKING | |
311 | ||
312 | static kmem_debug_t * | |
313 | kmem_del_init(spinlock_t *lock, struct hlist_head *table, int bits, | |
314 | void *addr) | |
315 | { | |
316 | struct hlist_head *head; | |
317 | struct hlist_node *node; | |
318 | struct kmem_debug *p; | |
319 | unsigned long flags; | |
320 | ENTRY; | |
321 | ||
322 | spin_lock_irqsave(lock, flags); | |
323 | ||
324 | head = &table[hash_ptr(addr, bits)]; | |
325 | hlist_for_each_entry_rcu(p, node, head, kd_hlist) { | |
326 | if (p->kd_addr == addr) { | |
327 | hlist_del_init(&p->kd_hlist); | |
328 | list_del_init(&p->kd_list); | |
329 | spin_unlock_irqrestore(lock, flags); | |
330 | return p; | |
331 | } | |
332 | } | |
333 | ||
334 | spin_unlock_irqrestore(lock, flags); | |
335 | ||
336 | RETURN(NULL); | |
337 | } | |
338 | ||
339 | void * | |
340 | kmem_alloc_track(size_t size, int flags, const char *func, int line, | |
341 | int node_alloc, int node) | |
342 | { | |
343 | void *ptr = NULL; | |
344 | kmem_debug_t *dptr; | |
345 | unsigned long irq_flags; | |
346 | ENTRY; | |
347 | ||
348 | dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), | |
349 | flags & ~__GFP_ZERO); | |
350 | ||
351 | if (dptr == NULL) { | |
352 | CWARN("kmem_alloc(%ld, 0x%x) debug failed\n", | |
353 | sizeof(kmem_debug_t), flags); | |
354 | } else { | |
355 | /* Marked unlikely because we should never be doing this, | |
356 | * we tolerate up to 2 pages but a single page is best. */ | |
357 | if (unlikely((size) > (PAGE_SIZE * 2)) && kmem_warning_flag) | |
358 | CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n", | |
359 | (unsigned long long) size, flags, | |
360 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
361 | ||
c8e60837 | 362 | /* We use kstrdup() below because the string pointed to by |
363 | * __FUNCTION__ might not be available by the time we want | |
364 | * to print it since the module might have been unloaded. */ | |
365 | dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO); | |
366 | if (unlikely(dptr->kd_func == NULL)) { | |
367 | kfree(dptr); | |
368 | CWARN("kstrdup() failed in kmem_alloc(%llu, 0x%x) " | |
369 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
370 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
371 | goto out; | |
372 | } | |
373 | ||
a0f6da3d | 374 | /* Use the correct allocator */ |
375 | if (node_alloc) { | |
376 | ASSERT(!(flags & __GFP_ZERO)); | |
377 | ptr = kmalloc_node(size, flags, node); | |
378 | } else if (flags & __GFP_ZERO) { | |
379 | ptr = kzalloc(size, flags & ~__GFP_ZERO); | |
380 | } else { | |
381 | ptr = kmalloc(size, flags); | |
382 | } | |
383 | ||
384 | if (unlikely(ptr == NULL)) { | |
c8e60837 | 385 | kfree(dptr->kd_func); |
a0f6da3d | 386 | kfree(dptr); |
387 | CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
388 | (unsigned long long) size, flags, | |
389 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
390 | goto out; | |
391 | } | |
392 | ||
393 | atomic64_add(size, &kmem_alloc_used); | |
394 | if (unlikely(atomic64_read(&kmem_alloc_used) > | |
395 | kmem_alloc_max)) | |
396 | kmem_alloc_max = | |
397 | atomic64_read(&kmem_alloc_used); | |
398 | ||
399 | INIT_HLIST_NODE(&dptr->kd_hlist); | |
400 | INIT_LIST_HEAD(&dptr->kd_list); | |
401 | ||
402 | dptr->kd_addr = ptr; | |
403 | dptr->kd_size = size; | |
a0f6da3d | 404 | dptr->kd_line = line; |
405 | ||
406 | spin_lock_irqsave(&kmem_lock, irq_flags); | |
407 | hlist_add_head_rcu(&dptr->kd_hlist, | |
408 | &kmem_table[hash_ptr(ptr, KMEM_HASH_BITS)]); | |
409 | list_add_tail(&dptr->kd_list, &kmem_list); | |
410 | spin_unlock_irqrestore(&kmem_lock, irq_flags); | |
411 | ||
412 | CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p " | |
413 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
414 | ptr, atomic64_read(&kmem_alloc_used), | |
415 | kmem_alloc_max); | |
416 | } | |
417 | out: | |
418 | RETURN(ptr); | |
419 | } | |
420 | EXPORT_SYMBOL(kmem_alloc_track); | |
421 | ||
422 | void | |
423 | kmem_free_track(void *ptr, size_t size) | |
424 | { | |
425 | kmem_debug_t *dptr; | |
426 | ENTRY; | |
427 | ||
428 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
429 | (unsigned long long) size); | |
430 | ||
431 | dptr = kmem_del_init(&kmem_lock, kmem_table, KMEM_HASH_BITS, ptr); | |
432 | ||
433 | ASSERT(dptr); /* Must exist in hash due to kmem_alloc() */ | |
434 | ||
435 | /* Size must match */ | |
436 | ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), " | |
437 | "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size, | |
438 | (unsigned long long) size, dptr->kd_func, dptr->kd_line); | |
439 | ||
440 | atomic64_sub(size, &kmem_alloc_used); | |
441 | ||
442 | CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
443 | (unsigned long long) size, atomic64_read(&kmem_alloc_used), | |
444 | kmem_alloc_max); | |
445 | ||
c8e60837 | 446 | kfree(dptr->kd_func); |
447 | ||
a0f6da3d | 448 | memset(dptr, 0x5a, sizeof(kmem_debug_t)); |
449 | kfree(dptr); | |
450 | ||
451 | memset(ptr, 0x5a, size); | |
452 | kfree(ptr); | |
453 | ||
454 | EXIT; | |
455 | } | |
456 | EXPORT_SYMBOL(kmem_free_track); | |
457 | ||
458 | void * | |
459 | vmem_alloc_track(size_t size, int flags, const char *func, int line) | |
460 | { | |
461 | void *ptr = NULL; | |
462 | kmem_debug_t *dptr; | |
463 | unsigned long irq_flags; | |
464 | ENTRY; | |
465 | ||
466 | ASSERT(flags & KM_SLEEP); | |
467 | ||
468 | dptr = (kmem_debug_t *) kmalloc(sizeof(kmem_debug_t), flags); | |
469 | if (dptr == NULL) { | |
470 | CWARN("vmem_alloc(%ld, 0x%x) debug failed\n", | |
471 | sizeof(kmem_debug_t), flags); | |
472 | } else { | |
c8e60837 | 473 | /* We use kstrdup() below because the string pointed to by |
474 | * __FUNCTION__ might not be available by the time we want | |
475 | * to print it, since the module might have been unloaded. */ | |
476 | dptr->kd_func = kstrdup(func, flags & ~__GFP_ZERO); | |
477 | if (unlikely(dptr->kd_func == NULL)) { | |
478 | kfree(dptr); | |
479 | CWARN("kstrdup() failed in vmem_alloc(%llu, 0x%x) " | |
480 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
481 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
482 | goto out; | |
483 | } | |
484 | ||
a0f6da3d | 485 | ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO, |
486 | PAGE_KERNEL); | |
487 | ||
488 | if (unlikely(ptr == NULL)) { | |
c8e60837 | 489 | kfree(dptr->kd_func); |
a0f6da3d | 490 | kfree(dptr); |
491 | CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
492 | (unsigned long long) size, flags, | |
493 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
494 | goto out; | |
495 | } | |
496 | ||
497 | if (flags & __GFP_ZERO) | |
498 | memset(ptr, 0, size); | |
499 | ||
500 | atomic64_add(size, &vmem_alloc_used); | |
501 | if (unlikely(atomic64_read(&vmem_alloc_used) > | |
502 | vmem_alloc_max)) | |
503 | vmem_alloc_max = | |
504 | atomic64_read(&vmem_alloc_used); | |
505 | ||
506 | INIT_HLIST_NODE(&dptr->kd_hlist); | |
507 | INIT_LIST_HEAD(&dptr->kd_list); | |
508 | ||
509 | dptr->kd_addr = ptr; | |
510 | dptr->kd_size = size; | |
a0f6da3d | 511 | dptr->kd_line = line; |
512 | ||
513 | spin_lock_irqsave(&vmem_lock, irq_flags); | |
514 | hlist_add_head_rcu(&dptr->kd_hlist, | |
515 | &vmem_table[hash_ptr(ptr, VMEM_HASH_BITS)]); | |
516 | list_add_tail(&dptr->kd_list, &vmem_list); | |
517 | spin_unlock_irqrestore(&vmem_lock, irq_flags); | |
518 | ||
519 | CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p " | |
520 | "(%lld/%llu)\n", (unsigned long long) size, flags, | |
521 | ptr, atomic64_read(&vmem_alloc_used), | |
522 | vmem_alloc_max); | |
523 | } | |
524 | out: | |
525 | RETURN(ptr); | |
526 | } | |
527 | EXPORT_SYMBOL(vmem_alloc_track); | |
528 | ||
529 | void | |
530 | vmem_free_track(void *ptr, size_t size) | |
531 | { | |
532 | kmem_debug_t *dptr; | |
533 | ENTRY; | |
534 | ||
535 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
536 | (unsigned long long) size); | |
537 | ||
538 | dptr = kmem_del_init(&vmem_lock, vmem_table, VMEM_HASH_BITS, ptr); | |
539 | ASSERT(dptr); /* Must exist in hash due to vmem_alloc() */ | |
540 | ||
541 | /* Size must match */ | |
542 | ASSERTF(dptr->kd_size == size, "kd_size (%llu) != size (%llu), " | |
543 | "kd_func = %s, kd_line = %d\n", (unsigned long long) dptr->kd_size, | |
544 | (unsigned long long) size, dptr->kd_func, dptr->kd_line); | |
545 | ||
546 | atomic64_sub(size, &vmem_alloc_used); | |
547 | CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
548 | (unsigned long long) size, atomic64_read(&vmem_alloc_used), | |
549 | vmem_alloc_max); | |
550 | ||
c8e60837 | 551 | kfree(dptr->kd_func); |
552 | ||
a0f6da3d | 553 | memset(dptr, 0x5a, sizeof(kmem_debug_t)); |
554 | kfree(dptr); | |
555 | ||
556 | memset(ptr, 0x5a, size); | |
557 | vfree(ptr); | |
558 | ||
559 | EXIT; | |
560 | } | |
561 | EXPORT_SYMBOL(vmem_free_track); | |
562 | ||
563 | # else /* DEBUG_KMEM_TRACKING */ | |
564 | ||
565 | void * | |
566 | kmem_alloc_debug(size_t size, int flags, const char *func, int line, | |
567 | int node_alloc, int node) | |
568 | { | |
569 | void *ptr; | |
570 | ENTRY; | |
571 | ||
572 | /* Marked unlikely because we should never be doing this, | |
573 | * we tolerate up to 2 pages but a single page is best. */ | |
574 | if (unlikely(size > (PAGE_SIZE * 2)) && kmem_warning_flag) | |
575 | CWARN("Large kmem_alloc(%llu, 0x%x) (%lld/%llu)\n", | |
576 | (unsigned long long) size, flags, | |
577 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
578 | ||
579 | /* Use the correct allocator */ | |
580 | if (node_alloc) { | |
581 | ASSERT(!(flags & __GFP_ZERO)); | |
582 | ptr = kmalloc_node(size, flags, node); | |
583 | } else if (flags & __GFP_ZERO) { | |
584 | ptr = kzalloc(size, flags & (~__GFP_ZERO)); | |
585 | } else { | |
586 | ptr = kmalloc(size, flags); | |
587 | } | |
588 | ||
589 | if (ptr == NULL) { | |
590 | CWARN("kmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
591 | (unsigned long long) size, flags, | |
592 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
593 | } else { | |
594 | atomic64_add(size, &kmem_alloc_used); | |
595 | if (unlikely(atomic64_read(&kmem_alloc_used) > kmem_alloc_max)) | |
596 | kmem_alloc_max = atomic64_read(&kmem_alloc_used); | |
597 | ||
598 | CDEBUG_LIMIT(D_INFO, "kmem_alloc(%llu, 0x%x) = %p " | |
599 | "(%lld/%llu)\n", (unsigned long long) size, flags, ptr, | |
600 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); | |
601 | } | |
602 | RETURN(ptr); | |
603 | } | |
604 | EXPORT_SYMBOL(kmem_alloc_debug); | |
605 | ||
606 | void | |
607 | kmem_free_debug(void *ptr, size_t size) | |
608 | { | |
609 | ENTRY; | |
610 | ||
611 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
612 | (unsigned long long) size); | |
613 | ||
614 | atomic64_sub(size, &kmem_alloc_used); | |
615 | ||
616 | CDEBUG_LIMIT(D_INFO, "kmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
617 | (unsigned long long) size, atomic64_read(&kmem_alloc_used), | |
618 | kmem_alloc_max); | |
619 | ||
620 | memset(ptr, 0x5a, size); | |
621 | kfree(ptr); | |
622 | ||
623 | EXIT; | |
624 | } | |
625 | EXPORT_SYMBOL(kmem_free_debug); | |
626 | ||
627 | void * | |
628 | vmem_alloc_debug(size_t size, int flags, const char *func, int line) | |
629 | { | |
630 | void *ptr; | |
631 | ENTRY; | |
632 | ||
633 | ASSERT(flags & KM_SLEEP); | |
634 | ||
635 | ptr = __vmalloc(size, (flags | __GFP_HIGHMEM) & ~__GFP_ZERO, | |
636 | PAGE_KERNEL); | |
637 | if (ptr == NULL) { | |
638 | CWARN("vmem_alloc(%llu, 0x%x) failed (%lld/%llu)\n", | |
639 | (unsigned long long) size, flags, | |
640 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
641 | } else { | |
642 | if (flags & __GFP_ZERO) | |
643 | memset(ptr, 0, size); | |
644 | ||
645 | atomic64_add(size, &vmem_alloc_used); | |
646 | ||
647 | if (unlikely(atomic64_read(&vmem_alloc_used) > vmem_alloc_max)) | |
648 | vmem_alloc_max = atomic64_read(&vmem_alloc_used); | |
649 | ||
650 | CDEBUG_LIMIT(D_INFO, "vmem_alloc(%llu, 0x%x) = %p " | |
651 | "(%lld/%llu)\n", (unsigned long long) size, flags, ptr, | |
652 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); | |
653 | } | |
654 | ||
655 | RETURN(ptr); | |
656 | } | |
657 | EXPORT_SYMBOL(vmem_alloc_debug); | |
658 | ||
659 | void | |
660 | vmem_free_debug(void *ptr, size_t size) | |
661 | { | |
662 | ENTRY; | |
663 | ||
664 | ASSERTF(ptr || size > 0, "ptr: %p, size: %llu", ptr, | |
665 | (unsigned long long) size); | |
666 | ||
667 | atomic64_sub(size, &vmem_alloc_used); | |
668 | ||
669 | CDEBUG_LIMIT(D_INFO, "vmem_free(%p, %llu) (%lld/%llu)\n", ptr, | |
670 | (unsigned long long) size, atomic64_read(&vmem_alloc_used), | |
671 | vmem_alloc_max); | |
672 | ||
673 | memset(ptr, 0x5a, size); | |
674 | vfree(ptr); | |
675 | ||
676 | EXIT; | |
677 | } | |
678 | EXPORT_SYMBOL(vmem_free_debug); | |
679 | ||
680 | # endif /* DEBUG_KMEM_TRACKING */ | |
681 | #endif /* DEBUG_KMEM */ | |
682 | ||
a1502d76 | 683 | static void * |
684 | kv_alloc(spl_kmem_cache_t *skc, int size, int flags) | |
fece7c99 | 685 | { |
a1502d76 | 686 | void *ptr; |
f1ca4da6 | 687 | |
a1502d76 | 688 | if (skc->skc_flags & KMC_KMEM) { |
689 | if (size > (2 * PAGE_SIZE)) { | |
690 | ptr = (void *)__get_free_pages(flags, get_order(size)); | |
691 | } else | |
692 | ptr = kmem_alloc(size, flags); | |
693 | } else { | |
694 | ptr = vmem_alloc(size, flags); | |
d6a26c6a | 695 | } |
fece7c99 | 696 | |
a1502d76 | 697 | return ptr; |
698 | } | |
fece7c99 | 699 | |
a1502d76 | 700 | static void |
701 | kv_free(spl_kmem_cache_t *skc, void *ptr, int size) | |
702 | { | |
703 | if (skc->skc_flags & KMC_KMEM) { | |
704 | if (size > (2 * PAGE_SIZE)) | |
705 | free_pages((unsigned long)ptr, get_order(size)); | |
706 | else | |
707 | kmem_free(ptr, size); | |
708 | } else { | |
709 | vmem_free(ptr, size); | |
710 | } | |
fece7c99 | 711 | } |
712 | ||
ea3e6ca9 BB |
713 | /* |
714 | * It's important that we pack the spl_kmem_obj_t structure and the | |
48e0606a BB |
715 | * actual objects into one large address space to minimize the number
716 | * of calls to the allocator. It is far better to do a few large | |
717 | * allocations and then subdivide them ourselves. Now which allocator | |
718 | * we use requires balancing a few trade-offs. | |
719 | * | |
720 | * For small objects we use kmem_alloc() because as long as you are | |
721 | * only requesting a small number of pages (ideally just one) it's cheap. | |
722 | * However, when you start requesting multiple pages with kmem_alloc() | |
723 | * it gets increasingly expensive since it requires contiguous pages. | |
724 | * For this reason we shift to vmem_alloc() for slabs of large objects | |
725 | * which removes the need for contiguous pages. We do not use | |
726 | * vmem_alloc() in all cases because there is significant locking | |
727 | * overhead in __get_vm_area_node(). This function takes a single | |
728 | * global lock when acquiring an available virtual address range which | |
729 | * serializes all vmem_alloc()'s for all slab caches. Using slightly | |
730 | * different allocation functions for small and large objects should | |
731 | * give us the best of both worlds. | |
732 | * | |
733 | * KMC_ONSLAB KMC_OFFSLAB | |
734 | * | |
735 | * +------------------------+ +-----------------+ | |
736 | * | spl_kmem_slab_t --+-+ | | spl_kmem_slab_t |---+-+ | |
737 | * | skc_obj_size <-+ | | +-----------------+ | | | |
738 | * | spl_kmem_obj_t | | | | | |
739 | * | skc_obj_size <---+ | +-----------------+ | | | |
740 | * | spl_kmem_obj_t | | | skc_obj_size | <-+ | | |
741 | * | ... v | | spl_kmem_obj_t | | | |
742 | * +------------------------+ +-----------------+ v | |
743 | */ | |
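The practical consequence of this packing, sketched for the on-slab case: an object's bookkeeping header sits immediately after the aligned object, so it can be recovered with pointer arithmetic alone (this mirrors the arithmetic in spl_slab_alloc() and spl_cache_shrink() below):

/* Illustrative only: recovering the spl_kmem_obj_t for an on-slab object. */
spl_kmem_obj_t *sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align);
ASSERT(sko->sko_magic == SKO_MAGIC);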
fece7c99 | 744 | static spl_kmem_slab_t * |
a1502d76 | 745 | spl_slab_alloc(spl_kmem_cache_t *skc, int flags) |
fece7c99 | 746 | { |
747 | spl_kmem_slab_t *sks; | |
a1502d76 | 748 | spl_kmem_obj_t *sko, *n; |
749 | void *base, *obj; | |
48e0606a BB |
750 | int i, align, size, rc = 0; |
751 | ||
a1502d76 | 752 | base = kv_alloc(skc, skc->skc_slab_size, flags); |
753 | if (base == NULL) | |
fece7c99 | 754 | RETURN(NULL); |
755 | ||
a1502d76 | 756 | sks = (spl_kmem_slab_t *)base; |
757 | sks->sks_magic = SKS_MAGIC; | |
758 | sks->sks_objs = skc->skc_slab_objs; | |
759 | sks->sks_age = jiffies; | |
760 | sks->sks_cache = skc; | |
761 | INIT_LIST_HEAD(&sks->sks_list); | |
762 | INIT_LIST_HEAD(&sks->sks_free_list); | |
763 | sks->sks_ref = 0; | |
48e0606a BB |
764 | |
765 | align = skc->skc_obj_align; | |
766 | size = P2ROUNDUP(skc->skc_obj_size, align) + | |
767 | P2ROUNDUP(sizeof(spl_kmem_obj_t), align); | |
fece7c99 | 768 | |
769 | for (i = 0; i < sks->sks_objs; i++) { | |
a1502d76 | 770 | if (skc->skc_flags & KMC_OFFSLAB) { |
771 | obj = kv_alloc(skc, size, flags); | |
772 | if (!obj) | |
773 | GOTO(out, rc = -ENOMEM); | |
774 | } else { | |
48e0606a BB |
775 | obj = base + |
776 | P2ROUNDUP(sizeof(spl_kmem_slab_t), align) + | |
777 | (i * size); | |
a1502d76 | 778 | } |
779 | ||
48e0606a | 780 | sko = obj + P2ROUNDUP(skc->skc_obj_size, align); |
fece7c99 | 781 | sko->sko_addr = obj; |
782 | sko->sko_magic = SKO_MAGIC; | |
783 | sko->sko_slab = sks; | |
784 | INIT_LIST_HEAD(&sko->sko_list); | |
fece7c99 | 785 | list_add_tail(&sko->sko_list, &sks->sks_free_list); |
786 | } | |
787 | ||
fece7c99 | 788 | list_for_each_entry(sko, &sks->sks_free_list, sko_list) |
789 | if (skc->skc_ctor) | |
790 | skc->skc_ctor(sko->sko_addr, skc->skc_private, flags); | |
2fb9b26a | 791 | out: |
a1502d76 | 792 | if (rc) { |
793 | if (skc->skc_flags & KMC_OFFSLAB) | |
48e0606a BB |
794 | list_for_each_entry_safe(sko, n, &sks->sks_free_list, |
795 | sko_list) | |
a1502d76 | 796 | kv_free(skc, sko->sko_addr, size); |
fece7c99 | 797 | |
a1502d76 | 798 | kv_free(skc, base, skc->skc_slab_size); |
799 | sks = NULL; | |
fece7c99 | 800 | } |
801 | ||
a1502d76 | 802 | RETURN(sks); |
fece7c99 | 803 | } |
804 | ||
ea3e6ca9 BB |
805 | /* |
806 | * Remove a slab from the complete or partial list; it must be called with | |
807 | * the 'skc->skc_lock' held but the actual free must be performed | |
808 | * outside the lock to prevent deadlocking on vmem addresses. | |
fece7c99 | 809 | */ |
f1ca4da6 | 810 | static void |
ea3e6ca9 BB |
811 | spl_slab_free(spl_kmem_slab_t *sks, |
812 | struct list_head *sks_list, struct list_head *sko_list) | |
813 | { | |
2fb9b26a | 814 | spl_kmem_cache_t *skc; |
2fb9b26a | 815 | ENTRY; |
57d86234 | 816 | |
2fb9b26a | 817 | ASSERT(sks->sks_magic == SKS_MAGIC); |
4afaaefa | 818 | ASSERT(sks->sks_ref == 0); |
d6a26c6a | 819 | |
fece7c99 | 820 | skc = sks->sks_cache; |
821 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
d46630e0 | 822 | ASSERT(spin_is_locked(&skc->skc_lock)); |
f1ca4da6 | 823 | |
1a944a7d BB |
824 | /* |
825 | * Update slab/objects counters in the cache, then remove the | |
826 | * slab from the skc->skc_partial_list. Finally add the slab | |
827 | * and all its objects in to the private work lists where the | |
828 | * destructors will be called and the memory freed to the system. | |
829 | */ | |
fece7c99 | 830 | skc->skc_obj_total -= sks->sks_objs; |
831 | skc->skc_slab_total--; | |
832 | list_del(&sks->sks_list); | |
ea3e6ca9 | 833 | list_add(&sks->sks_list, sks_list); |
1a944a7d BB |
834 | list_splice_init(&sks->sks_free_list, sko_list); |
835 | ||
2fb9b26a | 836 | EXIT; |
837 | } | |
d6a26c6a | 838 | |
ea3e6ca9 BB |
839 | /* |
840 | * Traverses all the partial slabs attached to a cache and frees those
841 | * which are currently empty, and have not been touched for
37db7d8c BB |
842 | * skc_delay seconds to avoid thrashing. The count argument is |
843 | * passed to optionally cap the number of slabs reclaimed; a count
844 | * of zero means try to reclaim everything. When flag is set we
845 | * always free an available slab regardless of age. | |
ea3e6ca9 BB |
846 | */ |
847 | static void | |
37db7d8c | 848 | spl_slab_reclaim(spl_kmem_cache_t *skc, int count, int flag) |
2fb9b26a | 849 | { |
850 | spl_kmem_slab_t *sks, *m; | |
ea3e6ca9 BB |
851 | spl_kmem_obj_t *sko, *n; |
852 | LIST_HEAD(sks_list); | |
853 | LIST_HEAD(sko_list); | |
1a944a7d | 854 | int size = 0, i = 0; |
2fb9b26a | 855 | ENTRY; |
856 | ||
2fb9b26a | 857 | /* |
ea3e6ca9 BB |
858 | * Move empty slabs and objects which have not been touched in |
859 | * skc_delay seconds on to private lists to be freed outside | |
1a944a7d BB |
860 | * the spin lock. This delay time is important to avoid thrashing; | |
861 | * however, when flag is set the delay will not be used. | |
2fb9b26a | 862 | */ |
ea3e6ca9 | 863 | spin_lock(&skc->skc_lock); |
1a944a7d BB |
864 | list_for_each_entry_safe_reverse(sks,m,&skc->skc_partial_list,sks_list){ |
865 | /* | |
866 | * All empty slabs are at the end of skc->skc_partial_list, | |
867 | * therefore once a non-empty slab is found we can stop | |
868 | * scanning. Additionally, stop when reaching the target | |
869 | * reclaim 'count' if a non-zero threshold is given. | |
870 | */ | |
871 | if ((sks->sks_ref > 0) || (count && i > count)) | |
37db7d8c BB |
872 | break; |
873 | ||
37db7d8c | 874 | if (time_after(jiffies,sks->sks_age+skc->skc_delay*HZ)||flag) { |
ea3e6ca9 | 875 | spl_slab_free(sks, &sks_list, &sko_list); |
37db7d8c BB |
876 | i++; |
877 | } | |
ea3e6ca9 BB |
878 | } |
879 | spin_unlock(&skc->skc_lock); | |
880 | ||
881 | /* | |
1a944a7d BB |
882 | * The following two loops ensure all the object destructors are |
883 | * run, any offslab objects are freed, and the slabs themselves | |
884 | * are freed. This is all done outside the skc->skc_lock since | |
885 | * this allows the destructor to sleep, and allows us to perform | |
886 | * a conditional reschedule when freeing a large number of | |
887 | * objects and slabs back to the system. | |
ea3e6ca9 | 888 | */ |
1a944a7d | 889 | if (skc->skc_flags & KMC_OFFSLAB) |
ea3e6ca9 BB |
890 | size = P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) + |
891 | P2ROUNDUP(sizeof(spl_kmem_obj_t), skc->skc_obj_align); | |
892 | ||
1a944a7d BB |
893 | list_for_each_entry_safe(sko, n, &sko_list, sko_list) { |
894 | ASSERT(sko->sko_magic == SKO_MAGIC); | |
895 | ||
896 | if (skc->skc_dtor) | |
897 | skc->skc_dtor(sko->sko_addr, skc->skc_private); | |
898 | ||
899 | if (skc->skc_flags & KMC_OFFSLAB) | |
ea3e6ca9 | 900 | kv_free(skc, sko->sko_addr, size); |
1a944a7d BB |
901 | |
902 | cond_resched(); | |
2fb9b26a | 903 | } |
904 | ||
37db7d8c | 905 | list_for_each_entry_safe(sks, m, &sks_list, sks_list) { |
1a944a7d | 906 | ASSERT(sks->sks_magic == SKS_MAGIC); |
ea3e6ca9 | 907 | kv_free(skc, sks, skc->skc_slab_size); |
37db7d8c BB |
908 | cond_resched(); |
909 | } | |
ea3e6ca9 BB |
910 | |
911 | EXIT; | |
f1ca4da6 | 912 | } |
913 | ||
ea3e6ca9 BB |
914 | /* |
915 | * Called regularly on all caches to age objects out of the magazines | |
916 | * which have not been accessed in skc->skc_delay seconds. This prevents | |
917 | * idle magazines from holding memory which might be better used by | |
918 | * other caches or parts of the system. The delay is present to | |
919 | * prevent thrashing the magazine. | |
920 | */ | |
921 | static void | |
922 | spl_magazine_age(void *data) | |
f1ca4da6 | 923 | { |
9b1b8e4c BB |
924 | spl_kmem_magazine_t *skm = |
925 | spl_get_work_data(data, spl_kmem_magazine_t, skm_work.work); | |
926 | spl_kmem_cache_t *skc = skm->skm_cache; | |
927 | int i = smp_processor_id(); | |
928 | ||
929 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
930 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
931 | ASSERT(skc->skc_mag[i] == skm); | |
f1ca4da6 | 932 | |
ea3e6ca9 BB |
933 | if (skm->skm_avail > 0 && |
934 | time_after(jiffies, skm->skm_age + skc->skc_delay * HZ)) | |
935 | (void)spl_cache_flush(skc, skm, skm->skm_refill); | |
9b1b8e4c BB |
936 | |
937 | if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)) | |
938 | schedule_delayed_work_on(i, &skm->skm_work, | |
939 | skc->skc_delay / 3 * HZ); | |
ea3e6ca9 | 940 | } |
4efd4118 | 941 | |
ea3e6ca9 BB |
942 | /* |
943 | * Called regularly to keep a downward pressure on the size of idle | |
944 | * magazines and to release free slabs from the cache. This function | |
945 | * never calls the registered reclaim function; that only occurs | |
946 | * under memory pressure or with a direct call to spl_kmem_reap(). | |
947 | */ | |
948 | static void | |
949 | spl_cache_age(void *data) | |
950 | { | |
9b1b8e4c | 951 | spl_kmem_cache_t *skc = |
ea3e6ca9 BB |
952 | spl_get_work_data(data, spl_kmem_cache_t, skc_work.work); |
953 | ||
954 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
37db7d8c | 955 | spl_slab_reclaim(skc, skc->skc_reap, 0); |
ea3e6ca9 BB |
956 | |
957 | if (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)) | |
37db7d8c | 958 | schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ); |
2fb9b26a | 959 | } |
f1ca4da6 | 960 | |
ea3e6ca9 BB |
961 | /* |
962 | * Size a slab based on the size of each aligned object plus spl_kmem_obj_t. | |
963 | * When on-slab we want to target SPL_KMEM_CACHE_OBJ_PER_SLAB. However, | |
964 | * for very small objects we may end up with more than this so as not | |
965 | * to waste space in the minimal allocation of a single page. Also for | |
966 | * very large objects we may use as few as SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN, | |
967 | * lower than this and we will fail. | |
968 | */ | |
48e0606a BB |
969 | static int |
970 | spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size) | |
971 | { | |
ea3e6ca9 | 972 | int sks_size, obj_size, max_size, align; |
48e0606a BB |
973 | |
974 | if (skc->skc_flags & KMC_OFFSLAB) { | |
ea3e6ca9 | 975 | *objs = SPL_KMEM_CACHE_OBJ_PER_SLAB; |
48e0606a BB |
976 | *size = sizeof(spl_kmem_slab_t); |
977 | } else { | |
ea3e6ca9 BB |
978 | align = skc->skc_obj_align; |
979 | sks_size = P2ROUNDUP(sizeof(spl_kmem_slab_t), align); | |
980 | obj_size = P2ROUNDUP(skc->skc_obj_size, align) + | |
981 | P2ROUNDUP(sizeof(spl_kmem_obj_t), align); | |
982 | ||
983 | if (skc->skc_flags & KMC_KMEM) | |
984 | max_size = ((uint64_t)1 << (MAX_ORDER-1)) * PAGE_SIZE; | |
985 | else | |
986 | max_size = (32 * 1024 * 1024); | |
48e0606a | 987 | |
ea3e6ca9 BB |
988 | for (*size = PAGE_SIZE; *size <= max_size; *size += PAGE_SIZE) { |
989 | *objs = (*size - sks_size) / obj_size; | |
990 | if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB) | |
991 | RETURN(0); | |
992 | } | |
48e0606a | 993 | |
ea3e6ca9 BB |
994 | /* |
995 | * Unable to satisfy the target objects per slab, fall back to | |
996 | * allocating a maximally sized slab and use it provided it can | |
997 | * contain at least the minimum object count. If not, fail. | |
998 | */ | |
999 | *size = max_size; | |
1000 | *objs = (*size - sks_size) / obj_size; | |
1001 | if (*objs >= SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN) | |
1002 | RETURN(0); | |
48e0606a BB |
1003 | } |
1004 | ||
ea3e6ca9 | 1005 | RETURN(-ENOSPC); |
48e0606a BB |
1006 | } |
1007 | ||
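As a worked example of the sizing loop above, using purely hypothetical sizes: with 4 KiB pages, a 512-byte aligned object and a 64-byte aligned spl_kmem_obj_t, obj_size = 576, so a one-page slab yields objs = (4096 - sks_size) / 576, roughly 7 objects. That is below the SPL_KMEM_CACHE_OBJ_PER_SLAB target, so the loop grows *size a page at a time until enough objects fit; only if even the maximally sized slab cannot hold SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN objects does the function return -ENOSPC.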
ea3e6ca9 BB |
1008 | /* |
1009 | * Make a guess at reasonable per-cpu magazine size based on the size of | |
1010 | * each object and the cost of caching N of them in each magazine. Long | |
1011 | * term this should really adapt based on an observed usage heuristic. | |
1012 | */ | |
4afaaefa | 1013 | static int |
1014 | spl_magazine_size(spl_kmem_cache_t *skc) | |
1015 | { | |
48e0606a | 1016 | int size, align = skc->skc_obj_align; |
4afaaefa | 1017 | ENTRY; |
1018 | ||
ea3e6ca9 | 1019 | /* Per-magazine sizes below assume a 4Kib page size */ |
48e0606a | 1020 | if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 256)) |
ea3e6ca9 | 1021 | size = 4; /* Minimum 4Mib per-magazine */ |
48e0606a | 1022 | else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE * 32)) |
ea3e6ca9 | 1023 | size = 16; /* Minimum 2Mib per-magazine */ |
48e0606a | 1024 | else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE)) |
ea3e6ca9 | 1025 | size = 64; /* Minimum 256Kib per-magazine */ |
48e0606a | 1026 | else if (P2ROUNDUP(skc->skc_obj_size, align) > (PAGE_SIZE / 4)) |
ea3e6ca9 | 1027 | size = 128; /* Minimum 128Kib per-magazine */ |
4afaaefa | 1028 | else |
ea3e6ca9 | 1029 | size = 256; |
4afaaefa | 1030 | |
1031 | RETURN(size); | |
1032 | } | |
1033 | ||
ea3e6ca9 BB |
1034 | /* |
1035 | * Allocate a per-cpu magazine to associate with a specific core. | |
1036 | */ | |
4afaaefa | 1037 | static spl_kmem_magazine_t * |
1038 | spl_magazine_alloc(spl_kmem_cache_t *skc, int node) | |
1039 | { | |
1040 | spl_kmem_magazine_t *skm; | |
1041 | int size = sizeof(spl_kmem_magazine_t) + | |
1042 | sizeof(void *) * skc->skc_mag_size; | |
1043 | ENTRY; | |
1044 | ||
ea3e6ca9 | 1045 | skm = kmem_alloc_node(size, GFP_KERNEL | __GFP_NOFAIL, node); |
4afaaefa | 1046 | if (skm) { |
1047 | skm->skm_magic = SKM_MAGIC; | |
1048 | skm->skm_avail = 0; | |
1049 | skm->skm_size = skc->skc_mag_size; | |
1050 | skm->skm_refill = skc->skc_mag_refill; | |
9b1b8e4c BB |
1051 | skm->skm_cache = skc; |
1052 | spl_init_delayed_work(&skm->skm_work, spl_magazine_age, skm); | |
ea3e6ca9 | 1053 | skm->skm_age = jiffies; |
4afaaefa | 1054 | } |
1055 | ||
1056 | RETURN(skm); | |
1057 | } | |
1058 | ||
ea3e6ca9 BB |
1059 | /* |
1060 | * Free a per-cpu magazine associated with a specific core. | |
1061 | */ | |
4afaaefa | 1062 | static void |
1063 | spl_magazine_free(spl_kmem_magazine_t *skm) | |
1064 | { | |
a0f6da3d | 1065 | int size = sizeof(spl_kmem_magazine_t) + |
1066 | sizeof(void *) * skm->skm_size; | |
1067 | ||
4afaaefa | 1068 | ENTRY; |
1069 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
1070 | ASSERT(skm->skm_avail == 0); | |
a0f6da3d | 1071 | |
1072 | kmem_free(skm, size); | |
4afaaefa | 1073 | EXIT; |
1074 | } | |
1075 | ||
ea3e6ca9 BB |
1076 | /* |
1077 | * Create all per-cpu magazines of reasonable sizes. | |
1078 | */ | |
4afaaefa | 1079 | static int |
1080 | spl_magazine_create(spl_kmem_cache_t *skc) | |
1081 | { | |
37db7d8c | 1082 | int i; |
4afaaefa | 1083 | ENTRY; |
1084 | ||
1085 | skc->skc_mag_size = spl_magazine_size(skc); | |
ea3e6ca9 | 1086 | skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2; |
4afaaefa | 1087 | |
37db7d8c BB |
1088 | for_each_online_cpu(i) { |
1089 | skc->skc_mag[i] = spl_magazine_alloc(skc, cpu_to_node(i)); | |
1090 | if (!skc->skc_mag[i]) { | |
1091 | for (i--; i >= 0; i--) | |
1092 | spl_magazine_free(skc->skc_mag[i]); | |
4afaaefa | 1093 | |
37db7d8c BB |
1094 | RETURN(-ENOMEM); |
1095 | } | |
1096 | } | |
4afaaefa | 1097 | |
9b1b8e4c BB |
1098 | /* Only after everything is allocated schedule magazine work */ |
1099 | for_each_online_cpu(i) | |
1100 | schedule_delayed_work_on(i, &skc->skc_mag[i]->skm_work, | |
1101 | skc->skc_delay / 3 * HZ); | |
1102 | ||
37db7d8c | 1103 | RETURN(0); |
4afaaefa | 1104 | } |
1105 | ||
ea3e6ca9 BB |
1106 | /* |
1107 | * Destroy all per-cpu magazines. | |
1108 | */ | |
4afaaefa | 1109 | static void |
1110 | spl_magazine_destroy(spl_kmem_cache_t *skc) | |
1111 | { | |
37db7d8c BB |
1112 | spl_kmem_magazine_t *skm; |
1113 | int i; | |
4afaaefa | 1114 | ENTRY; |
37db7d8c BB |
1115 | |
1116 | for_each_online_cpu(i) { | |
1117 | skm = skc->skc_mag[i]; | |
1118 | (void)spl_cache_flush(skc, skm, skm->skm_avail); | |
1119 | spl_magazine_free(skm); | |
1120 | } | |
1121 | ||
4afaaefa | 1122 | EXIT; |
1123 | } | |
1124 | ||
ea3e6ca9 BB |
1125 | /* |
1126 | * Create an object cache based on the following arguments: | |
1127 | * name cache name | |
1128 | * size cache object size | |
1129 | * align cache object alignment | |
1130 | * ctor cache object constructor | |
1131 | * dtor cache object destructor | |
1132 | * reclaim cache object reclaim | |
1133 | * priv cache private data for ctor/dtor/reclaim | |
1134 | * vmp unused must be NULL | |
1135 | * flags | |
1136 | * KMC_NOTOUCH Disable cache object aging (unsupported) | |
1137 | * KMC_NODEBUG Disable debugging (unsupported) | |
1138 | * KMC_NOMAGAZINE Disable magazine (unsupported) | |
1139 | * KMC_NOHASH Disable hashing (unsupported) | |
1140 | * KMC_QCACHE Disable qcache (unsupported) | |
1141 | * KMC_KMEM Force kmem backed cache | |
1142 | * KMC_VMEM Force vmem backed cache | |
1143 | * KMC_OFFSLAB Locate objects off the slab | |
1144 | */ | |
2fb9b26a | 1145 | spl_kmem_cache_t * |
1146 | spl_kmem_cache_create(char *name, size_t size, size_t align, | |
1147 | spl_kmem_ctor_t ctor, | |
1148 | spl_kmem_dtor_t dtor, | |
1149 | spl_kmem_reclaim_t reclaim, | |
1150 | void *priv, void *vmp, int flags) | |
1151 | { | |
1152 | spl_kmem_cache_t *skc; | |
a1502d76 | 1153 | int rc, kmem_flags = KM_SLEEP; |
2fb9b26a | 1154 | ENTRY; |
937879f1 | 1155 | |
a1502d76 | 1156 | ASSERTF(!(flags & KMC_NOMAGAZINE), "Bad KMC_NOMAGAZINE (%x)\n", flags); |
1157 | ASSERTF(!(flags & KMC_NOHASH), "Bad KMC_NOHASH (%x)\n", flags); | |
1158 | ASSERTF(!(flags & KMC_QCACHE), "Bad KMC_QCACHE (%x)\n", flags); | |
48e0606a | 1159 | ASSERT(vmp == NULL); |
a1502d76 | 1160 | |
2fb9b26a | 1161 | /* We may be called when there is a non-zero preempt_count or |
1162 | * interrupts are disabled in which case we must not sleep. | |
1163 | */ | |
e9d7a2be | 1164 | if (current_thread_info()->preempt_count || irqs_disabled()) |
2fb9b26a | 1165 | kmem_flags = KM_NOSLEEP; |
0a6fd143 | 1166 | |
2fb9b26a | 1167 | /* Allocate new cache memory and initialize. */ |
ff449ac4 | 1168 | skc = (spl_kmem_cache_t *)kmem_zalloc(sizeof(*skc), kmem_flags); |
e9d7a2be | 1169 | if (skc == NULL) |
2fb9b26a | 1170 | RETURN(NULL); |
d61e12af | 1171 | |
2fb9b26a | 1172 | skc->skc_magic = SKC_MAGIC; |
2fb9b26a | 1173 | skc->skc_name_size = strlen(name) + 1; |
1174 | skc->skc_name = (char *)kmem_alloc(skc->skc_name_size, kmem_flags); | |
1175 | if (skc->skc_name == NULL) { | |
1176 | kmem_free(skc, sizeof(*skc)); | |
1177 | RETURN(NULL); | |
1178 | } | |
1179 | strncpy(skc->skc_name, name, skc->skc_name_size); | |
1180 | ||
e9d7a2be | 1181 | skc->skc_ctor = ctor; |
1182 | skc->skc_dtor = dtor; | |
1183 | skc->skc_reclaim = reclaim; | |
2fb9b26a | 1184 | skc->skc_private = priv; |
1185 | skc->skc_vmp = vmp; | |
1186 | skc->skc_flags = flags; | |
1187 | skc->skc_obj_size = size; | |
48e0606a | 1188 | skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN; |
2fb9b26a | 1189 | skc->skc_delay = SPL_KMEM_CACHE_DELAY; |
37db7d8c | 1190 | skc->skc_reap = SPL_KMEM_CACHE_REAP; |
ea3e6ca9 | 1191 | atomic_set(&skc->skc_ref, 0); |
2fb9b26a | 1192 | |
2fb9b26a | 1193 | INIT_LIST_HEAD(&skc->skc_list); |
1194 | INIT_LIST_HEAD(&skc->skc_complete_list); | |
1195 | INIT_LIST_HEAD(&skc->skc_partial_list); | |
d46630e0 | 1196 | spin_lock_init(&skc->skc_lock); |
e9d7a2be | 1197 | skc->skc_slab_fail = 0; |
1198 | skc->skc_slab_create = 0; | |
1199 | skc->skc_slab_destroy = 0; | |
2fb9b26a | 1200 | skc->skc_slab_total = 0; |
1201 | skc->skc_slab_alloc = 0; | |
1202 | skc->skc_slab_max = 0; | |
1203 | skc->skc_obj_total = 0; | |
1204 | skc->skc_obj_alloc = 0; | |
1205 | skc->skc_obj_max = 0; | |
a1502d76 | 1206 | |
48e0606a BB |
1207 | if (align) { |
1208 | ASSERT((align & (align - 1)) == 0); /* Power of two */ | |
1209 | ASSERT(align >= SPL_KMEM_CACHE_ALIGN); /* Minimum size */ | |
1210 | skc->skc_obj_align = align; | |
1211 | } | |
1212 | ||
a1502d76 | 1213 | /* If none passed select a cache type based on object size */ |
1214 | if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM))) { | |
48e0606a BB |
1215 | if (P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align) < |
1216 | (PAGE_SIZE / 8)) { | |
a1502d76 | 1217 | skc->skc_flags |= KMC_KMEM; |
1218 | } else { | |
1219 | skc->skc_flags |= KMC_VMEM; | |
1220 | } | |
1221 | } | |
1222 | ||
48e0606a BB |
1223 | rc = spl_slab_size(skc, &skc->skc_slab_objs, &skc->skc_slab_size); |
1224 | if (rc) | |
1225 | GOTO(out, rc); | |
4afaaefa | 1226 | |
1227 | rc = spl_magazine_create(skc); | |
48e0606a BB |
1228 | if (rc) |
1229 | GOTO(out, rc); | |
2fb9b26a | 1230 | |
ea3e6ca9 | 1231 | spl_init_delayed_work(&skc->skc_work, spl_cache_age, skc); |
37db7d8c | 1232 | schedule_delayed_work(&skc->skc_work, skc->skc_delay / 3 * HZ); |
ea3e6ca9 | 1233 | |
2fb9b26a | 1234 | down_write(&spl_kmem_cache_sem); |
e9d7a2be | 1235 | list_add_tail(&skc->skc_list, &spl_kmem_cache_list); |
2fb9b26a | 1236 | up_write(&spl_kmem_cache_sem); |
1237 | ||
e9d7a2be | 1238 | RETURN(skc); |
48e0606a BB |
1239 | out: |
1240 | kmem_free(skc->skc_name, skc->skc_name_size); | |
1241 | kmem_free(skc, sizeof(*skc)); | |
1242 | RETURN(NULL); | |
f1ca4da6 | 1243 | } |
2fb9b26a | 1244 | EXPORT_SYMBOL(spl_kmem_cache_create); |
f1ca4da6 | 1245 | |
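A hedged end-to-end usage sketch of the interface above, reusing the hypothetical my_node_t constructor/destructor sketched earlier; spl_kmem_cache_alloc() and spl_kmem_cache_free() are defined later in this file, and passing 0 for align and flags lets the cache choose its own alignment and backing store:

/* Illustrative only: create, use and destroy a cache of my_node_t objects. */
spl_kmem_cache_t *cache;
my_node_t *mn;

cache = spl_kmem_cache_create("my_node_cache", sizeof (my_node_t), 0,
    my_node_ctor, my_node_dtor, NULL, NULL, NULL, 0);

mn = spl_kmem_cache_alloc(cache, KM_SLEEP);	/* returns a constructed object */
/* ... use mn ... */
spl_kmem_cache_free(cache, mn);

spl_kmem_cache_destroy(cache);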
ea3e6ca9 BB |
1246 | /* |
1247 | * Destroy a cache and all objects associated with the cache. | |
1248 | */ | |
2fb9b26a | 1249 | void |
1250 | spl_kmem_cache_destroy(spl_kmem_cache_t *skc) | |
f1ca4da6 | 1251 | { |
ea3e6ca9 | 1252 | DECLARE_WAIT_QUEUE_HEAD(wq); |
9b1b8e4c | 1253 | int i; |
2fb9b26a | 1254 | ENTRY; |
f1ca4da6 | 1255 | |
e9d7a2be | 1256 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1257 | ||
1258 | down_write(&spl_kmem_cache_sem); | |
1259 | list_del_init(&skc->skc_list); | |
1260 | up_write(&spl_kmem_cache_sem); | |
2fb9b26a | 1261 | |
ea3e6ca9 BB |
1262 | /* Cancel and wait for any pending delayed work */ | |
1263 | ASSERT(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags)); | |
1264 | cancel_delayed_work(&skc->skc_work); | |
9b1b8e4c BB |
1265 | for_each_online_cpu(i) |
1266 | cancel_delayed_work(&skc->skc_mag[i]->skm_work); | |
1267 | ||
ea3e6ca9 BB |
1268 | flush_scheduled_work(); |
1269 | ||
1270 | /* Wait until all current callers complete, this is mainly | |
1271 | * to catch the case where a low memory situation triggers a | |
1272 | * cache reaping action which races with this destroy. */ | |
1273 | wait_event(wq, atomic_read(&skc->skc_ref) == 0); | |
1274 | ||
4afaaefa | 1275 | spl_magazine_destroy(skc); |
37db7d8c | 1276 | spl_slab_reclaim(skc, 0, 1); |
d46630e0 | 1277 | spin_lock(&skc->skc_lock); |
d6a26c6a | 1278 | |
2fb9b26a | 1279 | /* Validate there are no objects in use and free all the |
4afaaefa | 1280 | * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. */ |
ea3e6ca9 BB |
1281 | ASSERT3U(skc->skc_slab_alloc, ==, 0); |
1282 | ASSERT3U(skc->skc_obj_alloc, ==, 0); | |
1283 | ASSERT3U(skc->skc_slab_total, ==, 0); | |
1284 | ASSERT3U(skc->skc_obj_total, ==, 0); | |
2fb9b26a | 1285 | ASSERT(list_empty(&skc->skc_complete_list)); |
a1502d76 | 1286 | |
2fb9b26a | 1287 | kmem_free(skc->skc_name, skc->skc_name_size); |
d46630e0 | 1288 | spin_unlock(&skc->skc_lock); |
ff449ac4 | 1289 | |
4afaaefa | 1290 | kmem_free(skc, sizeof(*skc)); |
2fb9b26a | 1291 | |
1292 | EXIT; | |
f1ca4da6 | 1293 | } |
2fb9b26a | 1294 | EXPORT_SYMBOL(spl_kmem_cache_destroy); |
f1ca4da6 | 1295 | |
ea3e6ca9 BB |
1296 | /* |
1297 | * Allocate an object from a slab attached to the cache. This is used to | |
1298 | * repopulate the per-cpu magazine caches in batches when they run low. | |
1299 | */ | |
4afaaefa | 1300 | static void * |
1301 | spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) | |
f1ca4da6 | 1302 | { |
2fb9b26a | 1303 | spl_kmem_obj_t *sko; |
f1ca4da6 | 1304 | |
e9d7a2be | 1305 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1306 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
4afaaefa | 1307 | ASSERT(spin_is_locked(&skc->skc_lock)); |
2fb9b26a | 1308 | |
a1502d76 | 1309 | sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list); |
4afaaefa | 1310 | ASSERT(sko->sko_magic == SKO_MAGIC); |
1311 | ASSERT(sko->sko_addr != NULL); | |
2fb9b26a | 1312 | |
a1502d76 | 1313 | /* Remove from sks_free_list */ |
4afaaefa | 1314 | list_del_init(&sko->sko_list); |
2fb9b26a | 1315 | |
4afaaefa | 1316 | sks->sks_age = jiffies; |
1317 | sks->sks_ref++; | |
1318 | skc->skc_obj_alloc++; | |
2fb9b26a | 1319 | |
4afaaefa | 1320 | /* Track max obj usage statistics */ |
1321 | if (skc->skc_obj_alloc > skc->skc_obj_max) | |
1322 | skc->skc_obj_max = skc->skc_obj_alloc; | |
2fb9b26a | 1323 | |
4afaaefa | 1324 | /* Track max slab usage statistics */ |
1325 | if (sks->sks_ref == 1) { | |
1326 | skc->skc_slab_alloc++; | |
f1ca4da6 | 1327 | |
4afaaefa | 1328 | if (skc->skc_slab_alloc > skc->skc_slab_max) |
1329 | skc->skc_slab_max = skc->skc_slab_alloc; | |
2fb9b26a | 1330 | } |
1331 | ||
4afaaefa | 1332 | return sko->sko_addr; |
1333 | } | |
c30df9c8 | 1334 | |
ea3e6ca9 BB |
1335 | /* |
1336 | * No available objects on any slabs, create a new slab. Since this | |
1337 | * is an expensive operation we do it without holding the spinlock and | |
1338 | * only briefly acquire it when we link in the fully allocated and | |
1339 | * constructed slab. | |
4afaaefa | 1340 | */ |
1341 | static spl_kmem_slab_t * | |
1342 | spl_cache_grow(spl_kmem_cache_t *skc, int flags) | |
1343 | { | |
e9d7a2be | 1344 | spl_kmem_slab_t *sks; |
4afaaefa | 1345 | ENTRY; |
f1ca4da6 | 1346 | |
e9d7a2be | 1347 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1348 | local_irq_enable(); |
1349 | might_sleep(); | |
e9d7a2be | 1350 | |
ea3e6ca9 BB |
1351 | /* |
1352 | * Before allocating a new slab check if the slab is being reaped. | |
1353 | * If it is there is a good chance we can wait until it finishes | |
1354 | * and then use one of the newly freed but not aged-out slabs. | |
1355 | */ | |
1356 | if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) { | |
1357 | schedule(); | |
1358 | GOTO(out, sks= NULL); | |
4afaaefa | 1359 | } |
2fb9b26a | 1360 | |
ea3e6ca9 BB |
1361 | /* Allocate a new slab for the cache */ |
1362 | sks = spl_slab_alloc(skc, flags | __GFP_NORETRY | __GFP_NOWARN); | |
1363 | if (sks == NULL) | |
1364 | GOTO(out, sks = NULL); | |
4afaaefa | 1365 | |
ea3e6ca9 | 1366 | /* Link the new empty slab in to the end of skc_partial_list. */ |
d46630e0 | 1367 | spin_lock(&skc->skc_lock); |
2fb9b26a | 1368 | skc->skc_slab_total++; |
1369 | skc->skc_obj_total += sks->sks_objs; | |
1370 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
d46630e0 | 1371 | spin_unlock(&skc->skc_lock); |
ea3e6ca9 BB |
1372 | out: |
1373 | local_irq_disable(); | |
4afaaefa | 1374 | |
1375 | RETURN(sks); | |
f1ca4da6 | 1376 | } |
1377 | ||
ea3e6ca9 BB |
1378 | /* |
1379 | * Refill a per-cpu magazine with objects from the slabs for this | |
1380 | * cache. Ideally the magazine can be repopulated using existing | |
1381 | * objects which have been released; however, if we are unable to | |
1382 | * locate enough free objects new slabs of objects will be created. | |
1383 | */ | |
4afaaefa | 1384 | static int |
1385 | spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) | |
f1ca4da6 | 1386 | { |
e9d7a2be | 1387 | spl_kmem_slab_t *sks; |
1388 | int rc = 0, refill; | |
937879f1 | 1389 | ENTRY; |
f1ca4da6 | 1390 | |
e9d7a2be | 1391 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1392 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
1393 | ||
e9d7a2be | 1394 | refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail); |
d46630e0 | 1395 | spin_lock(&skc->skc_lock); |
ff449ac4 | 1396 | |
4afaaefa | 1397 | while (refill > 0) { |
ea3e6ca9 | 1398 | /* No slabs available we may need to grow the cache */ |
4afaaefa | 1399 | if (list_empty(&skc->skc_partial_list)) { |
1400 | spin_unlock(&skc->skc_lock); | |
ff449ac4 | 1401 | |
4afaaefa | 1402 | sks = spl_cache_grow(skc, flags); |
1403 | if (!sks) | |
e9d7a2be | 1404 | GOTO(out, rc); |
4afaaefa | 1405 | |
1406 | /* Rescheduled to different CPU skm is not local */ | |
1407 | if (skm != skc->skc_mag[smp_processor_id()]) | |
e9d7a2be | 1408 | GOTO(out, rc); |
1409 | ||
1410 | /* Potentially rescheduled to the same CPU but | |
1411 | * allocations may have occurred from this CPU while | |
1412 | * we were sleeping so recalculate max refill. */ | |
1413 | refill = MIN(refill, skm->skm_size - skm->skm_avail); | |
4afaaefa | 1414 | |
1415 | spin_lock(&skc->skc_lock); | |
1416 | continue; | |
1417 | } | |
d46630e0 | 1418 | |
4afaaefa | 1419 | /* Grab the next available slab */ |
1420 | sks = list_entry((&skc->skc_partial_list)->next, | |
1421 | spl_kmem_slab_t, sks_list); | |
1422 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
1423 | ASSERT(sks->sks_ref < sks->sks_objs); | |
1424 | ASSERT(!list_empty(&sks->sks_free_list)); | |
d46630e0 | 1425 | |
4afaaefa | 1426 | /* Consume as many objects as needed to refill the per-cpu |
e9d7a2be | 1427 | * magazine. We must also be careful not to overfill it. */ |
1428 | while (sks->sks_ref < sks->sks_objs && refill-- > 0 && ++rc) { | |
1429 | ASSERT(skm->skm_avail < skm->skm_size); | |
1430 | ASSERT(rc < skm->skm_size); | |
4afaaefa | 1431 | skm->skm_objs[skm->skm_avail++]=spl_cache_obj(skc,sks); |
e9d7a2be | 1432 | } |
f1ca4da6 | 1433 | |
4afaaefa | 1434 | /* Move slab to skc_complete_list when full */ |
1435 | if (sks->sks_ref == sks->sks_objs) { | |
1436 | list_del(&sks->sks_list); | |
1437 | list_add(&sks->sks_list, &skc->skc_complete_list); | |
2fb9b26a | 1438 | } |
1439 | } | |
57d86234 | 1440 | |
4afaaefa | 1441 | spin_unlock(&skc->skc_lock); |
1442 | out: | |
1443 | /* Returns the number of entries added to the per-cpu magazine */ |
e9d7a2be | 1444 | RETURN(rc); |
4afaaefa | 1445 | } |
1446 | ||
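A minimal userspace sketch of the refill clamp used in spl_cache_refill() above; the values are hypothetical and MIN() is redefined locally for illustration:

/* Hypothetical illustration of the refill clamp in spl_cache_refill(). */
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int
main(void)
{
	int skm_size = 32;	/* magazine capacity (hypothetical) */
	int skm_avail = 28;	/* objects already cached (hypothetical) */
	int skm_refill = 16;	/* preferred refill batch (hypothetical) */
	int refill = MIN(skm_refill, skm_size - skm_avail);

	/* Prints "refill = 4": the magazine is topped up, never overfilled. */
	printf("refill = %d\n", refill);
	return (0);
}

The same clamp is recomputed after spl_cache_grow() may have slept, since skm_avail can change while the lock is dropped.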
ea3e6ca9 BB |
1447 | /* |
1448 | * Release an object back to the slab from which it came. | |
1449 | */ | |
4afaaefa | 1450 | static void |
1451 | spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) | |
1452 | { | |
e9d7a2be | 1453 | spl_kmem_slab_t *sks = NULL; |
4afaaefa | 1454 | spl_kmem_obj_t *sko = NULL; |
1455 | ENTRY; | |
1456 | ||
e9d7a2be | 1457 | ASSERT(skc->skc_magic == SKC_MAGIC); |
4afaaefa | 1458 | ASSERT(spin_is_locked(&skc->skc_lock)); |
1459 | ||
48e0606a | 1460 | sko = obj + P2ROUNDUP(skc->skc_obj_size, skc->skc_obj_align); |
a1502d76 | 1461 | ASSERT(sko->sko_magic == SKO_MAGIC); |
4afaaefa | 1462 | |
1463 | sks = sko->sko_slab; | |
a1502d76 | 1464 | ASSERT(sks->sks_magic == SKS_MAGIC); |
2fb9b26a | 1465 | ASSERT(sks->sks_cache == skc); |
2fb9b26a | 1466 | list_add(&sko->sko_list, &sks->sks_free_list); |
d6a26c6a | 1467 | |
2fb9b26a | 1468 | sks->sks_age = jiffies; |
4afaaefa | 1469 | sks->sks_ref--; |
2fb9b26a | 1470 | skc->skc_obj_alloc--; |
f1ca4da6 | 1471 | |
2fb9b26a | 1472 | /* Move slab to skc_partial_list when no longer full. Slabs |
4afaaefa | 1473 | * are added to the head to keep the partial list in quasi-full |
1474 | * sorted order. Fuller at the head, emptier at the tail. */ |
1475 | if (sks->sks_ref == (sks->sks_objs - 1)) { | |
2fb9b26a | 1476 | list_del(&sks->sks_list); |
1477 | list_add(&sks->sks_list, &skc->skc_partial_list); | |
1478 | } | |
f1ca4da6 | 1479 | |
2fb9b26a | 1480 | /* Move empty slabs to the end of the partial list so |
4afaaefa | 1481 | * they can be easily found and freed during reclamation. */ |
1482 | if (sks->sks_ref == 0) { | |
2fb9b26a | 1483 | list_del(&sks->sks_list); |
1484 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
1485 | skc->skc_slab_alloc--; | |
1486 | } | |
1487 | ||
4afaaefa | 1488 | EXIT; |
1489 | } | |
1490 | ||
ea3e6ca9 BB |
1491 | /* |
1492 | * Release a batch of objects from a per-cpu magazine back to their | |
1493 | * respective slabs. This occurs when we exceed the magazine size, | |
1494 | * are under memory pressure, when the cache is idle, or during | |
1495 | * cache cleanup. The flush argument contains the number of entries | |
1496 | * to remove from the magazine. | |
1497 | */ | |
4afaaefa | 1498 | static int |
1499 | spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) | |
1500 | { | |
1501 | int i, count = MIN(flush, skm->skm_avail); | |
1502 | ENTRY; | |
1503 | ||
e9d7a2be | 1504 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1505 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
4afaaefa | 1506 | |
ea3e6ca9 BB |
1507 | /* |
1508 | * XXX: Currently we simply return objects from the magazine to | |
1509 | * the slabs in FIFO order. The ideal thing to do from a memory |
1510 | * fragmentation standpoint is to cheaply determine the set of | |
1511 | * objects in the magazine which will result in the largest | |
1512 | * number of free slabs if released from the magazine. | |
1513 | */ | |
4afaaefa | 1514 | spin_lock(&skc->skc_lock); |
1515 | for (i = 0; i < count; i++) | |
1516 | spl_cache_shrink(skc, skm->skm_objs[i]); | |
1517 | ||
e9d7a2be | 1518 | skm->skm_avail -= count; |
1519 | memmove(skm->skm_objs, &(skm->skm_objs[count]), | |
4afaaefa | 1520 | sizeof(void *) * skm->skm_avail); |
1521 | ||
d46630e0 | 1522 | spin_unlock(&skc->skc_lock); |
4afaaefa | 1523 | |
1524 | RETURN(count); | |
1525 | } | |
1526 | ||
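A small userspace sketch, with hypothetical values, of how the memmove() in spl_cache_flush() compacts the magazine after the oldest (FIFO) entries have been returned to their slabs:

/* Hypothetical illustration of the FIFO compaction in spl_cache_flush(). */
#include <stdio.h>
#include <string.h>

int
main(void)
{
	void *skm_objs[8] = { (void *)1, (void *)2, (void *)3,
	    (void *)4, (void *)5 };
	int skm_avail = 5;	/* cached objects (hypothetical) */
	int count = 2;		/* flush the two oldest entries */
	int i;

	/* Entries [0, count) go back to their slabs; shift the rest down. */
	skm_avail -= count;
	memmove(skm_objs, &skm_objs[count], sizeof (void *) * skm_avail);

	/* Prints "3 4 5": the surviving entries now start at index 0. */
	for (i = 0; i < skm_avail; i++)
		printf("%ld ", (long)skm_objs[i]);
	printf("\n");
	return (0);
}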
ea3e6ca9 BB |
1527 | /* |
1528 | * Allocate an object from the per-cpu magazine, or if the magazine | |
1529 | * is empty directly allocate from a slab and repopulate the magazine. | |
1530 | */ | |
4afaaefa | 1531 | void * |
1532 | spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) | |
1533 | { | |
1534 | spl_kmem_magazine_t *skm; | |
1535 | unsigned long irq_flags; | |
1536 | void *obj = NULL; | |
1537 | ENTRY; | |
1538 | ||
e9d7a2be | 1539 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1540 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
1541 | ASSERT(flags & KM_SLEEP); | |
1542 | atomic_inc(&skc->skc_ref); | |
4afaaefa | 1543 | local_irq_save(irq_flags); |
1544 | ||
1545 | restart: | |
1546 | /* Safe to update per-cpu structure without lock, but |
1547 | * in the restart case we must be careful to reacquire |
1548 | * the local magazine since it may have changed |
1549 | * while we were growing the cache. */ |
1550 | skm = skc->skc_mag[smp_processor_id()]; | |
e9d7a2be | 1551 | ASSERTF(skm->skm_magic == SKM_MAGIC, "%x != %x: %s/%p/%p %x/%x/%x\n", |
1552 | skm->skm_magic, SKM_MAGIC, skc->skc_name, skc, skm, | |
1553 | skm->skm_size, skm->skm_refill, skm->skm_avail); | |
4afaaefa | 1554 | |
1555 | if (likely(skm->skm_avail)) { | |
1556 | /* Object available in CPU cache, use it */ | |
1557 | obj = skm->skm_objs[--skm->skm_avail]; | |
ea3e6ca9 | 1558 | skm->skm_age = jiffies; |
4afaaefa | 1559 | } else { |
1560 | /* Per-CPU cache empty, directly allocate from | |
1561 | * the slab and refill the per-CPU cache. */ | |
1562 | (void)spl_cache_refill(skc, skm, flags); | |
1563 | GOTO(restart, obj = NULL); | |
1564 | } | |
1565 | ||
1566 | local_irq_restore(irq_flags); | |
fece7c99 | 1567 | ASSERT(obj); |
48e0606a | 1568 | ASSERT(((unsigned long)(obj) % skc->skc_obj_align) == 0); |
4afaaefa | 1569 | |
1570 | /* Pre-emptively migrate object to CPU L1 cache */ | |
1571 | prefetchw(obj); | |
ea3e6ca9 | 1572 | atomic_dec(&skc->skc_ref); |
4afaaefa | 1573 | |
1574 | RETURN(obj); | |
1575 | } | |
1576 | EXPORT_SYMBOL(spl_kmem_cache_alloc); | |
1577 | ||
ea3e6ca9 BB |
1578 | /* |
1579 | * Free an object back to the local per-cpu magazine, there is no | |
1580 | * guarantee that this is the same magazine the object was originally | |
1581 | * allocated from. We may need to flush entire from the magazine | |
1582 | * back to the slabs to make space. | |
1583 | */ | |
4afaaefa | 1584 | void |
1585 | spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) | |
1586 | { | |
1587 | spl_kmem_magazine_t *skm; | |
1588 | unsigned long flags; | |
1589 | ENTRY; | |
1590 | ||
e9d7a2be | 1591 | ASSERT(skc->skc_magic == SKC_MAGIC); |
ea3e6ca9 BB |
1592 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
1593 | atomic_inc(&skc->skc_ref); | |
4afaaefa | 1594 | local_irq_save(flags); |
1595 | ||
1596 | /* Safe to update per-cpu structure without lock. Because |
1597 | * no remote memory allocation tracking is performed, |
1598 | * it is entirely possible to allocate an object from one |
1599 | * CPU cache and return it to another. */ |
1600 | skm = skc->skc_mag[smp_processor_id()]; | |
e9d7a2be | 1601 | ASSERT(skm->skm_magic == SKM_MAGIC); |
4afaaefa | 1602 | |
1603 | /* Per-CPU cache full, flush it to make space */ | |
1604 | if (unlikely(skm->skm_avail >= skm->skm_size)) | |
1605 | (void)spl_cache_flush(skc, skm, skm->skm_refill); | |
1606 | ||
1607 | /* Available space in cache, use it */ | |
1608 | skm->skm_objs[skm->skm_avail++] = obj; | |
1609 | ||
1610 | local_irq_restore(flags); | |
ea3e6ca9 | 1611 | atomic_dec(&skc->skc_ref); |
4afaaefa | 1612 | |
1613 | EXIT; | |
f1ca4da6 | 1614 | } |
2fb9b26a | 1615 | EXPORT_SYMBOL(spl_kmem_cache_free); |
5c2bb9b2 | 1616 | |
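For context, a minimal sketch of how a kernel consumer might drive the alloc/free pair exported above. The spl_kmem_cache_create()/spl_kmem_cache_destroy() prototypes are assumed from spl-kmem.h (a nine-argument create mirroring the Solaris kmem_cache_create()); the cache name and item type are hypothetical:

/* Hypothetical usage sketch; prototypes assumed from spl-kmem.h. */
typedef struct my_item {
	int	mi_value;
} my_item_t;

static void
my_cache_example(void)
{
	spl_kmem_cache_t *cache;
	my_item_t *item;

	/* Create a cache of fixed-size objects; no constructor, destructor,
	 * or reclaim callback is registered in this sketch. */
	cache = spl_kmem_cache_create("my_item_cache", sizeof (my_item_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	/* Allocate from the per-cpu magazine, sleeping if necessary. */
	item = spl_kmem_cache_alloc(cache, KM_SLEEP);
	item->mi_value = 42;

	/* Return the object to the local magazine (not necessarily the
	 * one it was allocated from). */
	spl_kmem_cache_free(cache, item);

	spl_kmem_cache_destroy(cache);
}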
ea3e6ca9 BB |
1617 | /* |
1618 | * The generic shrinker function for all caches. Under Linux a shrinker |
1619 | * may not be tightly coupled with a slab cache. In fact Linux |
1620 | * systematically tries calling all registered shrinker callbacks which |
1621 | * report that they contain unused objects. Because of this we only |
1622 | * register one shrinker function in the shim layer for all slab caches. |
1623 | * We always attempt to shrink all caches when this generic shrinker |
1624 | * is called. The shrinker should return the number of free objects |
1625 | * in the cache when called with nr_to_scan == 0, but not attempt to |
1626 | * free any objects. When nr_to_scan > 0 it is a request that nr_to_scan |
1627 | * objects should be freed; because Solaris semantics are to free |
1628 | * all available objects, we may free more objects than requested. |
1629 | */ | |
2fb9b26a | 1630 | static int |
4afaaefa | 1631 | spl_kmem_cache_generic_shrinker(int nr_to_scan, unsigned int gfp_mask) |
2fb9b26a | 1632 | { |
e9d7a2be | 1633 | spl_kmem_cache_t *skc; |
ea3e6ca9 | 1634 | int unused = 0; |
5c2bb9b2 | 1635 | |
e9d7a2be | 1636 | down_read(&spl_kmem_cache_sem); |
ea3e6ca9 BB |
1637 | list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) { |
1638 | if (nr_to_scan) | |
1639 | spl_kmem_cache_reap_now(skc); | |
1640 | ||
1641 | /* | |
1642 | * Presume everything alloc'ed is reclaimable; this ensures |
1643 | * we are called again with nr_to_scan > 0 so we can try to |
1644 | * reclaim. The exact number is not important either, so |
1645 | * we forgo taking this already highly contended lock. |
1646 | */ | |
1647 | unused += skc->skc_obj_alloc; | |
1648 | } | |
e9d7a2be | 1649 | up_read(&spl_kmem_cache_sem); |
2fb9b26a | 1650 | |
ea3e6ca9 | 1651 | return (unused * sysctl_vfs_cache_pressure) / 100; |
5c2bb9b2 | 1652 | } |
5c2bb9b2 | 1653 | |
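A hypothetical sketch of the two-phase contract described above. In practice the kernel VM performs these calls when shrinking slabs; this helper exists only to illustrate the nr_to_scan semantics:

/* Hypothetical caller illustrating the shrinker contract. */
static void
shrinker_contract_example(void)
{
	int freeable, nr_to_scan;

	/* Phase 1: nr_to_scan == 0 only queries the freeable object count. */
	freeable = spl_kmem_cache_generic_shrinker(0, GFP_KERNEL);

	/* Phase 2: request a portion back; this implementation may free
	 * more than asked for since every cache is reaped completely. */
	nr_to_scan = freeable / 2;
	if (nr_to_scan > 0)
		(void) spl_kmem_cache_generic_shrinker(nr_to_scan, GFP_KERNEL);
}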
ea3e6ca9 BB |
1654 | /* |
1655 | * Call the registered reclaim function for a cache. Depending on how | |
1656 | * many and which objects are released it may simply repopulate the | |
1657 | * local magazine which will then need to age-out. Objects which cannot | |
1658 | * fit in the magazine we will be released back to their slabs which will | |
1659 | * also need to age out before being release. This is all just best | |
1660 | * effort and we do not want to thrash creating and destroying slabs. | |
1661 | */ | |
57d86234 | 1662 | void |
2fb9b26a | 1663 | spl_kmem_cache_reap_now(spl_kmem_cache_t *skc) |
57d86234 | 1664 | { |
2fb9b26a | 1665 | ENTRY; |
e9d7a2be | 1666 | |
1667 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
ea3e6ca9 | 1668 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); |
2fb9b26a | 1669 | |
ea3e6ca9 BB |
1670 | /* Prevent concurrent cache reaping when contended */ |
1671 | if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) { | |
1672 | EXIT; | |
1673 | return; | |
1674 | } | |
2fb9b26a | 1675 | |
ea3e6ca9 | 1676 | atomic_inc(&skc->skc_ref); |
4afaaefa | 1677 | |
ea3e6ca9 BB |
1678 | if (skc->skc_reclaim) |
1679 | skc->skc_reclaim(skc->skc_private); | |
4afaaefa | 1680 | |
37db7d8c | 1681 | spl_slab_reclaim(skc, skc->skc_reap, 0); |
ea3e6ca9 BB |
1682 | clear_bit(KMC_BIT_REAPING, &skc->skc_flags); |
1683 | atomic_dec(&skc->skc_ref); | |
4afaaefa | 1684 | |
2fb9b26a | 1685 | EXIT; |
57d86234 | 1686 | } |
2fb9b26a | 1687 | EXPORT_SYMBOL(spl_kmem_cache_reap_now); |
57d86234 | 1688 | |
ea3e6ca9 BB |
1689 | /* |
1690 | * Reap all free slabs from all registered caches. | |
1691 | */ | |
f1b59d26 | 1692 | void |
2fb9b26a | 1693 | spl_kmem_reap(void) |
937879f1 | 1694 | { |
4afaaefa | 1695 | spl_kmem_cache_generic_shrinker(KMC_REAP_CHUNK, GFP_KERNEL); |
f1ca4da6 | 1696 | } |
2fb9b26a | 1697 | EXPORT_SYMBOL(spl_kmem_reap); |
5d86345d | 1698 | |
ff449ac4 | 1699 | #if defined(DEBUG_KMEM) && defined(DEBUG_KMEM_TRACKING) |
c6dc93d6 | 1700 | static char * |
4afaaefa | 1701 | spl_sprintf_addr(kmem_debug_t *kd, char *str, int len, int min) |
d6a26c6a | 1702 | { |
e9d7a2be | 1703 | int size = ((len - 1) < kd->kd_size) ? (len - 1) : kd->kd_size; |
d6a26c6a | 1704 | int i, flag = 1; |
1705 | ||
1706 | ASSERT(str != NULL && len >= 17); | |
e9d7a2be | 1707 | memset(str, 0, len); |
d6a26c6a | 1708 | |
1709 | /* Check for a fully printable string, and while we are at | |
1710 | * it, place the printable characters in the passed buffer. */ |
1711 | for (i = 0; i < size; i++) { | |
e9d7a2be | 1712 | str[i] = ((char *)(kd->kd_addr))[i]; |
1713 | if (isprint(str[i])) { | |
1714 | continue; | |
1715 | } else { | |
1716 | /* Minimum number of printable characters found | |
1717 | * to make it worthwhile to print this as ASCII. */ |
1718 | if (i > min) | |
1719 | break; | |
1720 | ||
1721 | flag = 0; | |
1722 | break; | |
1723 | } | |
d6a26c6a | 1724 | } |
1725 | ||
1726 | if (!flag) { | |
1727 | sprintf(str, "%02x%02x%02x%02x%02x%02x%02x%02x", | |
1728 | *((uint8_t *)kd->kd_addr), | |
1729 | *((uint8_t *)kd->kd_addr + 2), | |
1730 | *((uint8_t *)kd->kd_addr + 4), | |
1731 | *((uint8_t *)kd->kd_addr + 6), | |
1732 | *((uint8_t *)kd->kd_addr + 8), | |
1733 | *((uint8_t *)kd->kd_addr + 10), | |
1734 | *((uint8_t *)kd->kd_addr + 12), | |
1735 | *((uint8_t *)kd->kd_addr + 14)); | |
1736 | } | |
1737 | ||
1738 | return str; | |
1739 | } | |
1740 | ||
a1502d76 | 1741 | static int |
1742 | spl_kmem_init_tracking(struct list_head *list, spinlock_t *lock, int size) | |
1743 | { | |
1744 | int i; | |
1745 | ENTRY; | |
1746 | ||
1747 | spin_lock_init(lock); | |
1748 | INIT_LIST_HEAD(list); | |
1749 | ||
1750 | for (i = 0; i < size; i++) | |
1751 | INIT_HLIST_HEAD(&kmem_table[i]); | |
1752 | ||
1753 | RETURN(0); | |
1754 | } | |
1755 | ||
ff449ac4 | 1756 | static void |
1757 | spl_kmem_fini_tracking(struct list_head *list, spinlock_t *lock) | |
5d86345d | 1758 | { |
2fb9b26a | 1759 | unsigned long flags; |
1760 | kmem_debug_t *kd; | |
1761 | char str[17]; | |
a1502d76 | 1762 | ENTRY; |
2fb9b26a | 1763 | |
ff449ac4 | 1764 | spin_lock_irqsave(lock, flags); |
1765 | if (!list_empty(list)) | |
a0f6da3d | 1766 | printk(KERN_WARNING "%-16s %-5s %-16s %s:%s\n", "address", |
1767 | "size", "data", "func", "line"); | |
2fb9b26a | 1768 | |
ff449ac4 | 1769 | list_for_each_entry(kd, list, kd_list) |
a0f6da3d | 1770 | printk(KERN_WARNING "%p %-5d %-16s %s:%d\n", kd->kd_addr, |
b6b2acc6 | 1771 | (int)kd->kd_size, spl_sprintf_addr(kd, str, 17, 8), |
2fb9b26a | 1772 | kd->kd_func, kd->kd_line); |
1773 | ||
ff449ac4 | 1774 | spin_unlock_irqrestore(lock, flags); |
a1502d76 | 1775 | EXIT; |
ff449ac4 | 1776 | } |
1777 | #else /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ | |
a1502d76 | 1778 | #define spl_kmem_init_tracking(list, lock, size) |
ff449ac4 | 1779 | #define spl_kmem_fini_tracking(list, lock) |
1780 | #endif /* DEBUG_KMEM && DEBUG_KMEM_TRACKING */ | |
1781 | ||
36b313da BB |
1782 | static void |
1783 | spl_kmem_init_globals(void) | |
1784 | { | |
1785 | struct zone *zone; | |
1786 | ||
1787 | /* For now all zones are included; it may be wise to restrict |
1788 | * this to normal and highmem zones if we see problems. */ | |
1789 | for_each_zone(zone) { | |
1790 | ||
1791 | if (!populated_zone(zone)) | |
1792 | continue; | |
1793 | ||
1794 | minfree += zone->pages_min; | |
1795 | desfree += zone->pages_low; | |
1796 | lotsfree += zone->pages_high; | |
1797 | } | |
4ab13d3b BB |
1798 | |
1799 | /* Solaris default values */ | |
96dded38 BB |
1800 | swapfs_minfree = MAX(2*1024*1024 >> PAGE_SHIFT, physmem >> 3); |
1801 | swapfs_reserve = MIN(4*1024*1024 >> PAGE_SHIFT, physmem >> 4); | |
36b313da BB |
1802 | } |
1803 | ||
d1ff2312 BB |
1804 | /* |
1805 | * Called at module init when it is safe to use spl_kallsyms_lookup_name() | |
1806 | */ | |
1807 | int | |
1808 | spl_kmem_init_kallsyms_lookup(void) | |
1809 | { | |
1810 | #ifndef HAVE_GET_VMALLOC_INFO | |
1811 | get_vmalloc_info_fn = (get_vmalloc_info_t) | |
1812 | spl_kallsyms_lookup_name("get_vmalloc_info"); | |
e11d6c5f BB |
1813 | if (!get_vmalloc_info_fn) { |
1814 | printk(KERN_ERR "Error: Unknown symbol get_vmalloc_info\n"); | |
d1ff2312 | 1815 | return -EFAULT; |
e11d6c5f | 1816 | } |
d1ff2312 BB |
1817 | #endif /* HAVE_GET_VMALLOC_INFO */ |
1818 | ||
5232d256 BB |
1819 | #ifdef HAVE_PGDAT_HELPERS |
1820 | # ifndef HAVE_FIRST_ONLINE_PGDAT | |
d1ff2312 BB |
1821 | first_online_pgdat_fn = (first_online_pgdat_t) |
1822 | spl_kallsyms_lookup_name("first_online_pgdat"); | |
e11d6c5f BB |
1823 | if (!first_online_pgdat_fn) { |
1824 | printk(KERN_ERR "Error: Unknown symbol first_online_pgdat\n"); | |
d1ff2312 | 1825 | return -EFAULT; |
e11d6c5f | 1826 | } |
5232d256 | 1827 | # endif /* HAVE_FIRST_ONLINE_PGDAT */ |
d1ff2312 | 1828 | |
5232d256 | 1829 | # ifndef HAVE_NEXT_ONLINE_PGDAT |
d1ff2312 BB |
1830 | next_online_pgdat_fn = (next_online_pgdat_t) |
1831 | spl_kallsyms_lookup_name("next_online_pgdat"); | |
e11d6c5f BB |
1832 | if (!next_online_pgdat_fn) { |
1833 | printk(KERN_ERR "Error: Unknown symbol next_online_pgdat\n"); | |
d1ff2312 | 1834 | return -EFAULT; |
e11d6c5f | 1835 | } |
5232d256 | 1836 | # endif /* HAVE_NEXT_ONLINE_PGDAT */ |
d1ff2312 | 1837 | |
5232d256 | 1838 | # ifndef HAVE_NEXT_ZONE |
d1ff2312 BB |
1839 | next_zone_fn = (next_zone_t) |
1840 | spl_kallsyms_lookup_name("next_zone"); | |
e11d6c5f BB |
1841 | if (!next_zone_fn) { |
1842 | printk(KERN_ERR "Error: Unknown symbol next_zone\n"); | |
d1ff2312 | 1843 | return -EFAULT; |
e11d6c5f | 1844 | } |
5232d256 BB |
1845 | # endif /* HAVE_NEXT_ZONE */ |
1846 | ||
1847 | #else /* HAVE_PGDAT_HELPERS */ | |
1848 | ||
1849 | # ifndef HAVE_PGDAT_LIST | |
1850 | pgdat_list_addr = (struct pglist_data *) | |
1851 | spl_kallsyms_lookup_name("pgdat_list"); | |
1852 | if (!pgdat_list_addr) { | |
1853 | printk(KERN_ERR "Error: Unknown symbol pgdat_list\n"); | |
1854 | return -EFAULT; | |
1855 | } | |
1856 | # endif /* HAVE_PGDAT_LIST */ | |
1857 | #endif /* HAVE_PGDAT_HELPERS */ | |
d1ff2312 | 1858 | |
e11d6c5f BB |
1859 | #ifndef HAVE_ZONE_STAT_ITEM_FIA |
1860 | # ifndef HAVE_GET_ZONE_COUNTS | |
d1ff2312 BB |
1861 | get_zone_counts_fn = (get_zone_counts_t) |
1862 | spl_kallsyms_lookup_name("get_zone_counts"); | |
e11d6c5f BB |
1863 | if (!get_zone_counts_fn) { |
1864 | printk(KERN_ERR "Error: Unknown symbol get_zone_counts\n"); | |
d1ff2312 | 1865 | return -EFAULT; |
e11d6c5f | 1866 | } |
e11d6c5f BB |
1867 | # endif /* HAVE_GET_ZONE_COUNTS */ |
1868 | #endif /* HAVE_ZONE_STAT_ITEM_FIA */ | |
d1ff2312 BB |
1869 | |
1870 | /* | |
1871 | * It is now safe to initialize the global tunings which rely on | |
1872 | * the use of the for_each_zone() macro. This macro in turn |
1873 | * depends on the *_pgdat symbols which are now available. | |
1874 | */ | |
1875 | spl_kmem_init_globals(); | |
1876 | ||
1877 | return 0; | |
1878 | } | |
1879 | ||
a1502d76 | 1880 | int |
1881 | spl_kmem_init(void) | |
1882 | { | |
1883 | int rc = 0; | |
1884 | ENTRY; | |
1885 | ||
1886 | init_rwsem(&spl_kmem_cache_sem); | |
1887 | INIT_LIST_HEAD(&spl_kmem_cache_list); | |
1888 | ||
1889 | #ifdef HAVE_SET_SHRINKER | |
1890 | spl_kmem_cache_shrinker = set_shrinker(KMC_DEFAULT_SEEKS, | |
1891 | spl_kmem_cache_generic_shrinker); | |
1892 | if (spl_kmem_cache_shrinker == NULL) | |
f78a933f | 1893 | RETURN(rc = -ENOMEM); |
a1502d76 | 1894 | #else |
1895 | register_shrinker(&spl_kmem_cache_shrinker); | |
1896 | #endif | |
1897 | ||
1898 | #ifdef DEBUG_KMEM | |
1899 | atomic64_set(&kmem_alloc_used, 0); | |
1900 | atomic64_set(&vmem_alloc_used, 0); | |
1901 | ||
1902 | spl_kmem_init_tracking(&kmem_list, &kmem_lock, KMEM_TABLE_SIZE); | |
1903 | spl_kmem_init_tracking(&vmem_list, &vmem_lock, VMEM_TABLE_SIZE); | |
1904 | #endif | |
a1502d76 | 1905 | RETURN(rc); |
1906 | } | |
1907 | ||
ff449ac4 | 1908 | void |
1909 | spl_kmem_fini(void) | |
1910 | { | |
1911 | #ifdef DEBUG_KMEM | |
1912 | /* Display all unreclaimed memory addresses, including the | |
1913 | * allocation size and the first few bytes of what's located | |
1914 | * at that address to aid in debugging. Performance is not | |
1915 | * a serious concern here since it is module unload time. */ | |
1916 | if (atomic64_read(&kmem_alloc_used) != 0) | |
1917 | CWARN("kmem leaked %ld/%ld bytes\n", | |
550f1705 | 1918 | atomic64_read(&kmem_alloc_used), kmem_alloc_max); |
ff449ac4 | 1919 | |
2fb9b26a | 1920 | |
1921 | if (atomic64_read(&vmem_alloc_used) != 0) | |
1922 | CWARN("vmem leaked %ld/%ld bytes\n", | |
550f1705 | 1923 | atomic64_read(&vmem_alloc_used), vmem_alloc_max); |
2fb9b26a | 1924 | |
ff449ac4 | 1925 | spl_kmem_fini_tracking(&kmem_list, &kmem_lock); |
1926 | spl_kmem_fini_tracking(&vmem_list, &vmem_lock); | |
1927 | #endif /* DEBUG_KMEM */ | |
2fb9b26a | 1928 | ENTRY; |
1929 | ||
1930 | #ifdef HAVE_SET_SHRINKER | |
1931 | remove_shrinker(spl_kmem_cache_shrinker); | |
1932 | #else | |
1933 | unregister_shrinker(&spl_kmem_cache_shrinker); | |
5d86345d | 1934 | #endif |
2fb9b26a | 1935 | |
937879f1 | 1936 | EXIT; |
5d86345d | 1937 | } |