Commit | Line | Data |
---|---|---|
b34b9563 | 1 | /* |
e5b9b344 BB |
2 | * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC. |
3 | * Copyright (C) 2007 The Regents of the University of California. | |
4 | * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). | |
5 | * Written by Brian Behlendorf <behlendorf1@llnl.gov>. | |
6 | * UCRL-CODE-235197 | |
7 | * | |
8 | * This file is part of the SPL, Solaris Porting Layer. | |
9 | * For details, see <http://zfsonlinux.org/>. | |
10 | * | |
11 | * The SPL is free software; you can redistribute it and/or modify it | |
12 | * under the terms of the GNU General Public License as published by the | |
13 | * Free Software Foundation; either version 2 of the License, or (at your | |
14 | * option) any later version. | |
15 | * | |
16 | * The SPL is distributed in the hope that it will be useful, but WITHOUT | |
17 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
18 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
19 | * for more details. | |
20 | * | |
21 | * You should have received a copy of the GNU General Public License along | |
22 | * with the SPL. If not, see <http://www.gnu.org/licenses/>. | |
b34b9563 | 23 | */ |
e5b9b344 BB |
24 | |
25 | #include <sys/kmem.h> | |
26 | #include <sys/kmem_cache.h> | |
27 | #include <sys/taskq.h> | |
28 | #include <sys/timer.h> | |
29 | #include <sys/vmem.h> | |
30 | #include <linux/slab.h> | |
31 | #include <linux/swap.h> | |
32 | #include <linux/mm_compat.h> | |
33 | #include <linux/wait_compat.h> | |
34 | ||
35 | /* | |
36 | * Within the scope of this file the kmem_cache_* definitions | |
37 | * are removed to allow access to the real Linux slab allocator. | |
38 | */ | |
39 | #undef kmem_cache_destroy | |
40 | #undef kmem_cache_create | |
41 | #undef kmem_cache_alloc | |
42 | #undef kmem_cache_free | |
43 | ||
44 | ||
a988a35a RY |
45 | /* |
46 | * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}() | |
47 | * with smp_mb__{before,after}_atomic() because they were redundant. This is | |
48 | * only used inside our SLAB allocator, so we implement an internal wrapper | |
49 | * here to give us smp_mb__{before,after}_atomic() on older kernels. | |
50 | */ | |
51 | #ifndef smp_mb__before_atomic | |
52 | #define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x) | |
53 | #endif | |
54 | ||
55 | #ifndef smp_mb__after_atomic | |
56 | #define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x) | |
57 | #endif | |
58 | ||
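The only users of these wrappers in this file pair an atomic bit operation with a waitqueue wakeup; the fragment below is a minimal ordering sketch adapted from spl_cache_grow_work() further down.

```c
/*
 * Ordering sketch (adapted from spl_cache_grow_work() below): the barrier
 * guarantees the flag update is visible before any waiter is woken.
 */
clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
smp_mb__after_atomic();
wake_up_all(&skc->skc_waitq);
```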
e5b9b344 BB |
59 | /* |
60 | * Cache expiration was implemented because it was part of the default Solaris | |
61 | * kmem_cache behavior. The idea is that per-cpu objects which haven't been | |
62 | * accessed in several seconds should be returned to the cache. On the other | |
63 | * hand, Linux slabs never move objects back to the slabs unless there is | |
64 | * memory pressure on the system. By default the Linux method is enabled | |
65 | * because it has been shown to improve responsiveness on low memory systems. | |
66 | * This policy may be changed by setting KMC_EXPIRE_AGE or KMC_EXPIRE_MEM. | |
67 | */ | |
68 | unsigned int spl_kmem_cache_expire = KMC_EXPIRE_MEM; | |
69 | EXPORT_SYMBOL(spl_kmem_cache_expire); | |
70 | module_param(spl_kmem_cache_expire, uint, 0644); | |
71 | MODULE_PARM_DESC(spl_kmem_cache_expire, "By age (0x1) or low memory (0x2)"); | |
72 | ||
1a204968 BB |
73 | /* |
74 | * Cache magazines are an optimization designed to minimize the cost of | |
75 | * allocating memory. They do this by keeping a per-cpu cache of recently | |
76 | * freed objects, which can then be reallocated without taking a lock. This | |
77 | * can improve performance on highly contended caches. However, because | |
78 | * objects in magazines will prevent otherwise empty slabs from being | |
79 | * immediately released, this may not be ideal for low memory machines. | |
80 | * | |
81 | * For this reason spl_kmem_cache_magazine_size can be used to set a maximum | |
82 | * magazine size. When this value is set to 0 the magazine size will be | |
83 | * automatically determined based on the object size. Otherwise magazines | |
84 | * will be limited to 2-256 objects per magazine (i.e. per CPU). Magazines | |
85 | * may never be entirely disabled in this implementation. | |
86 | */ | |
87 | unsigned int spl_kmem_cache_magazine_size = 0; | |
88 | module_param(spl_kmem_cache_magazine_size, uint, 0444); | |
89 | MODULE_PARM_DESC(spl_kmem_cache_magazine_size, | |
90 | "Default magazine size (2-256), set automatically (0)\n"); | |
91 | ||
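For reference, a non-zero value is clamped rather than rejected; when the tunable is set, spl_magazine_size() below reduces to the following.

```c
/* Effective per-CPU magazine size for a non-zero tunable (see spl_magazine_size()). */
size = MAX(MIN(spl_kmem_cache_magazine_size, 256), 2);
```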
e5b9b344 BB |
92 | /* |
93 | * The default behavior is to report the number of objects remaining in the | |
94 | * cache. This allows the Linux VM to repeatedly reclaim objects from the | |
95 | * cache when memory is low to satisfy other memory allocations. Alternatively, | |
96 | * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache | |
97 | * is reclaimed. This may increase the likelihood of out of memory events. | |
98 | */ | |
99 | unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */; | |
100 | module_param(spl_kmem_cache_reclaim, uint, 0644); | |
101 | MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)"); | |
102 | ||
103 | unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB; | |
104 | module_param(spl_kmem_cache_obj_per_slab, uint, 0644); | |
105 | MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab"); | |
106 | ||
107 | unsigned int spl_kmem_cache_obj_per_slab_min = SPL_KMEM_CACHE_OBJ_PER_SLAB_MIN; | |
108 | module_param(spl_kmem_cache_obj_per_slab_min, uint, 0644); | |
109 | MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab_min, | |
b34b9563 | 110 | "Minimum number of objects per slab"); |
e5b9b344 | 111 | |
3018bffa | 112 | unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE; |
e5b9b344 BB |
113 | module_param(spl_kmem_cache_max_size, uint, 0644); |
114 | MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB"); | |
115 | ||
116 | /* | |
117 | * For small objects the Linux slab allocator should be used to make the most | |
118 | * efficient use of the memory. However, large objects are not supported by | |
119 | * the Linux slab and therefore the SPL implementation is preferred. A cutoff | |
120 | * of 16K was determined to be optimal for architectures using 4K pages. | |
121 | */ | |
122 | #if PAGE_SIZE == 4096 | |
123 | unsigned int spl_kmem_cache_slab_limit = 16384; | |
124 | #else | |
125 | unsigned int spl_kmem_cache_slab_limit = 0; | |
126 | #endif | |
127 | module_param(spl_kmem_cache_slab_limit, uint, 0644); | |
128 | MODULE_PARM_DESC(spl_kmem_cache_slab_limit, | |
b34b9563 | 129 | "Objects less than N bytes use the Linux slab"); |
e5b9b344 | 130 | |
3018bffa BB |
131 | /* |
132 | * This value defaults to a threshold designed to avoid allocations which | |
133 | * have been deemed costly by the kernel. | |
134 | */ | |
135 | unsigned int spl_kmem_cache_kmem_limit = | |
136 | ((1 << (PAGE_ALLOC_COSTLY_ORDER - 1)) * PAGE_SIZE) / | |
137 | SPL_KMEM_CACHE_OBJ_PER_SLAB; | |
e5b9b344 BB |
138 | module_param(spl_kmem_cache_kmem_limit, uint, 0644); |
139 | MODULE_PARM_DESC(spl_kmem_cache_kmem_limit, | |
b34b9563 | 140 | "Objects less than N bytes use kmalloc" |
e5b9b344 | 141 | |
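As a worked example of the default above, assuming the usual PAGE_ALLOC_COSTLY_ORDER of 3, 4 KiB pages, and an SPL_KMEM_CACHE_OBJ_PER_SLAB of 8 (all of these are build-dependent):

```c
/*
 * spl_kmem_cache_kmem_limit = ((1 << (3 - 1)) * 4096) / 8
 *                           = (4 * 4096) / 8
 *                           = 2048 bytes
 *
 * i.e. caches whose per-object footprint is roughly 2 KiB or less default
 * to kmalloc() (KMC_KMEM) backed slabs.
 */
```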
436ad60f BB |
142 | /* |
143 | * The number of threads available to allocate new slabs for caches. This | |
144 | * should not need to be tuned but it is available for performance analysis. | |
145 | */ | |
146 | unsigned int spl_kmem_cache_kmem_threads = 4; | |
147 | module_param(spl_kmem_cache_kmem_threads, uint, 0444); | |
148 | MODULE_PARM_DESC(spl_kmem_cache_kmem_threads, | |
149 | "Number of spl_kmem_cache threads"); | |
150 | ||
e5b9b344 BB |
151 | /* |
152 | * Slab allocation interfaces | |
153 | * | |
154 | * While the Linux slab implementation was inspired by the Solaris | |
155 | * implementation I cannot use it to emulate the Solaris APIs. I | |
156 | * require two features which are not provided by the Linux slab. | |
157 | * | |
158 | * 1) Constructors AND destructors. Recent versions of the Linux | |
159 | * kernel have removed support for destructors. This is a deal | |
160 | * breaker for the SPL which contains particularly expensive | |
161 | * initializers for mutexes, condition variables, etc. We also | |
162 | * require a minimal level of cleanup for these data types unlike | |
b34b9563 | 163 | * many Linux data types which do not need to be explicitly destroyed. |
e5b9b344 BB |
164 | * |
165 | * 2) Virtual address space backed slab. Callers of the Solaris slab | |
166 | * expect it to work well for both small and very large allocations. | |
167 | * Because of memory fragmentation the Linux slab which is backed | |
168 | * by kmalloc'ed memory performs very badly when confronted with | |
169 | * large numbers of large allocations. Basing the slab on the | |
170 | * virtual address space removes the need for contiguous pages | |
171 | * and greatly improves performance for large allocations. | |
172 | * | |
173 | * For these reasons, the SPL has its own slab implementation with | |
174 | * the needed features. It is not as highly optimized as either the | |
175 | * Solaris or Linux slabs, but it should get me most of what is | |
176 | * needed until it can be optimized or obsoleted by another approach. | |
177 | * | |
178 | * One serious concern I do have about this method is the relatively | |
179 | * small virtual address space on 32bit arches. This will seriously | |
180 | * constrain the size of the slab caches and their performance. | |
e5b9b344 BB |
181 | */ |
182 | ||
183 | struct list_head spl_kmem_cache_list; /* List of caches */ | |
184 | struct rw_semaphore spl_kmem_cache_sem; /* Cache list lock */ | |
b34b9563 | 185 | taskq_t *spl_kmem_cache_taskq; /* Task queue for ageing / reclaim */ |
e5b9b344 BB |
186 | |
187 | static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj); | |
188 | ||
189 | SPL_SHRINKER_CALLBACK_FWD_DECLARE(spl_kmem_cache_generic_shrinker); | |
190 | SPL_SHRINKER_DECLARE(spl_kmem_cache_shrinker, | |
191 | spl_kmem_cache_generic_shrinker, KMC_DEFAULT_SEEKS); | |
192 | ||
193 | static void * | |
194 | kv_alloc(spl_kmem_cache_t *skc, int size, int flags) | |
195 | { | |
c3eabc75 | 196 | gfp_t lflags = kmem_flags_convert(flags); |
e5b9b344 BB |
197 | void *ptr; |
198 | ||
3018bffa BB |
199 | if (skc->skc_flags & KMC_KMEM) { |
200 | ASSERT(ISP2(size)); | |
c3eabc75 | 201 | ptr = (void *)__get_free_pages(lflags, get_order(size)); |
3018bffa | 202 | } else { |
c2fa0945 | 203 | ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM, PAGE_KERNEL); |
3018bffa | 204 | } |
e5b9b344 BB |
205 | |
206 | /* Resulting allocated memory will be page aligned */ | |
207 | ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE)); | |
208 | ||
b34b9563 | 209 | return (ptr); |
e5b9b344 BB |
210 | } |
211 | ||
212 | static void | |
213 | kv_free(spl_kmem_cache_t *skc, void *ptr, int size) | |
214 | { | |
215 | ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE)); | |
e5b9b344 BB |
216 | |
217 | /* | |
218 | * The Linux direct reclaim path uses this out of band value to | |
219 | * determine if forward progress is being made. Normally this is | |
220 | * incremented by kmem_freepages() which is part of the various | |
221 | * Linux slab implementations. However, since we are using none | |
222 | * of that infrastructure we are responsible for incrementing it. | |
223 | */ | |
224 | if (current->reclaim_state) | |
225 | current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT; | |
226 | ||
3018bffa BB |
227 | if (skc->skc_flags & KMC_KMEM) { |
228 | ASSERT(ISP2(size)); | |
e5b9b344 | 229 | free_pages((unsigned long)ptr, get_order(size)); |
3018bffa | 230 | } else { |
e5b9b344 | 231 | vfree(ptr); |
3018bffa | 232 | } |
e5b9b344 BB |
233 | } |
234 | ||
235 | /* | |
236 | * Required space for each aligned sks. | |
237 | */ | |
238 | static inline uint32_t | |
239 | spl_sks_size(spl_kmem_cache_t *skc) | |
240 | { | |
b34b9563 BB |
241 | return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t), |
242 | skc->skc_obj_align, uint32_t)); | |
e5b9b344 BB |
243 | } |
244 | ||
245 | /* | |
246 | * Required space for each aligned object. | |
247 | */ | |
248 | static inline uint32_t | |
249 | spl_obj_size(spl_kmem_cache_t *skc) | |
250 | { | |
251 | uint32_t align = skc->skc_obj_align; | |
252 | ||
b34b9563 BB |
253 | return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) + |
254 | P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t)); | |
e5b9b344 BB |
255 | } |
256 | ||
257 | /* | |
258 | * Look up the spl_kmem_obj_t for an object given a pointer to that object. | |
259 | */ | |
260 | static inline spl_kmem_obj_t * | |
261 | spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj) | |
262 | { | |
b34b9563 BB |
263 | return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size, |
264 | skc->skc_obj_align, uint32_t)); | |
e5b9b344 BB |
265 | } |
266 | ||
267 | /* | |
268 | * Required space for each offslab object taking into account alignment | |
269 | * restrictions and the power-of-two requirement of kv_alloc(). | |
270 | */ | |
271 | static inline uint32_t | |
272 | spl_offslab_size(spl_kmem_cache_t *skc) | |
273 | { | |
b34b9563 | 274 | return (1UL << (fls64(spl_obj_size(skc)) + 1)); |
e5b9b344 BB |
275 | } |
276 | ||
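To make the layout concrete, consider a hypothetical cache with 100-byte objects, an assumed 8-byte alignment, and an assumed 40-byte spl_kmem_obj_t (both figures vary by build and are illustrative only):

```c
/*
 * P2ROUNDUP(100, 8)                      = 104  aligned object
 * P2ROUNDUP(sizeof (spl_kmem_obj_t), 8)  =  40  trailing bookkeeping
 * spl_obj_size(skc)                      = 144  per-object footprint
 * spl_sko_from_obj(skc, obj)             = obj + 104
 * spl_offslab_size(skc)                  = 512  (1UL << (fls64(144) + 1))
 */
```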
277 | /* | |
278 | * It's important that we pack the spl_kmem_obj_t structure and the | |
279 | * actual objects into one large address space to minimize the number | |
280 | * of calls to the allocator. It is far better to do a few large | |
281 | * allocations and then subdivide it ourselves. Now which allocator | |
282 | * we use requires balancing a few trade-offs. | |
283 | * | |
284 | * For small objects we use kmem_alloc() because as long as you are | |
285 | * only requesting a small number of pages (ideally just one) it's cheap. | |
286 | * However, when you start requesting multiple pages with kmem_alloc() | |
287 | * it gets increasingly expensive since it requires contiguous pages. | |
288 | * For this reason we shift to vmem_alloc() for slabs of large objects | |
289 | * which removes the need for contiguous pages. We do not use | |
290 | * vmem_alloc() in all cases because there is significant locking | |
291 | * overhead in __get_vm_area_node(). This function takes a single | |
292 | * global lock when acquiring an available virtual address range which | |
293 | * serializes all vmem_alloc()'s for all slab caches. Using slightly | |
294 | * different allocation functions for small and large objects should | |
295 | * give us the best of both worlds. | |
296 | * | |
297 | * KMC_ONSLAB KMC_OFFSLAB | |
298 | * | |
299 | * +------------------------+ +-----------------+ | |
300 | * | spl_kmem_slab_t --+-+ | | spl_kmem_slab_t |---+-+ | |
301 | * | skc_obj_size <-+ | | +-----------------+ | | | |
302 | * | spl_kmem_obj_t | | | | | |
303 | * | skc_obj_size <---+ | +-----------------+ | | | |
304 | * | spl_kmem_obj_t | | | skc_obj_size | <-+ | | |
305 | * | ... v | | spl_kmem_obj_t | | | |
306 | * +------------------------+ +-----------------+ v | |
307 | */ | |
308 | static spl_kmem_slab_t * | |
309 | spl_slab_alloc(spl_kmem_cache_t *skc, int flags) | |
310 | { | |
311 | spl_kmem_slab_t *sks; | |
312 | spl_kmem_obj_t *sko, *n; | |
313 | void *base, *obj; | |
314 | uint32_t obj_size, offslab_size = 0; | |
315 | int i, rc = 0; | |
316 | ||
317 | base = kv_alloc(skc, skc->skc_slab_size, flags); | |
318 | if (base == NULL) | |
319 | return (NULL); | |
320 | ||
321 | sks = (spl_kmem_slab_t *)base; | |
322 | sks->sks_magic = SKS_MAGIC; | |
323 | sks->sks_objs = skc->skc_slab_objs; | |
324 | sks->sks_age = jiffies; | |
325 | sks->sks_cache = skc; | |
326 | INIT_LIST_HEAD(&sks->sks_list); | |
327 | INIT_LIST_HEAD(&sks->sks_free_list); | |
328 | sks->sks_ref = 0; | |
329 | obj_size = spl_obj_size(skc); | |
330 | ||
331 | if (skc->skc_flags & KMC_OFFSLAB) | |
332 | offslab_size = spl_offslab_size(skc); | |
333 | ||
334 | for (i = 0; i < sks->sks_objs; i++) { | |
335 | if (skc->skc_flags & KMC_OFFSLAB) { | |
336 | obj = kv_alloc(skc, offslab_size, flags); | |
337 | if (!obj) { | |
338 | rc = -ENOMEM; | |
339 | goto out; | |
340 | } | |
341 | } else { | |
342 | obj = base + spl_sks_size(skc) + (i * obj_size); | |
343 | } | |
344 | ||
345 | ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); | |
346 | sko = spl_sko_from_obj(skc, obj); | |
347 | sko->sko_addr = obj; | |
348 | sko->sko_magic = SKO_MAGIC; | |
349 | sko->sko_slab = sks; | |
350 | INIT_LIST_HEAD(&sko->sko_list); | |
351 | list_add_tail(&sko->sko_list, &sks->sks_free_list); | |
352 | } | |
353 | ||
354 | out: | |
355 | if (rc) { | |
356 | if (skc->skc_flags & KMC_OFFSLAB) | |
b34b9563 BB |
357 | list_for_each_entry_safe(sko, |
358 | n, &sks->sks_free_list, sko_list) | |
e5b9b344 BB |
359 | kv_free(skc, sko->sko_addr, offslab_size); |
360 | ||
361 | kv_free(skc, base, skc->skc_slab_size); | |
362 | sks = NULL; | |
363 | } | |
364 | ||
365 | return (sks); | |
366 | } | |
367 | ||
368 | /* | |
369 | * Remove a slab from the complete or partial list. It must be called with | |
370 | * the 'skc->skc_lock' held but the actual free must be performed | |
371 | * outside the lock to prevent deadlocking on vmem addresses. | |
372 | */ | |
373 | static void | |
374 | spl_slab_free(spl_kmem_slab_t *sks, | |
b34b9563 | 375 | struct list_head *sks_list, struct list_head *sko_list) |
e5b9b344 BB |
376 | { |
377 | spl_kmem_cache_t *skc; | |
378 | ||
379 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
380 | ASSERT(sks->sks_ref == 0); | |
381 | ||
382 | skc = sks->sks_cache; | |
383 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
384 | ASSERT(spin_is_locked(&skc->skc_lock)); | |
385 | ||
386 | /* | |
387 | * Update slab/objects counters in the cache, then remove the | |
388 | * slab from the skc->skc_partial_list. Finally add the slab | |
389 | * and all its objects in to the private work lists where the | |
390 | * destructors will be called and the memory freed to the system. | |
391 | */ | |
392 | skc->skc_obj_total -= sks->sks_objs; | |
393 | skc->skc_slab_total--; | |
394 | list_del(&sks->sks_list); | |
395 | list_add(&sks->sks_list, sks_list); | |
396 | list_splice_init(&sks->sks_free_list, sko_list); | |
397 | } | |
398 | ||
399 | /* | |
1a204968 | 400 | * Reclaim empty slabs at the end of the partial list. |
e5b9b344 BB |
401 | */ |
402 | static void | |
1a204968 | 403 | spl_slab_reclaim(spl_kmem_cache_t *skc) |
e5b9b344 BB |
404 | { |
405 | spl_kmem_slab_t *sks, *m; | |
406 | spl_kmem_obj_t *sko, *n; | |
407 | LIST_HEAD(sks_list); | |
408 | LIST_HEAD(sko_list); | |
409 | uint32_t size = 0; | |
e5b9b344 BB |
410 | |
411 | /* | |
1a204968 BB |
412 | * Empty slabs and objects must be moved to a private list so they |
413 | * can be safely freed outside the spin lock. All empty slabs are | |
414 | * at the end of skc->skc_partial_list, therefore once a non-empty | |
415 | * slab is found we can stop scanning. | |
e5b9b344 BB |
416 | */ |
417 | spin_lock(&skc->skc_lock); | |
b34b9563 BB |
418 | list_for_each_entry_safe_reverse(sks, m, |
419 | &skc->skc_partial_list, sks_list) { | |
1a204968 BB |
420 | |
421 | if (sks->sks_ref > 0) | |
e5b9b344 BB |
422 | break; |
423 | ||
1a204968 | 424 | spl_slab_free(sks, &sks_list, &sko_list); |
e5b9b344 BB |
425 | } |
426 | spin_unlock(&skc->skc_lock); | |
427 | ||
428 | /* | |
429 | * The following two loops ensure all the object destructors are | |
430 | * run, any offslab objects are freed, and the slabs themselves | |
431 | * are freed. This is all done outside the skc->skc_lock since | |
432 | * this allows the destructor to sleep, and allows us to perform | |
433 | * a conditional reschedule when freeing a large number of | |
434 | * objects and slabs back to the system. | |
435 | */ | |
436 | if (skc->skc_flags & KMC_OFFSLAB) | |
437 | size = spl_offslab_size(skc); | |
438 | ||
439 | list_for_each_entry_safe(sko, n, &sko_list, sko_list) { | |
440 | ASSERT(sko->sko_magic == SKO_MAGIC); | |
441 | ||
442 | if (skc->skc_flags & KMC_OFFSLAB) | |
443 | kv_free(skc, sko->sko_addr, size); | |
444 | } | |
445 | ||
446 | list_for_each_entry_safe(sks, m, &sks_list, sks_list) { | |
447 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
448 | kv_free(skc, sks, skc->skc_slab_size); | |
449 | } | |
450 | } | |
451 | ||
452 | static spl_kmem_emergency_t * | |
453 | spl_emergency_search(struct rb_root *root, void *obj) | |
454 | { | |
455 | struct rb_node *node = root->rb_node; | |
456 | spl_kmem_emergency_t *ske; | |
457 | unsigned long address = (unsigned long)obj; | |
458 | ||
459 | while (node) { | |
460 | ske = container_of(node, spl_kmem_emergency_t, ske_node); | |
461 | ||
ee335174 | 462 | if (address < ske->ske_obj) |
e5b9b344 | 463 | node = node->rb_left; |
ee335174 | 464 | else if (address > ske->ske_obj) |
e5b9b344 BB |
465 | node = node->rb_right; |
466 | else | |
b34b9563 | 467 | return (ske); |
e5b9b344 BB |
468 | } |
469 | ||
b34b9563 | 470 | return (NULL); |
e5b9b344 BB |
471 | } |
472 | ||
473 | static int | |
474 | spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske) | |
475 | { | |
476 | struct rb_node **new = &(root->rb_node), *parent = NULL; | |
477 | spl_kmem_emergency_t *ske_tmp; | |
ee335174 | 478 | unsigned long address = ske->ske_obj; |
e5b9b344 BB |
479 | |
480 | while (*new) { | |
481 | ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node); | |
482 | ||
483 | parent = *new; | |
ee335174 | 484 | if (address < ske_tmp->ske_obj) |
e5b9b344 | 485 | new = &((*new)->rb_left); |
ee335174 | 486 | else if (address > ske_tmp->ske_obj) |
e5b9b344 BB |
487 | new = &((*new)->rb_right); |
488 | else | |
b34b9563 | 489 | return (0); |
e5b9b344 BB |
490 | } |
491 | ||
492 | rb_link_node(&ske->ske_node, parent, new); | |
493 | rb_insert_color(&ske->ske_node, root); | |
494 | ||
b34b9563 | 495 | return (1); |
e5b9b344 BB |
496 | } |
497 | ||
498 | /* | |
499 | * Allocate a single emergency object and track it in a red black tree. | |
500 | */ | |
501 | static int | |
502 | spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj) | |
503 | { | |
c3eabc75 | 504 | gfp_t lflags = kmem_flags_convert(flags); |
e5b9b344 | 505 | spl_kmem_emergency_t *ske; |
ee335174 | 506 | int order = get_order(skc->skc_obj_size); |
e5b9b344 BB |
507 | int empty; |
508 | ||
509 | /* Last chance: use a partial slab if one now exists */ | |
510 | spin_lock(&skc->skc_lock); | |
511 | empty = list_empty(&skc->skc_partial_list); | |
512 | spin_unlock(&skc->skc_lock); | |
513 | if (!empty) | |
514 | return (-EEXIST); | |
515 | ||
c3eabc75 | 516 | ske = kmalloc(sizeof (*ske), lflags); |
e5b9b344 BB |
517 | if (ske == NULL) |
518 | return (-ENOMEM); | |
519 | ||
ee335174 BB |
520 | ske->ske_obj = __get_free_pages(lflags, order); |
521 | if (ske->ske_obj == 0) { | |
e5b9b344 BB |
522 | kfree(ske); |
523 | return (-ENOMEM); | |
524 | } | |
525 | ||
526 | spin_lock(&skc->skc_lock); | |
527 | empty = spl_emergency_insert(&skc->skc_emergency_tree, ske); | |
528 | if (likely(empty)) { | |
529 | skc->skc_obj_total++; | |
530 | skc->skc_obj_emergency++; | |
531 | if (skc->skc_obj_emergency > skc->skc_obj_emergency_max) | |
532 | skc->skc_obj_emergency_max = skc->skc_obj_emergency; | |
533 | } | |
534 | spin_unlock(&skc->skc_lock); | |
535 | ||
536 | if (unlikely(!empty)) { | |
ee335174 | 537 | free_pages(ske->ske_obj, order); |
e5b9b344 BB |
538 | kfree(ske); |
539 | return (-EINVAL); | |
540 | } | |
541 | ||
ee335174 | 542 | *obj = (void *)ske->ske_obj; |
e5b9b344 BB |
543 | |
544 | return (0); | |
545 | } | |
546 | ||
547 | /* | |
548 | * Locate the passed object in the red black tree and free it. | |
549 | */ | |
550 | static int | |
551 | spl_emergency_free(spl_kmem_cache_t *skc, void *obj) | |
552 | { | |
553 | spl_kmem_emergency_t *ske; | |
ee335174 | 554 | int order = get_order(skc->skc_obj_size); |
e5b9b344 BB |
555 | |
556 | spin_lock(&skc->skc_lock); | |
557 | ske = spl_emergency_search(&skc->skc_emergency_tree, obj); | |
436ad60f | 558 | if (ske) { |
e5b9b344 BB |
559 | rb_erase(&ske->ske_node, &skc->skc_emergency_tree); |
560 | skc->skc_obj_emergency--; | |
561 | skc->skc_obj_total--; | |
562 | } | |
563 | spin_unlock(&skc->skc_lock); | |
564 | ||
436ad60f | 565 | if (ske == NULL) |
e5b9b344 BB |
566 | return (-ENOENT); |
567 | ||
ee335174 | 568 | free_pages(ske->ske_obj, order); |
e5b9b344 BB |
569 | kfree(ske); |
570 | ||
571 | return (0); | |
572 | } | |
573 | ||
574 | /* | |
575 | * Release objects from the per-cpu magazine back to their slab. The flush | |
576 | * argument contains the max number of entries to remove from the magazine. | |
577 | */ | |
578 | static void | |
579 | __spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) | |
580 | { | |
581 | int i, count = MIN(flush, skm->skm_avail); | |
582 | ||
583 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
584 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
585 | ASSERT(spin_is_locked(&skc->skc_lock)); | |
586 | ||
587 | for (i = 0; i < count; i++) | |
588 | spl_cache_shrink(skc, skm->skm_objs[i]); | |
589 | ||
590 | skm->skm_avail -= count; | |
591 | memmove(skm->skm_objs, &(skm->skm_objs[count]), | |
b34b9563 | 592 | sizeof (void *) * skm->skm_avail); |
e5b9b344 BB |
593 | } |
594 | ||
595 | static void | |
596 | spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush) | |
597 | { | |
598 | spin_lock(&skc->skc_lock); | |
599 | __spl_cache_flush(skc, skm, flush); | |
600 | spin_unlock(&skc->skc_lock); | |
601 | } | |
602 | ||
603 | static void | |
604 | spl_magazine_age(void *data) | |
605 | { | |
606 | spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data; | |
607 | spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()]; | |
608 | ||
609 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
610 | ASSERT(skm->skm_cpu == smp_processor_id()); | |
611 | ASSERT(irqs_disabled()); | |
612 | ||
613 | /* There are no available objects or they are too young to age out */ | |
614 | if ((skm->skm_avail == 0) || | |
615 | time_before(jiffies, skm->skm_age + skc->skc_delay * HZ)) | |
616 | return; | |
617 | ||
618 | /* | |
619 | * Because we're executing in interrupt context we may have | |
620 | * interrupted the holder of this lock. To avoid a potential | |
621 | * deadlock return if the lock is contended. | |
622 | */ | |
623 | if (!spin_trylock(&skc->skc_lock)) | |
624 | return; | |
625 | ||
626 | __spl_cache_flush(skc, skm, skm->skm_refill); | |
627 | spin_unlock(&skc->skc_lock); | |
628 | } | |
629 | ||
630 | /* | |
631 | * Called regularly to keep a downward pressure on the cache. | |
632 | * | |
633 | * Objects older than skc->skc_delay seconds in the per-cpu magazines will | |
634 | * be returned to the caches. This is done to prevent idle magazines from | |
635 | * holding memory which could be better used elsewhere. The delay is | |
636 | * present to prevent thrashing the magazine. | |
637 | * | |
638 | * The newly released objects may result in empty partial slabs. Those | |
639 | * slabs should be released to the system. Otherwise moving the objects | |
640 | * out of the magazines is just wasted work. | |
641 | */ | |
642 | static void | |
643 | spl_cache_age(void *data) | |
644 | { | |
645 | spl_kmem_cache_t *skc = (spl_kmem_cache_t *)data; | |
646 | taskqid_t id = 0; | |
647 | ||
648 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
649 | ||
650 | /* Dynamically disabled at run time */ | |
651 | if (!(spl_kmem_cache_expire & KMC_EXPIRE_AGE)) | |
652 | return; | |
653 | ||
654 | atomic_inc(&skc->skc_ref); | |
655 | ||
656 | if (!(skc->skc_flags & KMC_NOMAGAZINE)) | |
657 | on_each_cpu(spl_magazine_age, skc, 1); | |
658 | ||
1a204968 | 659 | spl_slab_reclaim(skc); |
e5b9b344 BB |
660 | |
661 | while (!test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && !id) { | |
662 | id = taskq_dispatch_delay( | |
663 | spl_kmem_cache_taskq, spl_cache_age, skc, TQ_SLEEP, | |
664 | ddi_get_lbolt() + skc->skc_delay / 3 * HZ); | |
665 | ||
666 | /* Destroy issued after dispatch immediately cancel it */ | |
667 | if (test_bit(KMC_BIT_DESTROY, &skc->skc_flags) && id) | |
668 | taskq_cancel_id(spl_kmem_cache_taskq, id); | |
669 | } | |
670 | ||
671 | spin_lock(&skc->skc_lock); | |
672 | skc->skc_taskqid = id; | |
673 | spin_unlock(&skc->skc_lock); | |
674 | ||
675 | atomic_dec(&skc->skc_ref); | |
676 | } | |
677 | ||
678 | /* | |
679 | * Size a slab based on the size of each aligned object plus spl_kmem_obj_t. | |
680 | * When on-slab we want to target spl_kmem_cache_obj_per_slab. However, | |
681 | * for very small objects we may end up with more than this so as not | |
682 | * to waste space in the minimal allocation of a single page. Also for | |
683 | * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min; | |
684 | * any lower than this and we will fail. | |
685 | */ | |
686 | static int | |
687 | spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size) | |
688 | { | |
3018bffa | 689 | uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs; |
e5b9b344 BB |
690 | |
691 | if (skc->skc_flags & KMC_OFFSLAB) { | |
3018bffa BB |
692 | tgt_objs = spl_kmem_cache_obj_per_slab; |
693 | tgt_size = P2ROUNDUP(sizeof (spl_kmem_slab_t), PAGE_SIZE); | |
694 | ||
695 | if ((skc->skc_flags & KMC_KMEM) && | |
696 | (spl_obj_size(skc) > (SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE))) | |
697 | return (-ENOSPC); | |
e5b9b344 BB |
698 | } else { |
699 | sks_size = spl_sks_size(skc); | |
700 | obj_size = spl_obj_size(skc); | |
3018bffa BB |
701 | max_size = (spl_kmem_cache_max_size * 1024 * 1024); |
702 | tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size); | |
e5b9b344 BB |
703 | |
704 | /* | |
3018bffa BB |
705 | * KMC_KMEM slabs are allocated by __get_free_pages() which |
706 | * rounds up to the nearest order. Knowing this the size | |
707 | * should be rounded up to the next power of two with a hard | |
708 | * maximum defined by the maximum allowed allocation order. | |
e5b9b344 | 709 | */ |
3018bffa BB |
710 | if (skc->skc_flags & KMC_KMEM) { |
711 | max_size = SPL_MAX_ORDER_NR_PAGES * PAGE_SIZE; | |
712 | tgt_size = MIN(max_size, | |
713 | PAGE_SIZE * (1 << MAX(get_order(tgt_size) - 1, 1))); | |
714 | } | |
715 | ||
716 | if (tgt_size <= max_size) { | |
717 | tgt_objs = (tgt_size - sks_size) / obj_size; | |
718 | } else { | |
719 | tgt_objs = (max_size - sks_size) / obj_size; | |
720 | tgt_size = (tgt_objs * obj_size) + sks_size; | |
721 | } | |
e5b9b344 BB |
722 | } |
723 | ||
3018bffa BB |
724 | if (tgt_objs == 0) |
725 | return (-ENOSPC); | |
726 | ||
727 | *objs = tgt_objs; | |
728 | *size = tgt_size; | |
729 | ||
730 | return (0); | |
e5b9b344 BB |
731 | } |
732 | ||
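Continuing the hypothetical figures used earlier, a vmem-backed cache of 4096-byte objects with 8 objects per slab, ~40 bytes of spl_kmem_obj_t per object and ~88 bytes of aligned spl_kmem_slab_t header (all assumed) would be sized roughly as below; a KMC_KMEM cache additionally clamps the target to a power-of-two number of pages, capped at SPL_MAX_ORDER_NR_PAGES.

```c
/*
 * obj_size = 4096 + 40            = 4136
 * tgt_size = 8 * 4136 + 88        = 33176   (~32 KiB per slab)
 * tgt_objs = (33176 - 88) / 4136  = 8       objects per slab
 */
```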
733 | /* | |
734 | * Make a guess at reasonable per-cpu magazine size based on the size of | |
735 | * each object and the cost of caching N of them in each magazine. Long | |
736 | * term this should really adapt based on an observed usage heuristic. | |
737 | */ | |
738 | static int | |
739 | spl_magazine_size(spl_kmem_cache_t *skc) | |
740 | { | |
741 | uint32_t obj_size = spl_obj_size(skc); | |
742 | int size; | |
743 | ||
1a204968 BB |
744 | if (spl_kmem_cache_magazine_size > 0) |
745 | return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2)); | |
746 | ||
e5b9b344 BB |
747 | /* Per-magazine sizes below assume a 4 KiB page size */ | |
748 | if (obj_size > (PAGE_SIZE * 256)) | |
749 | size = 4; /* Minimum 4Mib per-magazine */ | |
750 | else if (obj_size > (PAGE_SIZE * 32)) | |
751 | size = 16; /* Minimum 2Mib per-magazine */ | |
752 | else if (obj_size > (PAGE_SIZE)) | |
753 | size = 64; /* Minimum 256Kib per-magazine */ | |
754 | else if (obj_size > (PAGE_SIZE / 4)) | |
755 | size = 128; /* Minimum 128Kib per-magazine */ | |
756 | else | |
757 | size = 256; | |
758 | ||
759 | return (size); | |
760 | } | |
761 | ||
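For example, with 4 KiB pages a cache whose per-object footprint is about 2 KiB lands in the `obj_size > (PAGE_SIZE / 4)` bucket above:

```c
/*
 * size = 128 objects per per-CPU magazine
 *      ~= 128 * 2 KiB = 256 KiB of cached objects per CPU (upper bound)
 */
```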
762 | /* | |
763 | * Allocate a per-cpu magazine to associate with a specific core. | |
764 | */ | |
765 | static spl_kmem_magazine_t * | |
766 | spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu) | |
767 | { | |
768 | spl_kmem_magazine_t *skm; | |
b34b9563 BB |
769 | int size = sizeof (spl_kmem_magazine_t) + |
770 | sizeof (void *) * skc->skc_mag_size; | |
e5b9b344 | 771 | |
c3eabc75 | 772 | skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu)); |
e5b9b344 BB |
773 | if (skm) { |
774 | skm->skm_magic = SKM_MAGIC; | |
775 | skm->skm_avail = 0; | |
776 | skm->skm_size = skc->skc_mag_size; | |
777 | skm->skm_refill = skc->skc_mag_refill; | |
778 | skm->skm_cache = skc; | |
779 | skm->skm_age = jiffies; | |
780 | skm->skm_cpu = cpu; | |
781 | } | |
782 | ||
783 | return (skm); | |
784 | } | |
785 | ||
786 | /* | |
787 | * Free a per-cpu magazine associated with a specific core. | |
788 | */ | |
789 | static void | |
790 | spl_magazine_free(spl_kmem_magazine_t *skm) | |
791 | { | |
e5b9b344 BB |
792 | ASSERT(skm->skm_magic == SKM_MAGIC); |
793 | ASSERT(skm->skm_avail == 0); | |
c3eabc75 | 794 | kfree(skm); |
e5b9b344 BB |
795 | } |
796 | ||
797 | /* | |
798 | * Create all per-cpu magazines of reasonable sizes. | |
799 | */ | |
800 | static int | |
801 | spl_magazine_create(spl_kmem_cache_t *skc) | |
802 | { | |
803 | int i; | |
804 | ||
805 | if (skc->skc_flags & KMC_NOMAGAZINE) | |
806 | return (0); | |
807 | ||
808 | skc->skc_mag_size = spl_magazine_size(skc); | |
809 | skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2; | |
810 | ||
811 | for_each_online_cpu(i) { | |
812 | skc->skc_mag[i] = spl_magazine_alloc(skc, i); | |
813 | if (!skc->skc_mag[i]) { | |
814 | for (i--; i >= 0; i--) | |
815 | spl_magazine_free(skc->skc_mag[i]); | |
816 | ||
817 | return (-ENOMEM); | |
818 | } | |
819 | } | |
820 | ||
821 | return (0); | |
822 | } | |
823 | ||
824 | /* | |
825 | * Destroy all per-cpu magazines. | |
826 | */ | |
827 | static void | |
828 | spl_magazine_destroy(spl_kmem_cache_t *skc) | |
829 | { | |
830 | spl_kmem_magazine_t *skm; | |
831 | int i; | |
832 | ||
833 | if (skc->skc_flags & KMC_NOMAGAZINE) | |
834 | return; | |
835 | ||
b34b9563 | 836 | for_each_online_cpu(i) { |
e5b9b344 BB |
837 | skm = skc->skc_mag[i]; |
838 | spl_cache_flush(skc, skm, skm->skm_avail); | |
839 | spl_magazine_free(skm); | |
b34b9563 | 840 | } |
e5b9b344 BB |
841 | } |
842 | ||
843 | /* | |
844 | * Create an object cache based on the following arguments: | |
845 | * name cache name | |
846 | * size cache object size | |
847 | * align cache object alignment | |
848 | * ctor cache object constructor | |
849 | * dtor cache object destructor | |
850 | * reclaim cache object reclaim | |
851 | * priv cache private data for ctor/dtor/reclaim | |
852 | * vmp unused must be NULL | |
853 | * flags | |
854 | * KMC_NOTOUCH Disable cache object aging (unsupported) | |
855 | * KMC_NODEBUG Disable debugging (unsupported) | |
856 | * KMC_NOHASH Disable hashing (unsupported) | |
857 | * KMC_QCACHE Disable qcache (unsupported) | |
858 | * KMC_NOMAGAZINE Enabled for kmem/vmem, Disabled for Linux slab | |
859 | * KMC_KMEM Force kmem backed cache | |
860 | * KMC_VMEM Force vmem backed cache | |
861 | * KMC_SLAB Force Linux slab backed cache | |
862 | * KMC_OFFSLAB Locate objects off the slab | |
863 | */ | |
864 | spl_kmem_cache_t * | |
865 | spl_kmem_cache_create(char *name, size_t size, size_t align, | |
b34b9563 BB |
866 | spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, spl_kmem_reclaim_t reclaim, |
867 | void *priv, void *vmp, int flags) | |
e5b9b344 | 868 | { |
c3eabc75 | 869 | gfp_t lflags = kmem_flags_convert(KM_SLEEP); |
b34b9563 | 870 | spl_kmem_cache_t *skc; |
e5b9b344 BB |
871 | int rc; |
872 | ||
873 | /* | |
874 | * Unsupported flags | |
875 | */ | |
876 | ASSERT0(flags & KMC_NOMAGAZINE); | |
877 | ASSERT0(flags & KMC_NOHASH); | |
878 | ASSERT0(flags & KMC_QCACHE); | |
879 | ASSERT(vmp == NULL); | |
880 | ||
881 | might_sleep(); | |
882 | ||
883 | /* | |
b34b9563 | 884 | * Allocate memory for a new cache and initialize it. Unfortunately, |
e5b9b344 BB |
885 | * this usually ends up being a large allocation of ~32k because |
886 | * we need to allocate enough memory for the worst case number of | |
c3eabc75 | 887 | * cpus in the magazine, skc_mag[NR_CPUS]. |
e5b9b344 | 888 | */ |
c3eabc75 | 889 | skc = kzalloc(sizeof (*skc), lflags); |
e5b9b344 BB |
890 | if (skc == NULL) |
891 | return (NULL); | |
892 | ||
893 | skc->skc_magic = SKC_MAGIC; | |
894 | skc->skc_name_size = strlen(name) + 1; | |
c3eabc75 | 895 | skc->skc_name = (char *)kmalloc(skc->skc_name_size, lflags); |
e5b9b344 | 896 | if (skc->skc_name == NULL) { |
c3eabc75 | 897 | kfree(skc); |
e5b9b344 BB |
898 | return (NULL); |
899 | } | |
900 | strncpy(skc->skc_name, name, skc->skc_name_size); | |
901 | ||
902 | skc->skc_ctor = ctor; | |
903 | skc->skc_dtor = dtor; | |
904 | skc->skc_reclaim = reclaim; | |
905 | skc->skc_private = priv; | |
906 | skc->skc_vmp = vmp; | |
907 | skc->skc_linux_cache = NULL; | |
908 | skc->skc_flags = flags; | |
909 | skc->skc_obj_size = size; | |
910 | skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN; | |
911 | skc->skc_delay = SPL_KMEM_CACHE_DELAY; | |
912 | skc->skc_reap = SPL_KMEM_CACHE_REAP; | |
913 | atomic_set(&skc->skc_ref, 0); | |
914 | ||
915 | INIT_LIST_HEAD(&skc->skc_list); | |
916 | INIT_LIST_HEAD(&skc->skc_complete_list); | |
917 | INIT_LIST_HEAD(&skc->skc_partial_list); | |
918 | skc->skc_emergency_tree = RB_ROOT; | |
919 | spin_lock_init(&skc->skc_lock); | |
920 | init_waitqueue_head(&skc->skc_waitq); | |
921 | skc->skc_slab_fail = 0; | |
922 | skc->skc_slab_create = 0; | |
923 | skc->skc_slab_destroy = 0; | |
924 | skc->skc_slab_total = 0; | |
925 | skc->skc_slab_alloc = 0; | |
926 | skc->skc_slab_max = 0; | |
927 | skc->skc_obj_total = 0; | |
928 | skc->skc_obj_alloc = 0; | |
929 | skc->skc_obj_max = 0; | |
930 | skc->skc_obj_deadlock = 0; | |
931 | skc->skc_obj_emergency = 0; | |
932 | skc->skc_obj_emergency_max = 0; | |
933 | ||
934 | /* | |
935 | * Verify the requested alignment restriction is sane. | |
936 | */ | |
937 | if (align) { | |
938 | VERIFY(ISP2(align)); | |
939 | VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN); | |
940 | VERIFY3U(align, <=, PAGE_SIZE); | |
941 | skc->skc_obj_align = align; | |
942 | } | |
943 | ||
944 | /* | |
945 | * When no specific type of slab is requested (kmem, vmem, or | |
946 | * linuxslab) then select a cache type based on the object size | |
947 | * and default tunables. | |
948 | */ | |
949 | if (!(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB))) { | |
950 | ||
951 | /* | |
952 | * Objects smaller than spl_kmem_cache_slab_limit can | |
953 | * use the Linux slab for better space-efficiency. By | |
954 | * default this functionality is disabled until its | |
b34b9563 | 955 | * performance characteristics are fully understood. |
e5b9b344 BB |
956 | */ |
957 | if (spl_kmem_cache_slab_limit && | |
958 | size <= (size_t)spl_kmem_cache_slab_limit) | |
959 | skc->skc_flags |= KMC_SLAB; | |
960 | ||
961 | /* | |
962 | * Small objects, less than spl_kmem_cache_kmem_limit per | |
963 | * object should use kmem because their slabs are small. | |
964 | */ | |
965 | else if (spl_obj_size(skc) <= spl_kmem_cache_kmem_limit) | |
966 | skc->skc_flags |= KMC_KMEM; | |
967 | ||
968 | /* | |
969 | * All other objects are considered large and are placed | |
970 | * on vmem backed slabs. | |
971 | */ | |
972 | else | |
973 | skc->skc_flags |= KMC_VMEM; | |
974 | } | |
975 | ||
976 | /* | |
977 | * Given the type of slab allocate the required resources. | |
978 | */ | |
979 | if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) { | |
980 | rc = spl_slab_size(skc, | |
981 | &skc->skc_slab_objs, &skc->skc_slab_size); | |
982 | if (rc) | |
983 | goto out; | |
984 | ||
985 | rc = spl_magazine_create(skc); | |
986 | if (rc) | |
987 | goto out; | |
988 | } else { | |
3018bffa BB |
989 | if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) { |
990 | rc = EINVAL; | |
991 | goto out; | |
992 | } | |
993 | ||
e5b9b344 BB |
994 | skc->skc_linux_cache = kmem_cache_create( |
995 | skc->skc_name, size, align, 0, NULL); | |
996 | if (skc->skc_linux_cache == NULL) { | |
997 | rc = ENOMEM; | |
998 | goto out; | |
999 | } | |
1000 | ||
c3eabc75 BB |
1001 | #if defined(HAVE_KMEM_CACHE_ALLOCFLAGS) |
1002 | skc->skc_linux_cache->allocflags |= __GFP_COMP; | |
1003 | #elif defined(HAVE_KMEM_CACHE_GFPFLAGS) | |
1004 | skc->skc_linux_cache->gfpflags |= __GFP_COMP; | |
1005 | #endif | |
e5b9b344 BB |
1006 | skc->skc_flags |= KMC_NOMAGAZINE; |
1007 | } | |
1008 | ||
1009 | if (spl_kmem_cache_expire & KMC_EXPIRE_AGE) | |
1010 | skc->skc_taskqid = taskq_dispatch_delay(spl_kmem_cache_taskq, | |
1011 | spl_cache_age, skc, TQ_SLEEP, | |
1012 | ddi_get_lbolt() + skc->skc_delay / 3 * HZ); | |
1013 | ||
1014 | down_write(&spl_kmem_cache_sem); | |
1015 | list_add_tail(&skc->skc_list, &spl_kmem_cache_list); | |
1016 | up_write(&spl_kmem_cache_sem); | |
1017 | ||
1018 | return (skc); | |
1019 | out: | |
c3eabc75 BB |
1020 | kfree(skc->skc_name); |
1021 | kfree(skc); | |
e5b9b344 BB |
1022 | return (NULL); |
1023 | } | |
1024 | EXPORT_SYMBOL(spl_kmem_cache_create); | |
1025 | ||
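A minimal sketch of a consumer of this interface; the my_node type, constructor, destructor and cache name are hypothetical and not part of the SPL.

```c
typedef struct my_node {
	kmutex_t mn_lock;	/* expensive to initialize, hence the ctor */
	int mn_value;
} my_node_t;

static int
my_node_ctor(void *buf, void *priv, int kmflags)
{
	my_node_t *mn = buf;
	mutex_init(&mn->mn_lock, NULL, MUTEX_DEFAULT, NULL);
	return (0);
}

static void
my_node_dtor(void *buf, void *priv)
{
	my_node_t *mn = buf;
	mutex_destroy(&mn->mn_lock);
}

static spl_kmem_cache_t *my_cache;

static void
my_cache_init(void)
{
	/* name, size, align, ctor, dtor, reclaim, priv, vmp, flags */
	my_cache = spl_kmem_cache_create("my_node_cache",
	    sizeof (my_node_t), 0, my_node_ctor, my_node_dtor,
	    NULL, NULL, NULL, 0);
}
```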
1026 | /* | |
b34b9563 | 1027 | * Register a move callback for cache defragmentation. |
e5b9b344 BB |
1028 | * XXX: Unimplemented but harmless to stub out for now. |
1029 | */ | |
1030 | void | |
1031 | spl_kmem_cache_set_move(spl_kmem_cache_t *skc, | |
1032 | kmem_cbrc_t (move)(void *, void *, size_t, void *)) | |
1033 | { | |
b34b9563 | 1034 | ASSERT(move != NULL); |
e5b9b344 BB |
1035 | } |
1036 | EXPORT_SYMBOL(spl_kmem_cache_set_move); | |
1037 | ||
1038 | /* | |
1039 | * Destroy a cache and all objects associated with the cache. | |
1040 | */ | |
1041 | void | |
1042 | spl_kmem_cache_destroy(spl_kmem_cache_t *skc) | |
1043 | { | |
1044 | DECLARE_WAIT_QUEUE_HEAD(wq); | |
1045 | taskqid_t id; | |
1046 | ||
1047 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
1048 | ASSERT(skc->skc_flags & (KMC_KMEM | KMC_VMEM | KMC_SLAB)); | |
1049 | ||
1050 | down_write(&spl_kmem_cache_sem); | |
1051 | list_del_init(&skc->skc_list); | |
1052 | up_write(&spl_kmem_cache_sem); | |
1053 | ||
1054 | /* Cancel and wait for any pending delayed tasks */ | |
1055 | VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags)); | |
1056 | ||
1057 | spin_lock(&skc->skc_lock); | |
1058 | id = skc->skc_taskqid; | |
1059 | spin_unlock(&skc->skc_lock); | |
1060 | ||
1061 | taskq_cancel_id(spl_kmem_cache_taskq, id); | |
1062 | ||
b34b9563 BB |
1063 | /* |
1064 | * Wait until all current callers complete; this is mainly | |
e5b9b344 | 1065 | * to catch the case where a low memory situation triggers a |
b34b9563 BB |
1066 | * cache reaping action which races with this destroy. |
1067 | */ | |
e5b9b344 BB |
1068 | wait_event(wq, atomic_read(&skc->skc_ref) == 0); |
1069 | ||
1070 | if (skc->skc_flags & (KMC_KMEM | KMC_VMEM)) { | |
1071 | spl_magazine_destroy(skc); | |
1a204968 | 1072 | spl_slab_reclaim(skc); |
e5b9b344 BB |
1073 | } else { |
1074 | ASSERT(skc->skc_flags & KMC_SLAB); | |
1075 | kmem_cache_destroy(skc->skc_linux_cache); | |
1076 | } | |
1077 | ||
1078 | spin_lock(&skc->skc_lock); | |
1079 | ||
b34b9563 BB |
1080 | /* |
1081 | * Validate there are no objects in use and free all the | |
1082 | * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers. | |
1083 | */ | |
e5b9b344 BB |
1084 | ASSERT3U(skc->skc_slab_alloc, ==, 0); |
1085 | ASSERT3U(skc->skc_obj_alloc, ==, 0); | |
1086 | ASSERT3U(skc->skc_slab_total, ==, 0); | |
1087 | ASSERT3U(skc->skc_obj_total, ==, 0); | |
1088 | ASSERT3U(skc->skc_obj_emergency, ==, 0); | |
1089 | ASSERT(list_empty(&skc->skc_complete_list)); | |
1090 | ||
e5b9b344 BB |
1091 | spin_unlock(&skc->skc_lock); |
1092 | ||
c3eabc75 BB |
1093 | kfree(skc->skc_name); |
1094 | kfree(skc); | |
e5b9b344 BB |
1095 | } |
1096 | EXPORT_SYMBOL(spl_kmem_cache_destroy); | |
1097 | ||
1098 | /* | |
1099 | * Allocate an object from a slab attached to the cache. This is used to | |
1100 | * repopulate the per-cpu magazine caches in batches when they run low. | |
1101 | */ | |
1102 | static void * | |
1103 | spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks) | |
1104 | { | |
1105 | spl_kmem_obj_t *sko; | |
1106 | ||
1107 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
1108 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
1109 | ASSERT(spin_is_locked(&skc->skc_lock)); | |
1110 | ||
1111 | sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list); | |
1112 | ASSERT(sko->sko_magic == SKO_MAGIC); | |
1113 | ASSERT(sko->sko_addr != NULL); | |
1114 | ||
1115 | /* Remove from sks_free_list */ | |
1116 | list_del_init(&sko->sko_list); | |
1117 | ||
1118 | sks->sks_age = jiffies; | |
1119 | sks->sks_ref++; | |
1120 | skc->skc_obj_alloc++; | |
1121 | ||
1122 | /* Track max obj usage statistics */ | |
1123 | if (skc->skc_obj_alloc > skc->skc_obj_max) | |
1124 | skc->skc_obj_max = skc->skc_obj_alloc; | |
1125 | ||
1126 | /* Track max slab usage statistics */ | |
1127 | if (sks->sks_ref == 1) { | |
1128 | skc->skc_slab_alloc++; | |
1129 | ||
1130 | if (skc->skc_slab_alloc > skc->skc_slab_max) | |
1131 | skc->skc_slab_max = skc->skc_slab_alloc; | |
1132 | } | |
1133 | ||
b34b9563 | 1134 | return (sko->sko_addr); |
e5b9b344 BB |
1135 | } |
1136 | ||
1137 | /* | |
1138 | * Generic slab allocation function to run by the global work queues. | |
1139 | * It is responsible for allocating a new slab, linking it in to the list | |
1140 | * of partial slabs, and then waking any waiters. | |
1141 | */ | |
1142 | static void | |
1143 | spl_cache_grow_work(void *data) | |
1144 | { | |
1145 | spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data; | |
1146 | spl_kmem_cache_t *skc = ska->ska_cache; | |
1147 | spl_kmem_slab_t *sks; | |
1148 | ||
c3eabc75 BB |
1149 | #if defined(PF_MEMALLOC_NOIO) |
1150 | unsigned noio_flag = memalloc_noio_save(); | |
1151 | sks = spl_slab_alloc(skc, ska->ska_flags); | |
1152 | memalloc_noio_restore(noio_flag); | |
1153 | #else | |
c2fa0945 | 1154 | fstrans_cookie_t cookie = spl_fstrans_mark(); |
c3eabc75 | 1155 | sks = spl_slab_alloc(skc, ska->ska_flags); |
c2fa0945 | 1156 | spl_fstrans_unmark(cookie); |
c3eabc75 | 1157 | #endif |
e5b9b344 BB |
1158 | spin_lock(&skc->skc_lock); |
1159 | if (sks) { | |
1160 | skc->skc_slab_total++; | |
1161 | skc->skc_obj_total += sks->sks_objs; | |
1162 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
1163 | } | |
1164 | ||
1165 | atomic_dec(&skc->skc_ref); | |
a988a35a | 1166 | smp_mb__before_atomic(); |
e5b9b344 BB |
1167 | clear_bit(KMC_BIT_GROWING, &skc->skc_flags); |
1168 | clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); | |
a988a35a | 1169 | smp_mb__after_atomic(); |
e5b9b344 BB |
1170 | wake_up_all(&skc->skc_waitq); |
1171 | spin_unlock(&skc->skc_lock); | |
1172 | ||
1173 | kfree(ska); | |
1174 | } | |
1175 | ||
1176 | /* | |
1177 | * Returns non-zero when a new slab should be available. | |
1178 | */ | |
1179 | static int | |
1180 | spl_cache_grow_wait(spl_kmem_cache_t *skc) | |
1181 | { | |
b34b9563 | 1182 | return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags)); |
e5b9b344 BB |
1183 | } |
1184 | ||
1185 | /* | |
1186 | * No available objects on any slabs, create a new slab. Note that this | |
1187 | * functionality is disabled for KMC_SLAB caches which are backed by the | |
1188 | * Linux slab. | |
1189 | */ | |
1190 | static int | |
1191 | spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj) | |
1192 | { | |
c3eabc75 | 1193 | int remaining, rc = 0; |
e5b9b344 | 1194 | |
c3eabc75 | 1195 | ASSERT0(flags & ~KM_PUBLIC_MASK); |
e5b9b344 BB |
1196 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1197 | ASSERT((skc->skc_flags & KMC_SLAB) == 0); | |
1198 | might_sleep(); | |
1199 | *obj = NULL; | |
1200 | ||
1201 | /* | |
1202 | * Before allocating a new slab wait for any reaping to complete and | |
1203 | * then return so the local magazine can be rechecked for new objects. | |
1204 | */ | |
1205 | if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) { | |
1206 | rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING, | |
1207 | TASK_UNINTERRUPTIBLE); | |
1208 | return (rc ? rc : -EAGAIN); | |
1209 | } | |
1210 | ||
1211 | /* | |
1212 | * This is handled by dispatching a work request to the global work | |
1213 | * queue. This allows us to asynchronously allocate a new slab while | |
1214 | * retaining the ability to safely fall back to smaller synchronous | |
1215 | * allocations to ensure forward progress is always maintained. | |
1216 | */ | |
1217 | if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) { | |
1218 | spl_kmem_alloc_t *ska; | |
1219 | ||
c3eabc75 | 1220 | ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags)); |
e5b9b344 | 1221 | if (ska == NULL) { |
a988a35a RY |
1222 | clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags); |
1223 | smp_mb__after_atomic(); | |
e5b9b344 BB |
1224 | wake_up_all(&skc->skc_waitq); |
1225 | return (-ENOMEM); | |
1226 | } | |
1227 | ||
1228 | atomic_inc(&skc->skc_ref); | |
1229 | ska->ska_cache = skc; | |
c3eabc75 | 1230 | ska->ska_flags = flags; |
e5b9b344 BB |
1231 | taskq_init_ent(&ska->ska_tqe); |
1232 | taskq_dispatch_ent(spl_kmem_cache_taskq, | |
1233 | spl_cache_grow_work, ska, 0, &ska->ska_tqe); | |
1234 | } | |
1235 | ||
1236 | /* | |
1237 | * The goal here is to only detect the rare case where a virtual slab | |
1238 | * allocation has deadlocked. We must be careful to minimize the use | |
1239 | * of emergency objects which are more expensive to track. Therefore, | |
1240 | * we set a very long timeout for the asynchronous allocation and if | |
1241 | * the timeout is reached the cache is flagged as deadlocked. From | |
1242 | * this point only new emergency objects will be allocated until the | |
1243 | * asynchronous allocation completes and clears the deadlocked flag. | |
1244 | */ | |
1245 | if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) { | |
1246 | rc = spl_emergency_alloc(skc, flags, obj); | |
1247 | } else { | |
1248 | remaining = wait_event_timeout(skc->skc_waitq, | |
e50e6cc9 | 1249 | spl_cache_grow_wait(skc), HZ / 10); |
e5b9b344 | 1250 | |
436ad60f | 1251 | if (!remaining) { |
e5b9b344 BB |
1252 | spin_lock(&skc->skc_lock); |
1253 | if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) { | |
1254 | set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags); | |
1255 | skc->skc_obj_deadlock++; | |
1256 | } | |
1257 | spin_unlock(&skc->skc_lock); | |
1258 | } | |
1259 | ||
1260 | rc = -ENOMEM; | |
1261 | } | |
1262 | ||
1263 | return (rc); | |
1264 | } | |
1265 | ||
1266 | /* | |
1267 | * Refill a per-cpu magazine with objects from the slabs for this cache. | |
1268 | * Ideally the magazine can be repopulated using existing objects which have | |
1269 | * been released, however if we are unable to locate enough free objects new | |
1270 | * slabs of objects will be created. On success NULL is returned, otherwise | |
1271 | * the address of a single emergency object is returned for use by the caller. | |
1272 | */ | |
1273 | static void * | |
1274 | spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags) | |
1275 | { | |
1276 | spl_kmem_slab_t *sks; | |
1277 | int count = 0, rc, refill; | |
1278 | void *obj = NULL; | |
1279 | ||
1280 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
1281 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
1282 | ||
1283 | refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail); | |
1284 | spin_lock(&skc->skc_lock); | |
1285 | ||
1286 | while (refill > 0) { | |
1287 | /* No slabs available, we may need to grow the cache */ | |
1288 | if (list_empty(&skc->skc_partial_list)) { | |
1289 | spin_unlock(&skc->skc_lock); | |
1290 | ||
1291 | local_irq_enable(); | |
1292 | rc = spl_cache_grow(skc, flags, &obj); | |
1293 | local_irq_disable(); | |
1294 | ||
1295 | /* Emergency object for immediate use by caller */ | |
1296 | if (rc == 0 && obj != NULL) | |
1297 | return (obj); | |
1298 | ||
1299 | if (rc) | |
1300 | goto out; | |
1301 | ||
1302 | /* Rescheduled to a different CPU, skm is not local */ | |
1303 | if (skm != skc->skc_mag[smp_processor_id()]) | |
1304 | goto out; | |
1305 | ||
b34b9563 BB |
1306 | /* |
1307 | * Potentially rescheduled to the same CPU but | |
e5b9b344 | 1308 | * allocations may have occurred from this CPU while |
b34b9563 BB |
1309 | * we were sleeping so recalculate max refill. |
1310 | */ | |
e5b9b344 BB |
1311 | refill = MIN(refill, skm->skm_size - skm->skm_avail); |
1312 | ||
1313 | spin_lock(&skc->skc_lock); | |
1314 | continue; | |
1315 | } | |
1316 | ||
1317 | /* Grab the next available slab */ | |
1318 | sks = list_entry((&skc->skc_partial_list)->next, | |
b34b9563 | 1319 | spl_kmem_slab_t, sks_list); |
e5b9b344 BB |
1320 | ASSERT(sks->sks_magic == SKS_MAGIC); |
1321 | ASSERT(sks->sks_ref < sks->sks_objs); | |
1322 | ASSERT(!list_empty(&sks->sks_free_list)); | |
1323 | ||
b34b9563 BB |
1324 | /* |
1325 | * Consume as many objects as needed to refill the requested | |
1326 | * cache. We must also be careful not to overfill it. | |
1327 | */ | |
1328 | while (sks->sks_ref < sks->sks_objs && refill-- > 0 && | |
1329 | ++count) { | |
e5b9b344 BB |
1330 | ASSERT(skm->skm_avail < skm->skm_size); |
1331 | ASSERT(count < skm->skm_size); | |
b34b9563 BB |
1332 | skm->skm_objs[skm->skm_avail++] = |
1333 | spl_cache_obj(skc, sks); | |
e5b9b344 BB |
1334 | } |
1335 | ||
1336 | /* Move slab to skc_complete_list when full */ | |
1337 | if (sks->sks_ref == sks->sks_objs) { | |
1338 | list_del(&sks->sks_list); | |
1339 | list_add(&sks->sks_list, &skc->skc_complete_list); | |
1340 | } | |
1341 | } | |
1342 | ||
1343 | spin_unlock(&skc->skc_lock); | |
1344 | out: | |
1345 | return (NULL); | |
1346 | } | |
1347 | ||
1348 | /* | |
1349 | * Release an object back to the slab from which it came. | |
1350 | */ | |
1351 | static void | |
1352 | spl_cache_shrink(spl_kmem_cache_t *skc, void *obj) | |
1353 | { | |
1354 | spl_kmem_slab_t *sks = NULL; | |
1355 | spl_kmem_obj_t *sko = NULL; | |
1356 | ||
1357 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
1358 | ASSERT(spin_is_locked(&skc->skc_lock)); | |
1359 | ||
1360 | sko = spl_sko_from_obj(skc, obj); | |
1361 | ASSERT(sko->sko_magic == SKO_MAGIC); | |
1362 | sks = sko->sko_slab; | |
1363 | ASSERT(sks->sks_magic == SKS_MAGIC); | |
1364 | ASSERT(sks->sks_cache == skc); | |
1365 | list_add(&sko->sko_list, &sks->sks_free_list); | |
1366 | ||
1367 | sks->sks_age = jiffies; | |
1368 | sks->sks_ref--; | |
1369 | skc->skc_obj_alloc--; | |
1370 | ||
b34b9563 BB |
1371 | /* |
1372 | * Move slab to skc_partial_list when no longer full. Slabs | |
e5b9b344 | 1373 | * are added to the head to keep the partial list in quasi-full |
b34b9563 BB |
1374 | * sorted order. Fuller at the head, emptier at the tail. |
1375 | */ | |
e5b9b344 BB |
1376 | if (sks->sks_ref == (sks->sks_objs - 1)) { |
1377 | list_del(&sks->sks_list); | |
1378 | list_add(&sks->sks_list, &skc->skc_partial_list); | |
1379 | } | |
1380 | ||
b34b9563 BB |
1381 | /* |
1382 | * Move empty slabs to the end of the partial list so | |
1383 | * they can be easily found and freed during reclamation. | |
1384 | */ | |
e5b9b344 BB |
1385 | if (sks->sks_ref == 0) { |
1386 | list_del(&sks->sks_list); | |
1387 | list_add_tail(&sks->sks_list, &skc->skc_partial_list); | |
1388 | skc->skc_slab_alloc--; | |
1389 | } | |
1390 | } | |
1391 | ||
1392 | /* | |
1393 | * Allocate an object from the per-cpu magazine, or if the magazine | |
1394 | * is empty directly allocate from a slab and repopulate the magazine. | |
1395 | */ | |
1396 | void * | |
1397 | spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags) | |
1398 | { | |
1399 | spl_kmem_magazine_t *skm; | |
1400 | void *obj = NULL; | |
1401 | ||
c3eabc75 | 1402 | ASSERT0(flags & ~KM_PUBLIC_MASK); |
e5b9b344 BB |
1403 | ASSERT(skc->skc_magic == SKC_MAGIC); |
1404 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); | |
e5b9b344 BB |
1405 | |
1406 | atomic_inc(&skc->skc_ref); | |
1407 | ||
1408 | /* | |
1409 | * Allocate directly from a Linux slab. All optimizations are left | |
1410 | * to the underlying cache; we only need to guarantee that KM_SLEEP | |
1411 | * callers will never fail. | |
1412 | */ | |
1413 | if (skc->skc_flags & KMC_SLAB) { | |
1414 | struct kmem_cache *slc = skc->skc_linux_cache; | |
e5b9b344 | 1415 | do { |
c3eabc75 | 1416 | obj = kmem_cache_alloc(slc, kmem_flags_convert(flags)); |
e5b9b344 BB |
1417 | } while ((obj == NULL) && !(flags & KM_NOSLEEP)); |
1418 | ||
1419 | goto ret; | |
1420 | } | |
1421 | ||
1422 | local_irq_disable(); | |
1423 | ||
1424 | restart: | |
b34b9563 BB |
1425 | /* |
1426 | * Safe to update per-cpu structure without lock, but | |
e5b9b344 BB |
1427 | * in the restart case we must be careful to reacquire |
1428 | * the local magazine since this may have changed | |
b34b9563 BB |
1429 | * when we need to grow the cache. |
1430 | */ | |
e5b9b344 BB |
1431 | skm = skc->skc_mag[smp_processor_id()]; |
1432 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
1433 | ||
1434 | if (likely(skm->skm_avail)) { | |
1435 | /* Object available in CPU cache, use it */ | |
1436 | obj = skm->skm_objs[--skm->skm_avail]; | |
1437 | skm->skm_age = jiffies; | |
1438 | } else { | |
1439 | obj = spl_cache_refill(skc, skm, flags); | |
3018bffa | 1440 | if ((obj == NULL) && !(flags & KM_NOSLEEP)) |
e5b9b344 | 1441 | goto restart; |
3018bffa BB |
1442 | |
1443 | local_irq_enable(); | |
1444 | goto ret; | |
e5b9b344 BB |
1445 | } |
1446 | ||
1447 | local_irq_enable(); | |
1448 | ASSERT(obj); | |
1449 | ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align)); | |
1450 | ||
1451 | ret: | |
1452 | /* Pre-emptively migrate object to CPU L1 cache */ | |
1453 | if (obj) { | |
1454 | if (skc->skc_ctor)
1455 | skc->skc_ctor(obj, skc->skc_private, flags); | |
1456 | else | |
1457 | prefetchw(obj); | |
1458 | } | |
1459 | ||
1460 | atomic_dec(&skc->skc_ref); | |
1461 | ||
1462 | return (obj); | |
1463 | } | |
e5b9b344 BB |
1464 | EXPORT_SYMBOL(spl_kmem_cache_alloc); |
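/*
 * Consumer-side sketch, not part of this file: typical use of the cache
 * interfaces above.  The names 'my_obj_t', 'my_ctor' and 'my_example' are
 * hypothetical, and the argument list assumes the Solaris-style
 * spl_kmem_cache_create() signature exported by the SPL (name, size, align,
 * ctor, dtor, reclaim, private, vmem, flags).
 */
typedef struct my_obj {
	int mo_value;
} my_obj_t;

static int
my_ctor(void *buf, void *priv, int kmflags)
{
	my_obj_t *mo = buf;

	mo->mo_value = 0;
	return (0);
}

static void
my_example(void)
{
	spl_kmem_cache_t *cache;
	my_obj_t *mo;

	cache = spl_kmem_cache_create("my_cache", sizeof (my_obj_t), 0,
	    my_ctor, NULL, NULL, NULL, NULL, 0);

	/* KM_SLEEP callers are guaranteed to eventually succeed. */
	mo = spl_kmem_cache_alloc(cache, KM_SLEEP);
	mo->mo_value = 42;

	/* The object goes back to the local per-cpu magazine. */
	spl_kmem_cache_free(cache, mo);

	spl_kmem_cache_destroy(cache);
}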
1465 | ||
1466 | /* | |
1467 | * Free an object back to the local per-cpu magazine; there is no
1468 | * guarantee that this is the same magazine the object was originally
1469 | * allocated from. We may need to flush entire magazines back to the
1470 | * slabs to make space.
1471 | */ | |
1472 | void | |
1473 | spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj) | |
1474 | { | |
1475 | spl_kmem_magazine_t *skm; | |
1476 | unsigned long flags; | |
1a204968 | 1477 | int do_reclaim = 0; |
436ad60f | 1478 | int do_emergency = 0; |
e5b9b344 BB |
1479 | |
1480 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
1481 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); | |
1482 | atomic_inc(&skc->skc_ref); | |
1483 | ||
1484 | /* | |
1485 | * Run the destructor | |
1486 | */ | |
1487 | if (skc->skc_dtor) | |
1488 | skc->skc_dtor(obj, skc->skc_private); | |
1489 | ||
1490 | /* | |
1491 | * Free the object back to the underlying Linux slab.
1492 | */ | |
1493 | if (skc->skc_flags & KMC_SLAB) { | |
1494 | kmem_cache_free(skc->skc_linux_cache, obj); | |
1495 | goto out; | |
1496 | } | |
1497 | ||
1498 | /* | |
436ad60f BB |
1499 | * While a cache has outstanding emergency objects all freed objects |
1500 | * must be checked. However, since emergency objects will never use | |
1501 | * a virtual address these objects can be safely excluded as an | |
1502 | * optimization. | |
e5b9b344 | 1503 | */ |
436ad60f BB |
1504 | if (!is_vmalloc_addr(obj)) { |
1505 | spin_lock(&skc->skc_lock); | |
1506 | do_emergency = (skc->skc_obj_emergency > 0); | |
1507 | spin_unlock(&skc->skc_lock); | |
1508 | ||
1509 | if (do_emergency && (spl_emergency_free(skc, obj) == 0)) | |
1510 | goto out; | |
e5b9b344 BB |
1511 | } |
1512 | ||
1513 | local_irq_save(flags); | |
1514 | ||
b34b9563 BB |
1515 | /* |
1516 | * Safe to update per-cpu structure without lock, but | |
e5b9b344 BB |
1517 | * because no remote memory allocation tracking is performed
1518 | * it is entirely possible to allocate an object from one | |
b34b9563 BB |
1519 | * CPU cache and return it to another. |
1520 | */ | |
e5b9b344 BB |
1521 | skm = skc->skc_mag[smp_processor_id()]; |
1522 | ASSERT(skm->skm_magic == SKM_MAGIC); | |
1523 | ||
1a204968 BB |
1524 | /* |
1525 | * Per-CPU cache full, flush it to make space for this object, | |
1526 | * this may result in an empty slab which can be reclaimed once | |
1527 | * interrupts are re-enabled. | |
1528 | */ | |
1529 | if (unlikely(skm->skm_avail >= skm->skm_size)) { | |
e5b9b344 | 1530 | spl_cache_flush(skc, skm, skm->skm_refill); |
1a204968 BB |
1531 | do_reclaim = 1; |
1532 | } | |
e5b9b344 BB |
1533 | |
1534 | /* Available space in cache, use it */ | |
1535 | skm->skm_objs[skm->skm_avail++] = obj; | |
1536 | ||
1537 | local_irq_restore(flags); | |
1a204968 BB |
1538 | |
1539 | if (do_reclaim) | |
1540 | spl_slab_reclaim(skc); | |
e5b9b344 BB |
1541 | out: |
1542 | atomic_dec(&skc->skc_ref); | |
1543 | } | |
1544 | EXPORT_SYMBOL(spl_kmem_cache_free); | |
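/*
 * Illustrative sketch, not part of the SPL source: the per-cpu magazine
 * policy applied by spl_kmem_cache_free() above, modelled with a
 * hypothetical 'mag_model'.  Freed objects are stacked in the magazine
 * until it fills; at that point up to skm_refill of them are flushed back
 * to the slab layer, and the newly freed object always ends up cached.
 */
struct mag_model {
	unsigned int size;	/* skm_size: magazine capacity */
	unsigned int refill;	/* skm_refill: objects flushed when full */
	unsigned int avail;	/* skm_avail: objects currently cached */
};

/* Returns the number of objects flushed back to the slabs by this free. */
static unsigned int
mag_model_free(struct mag_model *m)
{
	unsigned int flushed = 0;

	if (m->avail >= m->size) {
		/* spl_cache_flush(skc, skm, skm->skm_refill) */
		flushed = (m->refill < m->avail) ? m->refill : m->avail;
		m->avail -= flushed;
	}

	/* skm->skm_objs[skm->skm_avail++] = obj */
	m->avail++;

	return (flushed);
}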
1545 | ||
1546 | /* | |
1547 | * The generic shrinker function for all caches. Under Linux a shrinker | |
1548 | * may not be tightly coupled with a slab cache. In fact Linux
1549 | * systematically tries calling all registered shrinker callbacks which | |
1550 | * report that they contain unused objects. Because of this we only | |
1551 | * register one shrinker function in the shim layer for all slab caches. | |
1552 | * We always attempt to shrink all caches when this generic shrinker | |
1553 | * is called. | |
1554 | * | |
1555 | * If sc->nr_to_scan is zero, the caller is requesting a query of the | |
1556 | * number of objects which can potentially be freed. If it is nonzero, | |
1557 | * the request is to free that many objects. | |
1558 | * | |
1559 | * Linux kernels >= 3.12 have the count_objects and scan_objects callbacks | |
1560 | * in struct shrinker and also require the shrinker to return the number | |
1561 | * of objects freed. | |
1562 | * | |
1563 | * Older kernels require the shrinker to return the number of freeable | |
1564 | * objects following the freeing of nr_to_free. | |
1565 | * | |
1566 | * Linux semantics differ from those under Solaris, which are to | |
1567 | * free all available objects, which may (and probably will) be more
1568 | * than the requested nr_to_scan.
1569 | */ | |
1570 | static spl_shrinker_t | |
1571 | __spl_kmem_cache_generic_shrinker(struct shrinker *shrink, | |
1572 | struct shrink_control *sc) | |
1573 | { | |
1574 | spl_kmem_cache_t *skc; | |
1575 | int alloc = 0; | |
1576 | ||
ae26dd00 TC |
1577 | /* |
1578 | * No shrinking in a transaction context. Can cause deadlocks. | |
1579 | */ | |
1580 | if (sc->nr_to_scan && spl_fstrans_check()) | |
1581 | return (SHRINK_STOP); | |
1582 | ||
e5b9b344 BB |
1583 | down_read(&spl_kmem_cache_sem); |
1584 | list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) { | |
1585 | if (sc->nr_to_scan) { | |
1586 | #ifdef HAVE_SPLIT_SHRINKER_CALLBACK | |
1587 | uint64_t oldalloc = skc->skc_obj_alloc; | |
1588 | spl_kmem_cache_reap_now(skc, | |
b34b9563 | 1589 | MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1)); |
e5b9b344 BB |
1590 | if (oldalloc > skc->skc_obj_alloc) |
1591 | alloc += oldalloc - skc->skc_obj_alloc; | |
1592 | #else | |
1593 | spl_kmem_cache_reap_now(skc, | |
b34b9563 | 1594 | MAX(sc->nr_to_scan>>fls64(skc->skc_slab_objs), 1)); |
e5b9b344 BB |
1595 | alloc += skc->skc_obj_alloc; |
1596 | #endif /* HAVE_SPLIT_SHRINKER_CALLBACK */ | |
1597 | } else { | |
1598 | /* Request to query number of freeable objects */ | |
1599 | alloc += skc->skc_obj_alloc; | |
1600 | } | |
1601 | } | |
1602 | up_read(&spl_kmem_cache_sem); | |
1603 | ||
1604 | /* | |
1605 | * When KMC_RECLAIM_ONCE is set allow only a single reclaim pass. | |
1606 | * This functionality only exists to work around a rare issue where | |
1607 | * shrink_slab() is repeatedly invoked by many cores, causing the
1608 | * system to thrash. | |
1609 | */ | |
1610 | if ((spl_kmem_cache_reclaim & KMC_RECLAIM_ONCE) && sc->nr_to_scan) | |
1611 | return (SHRINK_STOP); | |
1612 | ||
1613 | return (MAX(alloc, 0)); | |
1614 | } | |
1615 | ||
1616 | SPL_SHRINKER_CALLBACK_WRAPPER(spl_kmem_cache_generic_shrinker); | |
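/*
 * Illustrative sketch, not part of the SPL source: roughly how the single
 * generic shrinker above maps onto the split count_objects/scan_objects
 * callbacks required by Linux >= 3.12.  The real glue is generated by the
 * SPL_SHRINKER_CALLBACK_WRAPPER compatibility macro; the function names
 * below are hypothetical.
 */
static unsigned long
example_count_objects(struct shrinker *shrink, struct shrink_control *sc)
{
	struct shrink_control query = {
		.gfp_mask = sc->gfp_mask,
		.nr_to_scan = 0,	/* query only: report freeable objects */
	};

	return (__spl_kmem_cache_generic_shrinker(shrink, &query));
}

static unsigned long
example_scan_objects(struct shrinker *shrink, struct shrink_control *sc)
{
	/* nr_to_scan != 0: free objects and report the number freed */
	return (__spl_kmem_cache_generic_shrinker(shrink, sc));
}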
1617 | ||
1618 | /* | |
1619 | * Call the registered reclaim function for a cache. Depending on how | |
1620 | * many and which objects are released it may simply repopulate the | |
1621 | * local magazine, which will then need to age out. Objects which cannot
1622 | * fit in the magazine will be released back to their slabs, which will
1623 | * also need to age out before being released. This is all just best
1624 | * effort and we do not want to thrash creating and destroying slabs. | |
1625 | */ | |
1626 | void | |
1627 | spl_kmem_cache_reap_now(spl_kmem_cache_t *skc, int count) | |
1628 | { | |
1629 | ASSERT(skc->skc_magic == SKC_MAGIC); | |
1630 | ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags)); | |
1631 | ||
1632 | atomic_inc(&skc->skc_ref); | |
1633 | ||
1634 | /* | |
1635 | * Execute the registered reclaim callback if it exists. The | |
1636 | * per-cpu caches will be drained when KMC_EXPIRE_MEM is set.
1637 | */ | |
1638 | if (skc->skc_flags & KMC_SLAB) { | |
1639 | if (skc->skc_reclaim) | |
1640 | skc->skc_reclaim(skc->skc_private); | |
1641 | ||
1642 | if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) | |
1643 | kmem_cache_shrink(skc->skc_linux_cache); | |
1644 | ||
1645 | goto out; | |
1646 | } | |
1647 | ||
1648 | /* | |
1649 | * Prevent concurrent cache reaping when contended. | |
1650 | */ | |
1651 | if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags)) | |
1652 | goto out; | |
1653 | ||
1654 | /* | |
1655 | * When a reclaim function is available it may be invoked repeatedly | |
1656 | * until at least a single slab can be freed. This ensures that we | |
1657 | * do free memory back to the system. This helps minimize the chance | |
1658 | * of an OOM event when the bulk of memory is used by the slab. | |
1659 | * | |
1660 | * When free slabs are already available the reclaim callback will be | |
1661 | * skipped. Additionally, if no forward progress is detected despite | |
1662 | * a reclaim function the cache will be skipped to avoid deadlock. | |
1663 | * | |
1664 | * Longer term this would be the correct place to add the code which | |
1665 | * repacks the slabs in order to minimize fragmentation.
1666 | */ | |
1667 | if (skc->skc_reclaim) { | |
1668 | uint64_t objects = UINT64_MAX; | |
1669 | int do_reclaim; | |
1670 | ||
1671 | do { | |
1672 | spin_lock(&skc->skc_lock); | |
1673 | do_reclaim = | |
1674 | (skc->skc_slab_total > 0) && | |
b34b9563 | 1675 | ((skc->skc_slab_total-skc->skc_slab_alloc) == 0) && |
e5b9b344 BB |
1676 | (skc->skc_obj_alloc < objects); |
1677 | ||
1678 | objects = skc->skc_obj_alloc; | |
1679 | spin_unlock(&skc->skc_lock); | |
1680 | ||
1681 | if (do_reclaim) | |
1682 | skc->skc_reclaim(skc->skc_private); | |
1683 | ||
1684 | } while (do_reclaim); | |
1685 | } | |
1686 | ||
1a204968 | 1687 | /* Reclaim from the magazine and free all now empty slabs. */ |
e5b9b344 BB |
1688 | if (spl_kmem_cache_expire & KMC_EXPIRE_MEM) { |
1689 | spl_kmem_magazine_t *skm; | |
1690 | unsigned long irq_flags; | |
1691 | ||
1692 | local_irq_save(irq_flags); | |
1693 | skm = skc->skc_mag[smp_processor_id()]; | |
1694 | spl_cache_flush(skc, skm, skm->skm_avail); | |
1695 | local_irq_restore(irq_flags); | |
1696 | } | |
1697 | ||
1a204968 | 1698 | spl_slab_reclaim(skc); |
a988a35a RY |
1699 | clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags); |
1700 | smp_mb__after_atomic(); | |
e5b9b344 BB |
1701 | wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING); |
1702 | out: | |
1703 | atomic_dec(&skc->skc_ref); | |
1704 | } | |
1705 | EXPORT_SYMBOL(spl_kmem_cache_reap_now); | |
1706 | ||
1707 | /* | |
1708 | * Reap all free slabs from all registered caches. | |
1709 | */ | |
1710 | void | |
1711 | spl_kmem_reap(void) | |
1712 | { | |
1713 | struct shrink_control sc; | |
1714 | ||
1715 | sc.nr_to_scan = KMC_REAP_CHUNK; | |
1716 | sc.gfp_mask = GFP_KERNEL; | |
1717 | ||
1718 | (void) __spl_kmem_cache_generic_shrinker(NULL, &sc); | |
1719 | } | |
1720 | EXPORT_SYMBOL(spl_kmem_reap); | |
1721 | ||
1722 | int | |
1723 | spl_kmem_cache_init(void) | |
1724 | { | |
1725 | init_rwsem(&spl_kmem_cache_sem); | |
1726 | INIT_LIST_HEAD(&spl_kmem_cache_list); | |
1727 | spl_kmem_cache_taskq = taskq_create("spl_kmem_cache", | |
62aa81a5 | 1728 | spl_kmem_cache_kmem_threads, defclsyspri, |
3c82160f BB |
1729 | spl_kmem_cache_kmem_threads * 8, INT_MAX, |
1730 | TASKQ_PREPOPULATE | TASKQ_DYNAMIC); | |
e5b9b344 BB |
1731 | spl_register_shrinker(&spl_kmem_cache_shrinker); |
1732 | ||
1733 | return (0); | |
1734 | } | |
1735 | ||
1736 | void | |
1737 | spl_kmem_cache_fini(void) | |
1738 | { | |
1739 | spl_unregister_shrinker(&spl_kmem_cache_shrinker); | |
1740 | taskq_destroy(spl_kmem_cache_taskq); | |
1741 | } |