/*
 * Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
 * Copyright (C) 2007 The Regents of the University of California.
 * Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 * Written by Brian Behlendorf <behlendorf1@llnl.gov>.
 * UCRL-CODE-235197
 *
 * This file is part of the SPL, Solaris Porting Layer.
 *
 * The SPL is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * The SPL is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with the SPL. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/percpu_compat.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/taskq.h>
#include <sys/timer.h>
#include <sys/vmem.h>
#include <sys/wait.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/prefetch.h>

/*
 * Within the scope of this file the kmem_cache_* definitions
 * are removed to allow access to the real Linux slab allocator.
 */
#undef kmem_cache_destroy
#undef kmem_cache_create
#undef kmem_cache_alloc
#undef kmem_cache_free


/*
 * Linux 3.16 replaced smp_mb__{before,after}_{atomic,clear}_{dec,inc,bit}()
 * with smp_mb__{before,after}_atomic() because they were redundant. This is
 * only used inside our SLAB allocator, so we implement an internal wrapper
 * here to give us smp_mb__{before,after}_atomic() on older kernels.
 */
#ifndef smp_mb__before_atomic
#define smp_mb__before_atomic(x) smp_mb__before_clear_bit(x)
#endif

#ifndef smp_mb__after_atomic
#define smp_mb__after_atomic(x) smp_mb__after_clear_bit(x)
#endif

/* BEGIN CSTYLED */

/*
 * Cache magazines are an optimization designed to minimize the cost of
 * allocating memory. They do this by keeping a per-cpu cache of recently
 * freed objects, which can then be reallocated without taking a lock. This
 * can improve performance on highly contended caches. However, because
 * objects in magazines will prevent otherwise empty slabs from being
 * immediately released this may not be ideal for low memory machines.
 *
 * For this reason spl_kmem_cache_magazine_size can be used to set a maximum
 * magazine size. When this value is set to 0 the magazine size will be
 * automatically determined based on the object size. Otherwise magazines
 * will be limited to 2-256 objects per magazine (i.e. per cpu). Magazines
 * may never be entirely disabled in this implementation.
 */
unsigned int spl_kmem_cache_magazine_size = 0;
module_param(spl_kmem_cache_magazine_size, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_magazine_size,
    "Default magazine size (2-256), set automatically (0)");
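
/*
 * Illustrative note (assumption, not taken from this codebase): because the
 * parameter above is registered with module_param() and mode 0444, it is
 * typically set when the spl module is loaded (for example via a modprobe
 * option such as spl_kmem_cache_magazine_size=64) and can be read back from
 * /sys/module/spl/parameters/. Exact administration details depend on the
 * distribution.
 */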

/*
 * The default behavior is to report the number of objects remaining in the
 * cache. This allows the Linux VM to repeatedly reclaim objects from the
 * cache when memory is low to satisfy other memory allocations. Alternately,
 * setting this value to KMC_RECLAIM_ONCE limits how aggressively the cache
 * is reclaimed. This may increase the likelihood of out of memory events.
 */
unsigned int spl_kmem_cache_reclaim = 0 /* KMC_RECLAIM_ONCE */;
module_param(spl_kmem_cache_reclaim, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_reclaim, "Single reclaim pass (0x1)");

unsigned int spl_kmem_cache_obj_per_slab = SPL_KMEM_CACHE_OBJ_PER_SLAB;
module_param(spl_kmem_cache_obj_per_slab, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_obj_per_slab, "Number of objects per slab");

unsigned int spl_kmem_cache_max_size = SPL_KMEM_CACHE_MAX_SIZE;
module_param(spl_kmem_cache_max_size, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_max_size, "Maximum size of slab in MB");

/*
 * For small objects the Linux slab allocator should be used to make the most
 * efficient use of the memory. However, large objects are not supported by
 * the Linux slab and therefore the SPL implementation is preferred. A cutoff
 * of 16K was determined to be optimal for architectures using 4K pages.
 */
#if PAGE_SIZE == 4096
unsigned int spl_kmem_cache_slab_limit = 16384;
#else
unsigned int spl_kmem_cache_slab_limit = 0;
#endif
module_param(spl_kmem_cache_slab_limit, uint, 0644);
MODULE_PARM_DESC(spl_kmem_cache_slab_limit,
    "Objects less than N bytes use the Linux slab");
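
/*
 * For example (worked from the defaults above, illustrative only): on a
 * 4 KiB-page system the limit defaults to 16384 bytes, so a cache of 8 KiB
 * objects is backed by the Linux slab (KMC_SLAB) while a cache of 64 KiB
 * objects falls back to the SPL kvmem-backed implementation (KMC_KVMEM).
 * The actual selection happens in spl_kmem_cache_create() below.
 */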

/*
 * The number of threads available to allocate new slabs for caches. This
 * should not need to be tuned but it is available for performance analysis.
 */
unsigned int spl_kmem_cache_kmem_threads = 4;
module_param(spl_kmem_cache_kmem_threads, uint, 0444);
MODULE_PARM_DESC(spl_kmem_cache_kmem_threads,
    "Number of spl_kmem_cache threads");
/* END CSTYLED */

/*
 * Slab allocation interfaces
 *
 * While the Linux slab implementation was inspired by the Solaris
 * implementation I cannot use it to emulate the Solaris APIs. I
 * require two features which are not provided by the Linux slab.
 *
 * 1) Constructors AND destructors. Recent versions of the Linux
 *    kernel have removed support for destructors. This is a deal
 *    breaker for the SPL which contains particularly expensive
 *    initializers for mutex's, condition variables, etc. We also
 *    require a minimal level of cleanup for these data types, unlike
 *    many Linux data types which do not need to be explicitly destroyed.
 *
 * 2) Virtual address space backed slab. Callers of the Solaris slab
 *    expect it to work well for both small and very large allocations.
 *    Because of memory fragmentation the Linux slab, which is backed
 *    by kmalloc'ed memory, performs very badly when confronted with
 *    large numbers of large allocations. Basing the slab on the
 *    virtual address space removes the need for contiguous pages
 *    and greatly improves performance for large allocations.
 *
 * For these reasons, the SPL has its own slab implementation with
 * the needed features. It is not as highly optimized as either the
 * Solaris or Linux slabs, but it should get me most of what is
 * needed until it can be optimized or obsoleted by another approach.
 *
 * One serious concern I do have about this method is the relatively
 * small virtual address space on 32bit arches. This will seriously
 * constrain the size of the slab caches and their performance.
 */

struct list_head spl_kmem_cache_list;		/* List of caches */
struct rw_semaphore spl_kmem_cache_sem;		/* Cache list lock */
taskq_t *spl_kmem_cache_taskq;			/* Task queue for aging / reclaim */

static void spl_cache_shrink(spl_kmem_cache_t *skc, void *obj);

static void *
kv_alloc(spl_kmem_cache_t *skc, int size, int flags)
{
        gfp_t lflags = kmem_flags_convert(flags);
        void *ptr;

        ptr = spl_vmalloc(size, lflags | __GFP_HIGHMEM);

        /* Resulting allocated memory will be page aligned */
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

        return (ptr);
}

static void
kv_free(spl_kmem_cache_t *skc, void *ptr, int size)
{
        ASSERT(IS_P2ALIGNED(ptr, PAGE_SIZE));

        /*
         * The Linux direct reclaim path uses this out of band value to
         * determine if forward progress is being made. Normally this is
         * incremented by kmem_freepages() which is part of the various
         * Linux slab implementations. However, since we are using none
         * of that infrastructure we are responsible for incrementing it.
         */
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += size >> PAGE_SHIFT;

        vfree(ptr);
}

/*
 * Required space for each aligned sks.
 */
static inline uint32_t
spl_sks_size(spl_kmem_cache_t *skc)
{
        return (P2ROUNDUP_TYPED(sizeof (spl_kmem_slab_t),
            skc->skc_obj_align, uint32_t));
}

/*
 * Required space for each aligned object.
 */
static inline uint32_t
spl_obj_size(spl_kmem_cache_t *skc)
{
        uint32_t align = skc->skc_obj_align;

        return (P2ROUNDUP_TYPED(skc->skc_obj_size, align, uint32_t) +
            P2ROUNDUP_TYPED(sizeof (spl_kmem_obj_t), align, uint32_t));
}
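
/*
 * For example (illustrative arithmetic only; the real sizes depend on
 * sizeof (spl_kmem_obj_t) and the cache's alignment): with a 100-byte
 * skc_obj_size and a 32-byte skc_obj_align, the object itself is rounded
 * up to P2ROUNDUP(100, 32) = 128 bytes and is followed by the similarly
 * rounded spl_kmem_obj_t bookkeeping. spl_sko_from_obj() below recovers
 * that bookkeeping structure from an object address using the same
 * rounding.
 */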

uint64_t
spl_kmem_cache_inuse(kmem_cache_t *cache)
{
        return (cache->skc_obj_total);
}
EXPORT_SYMBOL(spl_kmem_cache_inuse);

uint64_t
spl_kmem_cache_entry_size(kmem_cache_t *cache)
{
        return (cache->skc_obj_size);
}
EXPORT_SYMBOL(spl_kmem_cache_entry_size);

/*
 * Lookup the spl_kmem_obj_t for an object given that object.
 */
static inline spl_kmem_obj_t *
spl_sko_from_obj(spl_kmem_cache_t *skc, void *obj)
{
        return (obj + P2ROUNDUP_TYPED(skc->skc_obj_size,
            skc->skc_obj_align, uint32_t));
}

/*
 * It's important that we pack the spl_kmem_obj_t structure and the
 * actual objects into one large address space to minimize the number
 * of calls to the allocator. It is far better to do a few large
 * allocations and then subdivide it ourselves. Now which allocator
 * we use requires balancing a few trade-offs.
 *
 * For small objects we use kmem_alloc() because as long as you are
 * only requesting a small number of pages (ideally just one) it's cheap.
 * However, when you start requesting multiple pages with kmem_alloc()
 * it gets increasingly expensive since it requires contiguous pages.
 * For this reason we shift to vmem_alloc() for slabs of large objects
 * which removes the need for contiguous pages. We do not use
 * vmem_alloc() in all cases because there is significant locking
 * overhead in __get_vm_area_node(). This function takes a single
 * global lock when acquiring an available virtual address range which
 * serializes all vmem_alloc()'s for all slab caches. Using slightly
 * different allocation functions for small and large objects should
 * give us the best of both worlds.
 *
 * +------------------------+
 * | spl_kmem_slab_t --+-+  |
 * | skc_obj_size    <-+ |  |
 * | spl_kmem_obj_t      |  |
 * | skc_obj_size    <---+  |
 * | spl_kmem_obj_t      |  |
 * | ...                 v  |
 * +------------------------+
 */
static spl_kmem_slab_t *
spl_slab_alloc(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_slab_t *sks;
        void *base;
        uint32_t obj_size;

        base = kv_alloc(skc, skc->skc_slab_size, flags);
        if (base == NULL)
                return (NULL);

        sks = (spl_kmem_slab_t *)base;
        sks->sks_magic = SKS_MAGIC;
        sks->sks_objs = skc->skc_slab_objs;
        sks->sks_age = jiffies;
        sks->sks_cache = skc;
        INIT_LIST_HEAD(&sks->sks_list);
        INIT_LIST_HEAD(&sks->sks_free_list);
        sks->sks_ref = 0;
        obj_size = spl_obj_size(skc);

        for (int i = 0; i < sks->sks_objs; i++) {
                void *obj = base + spl_sks_size(skc) + (i * obj_size);

                ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));
                spl_kmem_obj_t *sko = spl_sko_from_obj(skc, obj);
                sko->sko_addr = obj;
                sko->sko_magic = SKO_MAGIC;
                sko->sko_slab = sks;
                INIT_LIST_HEAD(&sko->sko_list);
                list_add_tail(&sko->sko_list, &sks->sks_free_list);
        }

        return (sks);
}

/*
 * Remove a slab from the complete or partial list. This must be called with
 * the 'skc->skc_lock' held, but the actual free must be performed
 * outside the lock to prevent deadlocking on vmem addresses.
 */
static void
spl_slab_free(spl_kmem_slab_t *sks,
    struct list_head *sks_list, struct list_head *sko_list)
{
        spl_kmem_cache_t *skc;

        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(sks->sks_ref == 0);

        skc = sks->sks_cache;
        ASSERT(skc->skc_magic == SKC_MAGIC);

        /*
         * Update slab/objects counters in the cache, then remove the
         * slab from the skc->skc_partial_list. Finally add the slab
         * and all its objects into the private work lists where the
         * destructors will be called and the memory freed to the system.
         */
        skc->skc_obj_total -= sks->sks_objs;
        skc->skc_slab_total--;
        list_del(&sks->sks_list);
        list_add(&sks->sks_list, sks_list);
        list_splice_init(&sks->sks_free_list, sko_list);
}

/*
 * Reclaim empty slabs at the end of the partial list.
 */
static void
spl_slab_reclaim(spl_kmem_cache_t *skc)
{
        spl_kmem_slab_t *sks = NULL, *m = NULL;
        spl_kmem_obj_t *sko = NULL, *n = NULL;
        LIST_HEAD(sks_list);
        LIST_HEAD(sko_list);

        /*
         * Empty slabs and objects must be moved to a private list so they
         * can be safely freed outside the spin lock. All empty slabs are
         * at the end of skc->skc_partial_list, therefore once a non-empty
         * slab is found we can stop scanning.
         */
        spin_lock(&skc->skc_lock);
        list_for_each_entry_safe_reverse(sks, m,
            &skc->skc_partial_list, sks_list) {

                if (sks->sks_ref > 0)
                        break;

                spl_slab_free(sks, &sks_list, &sko_list);
        }
        spin_unlock(&skc->skc_lock);

        /*
         * The following two loops ensure all the object destructors are run,
         * and the slabs themselves are freed. This is all done outside the
         * skc->skc_lock since this allows the destructor to sleep, and
         * allows us to perform a conditional reschedule when freeing a
         * large number of objects and slabs back to the system.
         */

        list_for_each_entry_safe(sko, n, &sko_list, sko_list) {
                ASSERT(sko->sko_magic == SKO_MAGIC);
        }

        list_for_each_entry_safe(sks, m, &sks_list, sks_list) {
                ASSERT(sks->sks_magic == SKS_MAGIC);
                kv_free(skc, sks, skc->skc_slab_size);
        }
}

static spl_kmem_emergency_t *
spl_emergency_search(struct rb_root *root, void *obj)
{
        struct rb_node *node = root->rb_node;
        spl_kmem_emergency_t *ske;
        unsigned long address = (unsigned long)obj;

        while (node) {
                ske = container_of(node, spl_kmem_emergency_t, ske_node);

                if (address < ske->ske_obj)
                        node = node->rb_left;
                else if (address > ske->ske_obj)
                        node = node->rb_right;
                else
                        return (ske);
        }

        return (NULL);
}

static int
spl_emergency_insert(struct rb_root *root, spl_kmem_emergency_t *ske)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;
        spl_kmem_emergency_t *ske_tmp;
        unsigned long address = ske->ske_obj;

        while (*new) {
                ske_tmp = container_of(*new, spl_kmem_emergency_t, ske_node);

                parent = *new;
                if (address < ske_tmp->ske_obj)
                        new = &((*new)->rb_left);
                else if (address > ske_tmp->ske_obj)
                        new = &((*new)->rb_right);
                else
                        return (0);
        }

        rb_link_node(&ske->ske_node, parent, new);
        rb_insert_color(&ske->ske_node, root);

        return (1);
}

/*
 * Allocate a single emergency object and track it in a red black tree.
 */
static int
spl_emergency_alloc(spl_kmem_cache_t *skc, int flags, void **obj)
{
        gfp_t lflags = kmem_flags_convert(flags);
        spl_kmem_emergency_t *ske;
        int order = get_order(skc->skc_obj_size);
        int empty;

        /* Last chance: use a partial slab if one now exists */
        spin_lock(&skc->skc_lock);
        empty = list_empty(&skc->skc_partial_list);
        spin_unlock(&skc->skc_lock);
        if (!empty)
                return (-EEXIST);

        ske = kmalloc(sizeof (*ske), lflags);
        if (ske == NULL)
                return (-ENOMEM);

        ske->ske_obj = __get_free_pages(lflags, order);
        if (ske->ske_obj == 0) {
                kfree(ske);
                return (-ENOMEM);
        }

        spin_lock(&skc->skc_lock);
        empty = spl_emergency_insert(&skc->skc_emergency_tree, ske);
        if (likely(empty)) {
                skc->skc_obj_total++;
                skc->skc_obj_emergency++;
                if (skc->skc_obj_emergency > skc->skc_obj_emergency_max)
                        skc->skc_obj_emergency_max = skc->skc_obj_emergency;
        }
        spin_unlock(&skc->skc_lock);

        if (unlikely(!empty)) {
                free_pages(ske->ske_obj, order);
                kfree(ske);
                return (-EINVAL);
        }

        *obj = (void *)ske->ske_obj;

        return (0);
}

/*
 * Locate the passed object in the red black tree and free it.
 */
static int
spl_emergency_free(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_emergency_t *ske;
        int order = get_order(skc->skc_obj_size);

        spin_lock(&skc->skc_lock);
        ske = spl_emergency_search(&skc->skc_emergency_tree, obj);
        if (ske) {
                rb_erase(&ske->ske_node, &skc->skc_emergency_tree);
                skc->skc_obj_emergency--;
                skc->skc_obj_total--;
        }
        spin_unlock(&skc->skc_lock);

        if (ske == NULL)
                return (-ENOENT);

        free_pages(ske->ske_obj, order);
        kfree(ske);

        return (0);
}

/*
 * Release objects from the per-cpu magazine back to their slab. The flush
 * argument contains the max number of entries to remove from the magazine.
 */
static void
spl_cache_flush(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flush)
{
        spin_lock(&skc->skc_lock);

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skm->skm_magic == SKM_MAGIC);

        int count = MIN(flush, skm->skm_avail);
        for (int i = 0; i < count; i++)
                spl_cache_shrink(skc, skm->skm_objs[i]);

        skm->skm_avail -= count;
        memmove(skm->skm_objs, &(skm->skm_objs[count]),
            sizeof (void *) * skm->skm_avail);

        spin_unlock(&skc->skc_lock);
}

/*
 * Size a slab based on the size of each aligned object plus spl_kmem_obj_t.
 * When on-slab we want to target spl_kmem_cache_obj_per_slab. However,
 * for very small objects we may end up with more than this so as not
 * to waste space in the minimal allocation of a single page. Also for
 * very large objects we may use as few as spl_kmem_cache_obj_per_slab_min;
 * lower than this and we will fail.
 */
static int
spl_slab_size(spl_kmem_cache_t *skc, uint32_t *objs, uint32_t *size)
{
        uint32_t sks_size, obj_size, max_size, tgt_size, tgt_objs;

        sks_size = spl_sks_size(skc);
        obj_size = spl_obj_size(skc);
        max_size = (spl_kmem_cache_max_size * 1024 * 1024);
        tgt_size = (spl_kmem_cache_obj_per_slab * obj_size + sks_size);

        if (tgt_size <= max_size) {
                tgt_objs = (tgt_size - sks_size) / obj_size;
        } else {
                tgt_objs = (max_size - sks_size) / obj_size;
                tgt_size = (tgt_objs * obj_size) + sks_size;
        }

        if (tgt_objs == 0)
                return (-ENOSPC);

        *objs = tgt_objs;
        *size = tgt_size;

        return (0);
}
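
/*
 * Worked example (illustrative values only; the real defaults come from
 * SPL_KMEM_CACHE_OBJ_PER_SLAB and SPL_KMEM_CACHE_MAX_SIZE): with
 * spl_kmem_cache_obj_per_slab = 8, spl_kmem_cache_max_size = 32 (MB) and a
 * 128 KiB aligned object, tgt_size is roughly 8 * 128 KiB + sks_size, about
 * 1 MiB, which is under the 32 MiB cap so 8 objects per slab are used. Had
 * the target exceeded the cap, the object count would instead be recomputed
 * as (max_size - sks_size) / obj_size.
 */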

/*
 * Make a guess at reasonable per-cpu magazine size based on the size of
 * each object and the cost of caching N of them in each magazine. Long
 * term this should really adapt based on an observed usage heuristic.
 */
static int
spl_magazine_size(spl_kmem_cache_t *skc)
{
        uint32_t obj_size = spl_obj_size(skc);
        int size;

        if (spl_kmem_cache_magazine_size > 0)
                return (MAX(MIN(spl_kmem_cache_magazine_size, 256), 2));

        /* Per-magazine sizes below assume a 4KiB page size */
        if (obj_size > (PAGE_SIZE * 256))
                size = 4;	/* Minimum 4MiB per-magazine */
        else if (obj_size > (PAGE_SIZE * 32))
                size = 16;	/* Minimum 2MiB per-magazine */
        else if (obj_size > (PAGE_SIZE))
                size = 64;	/* Minimum 256KiB per-magazine */
        else if (obj_size > (PAGE_SIZE / 4))
                size = 128;	/* Minimum 128KiB per-magazine */
        else
                size = 256;

        return (size);
}
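
/*
 * For example (derived from the thresholds above, assuming 4 KiB pages):
 * an 8 KiB object falls in the (PAGE_SIZE, PAGE_SIZE * 32] bucket and gets
 * a 64-object magazine, roughly 512 KiB of cached objects per CPU, while a
 * 64-byte object gets the 256-object maximum. Setting the
 * spl_kmem_cache_magazine_size tunable short-circuits this heuristic
 * entirely, clamped to the 2-256 range.
 */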

/*
 * Allocate a per-cpu magazine to associate with a specific core.
 */
static spl_kmem_magazine_t *
spl_magazine_alloc(spl_kmem_cache_t *skc, int cpu)
{
        spl_kmem_magazine_t *skm;
        int size = sizeof (spl_kmem_magazine_t) +
            sizeof (void *) * skc->skc_mag_size;

        skm = kmalloc_node(size, GFP_KERNEL, cpu_to_node(cpu));
        if (skm) {
                skm->skm_magic = SKM_MAGIC;
                skm->skm_avail = 0;
                skm->skm_size = skc->skc_mag_size;
                skm->skm_refill = skc->skc_mag_refill;
                skm->skm_cache = skc;
                skm->skm_cpu = cpu;
        }

        return (skm);
}

/*
 * Free a per-cpu magazine associated with a specific core.
 */
static void
spl_magazine_free(spl_kmem_magazine_t *skm)
{
        ASSERT(skm->skm_magic == SKM_MAGIC);
        ASSERT(skm->skm_avail == 0);
        kfree(skm);
}

/*
 * Create all per-cpu magazines of reasonable sizes.
 */
static int
spl_magazine_create(spl_kmem_cache_t *skc)
{
        int i = 0;

        ASSERT((skc->skc_flags & KMC_SLAB) == 0);

        skc->skc_mag = kzalloc(sizeof (spl_kmem_magazine_t *) *
            num_possible_cpus(), kmem_flags_convert(KM_SLEEP));
        skc->skc_mag_size = spl_magazine_size(skc);
        skc->skc_mag_refill = (skc->skc_mag_size + 1) / 2;

        for_each_possible_cpu(i) {
                skc->skc_mag[i] = spl_magazine_alloc(skc, i);
                if (!skc->skc_mag[i]) {
                        for (i--; i >= 0; i--)
                                spl_magazine_free(skc->skc_mag[i]);

                        kfree(skc->skc_mag);
                        return (-ENOMEM);
                }
        }

        return (0);
}

/*
 * Destroy all per-cpu magazines.
 */
static void
spl_magazine_destroy(spl_kmem_cache_t *skc)
{
        spl_kmem_magazine_t *skm;
        int i = 0;

        ASSERT((skc->skc_flags & KMC_SLAB) == 0);

        for_each_possible_cpu(i) {
                skm = skc->skc_mag[i];
                spl_cache_flush(skc, skm, skm->skm_avail);
                spl_magazine_free(skm);
        }

        kfree(skc->skc_mag);
}

/*
 * Create an object cache based on the following arguments:
 * name		cache name
 * size		cache object size
 * align	cache object alignment
 * ctor		cache object constructor
 * dtor		cache object destructor
 * reclaim	cache object reclaim
 * priv		cache private data for ctor/dtor/reclaim
 * vmp		unused, must be NULL
 * flags
 *	KMC_KVMEM	Force kvmem backed SPL cache
 *	KMC_SLAB	Force Linux slab backed cache
 *	KMC_NODEBUG	Disable debugging (unsupported)
 */
spl_kmem_cache_t *
spl_kmem_cache_create(char *name, size_t size, size_t align,
    spl_kmem_ctor_t ctor, spl_kmem_dtor_t dtor, void *reclaim,
    void *priv, void *vmp, int flags)
{
        gfp_t lflags = kmem_flags_convert(KM_SLEEP);
        spl_kmem_cache_t *skc;
        int rc;

        /*
         * Unsupported flags
         */
        ASSERT(vmp == NULL);
        ASSERT(reclaim == NULL);

        might_sleep();

        skc = kzalloc(sizeof (*skc), lflags);
        if (skc == NULL)
                return (NULL);

        skc->skc_magic = SKC_MAGIC;
        skc->skc_name_size = strlen(name) + 1;
        skc->skc_name = (char *)kmalloc(skc->skc_name_size, lflags);
        if (skc->skc_name == NULL) {
                kfree(skc);
                return (NULL);
        }
        strncpy(skc->skc_name, name, skc->skc_name_size);

        skc->skc_ctor = ctor;
        skc->skc_dtor = dtor;
        skc->skc_private = priv;
        skc->skc_vmp = vmp;
        skc->skc_linux_cache = NULL;
        skc->skc_flags = flags;
        skc->skc_obj_size = size;
        skc->skc_obj_align = SPL_KMEM_CACHE_ALIGN;
        atomic_set(&skc->skc_ref, 0);

        INIT_LIST_HEAD(&skc->skc_list);
        INIT_LIST_HEAD(&skc->skc_complete_list);
        INIT_LIST_HEAD(&skc->skc_partial_list);
        skc->skc_emergency_tree = RB_ROOT;
        spin_lock_init(&skc->skc_lock);
        init_waitqueue_head(&skc->skc_waitq);
        skc->skc_slab_fail = 0;
        skc->skc_slab_create = 0;
        skc->skc_slab_destroy = 0;
        skc->skc_slab_total = 0;
        skc->skc_slab_alloc = 0;
        skc->skc_slab_max = 0;
        skc->skc_obj_total = 0;
        skc->skc_obj_alloc = 0;
        skc->skc_obj_max = 0;
        skc->skc_obj_deadlock = 0;
        skc->skc_obj_emergency = 0;
        skc->skc_obj_emergency_max = 0;

        rc = percpu_counter_init_common(&skc->skc_linux_alloc, 0,
            GFP_KERNEL);
        if (rc != 0) {
                kfree(skc);
                return (NULL);
        }

        /*
         * Verify the requested alignment restriction is sane.
         */
        if (align) {
                VERIFY(ISP2(align));
                VERIFY3U(align, >=, SPL_KMEM_CACHE_ALIGN);
                VERIFY3U(align, <=, PAGE_SIZE);
                skc->skc_obj_align = align;
        }

        /*
         * When no specific type of slab is requested (kmem, vmem, or
         * linuxslab) then select a cache type based on the object size
         * and default tunables.
         */
        if (!(skc->skc_flags & (KMC_SLAB | KMC_KVMEM))) {
                if (spl_kmem_cache_slab_limit &&
                    size <= (size_t)spl_kmem_cache_slab_limit) {
                        /*
                         * Objects smaller than spl_kmem_cache_slab_limit can
                         * use the Linux slab for better space-efficiency.
                         */
                        skc->skc_flags |= KMC_SLAB;
                } else {
                        /*
                         * All other objects are considered large and are
                         * placed on kvmem backed slabs.
                         */
                        skc->skc_flags |= KMC_KVMEM;
                }
        }

        /*
         * Given the type of slab allocate the required resources.
         */
        if (skc->skc_flags & KMC_KVMEM) {
                rc = spl_slab_size(skc,
                    &skc->skc_slab_objs, &skc->skc_slab_size);
                if (rc)
                        goto out;

                rc = spl_magazine_create(skc);
                if (rc)
                        goto out;
        } else {
                unsigned long slabflags = 0;

                if (size > (SPL_MAX_KMEM_ORDER_NR_PAGES * PAGE_SIZE)) {
                        rc = EINVAL;
                        goto out;
                }

#if defined(SLAB_USERCOPY)
                /*
                 * Required for PAX-enabled kernels if the slab is to be
                 * used for copying between user and kernel space.
                 */
                slabflags |= SLAB_USERCOPY;
#endif

#if defined(HAVE_KMEM_CACHE_CREATE_USERCOPY)
                /*
                 * Newer grsec patchset uses kmem_cache_create_usercopy()
                 * instead of SLAB_USERCOPY flag
                 */
                skc->skc_linux_cache = kmem_cache_create_usercopy(
                    skc->skc_name, size, align, slabflags, 0, size, NULL);
#else
                skc->skc_linux_cache = kmem_cache_create(
                    skc->skc_name, size, align, slabflags, NULL);
#endif
                if (skc->skc_linux_cache == NULL) {
                        rc = ENOMEM;
                        goto out;
                }
        }

        down_write(&spl_kmem_cache_sem);
        list_add_tail(&skc->skc_list, &spl_kmem_cache_list);
        up_write(&spl_kmem_cache_sem);

        return (skc);
out:
        kfree(skc->skc_name);
        percpu_counter_destroy(&skc->skc_linux_alloc);
        kfree(skc);
        return (NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_create);
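
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a consumer creates a cache for a fixed-size structure, allocates and
 * frees objects through it, and destroys it when done. "my_obj_t" and the
 * NULL constructor/destructor are placeholders; real callers typically
 * supply a ctor/dtor pair for expensive-to-initialize types.
 *
 *	spl_kmem_cache_t *cache = spl_kmem_cache_create("my_obj_cache",
 *	    sizeof (my_obj_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
 *	void *obj = spl_kmem_cache_alloc(cache, KM_SLEEP);
 *	...
 *	spl_kmem_cache_free(cache, obj);
 *	spl_kmem_cache_destroy(cache);
 */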

/*
 * Register a move callback for cache defragmentation.
 * XXX: Unimplemented but harmless to stub out for now.
 */
void
spl_kmem_cache_set_move(spl_kmem_cache_t *skc,
    kmem_cbrc_t (move)(void *, void *, size_t, void *))
{
        ASSERT(move != NULL);
}
EXPORT_SYMBOL(spl_kmem_cache_set_move);

/*
 * Destroy a cache and all objects associated with the cache.
 */
void
spl_kmem_cache_destroy(spl_kmem_cache_t *skc)
{
        DECLARE_WAIT_QUEUE_HEAD(wq);
        taskqid_t id;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skc->skc_flags & (KMC_KVMEM | KMC_SLAB));

        down_write(&spl_kmem_cache_sem);
        list_del_init(&skc->skc_list);
        up_write(&spl_kmem_cache_sem);

        /* Cancel and wait for any pending delayed tasks */
        VERIFY(!test_and_set_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        spin_lock(&skc->skc_lock);
        id = skc->skc_taskqid;
        spin_unlock(&skc->skc_lock);

        taskq_cancel_id(spl_kmem_cache_taskq, id);

        /*
         * Wait until all current callers complete, this is mainly
         * to catch the case where a low memory situation triggers a
         * cache reaping action which races with this destroy.
         */
        wait_event(wq, atomic_read(&skc->skc_ref) == 0);

        if (skc->skc_flags & KMC_KVMEM) {
                spl_magazine_destroy(skc);
                spl_slab_reclaim(skc);
        } else {
                ASSERT(skc->skc_flags & KMC_SLAB);
                kmem_cache_destroy(skc->skc_linux_cache);
        }

        spin_lock(&skc->skc_lock);

        /*
         * Validate there are no objects in use and free all the
         * spl_kmem_slab_t, spl_kmem_obj_t, and object buffers.
         */
        ASSERT3U(skc->skc_slab_alloc, ==, 0);
        ASSERT3U(skc->skc_obj_alloc, ==, 0);
        ASSERT3U(skc->skc_slab_total, ==, 0);
        ASSERT3U(skc->skc_obj_total, ==, 0);
        ASSERT3U(skc->skc_obj_emergency, ==, 0);
        ASSERT(list_empty(&skc->skc_complete_list));

        ASSERT3U(percpu_counter_sum(&skc->skc_linux_alloc), ==, 0);
        percpu_counter_destroy(&skc->skc_linux_alloc);

        spin_unlock(&skc->skc_lock);

        kfree(skc->skc_name);
        kfree(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_destroy);

/*
 * Allocate an object from a slab attached to the cache. This is used to
 * repopulate the per-cpu magazine caches in batches when they run low.
 */
static void *
spl_cache_obj(spl_kmem_cache_t *skc, spl_kmem_slab_t *sks)
{
        spl_kmem_obj_t *sko;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(sks->sks_magic == SKS_MAGIC);

        sko = list_entry(sks->sks_free_list.next, spl_kmem_obj_t, sko_list);
        ASSERT(sko->sko_magic == SKO_MAGIC);
        ASSERT(sko->sko_addr != NULL);

        /* Remove from sks_free_list */
        list_del_init(&sko->sko_list);

        sks->sks_age = jiffies;
        sks->sks_ref++;
        skc->skc_obj_alloc++;

        /* Track max obj usage statistics */
        if (skc->skc_obj_alloc > skc->skc_obj_max)
                skc->skc_obj_max = skc->skc_obj_alloc;

        /* Track max slab usage statistics */
        if (sks->sks_ref == 1) {
                skc->skc_slab_alloc++;

                if (skc->skc_slab_alloc > skc->skc_slab_max)
                        skc->skc_slab_max = skc->skc_slab_alloc;
        }

        return (sko->sko_addr);
}

/*
 * Generic slab allocation function to be run by the global work queues.
 * It is responsible for allocating a new slab, linking it into the list
 * of partial slabs, and then waking any waiters.
 */
static int
__spl_cache_grow(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_slab_t *sks;

        fstrans_cookie_t cookie = spl_fstrans_mark();
        sks = spl_slab_alloc(skc, flags);
        spl_fstrans_unmark(cookie);

        spin_lock(&skc->skc_lock);
        if (sks) {
                skc->skc_slab_total++;
                skc->skc_obj_total += sks->sks_objs;
                list_add_tail(&sks->sks_list, &skc->skc_partial_list);

                smp_mb__before_atomic();
                clear_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
                smp_mb__after_atomic();
        }
        spin_unlock(&skc->skc_lock);

        return (sks == NULL ? -ENOMEM : 0);
}

static void
spl_cache_grow_work(void *data)
{
        spl_kmem_alloc_t *ska = (spl_kmem_alloc_t *)data;
        spl_kmem_cache_t *skc = ska->ska_cache;

        int error = __spl_cache_grow(skc, ska->ska_flags);

        atomic_dec(&skc->skc_ref);
        smp_mb__before_atomic();
        clear_bit(KMC_BIT_GROWING, &skc->skc_flags);
        smp_mb__after_atomic();
        if (error == 0)
                wake_up_all(&skc->skc_waitq);

        kfree(ska);
}

/*
 * Returns non-zero when a new slab should be available.
 */
static int
spl_cache_grow_wait(spl_kmem_cache_t *skc)
{
        return (!test_bit(KMC_BIT_GROWING, &skc->skc_flags));
}

/*
 * No available objects on any slabs, create a new slab. Note that this
 * functionality is disabled for KMC_SLAB caches which are backed by the
 * Linux slab.
 */
static int
spl_cache_grow(spl_kmem_cache_t *skc, int flags, void **obj)
{
        int remaining, rc = 0;

        ASSERT0(flags & ~KM_PUBLIC_MASK);
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT((skc->skc_flags & KMC_SLAB) == 0);
        might_sleep();
        *obj = NULL;

        /*
         * Before allocating a new slab wait for any reaping to complete and
         * then return so the local magazine can be rechecked for new objects.
         */
        if (test_bit(KMC_BIT_REAPING, &skc->skc_flags)) {
                rc = spl_wait_on_bit(&skc->skc_flags, KMC_BIT_REAPING,
                    TASK_UNINTERRUPTIBLE);
                return (rc ? rc : -EAGAIN);
        }

        /*
         * Note: It would be nice to reduce the overhead of context switch
         * and improve NUMA locality, by trying to allocate a new slab in the
         * current process context with KM_NOSLEEP flag.
         *
         * However, this can't be applied to vmem/kvmem due to a bug that
         * spl_vmalloc() doesn't honor gfp flags in page table allocation.
         */

        /*
         * This is handled by dispatching a work request to the global work
         * queue. This allows us to asynchronously allocate a new slab while
         * retaining the ability to safely fall back to smaller synchronous
         * allocations to ensure forward progress is always maintained.
         */
        if (test_and_set_bit(KMC_BIT_GROWING, &skc->skc_flags) == 0) {
                spl_kmem_alloc_t *ska;

                ska = kmalloc(sizeof (*ska), kmem_flags_convert(flags));
                if (ska == NULL) {
                        clear_bit_unlock(KMC_BIT_GROWING, &skc->skc_flags);
                        smp_mb__after_atomic();
                        wake_up_all(&skc->skc_waitq);
                        return (-ENOMEM);
                }

                atomic_inc(&skc->skc_ref);
                ska->ska_cache = skc;
                ska->ska_flags = flags;
                taskq_init_ent(&ska->ska_tqe);
                taskq_dispatch_ent(spl_kmem_cache_taskq,
                    spl_cache_grow_work, ska, 0, &ska->ska_tqe);
        }

        /*
         * The goal here is to only detect the rare case where a virtual slab
         * allocation has deadlocked. We must be careful to minimize the use
         * of emergency objects which are more expensive to track. Therefore,
         * we set a very long timeout for the asynchronous allocation and if
         * the timeout is reached the cache is flagged as deadlocked. From
         * this point only new emergency objects will be allocated until the
         * asynchronous allocation completes and clears the deadlocked flag.
         */
        if (test_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags)) {
                rc = spl_emergency_alloc(skc, flags, obj);
        } else {
                remaining = wait_event_timeout(skc->skc_waitq,
                    spl_cache_grow_wait(skc), HZ / 10);

                if (!remaining) {
                        spin_lock(&skc->skc_lock);
                        if (test_bit(KMC_BIT_GROWING, &skc->skc_flags)) {
                                set_bit(KMC_BIT_DEADLOCKED, &skc->skc_flags);
                                skc->skc_obj_deadlock++;
                        }
                        spin_unlock(&skc->skc_lock);
                }

                rc = -ENOMEM;
        }

        return (rc);
}

/*
 * Refill a per-cpu magazine with objects from the slabs for this cache.
 * Ideally the magazine can be repopulated using existing objects which have
 * been released, however if we are unable to locate enough free objects new
 * slabs of objects will be created. On success NULL is returned, otherwise
 * the address of a single emergency object is returned for use by the caller.
 */
static void *
spl_cache_refill(spl_kmem_cache_t *skc, spl_kmem_magazine_t *skm, int flags)
{
        spl_kmem_slab_t *sks;
        int count = 0, rc, refill;
        void *obj = NULL;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(skm->skm_magic == SKM_MAGIC);

        refill = MIN(skm->skm_refill, skm->skm_size - skm->skm_avail);
        spin_lock(&skc->skc_lock);

        while (refill > 0) {
                /* No slabs available we may need to grow the cache */
                if (list_empty(&skc->skc_partial_list)) {
                        spin_unlock(&skc->skc_lock);

                        local_irq_enable();
                        rc = spl_cache_grow(skc, flags, &obj);
                        local_irq_disable();

                        /* Emergency object for immediate use by caller */
                        if (rc == 0 && obj != NULL)
                                return (obj);

                        if (rc)
                                goto out;

                        /* Rescheduled to different CPU skm is not local */
                        if (skm != skc->skc_mag[smp_processor_id()])
                                goto out;

                        /*
                         * Potentially rescheduled to the same CPU but
                         * allocations may have occurred from this CPU while
                         * we were sleeping so recalculate max refill.
                         */
                        refill = MIN(refill, skm->skm_size - skm->skm_avail);

                        spin_lock(&skc->skc_lock);
                        continue;
                }

                /* Grab the next available slab */
                sks = list_entry((&skc->skc_partial_list)->next,
                    spl_kmem_slab_t, sks_list);
                ASSERT(sks->sks_magic == SKS_MAGIC);
                ASSERT(sks->sks_ref < sks->sks_objs);
                ASSERT(!list_empty(&sks->sks_free_list));

                /*
                 * Consume as many objects as needed to refill the requested
                 * cache. We must also be careful not to overfill it.
                 */
                while (sks->sks_ref < sks->sks_objs && refill-- > 0 &&
                    ++count) {
                        ASSERT(skm->skm_avail < skm->skm_size);
                        ASSERT(count < skm->skm_size);
                        skm->skm_objs[skm->skm_avail++] =
                            spl_cache_obj(skc, sks);
                }

                /* Move slab to skc_complete_list when full */
                if (sks->sks_ref == sks->sks_objs) {
                        list_del(&sks->sks_list);
                        list_add(&sks->sks_list, &skc->skc_complete_list);
                }
        }

        spin_unlock(&skc->skc_lock);
out:
        return (NULL);
}

/*
 * Release an object back to the slab from which it came.
 */
static void
spl_cache_shrink(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_slab_t *sks = NULL;
        spl_kmem_obj_t *sko = NULL;

        ASSERT(skc->skc_magic == SKC_MAGIC);

        sko = spl_sko_from_obj(skc, obj);
        ASSERT(sko->sko_magic == SKO_MAGIC);
        sks = sko->sko_slab;
        ASSERT(sks->sks_magic == SKS_MAGIC);
        ASSERT(sks->sks_cache == skc);
        list_add(&sko->sko_list, &sks->sks_free_list);

        sks->sks_age = jiffies;
        sks->sks_ref--;
        skc->skc_obj_alloc--;

        /*
         * Move slab to skc_partial_list when no longer full. Slabs
         * are added to the head to keep the partial list in quasi-full
         * sorted order. Fuller at the head, emptier at the tail.
         */
        if (sks->sks_ref == (sks->sks_objs - 1)) {
                list_del(&sks->sks_list);
                list_add(&sks->sks_list, &skc->skc_partial_list);
        }

        /*
         * Move empty slabs to the end of the partial list so
         * they can be easily found and freed during reclamation.
         */
        if (sks->sks_ref == 0) {
                list_del(&sks->sks_list);
                list_add_tail(&sks->sks_list, &skc->skc_partial_list);
                skc->skc_slab_alloc--;
        }
}

/*
 * Allocate an object from the per-cpu magazine, or if the magazine
 * is empty directly allocate from a slab and repopulate the magazine.
 */
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
        spl_kmem_magazine_t *skm;
        void *obj = NULL;

        ASSERT0(flags & ~KM_PUBLIC_MASK);
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        /*
         * Allocate directly from a Linux slab. All optimizations are left
         * to the underlying cache; we only need to guarantee that KM_SLEEP
         * callers will never fail.
         */
        if (skc->skc_flags & KMC_SLAB) {
                struct kmem_cache *slc = skc->skc_linux_cache;
                do {
                        obj = kmem_cache_alloc(slc, kmem_flags_convert(flags));
                } while ((obj == NULL) && !(flags & KM_NOSLEEP));

                if (obj != NULL) {
                        /*
                         * Even though we leave everything up to the
                         * underlying cache we still keep track of
                         * how many objects we've allocated in it for
                         * better debuggability.
                         */
                        percpu_counter_inc(&skc->skc_linux_alloc);
                }
                goto ret;
        }

        local_irq_disable();

restart:
        /*
         * Safe to update per-cpu structure without lock, but
         * in the restart case we must be careful to reacquire
         * the local magazine since this may have changed
         * when we need to grow the cache.
         */
        skm = skc->skc_mag[smp_processor_id()];
        ASSERT(skm->skm_magic == SKM_MAGIC);

        if (likely(skm->skm_avail)) {
                /* Object available in CPU cache, use it */
                obj = skm->skm_objs[--skm->skm_avail];
        } else {
                obj = spl_cache_refill(skc, skm, flags);
                if ((obj == NULL) && !(flags & KM_NOSLEEP))
                        goto restart;

                local_irq_enable();
                goto ret;
        }

        local_irq_enable();
        ASSERT(obj);
        ASSERT(IS_P2ALIGNED(obj, skc->skc_obj_align));

ret:
        /* Pre-emptively migrate object to CPU L1 cache */
        if (obj) {
                if (obj && skc->skc_ctor)
                        skc->skc_ctor(obj, skc->skc_private, flags);
                else
                        prefetchw(obj);
        }

        return (obj);
}
EXPORT_SYMBOL(spl_kmem_cache_alloc);

/*
 * Free an object back to the local per-cpu magazine; there is no
 * guarantee that this is the same magazine the object was originally
 * allocated from. We may need to flush entries from the magazine
 * back to the slabs to make space.
 */
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
        spl_kmem_magazine_t *skm;
        unsigned long flags;
        int do_reclaim = 0;
        int do_emergency = 0;

        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        /*
         * Run the destructor
         */
        if (skc->skc_dtor)
                skc->skc_dtor(obj, skc->skc_private);

        /*
         * Free the object back to the underlying Linux slab.
         */
        if (skc->skc_flags & KMC_SLAB) {
                kmem_cache_free(skc->skc_linux_cache, obj);
                percpu_counter_dec(&skc->skc_linux_alloc);
                return;
        }

        /*
         * While a cache has outstanding emergency objects all freed objects
         * must be checked. However, since emergency objects will never use
         * a virtual address these objects can be safely excluded as an
         * optimization.
         */
        if (!is_vmalloc_addr(obj)) {
                spin_lock(&skc->skc_lock);
                do_emergency = (skc->skc_obj_emergency > 0);
                spin_unlock(&skc->skc_lock);

                if (do_emergency && (spl_emergency_free(skc, obj) == 0))
                        return;
        }

        local_irq_save(flags);

        /*
         * Safe to update per-cpu structure without lock, but since
         * no remote memory allocation tracking is being performed
         * it is entirely possible to allocate an object from one
         * CPU cache and return it to another.
         */
        skm = skc->skc_mag[smp_processor_id()];
        ASSERT(skm->skm_magic == SKM_MAGIC);

        /*
         * Per-CPU cache full, flush it to make space for this object,
         * this may result in an empty slab which can be reclaimed once
         * interrupts are re-enabled.
         */
        if (unlikely(skm->skm_avail >= skm->skm_size)) {
                spl_cache_flush(skc, skm, skm->skm_refill);
                do_reclaim = 1;
        }

        /* Available space in cache, use it */
        skm->skm_objs[skm->skm_avail++] = obj;

        local_irq_restore(flags);

        if (do_reclaim)
                spl_slab_reclaim(skc);
}
EXPORT_SYMBOL(spl_kmem_cache_free);

/*
 * Depending on how many and which objects are released it may simply
 * repopulate the local magazine which will then need to age-out. Objects
 * which cannot fit in the magazine will be released back to their slabs
 * which will also need to age out before being released. This is all just
 * best effort and we do not want to thrash creating and destroying slabs.
 */
void
spl_kmem_cache_reap_now(spl_kmem_cache_t *skc)
{
        ASSERT(skc->skc_magic == SKC_MAGIC);
        ASSERT(!test_bit(KMC_BIT_DESTROY, &skc->skc_flags));

        if (skc->skc_flags & KMC_SLAB)
                return;

        atomic_inc(&skc->skc_ref);

        /*
         * Prevent concurrent cache reaping when contended.
         */
        if (test_and_set_bit(KMC_BIT_REAPING, &skc->skc_flags))
                goto out;

        /* Reclaim from the magazine and free all now empty slabs. */
        unsigned long irq_flags;
        local_irq_save(irq_flags);
        spl_kmem_magazine_t *skm = skc->skc_mag[smp_processor_id()];
        spl_cache_flush(skc, skm, skm->skm_avail);
        local_irq_restore(irq_flags);

        spl_slab_reclaim(skc);
        clear_bit_unlock(KMC_BIT_REAPING, &skc->skc_flags);
        smp_mb__after_atomic();
        wake_up_bit(&skc->skc_flags, KMC_BIT_REAPING);
out:
        atomic_dec(&skc->skc_ref);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_now);

/*
 * This is stubbed out for code consistency with other platforms. There
 * is existing logic to prevent concurrent reaping so while this is ugly
 * it should do no harm.
 */
int
spl_kmem_cache_reap_active(void)
{
        return (0);
}
EXPORT_SYMBOL(spl_kmem_cache_reap_active);

/*
 * Reap all free slabs from all registered caches.
 */
void
spl_kmem_reap(void)
{
        spl_kmem_cache_t *skc = NULL;

        down_read(&spl_kmem_cache_sem);
        list_for_each_entry(skc, &spl_kmem_cache_list, skc_list) {
                spl_kmem_cache_reap_now(skc);
        }
        up_read(&spl_kmem_cache_sem);
}
EXPORT_SYMBOL(spl_kmem_reap);

int
spl_kmem_cache_init(void)
{
        init_rwsem(&spl_kmem_cache_sem);
        INIT_LIST_HEAD(&spl_kmem_cache_list);
        spl_kmem_cache_taskq = taskq_create("spl_kmem_cache",
            spl_kmem_cache_kmem_threads, maxclsyspri,
            spl_kmem_cache_kmem_threads * 8, INT_MAX,
            TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

        return (0);
}

void
spl_kmem_cache_fini(void)
{
        taskq_destroy(spl_kmem_cache_taskq);
}