/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 *  debugging by David Rientjes, Copyright (C) 2015
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
#include <linux/export.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>
#include "slab.h"
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
static void poison_error(mempool_t *pool, void *element, size_t size,
			 size_t byte)
{
	const int nr = pool->curr_nr;
	const int start = max_t(int, byte - (BITS_PER_LONG / 8), 0);
	const int end = min_t(int, byte + (BITS_PER_LONG / 8), size);
	int i;

	pr_err("BUG: mempool element poison mismatch\n");
	pr_err("Mempool %p size %zu\n", pool, size);
	pr_err(" nr=%d @ %p: %s0x", nr, element, start > 0 ? "... " : "");
	for (i = start; i < end; i++)
		pr_cont("%x ", *(u8 *)(element + i));
	pr_cont("%s\n", end < size ? "..." : "");
	dump_stack();
}

static void __check_element(mempool_t *pool, void *element, size_t size)
{
	u8 *obj = element;
	size_t i;

	for (i = 0; i < size; i++) {
		u8 exp = (i < size - 1) ? POISON_FREE : POISON_END;

		if (obj[i] != exp) {
			poison_error(pool, element, size, i);
			return;
		}
	}
	memset(obj, POISON_INUSE, size);
}

static void check_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->free == mempool_free_slab || pool->free == mempool_kfree)
		__check_element(pool, element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->free == mempool_free_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__check_element(pool, addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}

static void __poison_element(void *element, size_t size)
{
	u8 *obj = element;

	memset(obj, POISON_FREE, size - 1);
	obj[size - 1] = POISON_END;
}

static void poison_element(mempool_t *pool, void *element)
{
	/* Mempools backed by slab allocator */
	if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
		__poison_element(element, ksize(element));

	/* Mempools backed by page allocator */
	if (pool->alloc == mempool_alloc_pages) {
		int order = (int)(long)pool->pool_data;
		void *addr = kmap_atomic((struct page *)element);

		__poison_element(addr, 1UL << (PAGE_SHIFT + order));
		kunmap_atomic(addr);
	}
}
#else /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
static inline void check_element(mempool_t *pool, void *element)
{
}
static inline void poison_element(mempool_t *pool, void *element)
{
}
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB_DEBUG_ON */
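
/*
 * Net effect of the debug hooks above: while an element sits in the
 * pool it carries POISON_FREE (0x6b) in every byte except the last,
 * which holds POISON_END (0xa5); when the element is handed out again
 * the pattern is verified and the object is repainted with
 * POISON_INUSE (0x5a).  An 8-byte element at rest therefore reads:
 *
 *	6b 6b 6b 6b 6b 6b 6b a5
 */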

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	poison_element(pool, element);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	void *element = pool->elements[--pool->curr_nr];

	BUG_ON(pool->curr_nr < 0);
	check_element(pool, element);
	return element;
}

/**
 * mempool_destroy - deallocate a memory pool
 * @pool: pointer to the memory pool which was allocated via
 *        mempool_create().
 *
 * Free all reserved elements in @pool and @pool itself.  This function
 * only sleeps if the free_fn() function sleeps.
 */
void mempool_destroy(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}
EXPORT_SYMBOL(mempool_destroy);

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both the alloc_fn()
 * and the free_fn() functions might sleep - as long as the mempool_alloc()
 * function is not called from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
			  mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data,
				   GFP_KERNEL, NUMA_NO_NODE);
}
EXPORT_SYMBOL(mempool_create);

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			       mempool_free_t *free_fn, void *pool_data,
			       gfp_t gfp_mask, int node_id)
{
	mempool_t *pool;

	pool = kzalloc_node(sizeof(*pool), gfp_mask, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
				      gfp_mask, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(gfp_mask, pool->pool_data);
		if (unlikely(!element)) {
			mempool_destroy(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
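
/*
 * Illustrative sketch, not used anywhere in this file: how a typical
 * caller pairs a kmem_cache of its own with a small reserve so that at
 * least @min_nr objects stay allocatable under memory pressure.  The
 * @cache argument stands in for a hypothetical cache the caller created
 * elsewhere with kmem_cache_create().
 */
static inline mempool_t *example_create_slab_pool(struct kmem_cache *cache,
						  int min_nr)
{
	/* Preallocates min_nr objects; returns NULL only if even that
	 * initial reserve cannot be satisfied. */
	return mempool_create(min_nr, mempool_alloc_slab,
			      mempool_free_slab, cache);
}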

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 * This function may sleep.
 *
 * Note that the caller must guarantee that no mempool_destroy() is
 * called while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);
	might_sleep();

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc_array(new_min_nr, sizeof(*new_elements),
				     GFP_KERNEL);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
	       pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
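
/*
 * Illustrative sketch, not used anywhere in this file: retuning the
 * reserve for a changed workload.  Shrinking releases surplus elements
 * immediately; growing may return before the new reserve is fully
 * built, since the refill also rides on later mempool_free() calls.
 */
static inline int example_retune_pool(mempool_t *pool, int new_min_nr)
{
	return mempool_resize(pool, new_min_nr);	/* 0 or -ENOMEM */
}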

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:     pointer to the memory pool which was allocated via
 *            mempool_create().
 * @gfp_mask: the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 * Note: using __GFP_ZERO is not supported.
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	VM_WARN_ON_ONCE(gfp_mask & __GFP_ZERO);
	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		/* paired with rmb in mempool_free(), read comment there */
		smp_wmb();
		/*
		 * Update the allocation stack trace as this is more useful
		 * for debugging.
		 */
		kmemleak_update_trace(element);
		return element;
	}

	/*
	 * We use gfp mask w/o __GFP_WAIT or IO for the first round.  If
	 * alloc failed with that and @pool was empty, retry immediately.
	 */
	if (gfp_temp != gfp_mask) {
		spin_unlock_irqrestore(&pool->lock, flags);
		gfp_temp = gfp_mask;
		goto repeat_alloc;
	}

	/* We must not sleep if !__GFP_WAIT */
	if (!(gfp_mask & __GFP_WAIT)) {
		spin_unlock_irqrestore(&pool->lock, flags);
		return NULL;
	}

	/* Let's wait for someone else to return an element to @pool */
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);

	spin_unlock_irqrestore(&pool->lock, flags);

	/*
	 * FIXME: this should be io_schedule().  The timeout is there as a
	 * workaround for some DM problems in 2.6.18.
	 */
	io_schedule_timeout(5*HZ);

	finish_wait(&pool->wait, &wait);
	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
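
/*
 * Illustrative sketch, not used anywhere in this file: a typical
 * allocation on an I/O path (e.g. building a bio).  GFP_NOIO allows
 * sleeping but not I/O recursion, so with the preallocated reserve
 * behind it this call cannot return NULL in process context.
 */
static inline void *example_alloc_for_io(mempool_t *pool)
{
	void *element = mempool_alloc(pool, GFP_NOIO);

	/* No NULL check needed: __GFP_WAIT is set, so the allocator
	 * waits for a returned element rather than failing. */
	return element;
}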

/**
 * mempool_free - return an element to the pool.
 * @element: pool element pointer.
 * @pool:    pointer to the memory pool which was allocated via
 *           mempool_create().
 *
 * This function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	/*
	 * Paired with the wmb in mempool_alloc().  The preceding read is
	 * for @element and the following @pool->curr_nr.  This ensures
	 * that the visible value of @pool->curr_nr is from after the
	 * allocation of @element.  This is necessary for fringe cases
	 * where @element was passed to this task without going through
	 * barriers.
	 *
	 * For example, assume @p is %NULL at the beginning and one task
	 * performs "p = mempool_alloc(...);" while another task is doing
	 * "while (!p) cpu_relax(); mempool_free(p, ...);".  This function
	 * may end up using curr_nr value which is from before allocation
	 * of @p without the following rmb.
	 */
	smp_rmb();

	/*
	 * For correctness, we need a test which is guaranteed to trigger
	 * if curr_nr + #allocated == min_nr.  Testing curr_nr < min_nr
	 * without locking achieves that and refilling as soon as possible
	 * is desirable.
	 *
	 * Because curr_nr visible here is always a value after the
	 * allocation of @element, any task which decremented curr_nr below
	 * min_nr is guaranteed to see curr_nr < min_nr unless curr_nr gets
	 * incremented to min_nr afterwards.  If curr_nr gets incremented
	 * to min_nr after the allocation of @element, the elements
	 * allocated after that are subject to the same guarantee.
	 *
	 * Waiters happen iff curr_nr is 0 and the above guarantee also
	 * ensures that there will be frees which return elements to the
	 * pool waking up the waiters.
	 */
	if (unlikely(pool->curr_nr < pool->min_nr)) {
		spin_lock_irqsave(&pool->lock, flags);
		if (likely(pool->curr_nr < pool->min_nr)) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
419 | ||
420 | /* | |
421 | * A commonly used alloc and free fn. | |
422 | */ | |
dd0fc66f | 423 | void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data) |
1da177e4 | 424 | { |
fcc234f8 | 425 | struct kmem_cache *mem = pool_data; |
e244c9e6 | 426 | VM_BUG_ON(mem->ctor); |
1da177e4 LT |
427 | return kmem_cache_alloc(mem, gfp_mask); |
428 | } | |
429 | EXPORT_SYMBOL(mempool_alloc_slab); | |
430 | ||
431 | void mempool_free_slab(void *element, void *pool_data) | |
432 | { | |
fcc234f8 | 433 | struct kmem_cache *mem = pool_data; |
1da177e4 LT |
434 | kmem_cache_free(mem, element); |
435 | } | |
436 | EXPORT_SYMBOL(mempool_free_slab); | |

/*
 * A commonly used alloc and free fn that kmallocs/kfrees the amount of
 * memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)pool_data;

	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
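
/*
 * Illustrative sketch, not used anywhere in this file: for the
 * kmalloc/kfree helpers, @pool_data carries the object size itself,
 * smuggled through the pointer argument.
 */
static inline mempool_t *example_create_kmalloc_pool(int min_nr, size_t size)
{
	return mempool_create(min_nr, mempool_kmalloc, mempool_kfree,
			      (void *)size);
}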

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;

	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;

	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
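
/*
 * Illustrative sketch, not used anywhere in this file: for the page
 * helpers, @pool_data carries the allocation order, so order 0 reserves
 * single pages.  Note that mempool_alloc() then returns struct page
 * pointers, not mapped addresses.
 */
static inline mempool_t *example_create_page_pool(int min_nr, int order)
{
	return mempool_create(min_nr, mempool_alloc_pages,
			      mempool_free_pages, (void *)(long)order);
}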