/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory, etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has
 * to be taken. So any user relying on locklessness has to ensure
 * that sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses
 * the allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
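
/*
 * A minimal usage sketch (illustrative only; vaddr and the sizes are
 * hypothetical, error unwinding is elided, and gen_pool_add() is the
 * genalloc.h wrapper around gen_pool_add_virt() below). Each bitmap
 * bit covers 2^min_alloc_order bytes, so this pool hands out memory
 * in 256-byte granules:
 *
 *	pool = gen_pool_create(8, -1);
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add(pool, (unsigned long)vaddr, SZ_4K, -1))
 *		return -ENOMEM;
 *	addr = gen_pool_alloc(pool, 512);
 *	...
 *	gen_pool_free(pool, addr, 512);
 *	gen_pool_destroy(pool);
 */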

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

/*
 * Lock-lessly set the bits in @mask_to_set at @addr: retry the cmpxchg
 * until it succeeds, or fail with -EBUSY as soon as any of the
 * requested bits is observed to be already set.
 */
static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

/*
 * Lock-lessly clear the bits in @mask_to_clear at @addr: retry the
 * cmpxchg until it succeeds, or fail with -EBUSY unless all of the
 * requested bits are currently set.
 */
static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}
73 | ||
74 | /* | |
75 | * bitmap_set_ll - set the specified number of bits at the specified position | |
76 | * @map: pointer to a bitmap | |
77 | * @start: a bit position in @map | |
78 | * @nr: number of bits to set | |
79 | * | |
80 | * Set @nr bits start from @start in @map lock-lessly. Several users | |
81 | * can set/clear the same bitmap simultaneously without lock. If two | |
82 | * users set the same bit, one user will return remain bits, otherwise | |
83 | * return 0. | |
84 | */ | |
85 | static int bitmap_set_ll(unsigned long *map, int start, int nr) | |
86 | { | |
87 | unsigned long *p = map + BIT_WORD(start); | |
88 | const int size = start + nr; | |
89 | int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG); | |
90 | unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start); | |
91 | ||
92 | while (nr - bits_to_set >= 0) { | |
93 | if (set_bits_ll(p, mask_to_set)) | |
94 | return nr; | |
95 | nr -= bits_to_set; | |
96 | bits_to_set = BITS_PER_LONG; | |
97 | mask_to_set = ~0UL; | |
98 | p++; | |
99 | } | |
100 | if (nr) { | |
101 | mask_to_set &= BITMAP_LAST_WORD_MASK(size); | |
102 | if (set_bits_ll(p, mask_to_set)) | |
103 | return nr; | |
104 | } | |
105 | ||
106 | return 0; | |
107 | } | |
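
/*
 * Worked example (hypothetical, 64-bit words): bitmap_set_ll(map, 62, 4)
 * first tries set_bits_ll() on word 0 with BITMAP_FIRST_WORD_MASK(62)
 * (bits 62-63), then on word 1 with the mask narrowed by
 * BITMAP_LAST_WORD_MASK(66) (bits 0-1). If the second step loses a
 * race, the caller sees a nonzero return and must roll back the bits
 * already set, as gen_pool_alloc_algo() below does with
 * bitmap_clear_ll().
 */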

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map lock-lessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users race on the same bit, the loser returns the number of bits
 * that remain to be cleared; on success the return value is 0.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
		pool->name = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		      size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
		     BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);
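
/*
 * Sketch of the virt/phys pairing (va and pa are hypothetical): a chunk
 * added with gen_pool_add_virt(pool, va, pa, SZ_4K, -1) lets a later
 * allocation be translated back for device use:
 *
 *	addr = gen_pool_alloc(pool, 256);
 *	dma_addr = gen_pool_virt_to_phys(pool, addr);
 *
 * gen_pool_dma_alloc() below combines these two steps.
 */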

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);	/* nothing may still be allocated */

		kfree(chunk);
	}
	kfree_const(pool->name);
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	return gen_pool_alloc_algo(pool, size, pool->algo, pool->data);
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_alloc_algo - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @algo: algorithm passed from caller
 * @data: data passed to algorithm
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc_algo(struct gen_pool *pool, size_t size,
		genpool_algo_t algo, void *data)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = algo(chunk->bits, end_bit, start_bit,
				 nbits, data, pool);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			/* Lost a race: roll back the bits we did set, retry */
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_algo);
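
/*
 * Illustrative call with an explicit algorithm (assumes a pool created
 * elsewhere): allocate 1 KiB aligned to 256 bytes using the
 * gen_pool_first_fit_align() helper defined later in this file.
 *
 *	struct genpool_data_align align_data = { .align = 256 };
 *
 *	addr = gen_pool_alloc_algo(pool, SZ_1K,
 *				   gen_pool_first_fit_align, &align_data);
 */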

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
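
/*
 * Typical use (a sketch; pool, base and BUF_ADDR_REG are hypothetical):
 * carve a buffer out of device-local memory and hand its bus address,
 * not the returned virtual address, to the hardware.
 *
 *	dma_addr_t dma;
 *	void *va = gen_pool_dma_alloc(pool, 256, &dma);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	writel(lower_32_bits(dma), base + BUF_ADDR_REG);
 */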

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. The @func is
 * called with rcu_read_lock held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);
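
/*
 * Example callback (hypothetical): tally the number of chunks backing a
 * pool. The callback runs under rcu_read_lock(), so it must not sleep.
 *
 *	static void count_chunk(struct gen_pool *pool,
 *				struct gen_pool_chunk *chunk, void *data)
 *	{
 *		(*(int *)data)++;
 *	}
 *
 *	int nr_chunks = 0;
 *
 *	gen_pool_for_each_chunk(pool, count_chunk, &nr_chunks);
 */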

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
		      size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space of
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size of
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change the allocation algorithm of
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_align - find the first available region
 * of memory matching the size requirement (alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: data for alignment
 * @pool: pool to get order from
 */
unsigned long gen_pool_first_fit_align(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	struct genpool_data_align *alignment;
	unsigned long align_mask;
	int order;

	alignment = data;
	order = pool->min_alloc_order;
	align_mask = ((alignment->align + (1UL << order) - 1) >> order) - 1;
	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_align);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data, struct gen_pool *pool)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 * @pool: pool to find the fit region memory from
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data,
		struct gen_pool *pool)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);
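
/*
 * Worked example (hypothetical bitmap): with free runs of length 8 at
 * bit 0 and length 3 at bit 20, a request for nr = 3 returns bit 0
 * under gen_pool_first_fit() but bit 20 here, since the 3-bit run is
 * the smallest one that still fits; the search also stops early once
 * len == nr, because no tighter fit is possible.
 */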

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

static int devm_gen_pool_match(struct device *dev, void *res, void *data)
{
	struct gen_pool **p = res;

	/* NULL data matches only a pool without an assigned name */
	if (!data && !(*p)->name)
		return 1;

	if (!data || !(*p)->name)
		return 0;

	return !strcmp((*p)->name, data);
}

/**
 * gen_pool_get - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *gen_pool_get(struct device *dev, const char *name)
{
	struct gen_pool **p;

	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
			(void *)name);
	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(gen_pool_get);

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
 * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
				      int nid, const char *name)
{
	struct gen_pool **ptr, *pool;
	const char *pool_name = NULL;

	/* Check that genpool to be created is uniquely addressed on device */
	if (gen_pool_get(dev, name))
		return ERR_PTR(-EINVAL);

	if (name) {
		pool_name = kstrdup_const(name, GFP_KERNEL);
		if (!pool_name)
			return ERR_PTR(-ENOMEM);
	}

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		goto free_pool_name;

	pool = gen_pool_create(min_alloc_order, nid);
	if (!pool)
		goto free_devres;

	*ptr = pool;
	pool->name = pool_name;
	devres_add(dev, ptr);

	return pool;

free_devres:
	devres_free(ptr);
free_pool_name:
	kfree_const(pool_name);

	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(devm_gen_pool_create);
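
/*
 * Sketch of managed creation in a driver probe path (foo_probe and the
 * pool name are hypothetical; the pool is destroyed automatically on
 * driver detach):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct gen_pool *pool;
 *
 *		pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *					    NUMA_NO_NODE, "foo-sram");
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *		...
 *	}
 */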

#ifdef CONFIG_OF
/**
 * of_gen_pool_get - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_gen_pool_get(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool, *parent;
	const char *name = NULL;
	struct gen_pool *pool = NULL;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;

	pdev = of_find_device_by_node(np_pool);
	if (!pdev) {
		/* Check if named gen_pool is created by parent node device */
		parent = of_get_parent(np_pool);
		pdev = of_find_device_by_node(parent);
		of_node_put(parent);

		of_property_read_string(np_pool, "label", &name);
		if (!name)
			name = np_pool->name;
	}
	if (pdev)
		pool = gen_pool_get(&pdev->dev, name);
	of_node_put(np_pool);

	return pool;
}
EXPORT_SYMBOL_GPL(of_gen_pool_get);
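
/*
 * Illustrative device-tree usage (node and property names hypothetical):
 *
 *	sram: sram@40000000 {
 *		compatible = "mmio-sram";
 *	};
 *
 *	codec {
 *		iram = <&sram>;
 *	};
 *
 * The codec driver would then look the pool up with:
 *
 *	pool = of_gen_pool_get(dev->of_node, "iram", 0);
 */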
#endif /* CONFIG_OF */