// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
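
/*
 * Illustrative driver-side lifecycle (a sketch only; the pool name, device,
 * block size, alignment and error handling below are hypothetical, not part
 * of this file):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *cpu;
 *
 *	pool = dma_pool_create("qtds", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (cpu) {
 *		... hand 'dma' to the device, use 'cpu' from the CPU ...
 *		dma_pool_free(pool, cpu, dma);
 *	}
 *	dma_pool_destroy(pool);
 */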

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
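
/*
 * Worked example of the rounding in dma_pool_create() (the numbers are
 * illustrative): dma_pool_create("x", dev, 24, 16, 0) rounds size up to
 * the alignment (24 -> 32), then picks allocation = max(32, PAGE_SIZE).
 * With 4 KiB pages that gives 4096/32 = 128 blocks per underlying
 * dma_alloc_coherent() page, and boundary defaults to the whole
 * allocation, so no block can cross it.
 */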

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
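
/*
 * Free-list layout sketch for pool_initialise_page() (illustrative
 * numbers): with pool->size = 448, pool->boundary = 1024 and
 * pool->allocation = 4096, the chain of free offsets is
 * 0 -> 448 -> 1024 -> 1472 -> 2048 -> 2496 -> 3072 -> 3520, and the last
 * block stores 4096 to terminate the list.  Bytes 896..1023 (and the like)
 * stay unused so no block straddles a 1024-byte boundary; each free
 * block's first 4 bytes hold the offset of the next free block.
 */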

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				pr_err("dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
				       pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (mem_flags & __GFP_ZERO)
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
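
/*
 * Illustrative dma_pool_alloc() call sites ('pool' and 'dma' are
 * hypothetical):
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);	(may sleep)
 *	cpu = dma_pool_alloc(pool, GFP_ATOMIC, &dma);	(e.g. from IRQ context)
 *
 * Passing __GFP_ZERO zeroes the block after the free-list bookkeeping and
 * debug poisoning are done; the dma_pool_zalloc() wrapper in
 * <linux/dmapool.h> builds on exactly this.
 */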

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
				pool->name, vaddr, &dma);
		else
			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
			       pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
					pool->name, &dma);
			else
				pr_err("dma_pool_free %s, dma %pad already free\n",
				       pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
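
/*
 * The DMAPOOL_DEBUG checks above catch the common misuses; illustratively
 * (hypothetical values):
 *
 *	cpu = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	dma_pool_free(pool, cpu + 4, dma);	triggers "bad vaddr"
 *	dma_pool_free(pool, cpu, dma);
 *	dma_pool_free(pool, cpu, dma);		triggers "already free"
 */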

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
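
/*
 * Illustrative managed usage (a sketch; the probe function and its device
 * are hypothetical):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo", &pdev->dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		... no explicit destroy: devres frees the pool on detach ...
 *	}
 */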

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);