/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This software may be redistributed and/or modified under the terms of
 * the GNU General Public License ("GPL") version 2 as published by the
 * Free Software Foundation.
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
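
/*
 * Illustrative sketch (not part of the original file): assuming size = 64
 * and allocation = PAGE_SIZE = 4096, a freshly initialised page threads
 * its free list through the blocks themselves; the first word of each
 * free block stores the offset of the next one:
 *
 *	offset    0: next =   64
 *	offset   64: next =  128
 *	...
 *	offset 3968: next = 4096	(reaching allocation ends the chain)
 *
 * page->offset holds the head of this list; dma_pool_alloc() pops the
 * head and dma_pool_free() pushes a block back, so the list is LIFO.
 */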

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
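
/*
 * Example usage (illustrative sketch, not part of the original file; the
 * pool name, block size, and device are hypothetical):
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("my_descs", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 * With boundary == 0, blocks may cross any power-of-two line; passing
 * e.g. 4096 instead would keep each block inside one 4 KB window.
 */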
1da177e4 | 207 | |
a35a3455 MW |
208 | static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page) |
209 | { | |
210 | unsigned int offset = 0; | |
e34f44b3 | 211 | unsigned int next_boundary = pool->boundary; |
a35a3455 MW |
212 | |
213 | do { | |
214 | unsigned int next = offset + pool->size; | |
e34f44b3 MW |
215 | if (unlikely((next + pool->size) >= next_boundary)) { |
216 | next = next_boundary; | |
217 | next_boundary += pool->boundary; | |
218 | } | |
a35a3455 MW |
219 | *(int *)(page->vaddr + offset) = next; |
220 | offset = next; | |
221 | } while (offset < pool->allocation); | |
222 | } | |
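
/*
 * Worked example for pool_initialise_page() (illustrative, not part of
 * the original file): assuming size = 96, boundary = 256 and
 * allocation = 4096, each 256-byte window holds blocks at window offsets
 * 0 and 96; a third block, at 192, would cross the 256-byte line, so the
 * chain skips ahead to the next window and the last 64 bytes of every
 * window go unused:
 *
 *	chain: 0 -> 96 -> 256 -> 352 -> 512 -> ...
 */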

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				pr_err("dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
				       pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (mem_flags & __GFP_ZERO)
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
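
/*
 * Example usage (illustrative sketch, not part of the original file):
 *
 *	dma_addr_t dma;
 *	void *cpu_addr;
 *
 *	cpu_addr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *
 * The CPU writes the block through cpu_addr; the device addresses the
 * same memory through the bus address reported in dma.
 */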

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
				pool->name, vaddr, &dma);
		else
			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
			       pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
					pool->name, &dma);
			else
				pr_err("dma_pool_free %s, dma %pad already free\n",
				       pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
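
/*
 * Example teardown (illustrative, not part of the original file): every
 * block must be returned before the pool is destroyed, since
 * dma_pool_destroy() complains about, and then leaks, still-busy pages:
 *
 *	dma_pool_free(pool, cpu_addr, dma);
 *	dma_pool_destroy(pool);
 */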

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
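
/*
 * Example usage (illustrative sketch, not part of the original file):
 * with the managed variant a driver can omit explicit teardown, because
 * devres destroys the pool automatically on driver detach:
 *
 *	pool = dmam_pool_create("my_descs", &pdev->dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */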

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);