// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
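
/*
 * Illustrative sketch (not part of the original file): with a block size
 * of 96 bytes and the default allocation of PAGE_SIZE (4096 here), each
 * fresh page is chained by pool_initialise_page() below, which stores the
 * offset of the next free block in the first bytes of each block:
 *
 *      0 -> 96 -> 192 -> ... -> 3936 -> 4096
 *
 * An offset equal to pool->allocation (4096) terminates the chain; a
 * candidate block that would touch or cross the next boundary is skipped
 * by jumping straight to the boundary.  page->offset always names the
 * first free block in the page.
 */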

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);