// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
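
/*
 * Illustrative sketch of the free-block chain described above (this is an
 * explanatory comment, not part of the allocator): with 64-byte blocks in
 * a 4096-byte allocation, each free block stores the offset of the next
 * free block in its own first bytes, as set up by pool_initialise_page()
 * below:
 *
 *	page->offset = 0
 *	offset   0: next =  64
 *	offset  64: next = 128
 *	...until a stored offset reaches pool->allocation, which ends
 *	the chain.
 *
 * dma_pool_alloc() pops the head of this chain; dma_pool_free() pushes
 * the freed block back on as the new head.
 */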

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
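
/*
 * Usage sketch (hypothetical driver code, not part of this file): create
 * a pool of 64-byte blocks, 16-byte aligned, that never cross a 4KB
 * boundary.  "mydev_desc" and the error path are illustrative only.
 *
 *	struct dma_pool *pool;
 *
 *	pool = dma_pool_create("mydev_desc", &pdev->dev, 64, 16, 4096);
 *	if (!pool)
 *		return -ENOMEM;
 */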

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
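
/*
 * Teardown sketch (illustrative, not part of this file): by the time a
 * driver calls this, every block must have been handed back with
 * dma_pool_free().  Passing a NULL pool is silently ignored.
 *
 *	dma_pool_destroy(pool);
 *	pool = NULL;
 */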

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
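
/*
 * Allocation sketch (hypothetical driver code, not part of this file):
 * the returned CPU address is for the driver, the address reported
 * through @handle is what the device sees.  mydev_post_desc() is a
 * made-up helper.
 *
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL | __GFP_ZERO, &dma);
 *	if (!desc)
 *		return -ENOMEM;
 *	mydev_post_desc(mydev, dma);	(the device is given @dma,
 *					 never @desc)
 */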

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
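
/*
 * Counterpart sketch to the allocation example above (illustrative
 * only): the block is returned with the same vaddr/dma pair that
 * dma_pool_alloc() produced.
 *
 *	dma_pool_free(pool, desc, dma);
 */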

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
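
/*
 * Managed-variant sketch (hypothetical probe code, not part of this
 * file): devres destroys the pool automatically on driver detach, so no
 * explicit dma_pool_destroy() is needed.
 *
 *	pool = dmam_pool_create("mydev_desc", &pdev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 */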

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);