#include <linux/device.h>
#include <linux/mm.h>
#include <asm/io.h>		/* Needed for i386 to build */
#include <asm/scatterlist.h>	/* Needed for i386 to build */
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/sched.h>

/*
 * Pool allocator ... wraps the dma_alloc_coherent page allocator, so
 * small blocks are easily used by drivers for bus mastering controllers.
 * This should probably be sharing the guts of the slab allocator.
 */

struct dma_pool {	/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t blocks_per_page;
	size_t size;
	struct device *dev;
	size_t allocation;
	char name [32];
	wait_queue_head_t waitq;
	struct list_head pools;
};

struct dma_page {	/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned in_use;
	unsigned long bitmap [0];
};

#define POOL_TIMEOUT_JIFFIES	((100 /* msec */ * HZ) / 1000)
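
/*
 * For reference, a worked example of the timeout above (the HZ value is
 * illustrative, not asserted by this file): with HZ == 250 the macro
 * expands to (100 * 250) / 1000 == 25 jiffies, i.e. roughly 100 msec of
 * sleep per retry in dma_pool_alloc() before the free-block scan restarts.
 */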

static DEFINE_MUTEX (pools_lock);

static ssize_t
show_pools (struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4Zu %4Zu %2u\n",
				pool->name,
				blocks, pages * pool->blocks_per_page,
				pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR (pools, S_IRUGO, show_pools, NULL);
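
/*
 * Example of what the "pools" attribute produced above might look like
 * when read from sysfs (pool names and counts are illustrative only):
 *
 *	poolinfo - 0.1
 *	buffer-2048         1    2 2048  1
 *	buffer-512          7   16  512  2
 *
 * Columns: pool name, blocks in use, total blocks, block size (bytes),
 * and pages currently held by the pool.
 */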

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If allocation is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *
dma_pool_create (const char *name, struct device *dev,
	size_t size, size_t align, size_t allocation)
{
	struct dma_pool *retval;

	if (align == 0)
		align = 1;
	if (size == 0)
		return NULL;
	else if (size < align)
		size = align;
	else if ((size % align) != 0) {
		size += align + 1;
		size &= ~(align - 1);
	}

	if (allocation == 0) {
		if (PAGE_SIZE < size)
			allocation = size;
		else
			allocation = PAGE_SIZE;
		// FIXME: round up for less fragmentation
	} else if (allocation < size)
		return NULL;

	if (!(retval = kmalloc_node (sizeof *retval, GFP_KERNEL, dev_to_node(dev))))
		return retval;

	strlcpy (retval->name, name, sizeof retval->name);

	retval->dev = dev;

	INIT_LIST_HEAD (&retval->page_list);
	spin_lock_init (&retval->lock);
	retval->size = size;
	retval->allocation = allocation;
	retval->blocks_per_page = allocation / size;
	init_waitqueue_head (&retval->waitq);

	if (dev) {
		int ret;

		mutex_lock(&pools_lock);
		if (list_empty (&dev->dma_pools))
			ret = device_create_file (dev, &dev_attr_pools);
		else
			ret = 0;
		/* note:  not currently insisting "name" be unique */
		if (!ret)
			list_add (&retval->pools, &dev->dma_pools);
		else {
			kfree(retval);
			retval = NULL;
		}
		mutex_unlock(&pools_lock);
	} else
		INIT_LIST_HEAD (&retval->pools);

	return retval;
}
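
/*
 * A minimal usage sketch (not part of this file; the "foo_desc" name,
 * the pci_dev pointer, and the sizes are made up for illustration).
 * A driver typically creates one pool per descriptor type, allocates
 * blocks from it at run time, and frees every block before destroying
 * the pool:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *desc;
 *
 *	pool = dma_pool_create("foo_desc", &pdev->dev, 64, 16, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	if (desc) {
 *		... hand 'dma' to the device, use 'desc' from the CPU ...
 *		dma_pool_free(pool, desc, dma);
 *	}
 *	dma_pool_destroy(pool);
 */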


static struct dma_page *
pool_alloc_page (struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;
	int mapsize;

	mapsize = pool->blocks_per_page;
	mapsize = (mapsize + BITS_PER_LONG - 1) / BITS_PER_LONG;
	mapsize *= sizeof (long);

	page = kmalloc(mapsize + sizeof *page, mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent (pool->dev,
					  pool->allocation,
					  &page->dma,
					  mem_flags);
	if (page->vaddr) {
		memset (page->bitmap, 0xff, mapsize);	// bit set == free
#ifdef	CONFIG_DEBUG_SLAB
		memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		list_add (&page->page_list, &pool->page_list);
		page->in_use = 0;
	} else {
		kfree (page);
		page = NULL;
	}
	return page;
}
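
/*
 * Sizing example for the bitmap allocated above (numbers illustrative):
 * with allocation == 4096 and size == 64, blocks_per_page is 64, so
 * mapsize rounds up to one long on a 64-bit machine (two longs when
 * BITS_PER_LONG is 32), stored right after the struct dma_page header.
 */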


static inline int
is_page_busy (int blocks, unsigned long *bitmap)
{
	while (blocks > 0) {
		if (*bitmap++ != ~0UL)
			return 1;
		blocks -= BITS_PER_LONG;
	}
	return 0;
}

static void
pool_free_page (struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	CONFIG_DEBUG_SLAB
	memset (page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent (pool->dev, pool->allocation, page->vaddr, dma);
	list_del (&page->page_list);
	kfree (page);
}


/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void
dma_pool_destroy (struct dma_pool *pool)
{
	mutex_lock(&pools_lock);
	list_del (&pool->pools);
	if (pool->dev && list_empty (&pool->dev->dma_pools))
		device_remove_file (pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_lock);

	while (!list_empty (&pool->page_list)) {
		struct dma_page *page;
		page = list_entry (pool->page_list.next,
				struct dma_page, page_list);
		if (is_page_busy (pool->blocks_per_page, page->bitmap)) {
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				printk (KERN_ERR "dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del (&page->page_list);
			kfree (page);
		} else
			pool_free_page (pool, page);
	}

	kfree (pool);
}


/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, null is returned.
 */
void *
dma_pool_alloc (struct dma_pool *pool, gfp_t mem_flags, dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	int map, block;
	size_t offset;
	void *retval;

restart:
	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		int i;
		/* only cachable accesses here ... */
		for (map = 0, i = 0;
				i < pool->blocks_per_page;
				i += BITS_PER_LONG, map++) {
			if (page->bitmap [map] == 0)
				continue;
			block = ffz (~ page->bitmap [map]);
			if ((i + block) < pool->blocks_per_page) {
				clear_bit (block, &page->bitmap [map]);
				offset = (BITS_PER_LONG * map) + block;
				offset *= pool->size;
				goto ready;
			}
		}
	}
	if (!(page = pool_alloc_page (pool, GFP_ATOMIC))) {
		if (mem_flags & __GFP_WAIT) {
			DECLARE_WAITQUEUE (wait, current);

			__set_current_state(TASK_INTERRUPTIBLE);
			add_wait_queue (&pool->waitq, &wait);
			spin_unlock_irqrestore (&pool->lock, flags);

			schedule_timeout (POOL_TIMEOUT_JIFFIES);

			remove_wait_queue (&pool->waitq, &wait);
			goto restart;
		}
		retval = NULL;
		goto done;
	}

	clear_bit (0, &page->bitmap [0]);
	offset = 0;
ready:
	page->in_use++;
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	CONFIG_DEBUG_SLAB
	memset (retval, POOL_POISON_ALLOCATED, pool->size);
#endif
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return retval;
}
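
/*
 * Calling-context sketch (illustrative, not from this file): because the
 * pool lock is taken with spin_lock_irqsave(), dma_pool_alloc() may be
 * used from interrupt context provided mem_flags does not allow sleeping:
 *
 *	desc = dma_pool_alloc(pool, GFP_ATOMIC, &dma);	// e.g. in an irq handler
 *	if (!desc)
 *		return -ENOMEM;		// no waiting; the pool could not grow
 *
 * With a sleeping mask such as GFP_KERNEL (which includes __GFP_WAIT),
 * the function instead waits on pool->waitq for up to POOL_TIMEOUT_JIFFIES
 * and retries when a fresh page cannot be allocated.
 */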


static struct dma_page *
pool_find_page (struct dma_pool *pool, dma_addr_t dma)
{
	unsigned long flags;
	struct dma_page *page;

	spin_lock_irqsave (&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if (dma < (page->dma + pool->allocation))
			goto done;
	}
	page = NULL;
done:
	spin_unlock_irqrestore (&pool->lock, flags);
	return page;
}


/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void
dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	int map, block;

	if ((page = pool_find_page(pool, dma)) == NULL) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long) dma);
		return;
	}

	block = dma - page->dma;
	block /= pool->size;
	map = block / BITS_PER_LONG;
	block %= BITS_PER_LONG;

#ifdef	CONFIG_DEBUG_SLAB
	if (((dma - page->dma) + (void *)page->vaddr) != vaddr) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		else
			printk (KERN_ERR "dma_pool_free %s, %p (bad vaddr)/%Lx\n",
				pool->name, vaddr, (unsigned long long) dma);
		return;
	}
	if (page->bitmap [map] & (1UL << block)) {
		if (pool->dev)
			dev_err(pool->dev, "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		else
			printk (KERN_ERR "dma_pool_free %s, dma %Lx already free\n",
				pool->name, (unsigned long long)dma);
		return;
	}
	memset (vaddr, POOL_POISON_FREED, pool->size);
#endif

	spin_lock_irqsave (&pool->lock, flags);
	page->in_use--;
	set_bit (block, &page->bitmap [map]);
	if (waitqueue_active (&pool->waitq))
		wake_up (&pool->waitq);
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(bpp, page->bitmap)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore (&pool->lock, flags);
}
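
/*
 * Index arithmetic example for the free path above (illustrative numbers,
 * 64-bit case): with pool->size == 64 and a block at offset 0x940 into its
 * page, block = 0x940 / 64 == 37, so map = 37 / BITS_PER_LONG == 0 and
 * bit 37 of bitmap[0] is set again to mark the block free.
 */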

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
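
/*
 * Managed-usage sketch (illustrative; the "foo" driver and the sizes are
 * not part of this file).  In a probe() routine the pool needs no explicit
 * cleanup path, since devres destroys it automatically on driver detach:
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("foo_desc", &pdev->dev, 64, 16, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		... allocate and free blocks as with a normal dma pool ...
 *		return 0;
 *	}
 */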

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	dma_pool_destroy(pool);
	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
}

EXPORT_SYMBOL (dma_pool_create);
EXPORT_SYMBOL (dma_pool_destroy);
EXPORT_SYMBOL (dma_pool_alloc);
EXPORT_SYMBOL (dma_pool_free);
EXPORT_SYMBOL (dmam_pool_create);
EXPORT_SYMBOL (dmam_pool_destroy);