Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * arch/arm/common/dmabounce.c | |
3 | * | |
4 | * Special dma_{map/unmap/dma_sync}_* routines for systems that have | |
5 | * limited DMA windows. These functions utilize bounce buffers to | |
6 | * copy data to/from buffers located outside the DMA region. This | |
7 | * only works for systems in which DMA memory is at the bottom of | |
8 | * RAM and the remainder of memory is at the top and the DMA memory | |
9 | * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous | |
10 | * DMA windows, will require custom implementations that reserve memory | |
11 | * areas at early bootup. | |
12 | * | |
13 | * Original version by Brad Parker (brad@heeltoe.com) | |
14 | * Re-written by Christopher Hoover <ch@murgatroid.com> | |
15 | * Made generic by Deepak Saxena <dsaxena@plexity.net> | |
16 | * | |
17 | * Copyright (C) 2002 Hewlett Packard Company. | |
18 | * Copyright (C) 2004 MontaVista Software, Inc. | |
19 | * | |
20 | * This program is free software; you can redistribute it and/or | |
21 | * modify it under the terms of the GNU General Public License | |
22 | * version 2 as published by the Free Software Foundation. | |
23 | */ | |
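The header comment describes the policy; the mechanism below additionally relies on a per-platform `dma_needs_bounce()` hook, called from `map_single()`, to say whether a given bus address must be bounced. As a rough, hypothetical sketch only (the 64MB window and its base are illustrative assumptions, not part of this file), such a hook might look like:

```c
/*
 * Hypothetical sketch, not from this file: a platform whose bus masters
 * can only reach the first 64MB of SDRAM might implement the hook that
 * map_single() calls as follows.  Window base and size are assumptions.
 */
int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	/* bounce anything that ends beyond the assumed 64MB DMA window */
	return (addr + size) > (PHYS_OFFSET + (64 << 20));
}
```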
24 | ||
25 | #include <linux/module.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/slab.h> | |
28 | #include <linux/device.h> | |
29 | #include <linux/dma-mapping.h> | |
30 | #include <linux/dmapool.h> | |
31 | #include <linux/list.h> | |
32 | ||
14eb75b6 RK |
33 | #include <asm/cacheflush.h> |
34 | ||
1da177e4 LT |
35 | #undef DEBUG |
36 | ||
37 | #undef STATS | |
38 | #ifdef STATS | |
39 | #define DO_STATS(X) do { X ; } while (0) | |
40 | #else | |
41 | #define DO_STATS(X) do { } while (0) | |
42 | #endif | |
43 | ||
44 | /* ************************************************** */ | |
45 | ||
46 | struct safe_buffer { | |
47 | struct list_head node; | |
48 | ||
49 | /* original request */ | |
50 | void *ptr; | |
51 | size_t size; | |
52 | int direction; | |
53 | ||
54 | /* safe buffer info */ | |
55 | struct dma_pool *pool; | |
56 | void *safe; | |
57 | dma_addr_t safe_dma_addr; | |
58 | }; | |
59 | ||
60 | struct dmabounce_device_info { | |
61 | struct list_head node; | |
62 | ||
63 | struct device *dev; | |
64 | struct dma_pool *small_buffer_pool; | |
65 | struct dma_pool *large_buffer_pool; | |
66 | struct list_head safe_buffers; | |
67 | unsigned long small_buffer_size, large_buffer_size; | |
68 | #ifdef STATS | |
69 | unsigned long sbp_allocs; | |
70 | unsigned long lbp_allocs; | |
71 | unsigned long total_allocs; | |
72 | unsigned long map_op_count; | |
73 | unsigned long bounce_count; | |
74 | #endif | |
75 | }; | |
76 | ||
77 | static LIST_HEAD(dmabounce_devs); | |
78 | ||
79 | #ifdef STATS | |
80 | static void print_alloc_stats(struct dmabounce_device_info *device_info) | |
81 | { | |
82 | printk(KERN_INFO | |
83 | "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n", | |
84 | device_info->dev->bus_id, | |
85 | device_info->sbp_allocs, device_info->lbp_allocs, | |
86 | device_info->total_allocs - device_info->sbp_allocs - | |
87 | device_info->lbp_allocs, | |
88 | device_info->total_allocs); | |
89 | } | |
90 | #endif | |
91 | ||
92 | /* find the given device in the dmabounce device list */ | |
93 | static inline struct dmabounce_device_info * | |
94 | find_dmabounce_dev(struct device *dev) | |
95 | { | |
b46a58fd | 96 | struct dmabounce_device_info *d; |
1da177e4 | 97 | |
b46a58fd | 98 | list_for_each_entry(d, &dmabounce_devs, node) |
1da177e4 LT |
99 | if (d->dev == dev) |
100 | return d; | |
b46a58fd | 101 | |
1da177e4 LT |
102 | return NULL; |
103 | } | |
104 | ||
105 | ||
106 | /* allocate a 'safe' buffer and keep track of it */ | |
107 | static inline struct safe_buffer * | |
108 | alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr, | |
109 | size_t size, enum dma_data_direction dir) | |
110 | { | |
111 | struct safe_buffer *buf; | |
112 | struct dma_pool *pool; | |
113 | struct device *dev = device_info->dev; | |
114 | void *safe; | |
115 | dma_addr_t safe_dma_addr; | |
116 | ||
117 | dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n", | |
118 | __func__, ptr, size, dir); | |
119 | ||
120 | DO_STATS ( device_info->total_allocs++ ); | |
121 | ||
122 | buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC); | |
123 | if (buf == NULL) { | |
124 | dev_warn(dev, "%s: kmalloc failed\n", __func__); | |
125 | return NULL; | |
126 | } | |
127 | ||
128 | if (size <= device_info->small_buffer_size) { | |
129 | pool = device_info->small_buffer_pool; | |
130 | safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr); | |
131 | ||
132 | DO_STATS ( device_info->sbp_allocs++ ); | |
133 | } else if (size <= device_info->large_buffer_size) { | |
134 | pool = device_info->large_buffer_pool; | |
135 | safe = dma_pool_alloc(pool, GFP_ATOMIC, &safe_dma_addr); | |
136 | ||
137 | DO_STATS ( device_info->lbp_allocs++ ); | |
138 | } else { | |
139 | pool = NULL; | |
140 | safe = dma_alloc_coherent(dev, size, &safe_dma_addr, GFP_ATOMIC); | |
141 | } | |
142 | ||
143 | if (safe == NULL) { | |
144 | dev_warn(device_info->dev, | |
145 | "%s: could not alloc dma memory (size=%d)\n", | |
146 | __func__, size); | |
147 | kfree(buf); | |
148 | return NULL; | |
149 | } | |
150 | ||
151 | #ifdef STATS | |
152 | if (device_info->total_allocs % 1000 == 0) | |
153 | print_alloc_stats(device_info); | |
154 | #endif | |
155 | ||
156 | buf->ptr = ptr; | |
157 | buf->size = size; | |
158 | buf->direction = dir; | |
159 | buf->pool = pool; | |
160 | buf->safe = safe; | |
161 | buf->safe_dma_addr = safe_dma_addr; | |
162 | ||
163 | list_add(&buf->node, &device_info->safe_buffers); | |
164 | ||
165 | return buf; | |
166 | } | |
167 | ||
168 | /* determine if a buffer is from our "safe" pool */ | |
169 | static inline struct safe_buffer * | |
170 | find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr) | |
171 | { | |
b46a58fd | 172 | struct safe_buffer *b; |
1da177e4 | 173 | |
b46a58fd | 174 | list_for_each_entry(b, &device_info->safe_buffers, node) |
1da177e4 LT |
175 | if (b->safe_dma_addr == safe_dma_addr) |
176 | return b; | |
1da177e4 LT |
177 | |
178 | return NULL; | |
179 | } | |
180 | ||
181 | static inline void | |
182 | free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf) | |
183 | { | |
184 | dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf); | |
185 | ||
186 | list_del(&buf->node); | |
187 | ||
188 | if (buf->pool) | |
189 | dma_pool_free(buf->pool, buf->safe, buf->safe_dma_addr); | |
190 | else | |
191 | dma_free_coherent(device_info->dev, buf->size, buf->safe, | |
192 | buf->safe_dma_addr); | |
193 | ||
194 | kfree(buf); | |
195 | } | |
196 | ||
197 | /* ************************************************** */ | |
198 | ||
199 | #ifdef STATS | |
200 | ||
201 | static void print_map_stats(struct dmabounce_device_info *device_info) | |
202 | { | |
203 | printk(KERN_INFO | |
204 | "%s: dmabounce: map_op_count=%lu, bounce_count=%lu\n", | |
205 | device_info->dev->bus_id, | |
206 | device_info->map_op_count, device_info->bounce_count); | |
207 | } | |
208 | #endif | |
209 | ||
210 | static inline dma_addr_t | |
211 | map_single(struct device *dev, void *ptr, size_t size, | |
212 | enum dma_data_direction dir) | |
213 | { | |
214 | struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); | |
215 | dma_addr_t dma_addr; | |
216 | int needs_bounce = 0; | |
217 | ||
218 | if (device_info) | |
219 | DO_STATS ( device_info->map_op_count++ ); | |
220 | ||
221 | dma_addr = virt_to_dma(dev, ptr); | |
222 | ||
223 | if (dev->dma_mask) { | |
224 | unsigned long mask = *dev->dma_mask; | |
225 | unsigned long limit; | |
226 | ||
227 | limit = (mask + 1) & ~mask; | |
228 | if (limit && size > limit) { | |
229 | dev_err(dev, "DMA mapping too big (requested %#x " | |
230 | "mask %#Lx)\n", size, *dev->dma_mask); | |
231 | return ~0; | |
232 | } | |
233 | ||
234 | /* | |
235 | * Figure out if we need to bounce from the DMA mask. | |
236 | */ | |
237 | needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask; | |
238 | } | |
239 | ||
240 | if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) { | |
241 | struct safe_buffer *buf; | |
242 | ||
243 | buf = alloc_safe_buffer(device_info, ptr, size, dir); | |
244 | if (buf == NULL) { | |
245 | dev_err(dev, "%s: unable to map unsafe buffer %p!\n", | |
246 | __func__, ptr); | |
247 | return ~0; | |
248 | } | |
249 | ||
250 | dev_dbg(dev, | |
251 | "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n", | |
252 | __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr), | |
253 | buf->safe, (void *) buf->safe_dma_addr); | |
254 | ||
255 | if ((dir == DMA_TO_DEVICE) || | |
256 | (dir == DMA_BIDIRECTIONAL)) { | |
257 | dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n", | |
258 | __func__, ptr, buf->safe, size); | |
259 | memcpy(buf->safe, ptr, size); | |
260 | } | |
261 | consistent_sync(buf->safe, size, dir); | |
262 | ||
263 | dma_addr = buf->safe_dma_addr; | |
264 | } else { | |
265 | consistent_sync(ptr, size, dir); | |
266 | } | |
267 | ||
268 | return dma_addr; | |
269 | } | |
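The DMA-mask handling in `map_single()` is compact, so a worked example may help: `limit = (mask + 1) & ~mask` yields the largest contiguous transfer the mask can describe (and wraps to zero for a full 32-bit mask, disabling the size check), while `(dma_addr | (dma_addr + size - 1)) & ~mask` is non-zero whenever the start or the end of the buffer has address bits above the mask. Illustrative numbers only:

```c
/*
 * Worked example (illustration only): a device with a 26-bit DMA mask,
 * *dev->dma_mask == 0x03ffffff.
 *
 *   limit = (0x03ffffff + 1) & ~0x03ffffff = 0x04000000      -> 64MB max
 *
 * Mapping a 4KB buffer at dma_addr 0x0a000000:
 *
 *   (0x0a000000 | 0x0a000fff) & ~0x03ffffff = 0x08000000     -> non-zero
 *
 * so needs_bounce is set: the buffer lies beyond what the device can
 * address.  With a full 32-bit mask, mask + 1 wraps to 0, limit is 0,
 * and needs_bounce evaluates to 0 for any address.
 */
```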
270 | ||
271 | static inline void | |
272 | unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
273 | enum dma_data_direction dir) | |
274 | { | |
275 | struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); | |
276 | struct safe_buffer *buf = NULL; | |
277 | ||
278 | /* | |
279 | * Trying to unmap an invalid mapping | |
280 | */ | |
281 | if (dma_addr == ~0) { | |
282 | dev_err(dev, "Trying to unmap invalid mapping\n"); | |
283 | return; | |
284 | } | |
285 | ||
286 | if (device_info) | |
287 | buf = find_safe_buffer(device_info, dma_addr); | |
288 | ||
289 | if (buf) { | |
290 | BUG_ON(buf->size != size); | |
291 | ||
292 | dev_dbg(dev, | |
293 | "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n", | |
294 | __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr), | |
295 | buf->safe, (void *) buf->safe_dma_addr); | |
296 | ||
1da177e4 LT |
297 | DO_STATS ( device_info->bounce_count++ ); |
298 | ||
5abc100e RK |
299 | if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) { |
300 | unsigned long ptr; | |
301 | ||
1da177e4 LT |
302 | dev_dbg(dev, |
303 | "%s: copy back safe %p to unsafe %p size %d\n", | |
304 | __func__, buf->safe, buf->ptr, size); | |
305 | memcpy(buf->ptr, buf->safe, size); | |
5abc100e RK |
306 | |
307 | /* | |
308 | * DMA buffers must have the same cache properties | |
309 | * as if they were really used for DMA - which means | |
310 | * data must be written back to RAM. Note that | |
311 | * we don't use dmac_flush_range() here for the | |
312 | * bidirectional case because we know the cache | |
313 | * lines will be coherent with the data written. | |
314 | */ | |
315 | ptr = (unsigned long)buf->ptr; | |
316 | dmac_clean_range(ptr, ptr + size); | |
1da177e4 LT |
317 | } |
318 | free_safe_buffer(device_info, buf); | |
319 | } | |
320 | } | |
321 | ||
322 | static inline void | |
323 | sync_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
324 | enum dma_data_direction dir) | |
325 | { | |
326 | struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); | |
327 | struct safe_buffer *buf = NULL; | |
328 | ||
329 | if (device_info) | |
330 | buf = find_safe_buffer(device_info, dma_addr); | |
331 | ||
332 | if (buf) { | |
333 | /* | |
334 | * Both of these checks from original code need to be | |
335 | * commented out b/c some drivers rely on the following: | |
336 | * | |
337 | * 1) Drivers may map a large chunk of memory into DMA space | |
338 | * but only sync a small portion of it. Good example is | |
339 | * allocating a large buffer, mapping it, and then | |
340 | * breaking it up into small descriptors. No point | |
341 | * in syncing the whole buffer if you only have to | |
342 | * touch one descriptor. | |
343 | * | |
344 | * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are | |
345 | * usually only synced in one dir at a time. | |
346 | * | |
347 | * See drivers/net/eepro100.c for examples of both cases. | |
348 | * | |
349 | * -ds | |
350 | * | |
351 | * BUG_ON(buf->size != size); | |
352 | * BUG_ON(buf->direction != dir); | |
353 | */ | |
354 | ||
355 | dev_dbg(dev, | |
356 | "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n", | |
357 | __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr), | |
358 | buf->safe, (void *) buf->safe_dma_addr); | |
359 | ||
360 | DO_STATS ( device_info->bounce_count++ ); | |
361 | ||
362 | switch (dir) { | |
363 | case DMA_FROM_DEVICE: | |
364 | dev_dbg(dev, | |
365 | "%s: copy back safe %p to unsafe %p size %d\n", | |
366 | __func__, buf->safe, buf->ptr, size); | |
367 | memcpy(buf->ptr, buf->safe, size); | |
368 | break; | |
369 | case DMA_TO_DEVICE: | |
370 | dev_dbg(dev, | |
371 | "%s: copy out unsafe %p to safe %p, size %d\n", | |
372 | __func__, buf->ptr, buf->safe, size); | |
373 | memcpy(buf->safe, buf->ptr, size); | |
374 | break; | |
375 | case DMA_BIDIRECTIONAL: | |
376 | BUG(); /* is this allowed? what does it mean? */ | |
377 | default: | |
378 | BUG(); | |
379 | } | |
380 | consistent_sync(buf->safe, size, dir); | |
381 | } else { | |
382 | consistent_sync(dma_to_virt(dev, dma_addr), size, dir); | |
383 | } | |
384 | } | |
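The commented-out checks in `sync_single()` exist because partial syncs are legal. A hypothetical driver-side illustration of that pattern (the function name and the 64-byte header size are made up) is a buffer mapped in full but synced only far enough to read its header:

```c
/*
 * Hypothetical illustration of the partial-sync case described above
 * (not taken from any driver): the whole buffer was mapped with
 * dma_map_single(), but only its header is synced before inspection,
 * so the size passed here is smaller than the mapped size.
 */
static void example_peek_header(struct device *dev, dma_addr_t buf_dma,
				void *buf)
{
	/* pull back only the first 64 bytes the CPU is about to read */
	dma_sync_single_for_cpu(dev, buf_dma, 64, DMA_FROM_DEVICE);

	/* ... parse the header at 'buf' ... */
}
```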
385 | ||
386 | /* ************************************************** */ | |
387 | ||
388 | /* | |
389 | * see if a buffer address is in an 'unsafe' range. if it is | |
390 | * allocate a 'safe' buffer and copy the unsafe buffer into it. | |
391 | * substitute the safe buffer for the unsafe one. | |
392 | * (basically move the buffer from an unsafe area to a safe one) | |
393 | */ | |
394 | dma_addr_t | |
395 | dma_map_single(struct device *dev, void *ptr, size_t size, | |
396 | enum dma_data_direction dir) | |
397 | { | |
398 | unsigned long flags; | |
399 | dma_addr_t dma_addr; | |
400 | ||
401 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | |
402 | __func__, ptr, size, dir); | |
403 | ||
404 | BUG_ON(dir == DMA_NONE); | |
405 | ||
406 | local_irq_save(flags); | |
407 | ||
408 | dma_addr = map_single(dev, ptr, size, dir); | |
409 | ||
410 | local_irq_restore(flags); | |
411 | ||
412 | return dma_addr; | |
413 | } | |
414 | ||
415 | /* | |
416 | * see if a mapped address was really a "safe" buffer and if so, copy | |
417 | * the data from the safe buffer back to the unsafe buffer and free up | |
418 | * the safe buffer. (basically return things back to the way they | |
419 | * should be) | |
420 | */ | |
421 | ||
422 | void | |
423 | dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |
424 | enum dma_data_direction dir) | |
425 | { | |
426 | unsigned long flags; | |
427 | ||
428 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | |
429 | __func__, (void *) dma_addr, size, dir); | |
430 | ||
431 | BUG_ON(dir == DMA_NONE); | |
432 | ||
433 | local_irq_save(flags); | |
434 | ||
435 | unmap_single(dev, dma_addr, size, dir); | |
436 | ||
437 | local_irq_restore(flags); | |
438 | } | |
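From a driver's perspective these wrappers behave exactly like the regular streaming-DMA calls; the bouncing is transparent. A minimal, hypothetical caller (function name assumed) might look like this:

```c
/*
 * Hypothetical driver-side usage (illustration only).  Whether the
 * buffer is bounced or mapped in place is invisible to the caller.
 */
static int example_send(struct device *dev, void *data, size_t len)
{
	dma_addr_t handle;

	handle = dma_map_single(dev, data, len, DMA_TO_DEVICE);
	if (handle == ~0)		/* map_single()'s failure value */
		return -ENOMEM;

	/* ... hand 'handle' to the device, start and await the transfer ... */

	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}
```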
439 | ||
440 | int | |
441 | dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, | |
442 | enum dma_data_direction dir) | |
443 | { | |
444 | unsigned long flags; | |
445 | int i; | |
446 | ||
447 | dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", | |
448 | __func__, sg, nents, dir); | |
449 | ||
450 | BUG_ON(dir == DMA_NONE); | |
451 | ||
452 | local_irq_save(flags); | |
453 | ||
454 | for (i = 0; i < nents; i++, sg++) { | |
455 | struct page *page = sg->page; | |
456 | unsigned int offset = sg->offset; | |
457 | unsigned int length = sg->length; | |
458 | void *ptr = page_address(page) + offset; | |
459 | ||
460 | sg->dma_address = | |
461 | map_single(dev, ptr, length, dir); | |
462 | } | |
463 | ||
464 | local_irq_restore(flags); | |
465 | ||
466 | return nents; | |
467 | } | |
468 | ||
469 | void | |
470 | dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, | |
471 | enum dma_data_direction dir) | |
472 | { | |
473 | unsigned long flags; | |
474 | int i; | |
475 | ||
476 | dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", | |
477 | __func__, sg, nents, dir); | |
478 | ||
479 | BUG_ON(dir == DMA_NONE); | |
480 | ||
481 | local_irq_save(flags); | |
482 | ||
483 | for (i = 0; i < nents; i++, sg++) { | |
484 | dma_addr_t dma_addr = sg->dma_address; | |
485 | unsigned int length = sg->length; | |
486 | ||
487 | unmap_single(dev, dma_addr, length, dir); | |
488 | } | |
489 | ||
490 | local_irq_restore(flags); | |
491 | } | |
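The scatter/gather variants simply walk the list and push each entry through `map_single()`/`unmap_single()`, so every element can be bounced independently. A hypothetical caller (illustration only, using the era's open-coded scatterlist fields):

```c
/*
 * Hypothetical scatter/gather usage (illustration only): the caller
 * fills in the scatterlist as usual; dmabounce decides per entry
 * whether a bounce buffer is needed.
 */
static int example_map_two_pages(struct device *dev, struct scatterlist *sg,
				 struct page *p0, struct page *p1)
{
	sg[0].page   = p0;
	sg[0].offset = 0;
	sg[0].length = PAGE_SIZE;

	sg[1].page   = p1;
	sg[1].offset = 0;
	sg[1].length = PAGE_SIZE;

	return dma_map_sg(dev, sg, 2, DMA_TO_DEVICE);
}
```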
492 | ||
493 | void | |
494 | dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size, | |
495 | enum dma_data_direction dir) | |
496 | { | |
497 | unsigned long flags; | |
498 | ||
499 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | |
500 | __func__, (void *) dma_addr, size, dir); | |
501 | ||
502 | local_irq_save(flags); | |
503 | ||
504 | sync_single(dev, dma_addr, size, dir); | |
505 | ||
506 | local_irq_restore(flags); | |
507 | } | |
508 | ||
509 | void | |
510 | dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size, | |
511 | enum dma_data_direction dir) | |
512 | { | |
513 | unsigned long flags; | |
514 | ||
515 | dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n", | |
516 | __func__, (void *) dma_addr, size, dir); | |
517 | ||
518 | local_irq_save(flags); | |
519 | ||
520 | sync_single(dev, dma_addr, size, dir); | |
521 | ||
522 | local_irq_restore(flags); | |
523 | } | |
524 | ||
525 | void | |
526 | dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents, | |
527 | enum dma_data_direction dir) | |
528 | { | |
529 | unsigned long flags; | |
530 | int i; | |
531 | ||
532 | dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", | |
533 | __func__, sg, nents, dir); | |
534 | ||
535 | BUG_ON(dir == DMA_NONE); | |
536 | ||
537 | local_irq_save(flags); | |
538 | ||
539 | for (i = 0; i < nents; i++, sg++) { | |
540 | dma_addr_t dma_addr = sg->dma_address; | |
541 | unsigned int length = sg->length; | |
542 | ||
543 | sync_single(dev, dma_addr, length, dir); | |
544 | } | |
545 | ||
546 | local_irq_restore(flags); | |
547 | } | |
548 | ||
549 | void | |
550 | dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents, | |
551 | enum dma_data_direction dir) | |
552 | { | |
553 | unsigned long flags; | |
554 | int i; | |
555 | ||
556 | dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n", | |
557 | __func__, sg, nents, dir); | |
558 | ||
559 | BUG_ON(dir == DMA_NONE); | |
560 | ||
561 | local_irq_save(flags); | |
562 | ||
563 | for (i = 0; i < nents; i++, sg++) { | |
564 | dma_addr_t dma_addr = sg->dma_address; | |
565 | unsigned int length = sg->length; | |
566 | ||
567 | sync_single(dev, dma_addr, length, dir); | |
568 | } | |
569 | ||
570 | local_irq_restore(flags); | |
571 | } | |
572 | ||
573 | int | |
574 | dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size, | |
575 | unsigned long large_buffer_size) | |
576 | { | |
577 | struct dmabounce_device_info *device_info; | |
578 | ||
579 | device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC); | |
580 | if (!device_info) { | |
581 | printk(KERN_ERR | |
582 | "Could not allocated dmabounce_device_info for %s", | |
583 | dev->bus_id); | |
584 | return -ENOMEM; | |
585 | } | |
586 | ||
587 | device_info->small_buffer_pool = | |
588 | dma_pool_create("small_dmabounce_pool", | |
589 | dev, | |
590 | small_buffer_size, | |
591 | 0 /* byte alignment */, | |
592 | 0 /* no page-crossing issues */); | |
593 | if (!device_info->small_buffer_pool) { | |
594 | printk(KERN_ERR | |
595 | "dmabounce: could not allocate small DMA pool for %s\n", | |
596 | dev->bus_id); | |
597 | kfree(device_info); | |
598 | return -ENOMEM; | |
599 | } | |
600 | device_info->large_buffer_pool = NULL; | |
601 | if (large_buffer_size) { | |
602 | device_info->large_buffer_pool = | |
603 | dma_pool_create("large_dmabounce_pool", | |
604 | dev, | |
605 | large_buffer_size, | |
606 | 0 /* byte alignment */, | |
607 | 0 /* no page-crossing issues */); | |
608 | if (!device_info->large_buffer_pool) { | |
609 | printk(KERN_ERR | |
610 | "dmabounce: could not allocate large DMA pool for %s\n", | |
611 | dev->bus_id); | |
612 | dma_pool_destroy(device_info->small_buffer_pool); | |
613 | ||
614 | return -ENOMEM; | |
615 | } | |
616 | } | |
617 | ||
618 | device_info->dev = dev; | |
619 | device_info->small_buffer_size = small_buffer_size; | |
620 | device_info->large_buffer_size = large_buffer_size; | |
621 | INIT_LIST_HEAD(&device_info->safe_buffers); | |
622 | ||
623 | #ifdef STATS | |
624 | device_info->sbp_allocs = 0; | |
625 | device_info->lbp_allocs = 0; | |
626 | device_info->total_allocs = 0; | |
627 | device_info->map_op_count = 0; | |
628 | device_info->bounce_count = 0; | |
629 | #endif | |
630 | ||
631 | list_add(&device_info->node, &dmabounce_devs); | |
632 | ||
633 | printk(KERN_INFO "dmabounce: registered device %s on %s bus\n", | |
634 | dev->bus_id, dev->bus->name); | |
635 | ||
636 | return 0; | |
637 | } | |
638 | ||
639 | void | |
640 | dmabounce_unregister_dev(struct device *dev) | |
641 | { | |
642 | struct dmabounce_device_info *device_info = find_dmabounce_dev(dev); | |
643 | ||
644 | if (!device_info) { | |
645 | printk(KERN_WARNING | |
646 | "%s: Never registered with dmabounce but attempting" \ | |
647 | "to unregister!\n", dev->bus_id); | |
648 | return; | |
649 | } | |
650 | ||
651 | if (!list_empty(&device_info->safe_buffers)) { | |
652 | printk(KERN_ERR | |
653 | "%s: Removing from dmabounce with pending buffers!\n", | |
654 | dev->bus_id); | |
655 | BUG(); | |
656 | } | |
657 | ||
658 | if (device_info->small_buffer_pool) | |
659 | dma_pool_destroy(device_info->small_buffer_pool); | |
660 | if (device_info->large_buffer_pool) | |
661 | dma_pool_destroy(device_info->large_buffer_pool); | |
662 | ||
663 | #ifdef STATS | |
664 | print_alloc_stats(device_info); | |
665 | print_map_stats(device_info); | |
666 | #endif | |
667 | ||
668 | list_del(&device_info->node); | |
669 | ||
670 | kfree(device_info); | |
671 | ||
672 | printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n", | |
673 | dev->bus_id, dev->bus->name); | |
674 | } | |
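Registration is the platform's (or bus driver's) responsibility: each DMA-limited device is registered with pool sizes that match its typical transfers, and unregistered once all of its mappings have been torn down. A hypothetical sketch (the 256/4096-byte pool sizes are made-up values):

```c
/*
 * Hypothetical platform-side usage (illustration only; pool sizes are
 * made up): register a device when it appears on the bus, unregister
 * it on removal.
 */
static int example_bus_add_device(struct device *dev)
{
	/* 256-byte pool for small/control transfers, 4KB pool for data */
	return dmabounce_register_dev(dev, 256, 4096);
}

static void example_bus_remove_device(struct device *dev)
{
	dmabounce_unregister_dev(dev);
}
```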
675 | ||
676 | ||
677 | EXPORT_SYMBOL(dma_map_single); | |
678 | EXPORT_SYMBOL(dma_unmap_single); | |
679 | EXPORT_SYMBOL(dma_map_sg); | |
680 | EXPORT_SYMBOL(dma_unmap_sg); | |
681 | EXPORT_SYMBOL(dma_sync_single_for_cpu); | |
682 | EXPORT_SYMBOL(dma_sync_single_for_device); | |
683 | EXPORT_SYMBOL(dma_sync_sg_for_cpu); | |
684 | EXPORT_SYMBOL(dma_sync_sg_for_device); | |
685 | EXPORT_SYMBOL(dmabounce_register_dev); | |
686 | EXPORT_SYMBOL(dmabounce_unregister_dev); | |
687 | ||
688 | MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>"); | |
689 | MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows"); | |
690 | MODULE_LICENSE("GPL"); |