/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 * DMA windows will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
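
/*
 * Usage sketch (illustrative only): a platform or bus driver whose
 * devices sit behind a limited DMA window registers each affected
 * device with dmabounce, typically from its probe path, and
 * unregisters it on removal.  Only dmabounce_register_dev() and
 * dmabounce_unregister_dev() below are real; the function names and
 * pool sizes in this sketch are made up.
 *
 *	static int example_probe(struct device *dev)
 *	{
 *		// small buffers come from a 512-byte pool, large from 4096
 *		return dmabounce_register_dev(dev, 512, 4096);
 *	}
 *
 *	static int example_remove(struct device *dev)
 *	{
 *		dmabounce_unregister_dev(dev);
 *		return 0;
 *	}
 */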

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>

#include <asm/cacheflush.h>

#undef DEBUG
#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif
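
/*
 * Illustrative note: statements passed to DO_STATS() (for example
 * DO_STATS(device_info->map_op_count++); as used in map_single()
 * below) compile away entirely unless STATS is defined above.
 */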

/* ************************************************** */

struct safe_buffer {
        struct list_head node;

        /* original request */
        void            *ptr;
        size_t          size;
        int             direction;

        /* safe buffer info */
        struct dmabounce_pool *pool;
        void            *safe;
        dma_addr_t      safe_dma_addr;
};

struct dmabounce_pool {
        unsigned long   size;
        struct dma_pool *pool;
#ifdef STATS
        unsigned long   allocs;
#endif
};

struct dmabounce_device_info {
        struct list_head node;

        struct device *dev;
        struct list_head safe_buffers;
#ifdef STATS
        unsigned long total_allocs;
        unsigned long map_op_count;
        unsigned long bounce_count;
#endif
        struct dmabounce_pool   small;
        struct dmabounce_pool   large;

        rwlock_t lock;
};

static LIST_HEAD(dmabounce_devs);

#ifdef STATS
static void print_alloc_stats(struct dmabounce_device_info *device_info)
{
        printk(KERN_INFO
                "%s: dmabounce: sbp: %lu, lbp: %lu, other: %lu, total: %lu\n",
                device_info->dev->bus_id,
                device_info->small.allocs, device_info->large.allocs,
                device_info->total_allocs - device_info->small.allocs -
                        device_info->large.allocs,
                device_info->total_allocs);
}
#endif

/* find the given device in the dmabounce device list */
static inline struct dmabounce_device_info *
find_dmabounce_dev(struct device *dev)
{
        struct dmabounce_device_info *d;

        list_for_each_entry(d, &dmabounce_devs, node)
                if (d->dev == dev)
                        return d;

        return NULL;
}


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
                  size_t size, enum dma_data_direction dir)
{
        struct safe_buffer *buf;
        struct dmabounce_pool *pool;
        struct device *dev = device_info->dev;
        unsigned long flags;

        dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
                __func__, ptr, size, dir);

        if (size <= device_info->small.size) {
                pool = &device_info->small;
        } else if (size <= device_info->large.size) {
                pool = &device_info->large;
        } else {
                pool = NULL;
        }

        buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
        if (buf == NULL) {
                dev_warn(dev, "%s: kmalloc failed\n", __func__);
                return NULL;
        }

        buf->ptr = ptr;
        buf->size = size;
        buf->direction = dir;
        buf->pool = pool;

        if (pool) {
                buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
                                           &buf->safe_dma_addr);
        } else {
                buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
                                               GFP_ATOMIC);
        }

        if (buf->safe == NULL) {
                dev_warn(dev,
                         "%s: could not alloc dma memory (size=%d)\n",
                         __func__, size);
                kfree(buf);
                return NULL;
        }

#ifdef STATS
        if (pool)
                pool->allocs++;
        device_info->total_allocs++;
        if (device_info->total_allocs % 1000 == 0)
                print_alloc_stats(device_info);
#endif

        write_lock_irqsave(&device_info->lock, flags);

        list_add(&buf->node, &device_info->safe_buffers);

        write_unlock_irqrestore(&device_info->lock, flags);

        return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
        struct safe_buffer *b, *rb = NULL;
        unsigned long flags;

        read_lock_irqsave(&device_info->lock, flags);

        list_for_each_entry(b, &device_info->safe_buffers, node)
                if (b->safe_dma_addr == safe_dma_addr) {
                        rb = b;
                        break;
                }

        read_unlock_irqrestore(&device_info->lock, flags);
        return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
        unsigned long flags;

        dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

        write_lock_irqsave(&device_info->lock, flags);

        list_del(&buf->node);

        write_unlock_irqrestore(&device_info->lock, flags);

        if (buf->pool)
                dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
        else
                dma_free_coherent(device_info->dev, buf->size, buf->safe,
                                  buf->safe_dma_addr);

        kfree(buf);
}

/* ************************************************** */

#ifdef STATS
static void print_map_stats(struct dmabounce_device_info *device_info)
{
        dev_info(device_info->dev,
                 "dmabounce: map_op_count=%lu, bounce_count=%lu\n",
                 device_info->map_op_count, device_info->bounce_count);
}
#endif

static inline dma_addr_t
map_single(struct device *dev, void *ptr, size_t size,
           enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
        dma_addr_t dma_addr;
        int needs_bounce = 0;

        if (device_info)
                DO_STATS ( device_info->map_op_count++ );

        dma_addr = virt_to_dma(dev, ptr);

        if (dev->dma_mask) {
                unsigned long mask = *dev->dma_mask;
                unsigned long limit;

                limit = (mask + 1) & ~mask;
                if (limit && size > limit) {
                        dev_err(dev, "DMA mapping too big (requested %#x "
                                "mask %#Lx)\n", size, *dev->dma_mask);
                        return ~0;
                }

                /*
                 * Figure out if we need to bounce from the DMA mask.
                 */
                needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
        }
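
        /*
         * Worked example of the checks above (numbers are illustrative):
         * with a 24-bit mask of 0x00ffffff, limit = (mask + 1) & ~mask
         * = 0x01000000, i.e. the 16MB window size, so any single mapping
         * larger than 16MB is rejected.  For needs_bounce, a mapping at
         * dma_addr 0x00fff000 of size 0x2000 ends at 0x01000fff; OR-ing
         * start and end and masking with ~mask leaves 0x01000000, which
         * is non-zero, so the buffer crosses the window limit and must
         * be bounced.  A full 32-bit mask gives limit == 0 and
         * ~mask == 0, so neither check ever triggers.
         */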

        if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
                struct safe_buffer *buf;

                buf = alloc_safe_buffer(device_info, ptr, size, dir);
                if (buf == NULL) {
                        dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
                                __func__, ptr);
                        return 0;
                }

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                if ((dir == DMA_TO_DEVICE) ||
                    (dir == DMA_BIDIRECTIONAL)) {
                        dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
                                __func__, ptr, buf->safe, size);
                        memcpy(buf->safe, ptr, size);
                }
                ptr = buf->safe;

                dma_addr = buf->safe_dma_addr;
        } else {
                /*
                 * We don't need to sync the DMA buffer since
                 * it was allocated via the coherent allocators.
                 */
                consistent_sync(ptr, size, dir);
        }

        return dma_addr;
}

static inline void
unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
             enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
        struct safe_buffer *buf = NULL;

        /*
         * Trying to unmap an invalid mapping
         */
        if (dma_mapping_error(dma_addr)) {
                dev_err(dev, "Trying to unmap invalid mapping\n");
                return;
        }

        if (device_info)
                buf = find_safe_buffer(device_info, dma_addr);

        if (buf) {
                BUG_ON(buf->size != size);

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                DO_STATS ( device_info->bounce_count++ );

                if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
                        unsigned long ptr;

                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %d\n",
                                __func__, buf->safe, buf->ptr, size);
                        memcpy(buf->ptr, buf->safe, size);

                        /*
                         * DMA buffers must have the same cache properties
                         * as if they were really used for DMA - which means
                         * data must be written back to RAM.  Note that
                         * we don't use dmac_flush_range() here for the
                         * bidirectional case because we know the cache
                         * lines will be coherent with the data written.
                         */
                        ptr = (unsigned long)buf->ptr;
                        dmac_clean_range(ptr, ptr + size);
                        outer_clean_range(__pa(ptr), __pa(ptr) + size);
                }
                free_safe_buffer(device_info, buf);
        }
}

static inline void
sync_single(struct device *dev, dma_addr_t dma_addr, size_t size,
            enum dma_data_direction dir)
{
        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);
        struct safe_buffer *buf = NULL;

        if (device_info)
                buf = find_safe_buffer(device_info, dma_addr);

        if (buf) {
                /*
                 * Both of these checks from original code need to be
                 * commented out b/c some drivers rely on the following:
                 *
                 * 1) Drivers may map a large chunk of memory into DMA space
                 *    but only sync a small portion of it. Good example is
                 *    allocating a large buffer, mapping it, and then
                 *    breaking it up into small descriptors. No point
                 *    in syncing the whole buffer if you only have to
                 *    touch one descriptor.
                 *
                 * 2) Buffers that are mapped as DMA_BIDIRECTIONAL are
                 *    usually only synced in one dir at a time.
                 *
                 * See drivers/net/eepro100.c for examples of both cases.
                 *
                 * -ds
                 *
                 * BUG_ON(buf->size != size);
                 * BUG_ON(buf->direction != dir);
                 */

                dev_dbg(dev,
                        "%s: unsafe buffer %p (phy=%p) mapped to %p (phy=%p)\n",
                        __func__, buf->ptr, (void *) virt_to_dma(dev, buf->ptr),
                        buf->safe, (void *) buf->safe_dma_addr);

                DO_STATS ( device_info->bounce_count++ );

                switch (dir) {
                case DMA_FROM_DEVICE:
                        dev_dbg(dev,
                                "%s: copy back safe %p to unsafe %p size %d\n",
                                __func__, buf->safe, buf->ptr, size);
                        memcpy(buf->ptr, buf->safe, size);
                        break;
                case DMA_TO_DEVICE:
                        dev_dbg(dev,
                                "%s: copy out unsafe %p to safe %p, size %d\n",
                                __func__, buf->ptr, buf->safe, size);
                        memcpy(buf->safe, buf->ptr, size);
                        break;
                case DMA_BIDIRECTIONAL:
                        BUG();  /* is this allowed?  what does it mean? */
                default:
                        BUG();
                }
                /*
                 * No need to sync the safe buffer - it was allocated
                 * via the coherent allocators.
                 */
        } else {
                consistent_sync(dma_to_virt(dev, dma_addr), size, dir);
        }
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction dir)
{
        dma_addr_t dma_addr;

        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, ptr, size, dir);

        BUG_ON(dir == DMA_NONE);

        dma_addr = map_single(dev, ptr, size, dir);

        return dma_addr;
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */

void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
                 enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        BUG_ON(dir == DMA_NONE);

        unmap_single(dev, dma_addr, size, dir);
}

int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
           enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                struct page *page = sg->page;
                unsigned int offset = sg->offset;
                unsigned int length = sg->length;
                void *ptr = page_address(page) + offset;

                sg->dma_address =
                        map_single(dev, ptr, length, dir);
        }

        return nents;
}

void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
             enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                unmap_single(dev, dma_addr, length, dir);
        }
}

void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr, size_t size,
                        enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_addr, size_t size,
                           enum dma_data_direction dir)
{
        dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
                __func__, (void *) dma_addr, size, dir);

        sync_single(dev, dma_addr, size, dir);
}

void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
                    enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                sync_single(dev, dma_addr, length, dir);
        }
}

void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
                       enum dma_data_direction dir)
{
        int i;

        dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
                __func__, sg, nents, dir);

        BUG_ON(dir == DMA_NONE);

        for (i = 0; i < nents; i++, sg++) {
                dma_addr_t dma_addr = sg->dma_address;
                unsigned int length = sg->length;

                sync_single(dev, dma_addr, length, dir);
        }
}

static int
dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
                    unsigned long size)
{
        pool->size = size;
        DO_STATS(pool->allocs = 0);
        pool->pool = dma_pool_create(name, dev, size,
                                     0 /* byte alignment */,
                                     0 /* no page-crossing issues */);

        return pool->pool ? 0 : -ENOMEM;
}

int
dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
                       unsigned long large_buffer_size)
{
        struct dmabounce_device_info *device_info;
        int ret;

        device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
        if (!device_info) {
                printk(KERN_ERR
                        "Could not allocate dmabounce_device_info for %s\n",
                        dev->bus_id);
                return -ENOMEM;
        }

        ret = dmabounce_init_pool(&device_info->small, dev,
                                  "small_dmabounce_pool", small_buffer_size);
        if (ret) {
                dev_err(dev,
                        "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                        small_buffer_size);
                goto err_free;
        }

        if (large_buffer_size) {
                ret = dmabounce_init_pool(&device_info->large, dev,
                                          "large_dmabounce_pool",
                                          large_buffer_size);
                if (ret) {
                        dev_err(dev,
                                "dmabounce: could not allocate DMA pool for %ld byte objects\n",
                                large_buffer_size);
                        goto err_destroy;
                }
        }

        device_info->dev = dev;
        INIT_LIST_HEAD(&device_info->safe_buffers);
        rwlock_init(&device_info->lock);

#ifdef STATS
        device_info->total_allocs = 0;
        device_info->map_op_count = 0;
        device_info->bounce_count = 0;
#endif

        list_add(&device_info->node, &dmabounce_devs);

        printk(KERN_INFO "dmabounce: registered device %s on %s bus\n",
                dev->bus_id, dev->bus->name);

        return 0;

 err_destroy:
        dma_pool_destroy(device_info->small.pool);
 err_free:
        kfree(device_info);
        return ret;
}

void
dmabounce_unregister_dev(struct device *dev)
{
        struct dmabounce_device_info *device_info = find_dmabounce_dev(dev);

        if (!device_info) {
                printk(KERN_WARNING
                        "%s: Never registered with dmabounce but attempting "
                        "to unregister!\n", dev->bus_id);
                return;
        }

        if (!list_empty(&device_info->safe_buffers)) {
                printk(KERN_ERR
                        "%s: Removing from dmabounce with pending buffers!\n",
                        dev->bus_id);
                BUG();
        }

        if (device_info->small.pool)
                dma_pool_destroy(device_info->small.pool);
        if (device_info->large.pool)
                dma_pool_destroy(device_info->large.pool);

#ifdef STATS
        print_alloc_stats(device_info);
        print_map_stats(device_info);
#endif

        list_del(&device_info->node);

        kfree(device_info);

        printk(KERN_INFO "dmabounce: device %s on %s bus unregistered\n",
                dev->bus_id, dev->bus->name);
}


EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(dma_map_sg);
EXPORT_SYMBOL(dma_unmap_sg);
EXPORT_SYMBOL(dma_sync_single_for_cpu);
EXPORT_SYMBOL(dma_sync_single_for_device);
EXPORT_SYMBOL(dma_sync_sg_for_cpu);
EXPORT_SYMBOL(dma_sync_sg_for_device);
EXPORT_SYMBOL(dmabounce_register_dev);
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");