/*
 * arch/arm/common/dmabounce.c
 *
 * Special dma_{map/unmap/dma_sync}_* routines for systems that have
 * limited DMA windows. These functions utilize bounce buffers to
 * copy data to/from buffers located outside the DMA region. This
 * only works for systems in which DMA memory is at the bottom of
 * RAM, the remainder of memory is at the top and the DMA memory
 * can be marked as ZONE_DMA. Anything beyond that, such as discontiguous
 * DMA windows, will require custom implementations that reserve memory
 * areas at early bootup.
 *
 * Original version by Brad Parker (brad@heeltoe.com)
 * Re-written by Christopher Hoover <ch@murgatroid.com>
 * Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 * Copyright (C) 2002 Hewlett Packard Company.
 * Copyright (C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
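
/*
 * Overview of the bounce path implemented below: dmabounce_map_page()
 * asks needs_bounce() whether the device can reach the buffer's DMA
 * address directly.  If not, alloc_safe_buffer() takes a "safe" buffer
 * from one of two dma_pools (or falls back to dma_alloc_coherent() for
 * oversized requests), data is copied across, and the safe buffer's
 * DMA address is handed to the device.  dmabounce_unmap_page() copies
 * the data back and releases the safe buffer.
 */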

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void *ptr;
	size_t size;
	int direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void *safe;
	dma_addr_t safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long size;
	struct dma_pool *pool;
#ifdef STATS
	unsigned long allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool small;
	struct dmabounce_pool large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%zu, dir=%d)\n",
		__func__, ptr, size, dir);

	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%zu)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

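	/*
	 * The sync entry points may pass an address anywhere inside a
	 * safe buffer, so match on the containing range rather than on
	 * the start address alone.
	 */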
	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				  buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

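		/*
		 * For a mask of the usual 2^n - 1 form this evaluates
		 * to mask + 1, i.e. the size of the device's DMA
		 * window; a full 32-bit mask wraps to 0, which
		 * disables the size check below.
		 */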
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#zx "
				"mask %#llx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS(device_info->map_op_count++);

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
			__func__, ptr);
		return DMA_ERROR_CODE;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %zu\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range. if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_ERROR_CODE;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_ERROR_CODE;
	}

	return map_single(dev, page_address(page) + offset, size, dir, attrs);
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer. (basically return things back to the way they
 * should be)
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%zu,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir, attrs);
}

static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %zu\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %zu\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.set_dma_mask(dev, dma_mask);
}

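/*
 * The scatter-gather, allocation and mmap entry points below are the
 * generic ARM ones; arm_dma_map_sg() looks up the device's dma_map_ops
 * and therefore ends up calling dmabounce_map_page() for each list
 * entry, so scatterlists are bounced one segment at a time.
 */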
static const struct dma_map_ops dmabounce_ops = {
	.alloc = arm_dma_alloc,
	.free = arm_dma_free,
	.mmap = arm_dma_mmap,
	.get_sgtable = arm_dma_get_sgtable,
	.map_page = dmabounce_map_page,
	.unmap_page = dmabounce_unmap_page,
	.sync_single_for_cpu = dmabounce_sync_for_cpu,
	.sync_single_for_device = dmabounce_sync_for_device,
	.map_sg = arm_dma_map_sg,
	.unmap_sg = arm_dma_unmap_sg,
	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arm_dma_sync_sg_for_device,
	.set_dma_mask = dmabounce_set_mask,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	/* zeroed so that an unused large pool reads as size 0, pool NULL */
	device_info = kzalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);
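
/*
 * Registration example (hypothetical, for illustration only): a
 * platform whose device can only address the bottom 64MB of RAM
 * might register roughly like this, bouncing anything that ends
 * above the window:
 *
 *	static int frob_needs_bounce(struct device *dev, dma_addr_t addr,
 *				     size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, 2048, 4096, frob_needs_bounce);
 *
 * Mappings of up to 2048 bytes would then be served from the small
 * pool, up to 4096 bytes from the large pool, and anything bigger
 * from dma_alloc_coherent() at map time.
 */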

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");