1 /*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
20 */
21
22 /*
23 * This code implements the DMA subsystem. It provides a HW-neutral interface
24 * for other kernel code to use asynchronous memory copy capabilities,
25 * if present, and allows different HW DMA drivers to register as providing
26 * this capability.
27 *
28 * Because we are accelerating what is already a relatively fast
29 * operation, the code goes to great lengths to avoid additional overhead,
30 * such as locking.
31 *
32 * LOCKING:
33 *
34 * The subsystem keeps a global list of dma_device structs; it is protected
35 * by a mutex, dma_list_mutex.
36 *
37 * A subsystem can get access to a channel by calling dmaengine_get() followed
38 * by dma_find_channel(), or if it has need for an exclusive channel it can call
39 * dma_request_channel(). Once a channel is allocated a reference is taken
40 * against its corresponding driver to disable removal.
41 *
42 * Each device has a channels list, which runs unlocked but is never modified
43 * once the device is registered; it's just set up by the driver.
44 *
45 * See Documentation/dmaengine.txt for more details
46 */
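/*
 * Editorial sketch (not part of the original file): a client that only needs
 * opportunistic memcpy offload would typically follow the pattern described
 * above. Declarations and error handling are elided; dest, src and len are
 * assumed to be valid kernel buffers.
 *
 *	dmaengine_get();                         // register interest, pin providers
 *	chan = dma_find_channel(DMA_MEMCPY);     // per-cpu lookup, may be NULL
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	...
 *	dmaengine_put();                         // drop references when done
 */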
47
48 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
49
50 #include <linux/dma-mapping.h>
51 #include <linux/init.h>
52 #include <linux/module.h>
53 #include <linux/mm.h>
54 #include <linux/device.h>
55 #include <linux/dmaengine.h>
56 #include <linux/hardirq.h>
57 #include <linux/spinlock.h>
58 #include <linux/percpu.h>
59 #include <linux/rcupdate.h>
60 #include <linux/mutex.h>
61 #include <linux/jiffies.h>
62 #include <linux/rculist.h>
63 #include <linux/idr.h>
64 #include <linux/slab.h>
65 #include <linux/acpi.h>
66 #include <linux/acpi_dma.h>
67 #include <linux/of_dma.h>
68 #include <linux/mempool.h>
69
70 static DEFINE_MUTEX(dma_list_mutex);
71 static DEFINE_IDR(dma_idr);
72 static LIST_HEAD(dma_device_list);
73 static long dmaengine_ref_count;
74
75 /* --- sysfs implementation --- */
76
77 /**
78 * dev_to_dma_chan - convert a device pointer to its sysfs container object
79 * @dev - device node
80 *
81 * Must be called under dma_list_mutex
82 */
83 static struct dma_chan *dev_to_dma_chan(struct device *dev)
84 {
85 struct dma_chan_dev *chan_dev;
86
87 chan_dev = container_of(dev, typeof(*chan_dev), device);
88 return chan_dev->chan;
89 }
90
91 static ssize_t memcpy_count_show(struct device *dev,
92 struct device_attribute *attr, char *buf)
93 {
94 struct dma_chan *chan;
95 unsigned long count = 0;
96 int i;
97 int err;
98
99 mutex_lock(&dma_list_mutex);
100 chan = dev_to_dma_chan(dev);
101 if (chan) {
102 for_each_possible_cpu(i)
103 count += per_cpu_ptr(chan->local, i)->memcpy_count;
104 err = sprintf(buf, "%lu\n", count);
105 } else
106 err = -ENODEV;
107 mutex_unlock(&dma_list_mutex);
108
109 return err;
110 }
111 static DEVICE_ATTR_RO(memcpy_count);
112
113 static ssize_t bytes_transferred_show(struct device *dev,
114 struct device_attribute *attr, char *buf)
115 {
116 struct dma_chan *chan;
117 unsigned long count = 0;
118 int i;
119 int err;
120
121 mutex_lock(&dma_list_mutex);
122 chan = dev_to_dma_chan(dev);
123 if (chan) {
124 for_each_possible_cpu(i)
125 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
126 err = sprintf(buf, "%lu\n", count);
127 } else
128 err = -ENODEV;
129 mutex_unlock(&dma_list_mutex);
130
131 return err;
132 }
133 static DEVICE_ATTR_RO(bytes_transferred);
134
135 static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
136 char *buf)
137 {
138 struct dma_chan *chan;
139 int err;
140
141 mutex_lock(&dma_list_mutex);
142 chan = dev_to_dma_chan(dev);
143 if (chan)
144 err = sprintf(buf, "%d\n", chan->client_count);
145 else
146 err = -ENODEV;
147 mutex_unlock(&dma_list_mutex);
148
149 return err;
150 }
151 static DEVICE_ATTR_RO(in_use);
152
153 static struct attribute *dma_dev_attrs[] = {
154 &dev_attr_memcpy_count.attr,
155 &dev_attr_bytes_transferred.attr,
156 &dev_attr_in_use.attr,
157 NULL,
158 };
159 ATTRIBUTE_GROUPS(dma_dev);
160
161 static void chan_dev_release(struct device *dev)
162 {
163 struct dma_chan_dev *chan_dev;
164
165 chan_dev = container_of(dev, typeof(*chan_dev), device);
166 if (atomic_dec_and_test(chan_dev->idr_ref)) {
167 mutex_lock(&dma_list_mutex);
168 idr_remove(&dma_idr, chan_dev->dev_id);
169 mutex_unlock(&dma_list_mutex);
170 kfree(chan_dev->idr_ref);
171 }
172 kfree(chan_dev);
173 }
174
175 static struct class dma_devclass = {
176 .name = "dma",
177 .dev_groups = dma_dev_groups,
178 .dev_release = chan_dev_release,
179 };
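/*
 * Editorial note: given the "dma" class above and the dma%dchan%d device
 * names assigned in dma_async_device_register(), these attributes show up in
 * sysfs as, for example,
 * /sys/class/dma/dma0chan0/{memcpy_count,bytes_transferred,in_use}.
 */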
180
181 /* --- client and device registration --- */
182
183 #define dma_device_satisfies_mask(device, mask) \
184 __dma_device_satisfies_mask((device), &(mask))
185 static int
186 __dma_device_satisfies_mask(struct dma_device *device,
187 const dma_cap_mask_t *want)
188 {
189 dma_cap_mask_t has;
190
191 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
192 DMA_TX_TYPE_END);
193 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
194 }
195
196 static struct module *dma_chan_to_owner(struct dma_chan *chan)
197 {
198 return chan->device->dev->driver->owner;
199 }
200
201 /**
202 * balance_ref_count - catch up the channel reference count
203 * @chan - channel to balance ->client_count versus dmaengine_ref_count
204 *
205 * balance_ref_count must be called under dma_list_mutex
206 */
207 static void balance_ref_count(struct dma_chan *chan)
208 {
209 struct module *owner = dma_chan_to_owner(chan);
210
211 while (chan->client_count < dmaengine_ref_count) {
212 __module_get(owner);
213 chan->client_count++;
214 }
215 }
216
217 /**
218 * dma_chan_get - try to grab a dma channel's parent driver module
219 * @chan - channel to grab
220 *
221 * Must be called under dma_list_mutex
222 */
223 static int dma_chan_get(struct dma_chan *chan)
224 {
225 int err = -ENODEV;
226 struct module *owner = dma_chan_to_owner(chan);
227
228 if (chan->client_count) {
229 __module_get(owner);
230 err = 0;
231 } else if (try_module_get(owner))
232 err = 0;
233
234 if (err == 0)
235 chan->client_count++;
236
237 /* allocate upon first client reference */
238 if (chan->client_count == 1 && err == 0) {
239 int desc_cnt = chan->device->device_alloc_chan_resources(chan);
240
241 if (desc_cnt < 0) {
242 err = desc_cnt;
243 chan->client_count = 0;
244 module_put(owner);
245 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
246 balance_ref_count(chan);
247 }
248
249 return err;
250 }
251
252 /**
253 * dma_chan_put - drop a reference to a dma channel's parent driver module
254 * @chan - channel to release
255 *
256 * Must be called under dma_list_mutex
257 */
258 static void dma_chan_put(struct dma_chan *chan)
259 {
260 if (!chan->client_count)
261 return; /* this channel failed alloc_chan_resources */
262 chan->client_count--;
263 module_put(dma_chan_to_owner(chan));
264 if (chan->client_count == 0)
265 chan->device->device_free_chan_resources(chan);
266 }
267
268 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
269 {
270 enum dma_status status;
271 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
272
273 dma_async_issue_pending(chan);
274 do {
275 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
276 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
277 pr_err("%s: timeout!\n", __func__);
278 return DMA_ERROR;
279 }
280 if (status != DMA_IN_PROGRESS)
281 break;
282 cpu_relax();
283 } while (1);
284
285 return status;
286 }
287 EXPORT_SYMBOL(dma_sync_wait);
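/*
 * Editorial sketch, assuming chan and cookie come from a previously submitted
 * transaction (e.g. dma_async_memcpy_pg_to_pg() below) and dev is the
 * caller's struct device: callers that must block simply poll the cookie.
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		dev_err(dev, "DMA memcpy failed or timed out\n");
 */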
288
289 /**
290 * dma_cap_mask_all - enable iteration over all operation types
291 */
292 static dma_cap_mask_t dma_cap_mask_all;
293
294 /**
295 * dma_chan_tbl_ent - tracks channel allocations per core/operation
296 * @chan - associated channel for this entry
297 */
298 struct dma_chan_tbl_ent {
299 struct dma_chan *chan;
300 };
301
302 /**
303 * channel_table - percpu lookup table for memory-to-memory offload providers
304 */
305 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
306
307 static int __init dma_channel_table_init(void)
308 {
309 enum dma_transaction_type cap;
310 int err = 0;
311
312 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
313
314 /* 'interrupt', 'private', and 'slave' are channel capabilities,
315 * but are not associated with an operation so they do not need
316 * an entry in the channel_table
317 */
318 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
319 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
320 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
321
322 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
323 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
324 if (!channel_table[cap]) {
325 err = -ENOMEM;
326 break;
327 }
328 }
329
330 if (err) {
331 pr_err("initialization failure\n");
332 for_each_dma_cap_mask(cap, dma_cap_mask_all)
333 if (channel_table[cap])
334 free_percpu(channel_table[cap]);
335 }
336
337 return err;
338 }
339 arch_initcall(dma_channel_table_init);
340
341 /**
342 * dma_find_channel - find a channel to carry out the operation
343 * @tx_type: transaction type
344 */
345 struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
346 {
347 return this_cpu_read(channel_table[tx_type]->chan);
348 }
349 EXPORT_SYMBOL(dma_find_channel);
350
351 /*
352 * net_dma_find_channel - find a channel for net_dma
353 * net_dma has alignment requirements
354 */
355 struct dma_chan *net_dma_find_channel(void)
356 {
357 struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
358 if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
359 return NULL;
360
361 return chan;
362 }
363 EXPORT_SYMBOL(net_dma_find_channel);
364
365 /**
366 * dma_issue_pending_all - flush all pending operations across all channels
367 */
368 void dma_issue_pending_all(void)
369 {
370 struct dma_device *device;
371 struct dma_chan *chan;
372
373 rcu_read_lock();
374 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
375 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
376 continue;
377 list_for_each_entry(chan, &device->channels, device_node)
378 if (chan->client_count)
379 device->device_issue_pending(chan);
380 }
381 rcu_read_unlock();
382 }
383 EXPORT_SYMBOL(dma_issue_pending_all);
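/*
 * Editorial sketch: a single channel is normally flushed directly with
 * dma_async_issue_pending(); this helper exists for subsystems that batch
 * submissions across many public channels and flush from one central point.
 *
 *	dma_async_issue_pending(chan);   // flush one channel
 *	dma_issue_pending_all();         // or flush every public channel
 */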
384
385 /**
386 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
387 */
388 static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
389 {
390 int node = dev_to_node(chan->device->dev);
391 return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
392 }
393
394 /**
395 * min_chan - returns the channel with min count and in the same numa-node as the cpu
396 * @cap: capability to match
397 * @cpu: cpu index which the channel should be close to
398 *
399 * If some channels are close to the given cpu, the one with the lowest
400 * reference count is returned. Otherwise, cpu is ignored and only the
401 * reference count is taken into account.
402 * Must be called under dma_list_mutex.
403 */
404 static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
405 {
406 struct dma_device *device;
407 struct dma_chan *chan;
408 struct dma_chan *min = NULL;
409 struct dma_chan *localmin = NULL;
410
411 list_for_each_entry(device, &dma_device_list, global_node) {
412 if (!dma_has_cap(cap, device->cap_mask) ||
413 dma_has_cap(DMA_PRIVATE, device->cap_mask))
414 continue;
415 list_for_each_entry(chan, &device->channels, device_node) {
416 if (!chan->client_count)
417 continue;
418 if (!min || chan->table_count < min->table_count)
419 min = chan;
420
421 if (dma_chan_is_local(chan, cpu))
422 if (!localmin ||
423 chan->table_count < localmin->table_count)
424 localmin = chan;
425 }
426 }
427
428 chan = localmin ? localmin : min;
429
430 if (chan)
431 chan->table_count++;
432
433 return chan;
434 }
435
436 /**
437 * dma_channel_rebalance - redistribute the available channels
438 *
439 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
440 * operation type) in the SMP case, and operation isolation (avoid
441 * multi-tasking channels) in the non-SMP case. Must be called under
442 * dma_list_mutex.
443 */
444 static void dma_channel_rebalance(void)
445 {
446 struct dma_chan *chan;
447 struct dma_device *device;
448 int cpu;
449 int cap;
450
451 /* undo the last distribution */
452 for_each_dma_cap_mask(cap, dma_cap_mask_all)
453 for_each_possible_cpu(cpu)
454 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
455
456 list_for_each_entry(device, &dma_device_list, global_node) {
457 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
458 continue;
459 list_for_each_entry(chan, &device->channels, device_node)
460 chan->table_count = 0;
461 }
462
463 /* don't populate the channel_table if no clients are available */
464 if (!dmaengine_ref_count)
465 return;
466
467 /* redistribute available channels */
468 for_each_dma_cap_mask(cap, dma_cap_mask_all)
469 for_each_online_cpu(cpu) {
470 chan = min_chan(cap, cpu);
471 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
472 }
473 }
474
475 static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
476 struct dma_device *dev,
477 dma_filter_fn fn, void *fn_param)
478 {
479 struct dma_chan *chan;
480
481 if (!__dma_device_satisfies_mask(dev, mask)) {
482 pr_debug("%s: wrong capabilities\n", __func__);
483 return NULL;
484 }
485 /* devices with multiple channels need special handling as we need to
486 * ensure that all channels are either private or public.
487 */
488 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
489 list_for_each_entry(chan, &dev->channels, device_node) {
490 /* some channels are already publicly allocated */
491 if (chan->client_count)
492 return NULL;
493 }
494
495 list_for_each_entry(chan, &dev->channels, device_node) {
496 if (chan->client_count) {
497 pr_debug("%s: %s busy\n",
498 __func__, dma_chan_name(chan));
499 continue;
500 }
501 if (fn && !fn(chan, fn_param)) {
502 pr_debug("%s: %s filter said false\n",
503 __func__, dma_chan_name(chan));
504 continue;
505 }
506 return chan;
507 }
508
509 return NULL;
510 }
511
512 /**
513 * dma_get_slave_channel - try to get specific channel exclusively
514 * @chan: target channel
515 */
516 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
517 {
518 int err = -EBUSY;
519
520 /* lock against __dma_request_channel */
521 mutex_lock(&dma_list_mutex);
522
523 if (chan->client_count == 0) {
524 err = dma_chan_get(chan);
525 if (err)
526 pr_debug("%s: failed to get %s: (%d)\n",
527 __func__, dma_chan_name(chan), err);
528 } else
529 chan = NULL;
530
531 mutex_unlock(&dma_list_mutex);
532
533
534 return chan;
535 }
536 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
537
538 /**
539 * __dma_request_channel - try to allocate an exclusive channel
540 * @mask: capabilities that the channel must satisfy
541 * @fn: optional callback to disposition available channels
542 * @fn_param: opaque parameter to pass to dma_filter_fn
543 *
544 * Returns pointer to appropriate DMA channel on success or NULL.
545 */
546 struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
547 dma_filter_fn fn, void *fn_param)
548 {
549 struct dma_device *device, *_d;
550 struct dma_chan *chan = NULL;
551 int err;
552
553 /* Find a channel */
554 mutex_lock(&dma_list_mutex);
555 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
556 chan = private_candidate(mask, device, fn, fn_param);
557 if (chan) {
558 /* Found a suitable channel, try to grab, prep, and
559 * return it. We first set DMA_PRIVATE to disable
560 * balance_ref_count as this channel will not be
561 * published in the general-purpose allocator
562 */
563 dma_cap_set(DMA_PRIVATE, device->cap_mask);
564 device->privatecnt++;
565 err = dma_chan_get(chan);
566
567 if (err == -ENODEV) {
568 pr_debug("%s: %s module removed\n",
569 __func__, dma_chan_name(chan));
570 list_del_rcu(&device->global_node);
571 } else if (err)
572 pr_debug("%s: failed to get %s: (%d)\n",
573 __func__, dma_chan_name(chan), err);
574 else
575 break;
576 if (--device->privatecnt == 0)
577 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
578 chan = NULL;
579 }
580 }
581 mutex_unlock(&dma_list_mutex);
582
583 pr_debug("%s: %s (%s)\n",
584 __func__,
585 chan ? "success" : "fail",
586 chan ? dma_chan_name(chan) : NULL);
587
588 return chan;
589 }
590 EXPORT_SYMBOL_GPL(__dma_request_channel);
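/*
 * Editorial sketch: the usual way into __dma_request_channel() is the
 * dma_request_channel() wrapper from <linux/dmaengine.h>. The filter function
 * and its parameter below are hypothetical.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter_fn, my_filter_param);
 *	if (!chan)
 *		return -ENODEV;
 *	...
 *	dma_release_channel(chan);
 */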
591
592 /**
593 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
594 * @dev: pointer to client device structure
595 * @name: slave channel name
596 *
597 * Returns pointer to appropriate DMA channel on success or an error pointer.
598 */
599 struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
600 const char *name)
601 {
602 struct dma_chan *chan;
603
604 /* If device-tree is present get slave info from here */
605 if (dev->of_node)
606 return of_dma_request_slave_channel(dev->of_node, name);
607
608 /* If device was enumerated by ACPI get slave info from here */
609 if (ACPI_HANDLE(dev)) {
610 chan = acpi_dma_request_slave_chan_by_name(dev, name);
611 if (chan)
612 return chan;
613 }
614
615 return ERR_PTR(-ENODEV);
616 }
617 EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
618
619 /**
620 * dma_request_slave_channel - try to allocate an exclusive slave channel
621 * @dev: pointer to client device structure
622 * @name: slave channel name
623 *
624 * Returns pointer to appropriate DMA channel on success or NULL.
625 */
626 struct dma_chan *dma_request_slave_channel(struct device *dev,
627 const char *name)
628 {
629 struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
630 if (IS_ERR(ch))
631 return NULL;
632 return ch;
633 }
634 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
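/*
 * Editorial sketch (hypothetical slave driver probe): request a channel by
 * the name used in the DT/ACPI binding and fall back if none is described.
 *
 *	struct dma_chan *rx = dma_request_slave_channel(&pdev->dev, "rx");
 *
 *	if (!rx)
 *		dev_info(&pdev->dev, "no DMA channel found, using PIO\n");
 */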
635
636 void dma_release_channel(struct dma_chan *chan)
637 {
638 mutex_lock(&dma_list_mutex);
639 WARN_ONCE(chan->client_count != 1,
640 "chan reference count %d != 1\n", chan->client_count);
641 dma_chan_put(chan);
642 /* drop PRIVATE cap enabled by __dma_request_channel() */
643 if (--chan->device->privatecnt == 0)
644 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
645 mutex_unlock(&dma_list_mutex);
646 }
647 EXPORT_SYMBOL_GPL(dma_release_channel);
648
649 /**
650 * dmaengine_get - register interest in dma_channels
651 */
652 void dmaengine_get(void)
653 {
654 struct dma_device *device, *_d;
655 struct dma_chan *chan;
656 int err;
657
658 mutex_lock(&dma_list_mutex);
659 dmaengine_ref_count++;
660
661 /* try to grab channels */
662 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
663 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
664 continue;
665 list_for_each_entry(chan, &device->channels, device_node) {
666 err = dma_chan_get(chan);
667 if (err == -ENODEV) {
668 /* module removed before we could use it */
669 list_del_rcu(&device->global_node);
670 break;
671 } else if (err)
672 pr_debug("%s: failed to get %s: (%d)\n",
673 __func__, dma_chan_name(chan), err);
674 }
675 }
676
677 /* if this is the first reference and there were channels
678 * waiting, we need to rebalance to get those channels
679 * incorporated into the channel table
680 */
681 if (dmaengine_ref_count == 1)
682 dma_channel_rebalance();
683 mutex_unlock(&dma_list_mutex);
684 }
685 EXPORT_SYMBOL(dmaengine_get);
686
687 /**
688 * dmaengine_put - let dma drivers be removed when ref_count == 0
689 */
690 void dmaengine_put(void)
691 {
692 struct dma_device *device;
693 struct dma_chan *chan;
694
695 mutex_lock(&dma_list_mutex);
696 dmaengine_ref_count--;
697 BUG_ON(dmaengine_ref_count < 0);
698 /* drop channel references */
699 list_for_each_entry(device, &dma_device_list, global_node) {
700 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
701 continue;
702 list_for_each_entry(chan, &device->channels, device_node)
703 dma_chan_put(chan);
704 }
705 mutex_unlock(&dma_list_mutex);
706 }
707 EXPORT_SYMBOL(dmaengine_put);
708
709 static bool device_has_all_tx_types(struct dma_device *device)
710 {
711 /* A device that satisfies this test has channels that will never cause
712 * an async_tx channel switch event as all possible operation types can
713 * be handled.
714 */
715 #ifdef CONFIG_ASYNC_TX_DMA
716 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
717 return false;
718 #endif
719
720 #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
721 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
722 return false;
723 #endif
724
725 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
726 if (!dma_has_cap(DMA_XOR, device->cap_mask))
727 return false;
728
729 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
730 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
731 return false;
732 #endif
733 #endif
734
735 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
736 if (!dma_has_cap(DMA_PQ, device->cap_mask))
737 return false;
738
739 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
740 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
741 return false;
742 #endif
743 #endif
744
745 return true;
746 }
747
748 static int get_dma_id(struct dma_device *device)
749 {
750 int rc;
751
752 mutex_lock(&dma_list_mutex);
753
754 rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
755 if (rc >= 0)
756 device->dev_id = rc;
757
758 mutex_unlock(&dma_list_mutex);
759 return rc < 0 ? rc : 0;
760 }
761
762 /**
763 * dma_async_device_register - registers DMA devices found
764 * @device: &dma_device
765 */
766 int dma_async_device_register(struct dma_device *device)
767 {
768 int chancnt = 0, rc;
769 struct dma_chan* chan;
770 atomic_t *idr_ref;
771
772 if (!device)
773 return -ENODEV;
774
775 /* validate device routines */
776 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
777 !device->device_prep_dma_memcpy);
778 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
779 !device->device_prep_dma_xor);
780 BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
781 !device->device_prep_dma_xor_val);
782 BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
783 !device->device_prep_dma_pq);
784 BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
785 !device->device_prep_dma_pq_val);
786 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
787 !device->device_prep_dma_interrupt);
788 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
789 !device->device_prep_dma_sg);
790 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
791 !device->device_prep_dma_cyclic);
792 BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
793 !device->device_control);
794 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
795 !device->device_prep_interleaved_dma);
796
797 BUG_ON(!device->device_alloc_chan_resources);
798 BUG_ON(!device->device_free_chan_resources);
799 BUG_ON(!device->device_tx_status);
800 BUG_ON(!device->device_issue_pending);
801 BUG_ON(!device->dev);
802
803 /* note: this only matters in the
804 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
805 */
806 if (device_has_all_tx_types(device))
807 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
808
809 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
810 if (!idr_ref)
811 return -ENOMEM;
812 rc = get_dma_id(device);
813 if (rc != 0) {
814 kfree(idr_ref);
815 return rc;
816 }
817
818 atomic_set(idr_ref, 0);
819
820 /* represent channels in sysfs. Probably want devs too */
821 list_for_each_entry(chan, &device->channels, device_node) {
822 rc = -ENOMEM;
823 chan->local = alloc_percpu(typeof(*chan->local));
824 if (chan->local == NULL)
825 goto err_out;
826 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
827 if (chan->dev == NULL) {
828 free_percpu(chan->local);
829 chan->local = NULL;
830 goto err_out;
831 }
832
833 chan->chan_id = chancnt++;
834 chan->dev->device.class = &dma_devclass;
835 chan->dev->device.parent = device->dev;
836 chan->dev->chan = chan;
837 chan->dev->idr_ref = idr_ref;
838 chan->dev->dev_id = device->dev_id;
839 atomic_inc(idr_ref);
840 dev_set_name(&chan->dev->device, "dma%dchan%d",
841 device->dev_id, chan->chan_id);
842
843 rc = device_register(&chan->dev->device);
844 if (rc) {
845 free_percpu(chan->local);
846 chan->local = NULL;
847 kfree(chan->dev);
848 atomic_dec(idr_ref);
849 goto err_out;
850 }
851 chan->client_count = 0;
852 }
853 device->chancnt = chancnt;
854
855 mutex_lock(&dma_list_mutex);
856 /* take references on public channels */
857 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
858 list_for_each_entry(chan, &device->channels, device_node) {
859 /* if clients are already waiting for channels we need
860 * to take references on their behalf
861 */
862 if (dma_chan_get(chan) == -ENODEV) {
863 /* note we can only get here for the first
864 * channel as the remaining channels are
865 * guaranteed to get a reference
866 */
867 rc = -ENODEV;
868 mutex_unlock(&dma_list_mutex);
869 goto err_out;
870 }
871 }
872 list_add_tail_rcu(&device->global_node, &dma_device_list);
873 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
874 device->privatecnt++; /* Always private */
875 dma_channel_rebalance();
876 mutex_unlock(&dma_list_mutex);
877
878 return 0;
879
880 err_out:
881 /* if we never registered a channel just release the idr */
882 if (atomic_read(idr_ref) == 0) {
883 mutex_lock(&dma_list_mutex);
884 idr_remove(&dma_idr, device->dev_id);
885 mutex_unlock(&dma_list_mutex);
886 kfree(idr_ref);
887 return rc;
888 }
889
890 list_for_each_entry(chan, &device->channels, device_node) {
891 if (chan->local == NULL)
892 continue;
893 mutex_lock(&dma_list_mutex);
894 chan->dev->chan = NULL;
895 mutex_unlock(&dma_list_mutex);
896 device_unregister(&chan->dev->device);
897 free_percpu(chan->local);
898 }
899 return rc;
900 }
901 EXPORT_SYMBOL(dma_async_device_register);
902
903 /**
904 * dma_async_device_unregister - unregister a DMA device
905 * @device: &dma_device
906 *
907 * This routine is called by dma driver exit routines; dmaengine holds module
908 * references to prevent it being called while channels are in use.
909 */
910 void dma_async_device_unregister(struct dma_device *device)
911 {
912 struct dma_chan *chan;
913
914 mutex_lock(&dma_list_mutex);
915 list_del_rcu(&device->global_node);
916 dma_channel_rebalance();
917 mutex_unlock(&dma_list_mutex);
918
919 list_for_each_entry(chan, &device->channels, device_node) {
920 WARN_ONCE(chan->client_count,
921 "%s called while %d clients hold a reference\n",
922 __func__, chan->client_count);
923 mutex_lock(&dma_list_mutex);
924 chan->dev->chan = NULL;
925 mutex_unlock(&dma_list_mutex);
926 device_unregister(&chan->dev->device);
927 free_percpu(chan->local);
928 }
929 }
930 EXPORT_SYMBOL(dma_async_device_unregister);
931
932 struct dmaengine_unmap_pool {
933 struct kmem_cache *cache;
934 const char *name;
935 mempool_t *pool;
936 size_t size;
937 };
938
939 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
940 static struct dmaengine_unmap_pool unmap_pool[] = {
941 __UNMAP_POOL(2),
942 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
943 __UNMAP_POOL(16),
944 __UNMAP_POOL(128),
945 __UNMAP_POOL(256),
946 #endif
947 };
948
949 static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
950 {
951 int order = get_count_order(nr);
952
953 switch (order) {
954 case 0 ... 1:
955 return &unmap_pool[0];
956 case 2 ... 4:
957 return &unmap_pool[1];
958 case 5 ... 7:
959 return &unmap_pool[2];
960 case 8:
961 return &unmap_pool[3];
962 default:
963 BUG();
964 return NULL;
965 }
966 }
967
968 static void dmaengine_unmap(struct kref *kref)
969 {
970 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
971 struct device *dev = unmap->dev;
972 int cnt, i;
973
974 cnt = unmap->to_cnt;
975 for (i = 0; i < cnt; i++)
976 dma_unmap_page(dev, unmap->addr[i], unmap->len,
977 DMA_TO_DEVICE);
978 cnt += unmap->from_cnt;
979 for (; i < cnt; i++)
980 dma_unmap_page(dev, unmap->addr[i], unmap->len,
981 DMA_FROM_DEVICE);
982 cnt += unmap->bidi_cnt;
983 for (; i < cnt; i++) {
984 if (unmap->addr[i] == 0)
985 continue;
986 dma_unmap_page(dev, unmap->addr[i], unmap->len,
987 DMA_BIDIRECTIONAL);
988 }
989 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
990 }
991
992 void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
993 {
994 if (unmap)
995 kref_put(&unmap->kref, dmaengine_unmap);
996 }
997 EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
998
999 static void dmaengine_destroy_unmap_pool(void)
1000 {
1001 int i;
1002
1003 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1004 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1005
1006 if (p->pool)
1007 mempool_destroy(p->pool);
1008 p->pool = NULL;
1009 if (p->cache)
1010 kmem_cache_destroy(p->cache);
1011 p->cache = NULL;
1012 }
1013 }
1014
1015 static int __init dmaengine_init_unmap_pool(void)
1016 {
1017 int i;
1018
1019 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1020 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1021 size_t size;
1022
1023 size = sizeof(struct dmaengine_unmap_data) +
1024 sizeof(dma_addr_t) * p->size;
1025
1026 p->cache = kmem_cache_create(p->name, size, 0,
1027 SLAB_HWCACHE_ALIGN, NULL);
1028 if (!p->cache)
1029 break;
1030 p->pool = mempool_create_slab_pool(1, p->cache);
1031 if (!p->pool)
1032 break;
1033 }
1034
1035 if (i == ARRAY_SIZE(unmap_pool))
1036 return 0;
1037
1038 dmaengine_destroy_unmap_pool();
1039 return -ENOMEM;
1040 }
1041
1042 struct dmaengine_unmap_data *
1043 dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1044 {
1045 struct dmaengine_unmap_data *unmap;
1046
1047 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1048 if (!unmap)
1049 return NULL;
1050
1051 memset(unmap, 0, sizeof(*unmap));
1052 kref_init(&unmap->kref);
1053 unmap->dev = dev;
1054
1055 return unmap;
1056 }
1057 EXPORT_SYMBOL(dmaengine_get_unmap_data);
1058
1059 /**
1060 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
1061 * @chan: DMA channel to offload copy to
1062 * @dest_pg: destination page
1063 * @dest_off: offset in page to copy to
1064 * @src_pg: source page
1065 * @src_off: offset in page to copy from
1066 * @len: length
1067 *
1068 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
1069 * address according to the DMA mapping API rules for streaming mappings.
1070 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
1071 * (kernel memory or locked user space pages).
1072 */
1073 dma_cookie_t
1074 dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1075 unsigned int dest_off, struct page *src_pg, unsigned int src_off,
1076 size_t len)
1077 {
1078 struct dma_device *dev = chan->device;
1079 struct dma_async_tx_descriptor *tx;
1080 struct dmaengine_unmap_data *unmap;
1081 dma_cookie_t cookie;
1082 unsigned long flags;
1083
1084 unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
1085 if (!unmap)
1086 return -ENOMEM;
1087
1088 unmap->to_cnt = 1;
1089 unmap->from_cnt = 1;
1090 unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
1091 DMA_TO_DEVICE);
1092 unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
1093 DMA_FROM_DEVICE);
1094 unmap->len = len;
1095 flags = DMA_CTRL_ACK;
1096 tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
1097 len, flags);
1098
1099 if (!tx) {
1100 dmaengine_unmap_put(unmap);
1101 return -ENOMEM;
1102 }
1103
1104 dma_set_unmap(tx, unmap);
1105 cookie = tx->tx_submit(tx);
1106 dmaengine_unmap_put(unmap);
1107
1108 preempt_disable();
1109 __this_cpu_add(chan->local->bytes_transferred, len);
1110 __this_cpu_inc(chan->local->memcpy_count);
1111 preempt_enable();
1112
1113 return cookie;
1114 }
1115 EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
1116
1117 /**
1118 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
1119 * @chan: DMA channel to offload copy to
1120 * @dest: destination address (virtual)
1121 * @src: source address (virtual)
1122 * @len: length
1123 *
1124 * Both @dest and @src must be mappable to a bus address according to the
1125 * DMA mapping API rules for streaming mappings.
1126 * Both @dest and @src must stay memory resident (kernel memory or locked
1127 * user space pages).
1128 */
1129 dma_cookie_t
1130 dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
1131 void *src, size_t len)
1132 {
1133 return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
1134 (unsigned long) dest & ~PAGE_MASK,
1135 virt_to_page(src),
1136 (unsigned long) src & ~PAGE_MASK, len);
1137 }
1138 EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
1139
1140 /**
1141 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
1142 * @chan: DMA channel to offload copy to
1143 * @page: destination page
1144 * @offset: offset in page to copy to
1145 * @kdata: source address (virtual)
1146 * @len: length
1147 *
1148 * Both @page/@offset and @kdata must be mappable to a bus address according
1149 * to the DMA mapping API rules for streaming mappings.
1150 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
1151 * locked user space pages)
1152 */
1153 dma_cookie_t
1154 dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
1155 unsigned int offset, void *kdata, size_t len)
1156 {
1157 return dma_async_memcpy_pg_to_pg(chan, page, offset,
1158 virt_to_page(kdata),
1159 (unsigned long) kdata & ~PAGE_MASK, len);
1160 }
1161 EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
1162
1163 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1164 struct dma_chan *chan)
1165 {
1166 tx->chan = chan;
1167 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1168 spin_lock_init(&tx->lock);
1169 #endif
1170 }
1171 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1172
1173 /* dma_wait_for_async_tx - spin wait for a transaction to complete
1174 * @tx: in-flight transaction to wait on
1175 */
1176 enum dma_status
1177 dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1178 {
1179 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1180
1181 if (!tx)
1182 return DMA_COMPLETE;
1183
1184 while (tx->cookie == -EBUSY) {
1185 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1186 pr_err("%s timeout waiting for descriptor submission\n",
1187 __func__);
1188 return DMA_ERROR;
1189 }
1190 cpu_relax();
1191 }
1192 return dma_sync_wait(tx->chan, tx->cookie);
1193 }
1194 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
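/*
 * Editorial sketch (async_tx client, names assumed): descriptors returned by
 * the async_* helpers can be waited on synchronously with this routine.
 *
 *	tx = async_memcpy(dest_page, src_page, 0, 0, len, &submit);
 *	if (dma_wait_for_async_tx(tx) != DMA_COMPLETE)
 *		pr_err("offloaded copy failed\n");
 */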
1195
1196 /* dma_run_dependencies - helper routine for dma drivers to process
1197 * (start) dependent operations on their target channel
1198 * @tx: transaction with dependencies
1199 */
1200 void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1201 {
1202 struct dma_async_tx_descriptor *dep = txd_next(tx);
1203 struct dma_async_tx_descriptor *dep_next;
1204 struct dma_chan *chan;
1205
1206 if (!dep)
1207 return;
1208
1209 /* we'll submit tx->next now, so clear the link */
1210 txd_clear_next(tx);
1211 chan = dep->chan;
1212
1213 /* keep submitting up until a channel switch is detected;
1214 * in that case we will be called again as a result of
1215 * processing the interrupt from async_tx_channel_switch
1216 */
1217 for (; dep; dep = dep_next) {
1218 txd_lock(dep);
1219 txd_clear_parent(dep);
1220 dep_next = txd_next(dep);
1221 if (dep_next && dep_next->chan == chan)
1222 txd_clear_next(dep); /* ->next will be submitted */
1223 else
1224 dep_next = NULL; /* submit current dep and terminate */
1225 txd_unlock(dep);
1226
1227 dep->tx_submit(dep);
1228 }
1229
1230 chan->device->device_issue_pending(chan);
1231 }
1232 EXPORT_SYMBOL_GPL(dma_run_dependencies);
1233
1234 static int __init dma_bus_init(void)
1235 {
1236 int err = dmaengine_init_unmap_pool();
1237
1238 if (err)
1239 return err;
1240 return class_register(&dma_devclass);
1241 }
1242 arch_initcall(dma_bus_init);
1243
1244