/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated, a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */

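/*
 * Example: a minimal usage sketch (assumed caller-side code, not taken from
 * this file) of the public-channel API described above.  dmaengine_get()
 * registers interest, dma_find_channel() returns a per-cpu channel (which
 * may be NULL), and dmaengine_put() drops the interest again:
 *
 *      dmaengine_get();
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan) {
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *              dma_async_issue_pending(chan);
 *              dma_sync_wait(chan, cookie);
 *      }
 *      dmaengine_put();
 */
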
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->memcpy_count;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->bytes_transferred;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);

        return sprintf(buf, "%d\n", chan->client_count);
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))

static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
subsys_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        struct dma_chan *chan;
        int cpu;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        cpu = get_cpu();
        chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
        put_cpu();

        return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied. Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;
                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
{
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }

        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dev_name(&chan->dev));
                        continue;
                }
                ret = chan;
                break;
        }

        return ret;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to accept or reject candidate channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        bool ack;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device);
                if (!chan)
                        continue;

                if (fn)
                        ack = fn(chan, fn_param);
                else
                        ack = true;

                if (ack) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it. We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dev_name(&chan->dev));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dev_name(&chan->dev), err);
                        else
                                break;
                } else
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dev_name(&chan->dev));
                chan = NULL;
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dev_name(&chan->dev) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dev_name(&chan->dev), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        static int id;
        int chancnt = 0, rc;
        struct dma_chan *chan;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
                !device->device_prep_dma_zero_sum);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        mutex_lock(&dma_list_mutex);
        device->dev_id = id++;
        mutex_unlock(&dma_list_mutex);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;

                chan->chan_id = chancnt++;
                chan->dev.class = &dma_devclass;
                chan->dev.parent = device->dev;
                dev_set_name(&chan->dev, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                device_unregister(&chan->dev);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                device_unregister(&chan->dev);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                        void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                        unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted). Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        enum dma_status status;
        struct dma_async_tx_descriptor *iter;
        struct dma_async_tx_descriptor *parent;

        if (!tx)
                return DMA_SUCCESS;

        WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
                  " %s\n", __func__, dev_name(&tx->chan->dev));

        /* poll through the dependency chain, return when tx is complete */
        do {
                iter = tx;

                /* find the root of the unsubmitted dependency chain */
                while (iter->cookie == -EBUSY) {
                        parent = iter->parent;
                        if (!parent)
                                break;
                        else
                                iter = parent;
                }

                /* there is a small window for ->parent == NULL and
                 * ->cookie == -EBUSY
                 */
                while (iter->cookie == -EBUSY)
                        cpu_relax();

                status = dma_sync_wait(iter->chan, iter->cookie);
        } while (status == DMA_IN_PROGRESS || (iter != tx));

        return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);