/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated, a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
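/*
 * A minimal sketch of the public (non-exclusive) usage described above.
 * This is illustrative client code only, not part of this file; error
 * handling is elided and DMA_MEMCPY is just an example capability:
 *
 *	struct dma_chan *chan;
 *
 *	dmaengine_get();                        // register interest in channels
 *	chan = dma_find_channel(DMA_MEMCPY);    // per-cpu lookup, may be NULL
 *	if (chan)
 *		dma_async_issue_pending(chan);  // flush work queued on the channel
 *	dmaengine_put();                        // drop the reference when done
 *
 * Exclusive users call dma_request_channel()/dma_request_chan() instead and
 * release the channel with dma_release_channel().
 */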
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */
/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);
static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);
static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);
static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}
static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}
/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}
/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}
/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources)
		chan->device->device_free_chan_resources(chan);

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;
/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};
/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->residue_granularity = device->residue_granularity;

	/*
	 * Some devices implement only pause (e.g. to get residuum) but no
	 * resume. However cmd_pause is advertised as pause AND resume.
	 */
	caps->cmd_pause = !!(device->device_pause && device->device_resume);
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
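/*
 * Illustrative only: a hypothetical client might use dma_get_slave_caps() to
 * check what a slave channel can do before configuring it, e.g.:
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_MEM_TO_DEV)) &&
 *	    caps.cmd_terminate)
 *		;	// channel supports mem-to-dev transfers and terminate_all
 */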
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator.
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
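/*
 * Illustrative only: a typical exclusive request pairs a capability mask with
 * an optional filter callback. The filter function and its "my_dev" parameter
 * below are hypothetical:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// e.g. match a specific device
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		...
 *		dma_release_channel(chan);
 *	}
 */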
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);
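/*
 * Illustrative only: a slave/cyclic user typically requests its channel by
 * name (as wired up in DT, ACPI or a dma_slave_map table) and must handle
 * deferred probing. "tx" is a hypothetical channel name:
 *
 *	struct dma_chan *chan = dma_request_chan(&pdev->dev, "tx");
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *	...
 *	dma_release_channel(chan);
 */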
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL);
	if (!chan)
		chan = ERR_PTR(-ENODEV);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}
static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;
	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);
	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);
	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;
	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;
err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
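/*
 * Illustrative only: a provider driver is expected to fill in its dma_device
 * before registering. The callbacks below (my_prep_memcpy etc.) are
 * hypothetical driver routines:
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->device_prep_dma_memcpy = my_prep_memcpy;
 *	dd->device_tx_status = my_tx_status;
 *	dd->device_issue_pending = my_issue_pending;
 *	dd->dev = &pdev->dev;
 *	// channels must already be on dd->channels at this point
 *	rc = dma_async_device_register(dd);
 */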
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
#endif
};
static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}
static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}
static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
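/*
 * Illustrative only: a driver that maps pages for a transfer can track them
 * with an unmap descriptor so they are unmapped when the last reference is
 * dropped. The page/offset/len variables below are hypothetical:
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (unmap) {
 *		unmap->to_cnt = 1;
 *		unmap->from_cnt = 1;
 *		unmap->len = len;
 *		unmap->addr[0] = dma_map_page(dev, src_pg, src_off, len, DMA_TO_DEVICE);
 *		unmap->addr[1] = dma_map_page(dev, dst_pg, dst_off, len, DMA_FROM_DEVICE);
 *		...
 *		dmaengine_unmap_put(unmap);	// drops the last reference, unmaps both
 *	}
 */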
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);