// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * the mutex dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details
 */
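
/*
 * Illustrative sketch, not part of this file: the two client flows described
 * above. my_filter and my_param are placeholder names.
 *
 *	// shared, opportunistic mem-to-mem offload
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		;	// build and submit descriptors on chan
 *	dmaengine_put();
 *
 *	// exclusive channel via capability mask and optional filter
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_param);
 *	if (chan)
 *		dma_release_channel(chan);
 */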
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */
/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		ida_free(&dma_ida, chan_dev->dev_id);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */
#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}

/**
 * dma_sync_wait - spin wait for a transaction to complete
 * @chan: channel
 * @cookie: transaction identifier to check status of
 */
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);
	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);
	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (IS_ERR(chan)) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMAdevice must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}

	if (!chancnt) {
		dev_err(device->dev, "%s: device has no channels!\n", __func__);
		rc = -ENODEV;
		goto err_out;
	}

	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		ida_free(&dma_ida, device->dev_id);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);