// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it's just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
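/*
 * Usage sketch (illustrative, not part of this file): the opportunistic
 * client pattern described above. The buffers, "len", and error handling
 * are hypothetical and elided.
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *		if (tx) {
 *			cookie = dmaengine_submit(tx);
 *			dma_async_issue_pending(chan);
 *			dma_sync_wait(chan, cookie);
 *		}
 *	}
 *	dmaengine_put();
 */
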
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		ida_free(&dma_ida, chan_dev->dev_id);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as
 *	the cpu
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same numa-node as
 *	the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.
 *
 * Must be called under dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}

static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR_OR_NULL(chan))
		return chan ? chan : ERR_PTR(-EPROBE_DEFER);

found:
	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev:	pointer to client device structure
 * @name:	slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan,
					       int chan_id)
{
	int rc = 0;
	int chancnt = device->chancnt;
	atomic_t *idr_ref;
	struct dma_chan *tchan;

	tchan = list_first_entry_or_null(&device->channels,
					 struct dma_chan, device_node);
	if (!tchan)
		return -ENODEV;

	/* reuse the idr_ref of an already registered sibling channel,
	 * otherwise allocate a fresh one for this device
	 */
	if (tchan->dev) {
		idr_ref = tchan->dev->idr_ref;
	} else {
		idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
		if (!idr_ref)
			return -ENOMEM;
		atomic_set(idr_ref, 0);
	}

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		goto err_out;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		free_percpu(chan->local);
		chan->local = NULL;
		goto err_out;
	}

	/*
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are static enumerating.
	 */
	chan->chan_id = chan_id < 0 ? chancnt : chan_id;
	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->idr_ref = idr_ref;
	chan->dev->dev_id = device->dev_id;
	atomic_inc(idr_ref);
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);

	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out;
	chan->client_count = 0;
	device->chancnt = chan->chan_id + 1;

	return 0;

err_out:
	free_percpu(chan->local);
	kfree(chan->dev);
	if (atomic_dec_return(idr_ref) == 0)
		kfree(idr_ref);
	return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan, -1);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	list_del(&chan->device_node);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
	int rc, i = 0;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan, i++);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: &dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}

int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->attach)
		return -ENOTSUPP;

	return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);