// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Due to the fact we are accelerating what is already a relatively fast
 * operation, the code goes to great lengths to avoid additional overhead,
 * such as locking.
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated, a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it's just set up by the driver.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>
#include <linux/numa.h>

#include "dmaengine.h"
static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDA(dma_ida);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
/* --- debugfs implementation --- */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static struct dentry *rootdir;

static void dmaengine_debug_register(struct dma_device *dma_dev)
{
	dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
						   rootdir);
	if (IS_ERR(dma_dev->dbg_dev_root))
		dma_dev->dbg_dev_root = NULL;
}

static void dmaengine_debug_unregister(struct dma_device *dma_dev)
{
	debugfs_remove_recursive(dma_dev->dbg_dev_root);
	dma_dev->dbg_dev_root = NULL;
}
static void dmaengine_dbg_summary_show(struct seq_file *s,
				       struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count) {
			seq_printf(s, " %-13s| %s", dma_chan_name(chan),
				   chan->dbg_client_name ?: "in-use");

			if (chan->router)
				seq_printf(s, " (via router: %s)\n",
					   dev_name(chan->router->dev));
			else
				seq_puts(s, "\n");
		}
	}
}
static int dmaengine_summary_show(struct seq_file *s, void *data)
{
	struct dma_device *dma_dev = NULL;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(dma_dev, &dma_device_list, global_node) {
		seq_printf(s, "dma%d (%s): number of channels: %u\n",
			   dma_dev->dev_id, dev_name(dma_dev->dev),
			   dma_dev->chancnt);

		if (dma_dev->dbg_summary_show)
			dma_dev->dbg_summary_show(s, dma_dev);
		else
			dmaengine_dbg_summary_show(s, dma_dev);

		if (!list_is_last(&dma_dev->global_node, &dma_device_list))
			seq_puts(s, "\n");
	}
	mutex_unlock(&dma_list_mutex);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
static void __init dmaengine_debugfs_init(void)
{
	rootdir = debugfs_create_dir("dmaengine", NULL);

	/* /sys/kernel/debug/dmaengine/summary */
	debugfs_create_file("summary", 0444, rootdir, NULL,
			    &dmaengine_summary_fops);
}
#else
static inline void dmaengine_debugfs_init(void) { }
static inline int dmaengine_debug_register(struct dma_device *dma_dev)
{
	return 0;
}

static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
#endif	/* DEBUG_FS */
/* --- sysfs implementation --- */

#define DMA_SLAVE_NAME	"slave"

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}
static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);
static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int err;
	int i;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);
static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);
static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);
static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};
/* --- client and device registration --- */

/* enable iteration over all operation types */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/* percpu lookup table for memory-to-memory offload providers */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);
/**
 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
 * @chan: DMA channel to test
 * @cpu: CPU index which the channel should be close to
 *
 * Returns true if the channel is in the same NUMA-node as the CPU.
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == NUMA_NO_NODE ||
		cpumask_test_cpu(cpu, cpumask_of_node(node));
}
/**
 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
 * @cap: capability to match
 * @cpu: CPU index which the channel should be close to
 *
 * If some channels are close to the given CPU, the one with the lowest
 * reference count is returned. Otherwise, CPU is ignored and only the
 * reference count is taken into account.
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}
/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.
 *
 * Must be called under dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
static int dma_device_satisfies_mask(struct dma_device *device,
				     const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->owner;
}
/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}
static void dma_device_release(struct kref *ref)
{
	struct dma_device *device = container_of(ref, struct dma_device, ref);

	list_del_rcu(&device->global_node);
	dma_channel_rebalance();

	if (device->device_release)
		device->device_release(device);
}
static void dma_device_put(struct dma_device *device)
{
	lockdep_assert_held(&dma_list_mutex);
	kref_put(&device->ref, dma_device_release);
}
/**
 * dma_chan_get - try to grab a DMA channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	ret = kref_get_unless_zero(&chan->device->ref);
	if (!ret) {
		ret = -ENODEV;
		goto module_put_out;
	}

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	dma_device_put(chan->device);
module_put_out:
	module_put(owner);
	return ret;
}
/**
 * dma_chan_put - drop a reference to a DMA channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex.
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}

	dma_device_put(chan->device);
	module_put(dma_chan_to_owner(chan));
}
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
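
/*
 * Illustrative sketch, not part of the original file: how a client would
 * pair dmaengine_submit() with dma_sync_wait() for a blocking transfer.
 * The function below is hypothetical; "desc" is assumed to be a descriptor
 * already prepared on "chan".
 */
static __maybe_unused enum dma_status
dmaengine_example_blocking_submit(struct dma_chan *chan,
				  struct dma_async_tx_descriptor *desc)
{
	dma_cookie_t cookie = dmaengine_submit(desc);

	if (dma_submit_error(cookie))
		return DMA_ERROR;
	/* issues pending descriptors and polls with a 5s timeout */
	return dma_sync_wait(chan, cookie);
}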
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);
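
/*
 * Illustrative sketch, not part of the original file: the opportunistic
 * usage pattern described in the header comment. A client registers
 * interest with dmaengine_get(), looks up a per-CPU channel with
 * dma_find_channel(), and drops its interest with dmaengine_put(). The
 * function name is hypothetical.
 */
static __maybe_unused void dmaengine_example_opportunistic_copy(void)
{
	struct dma_chan *chan;

	dmaengine_get();	/* takes refs on all public channels */
	chan = dma_find_channel(DMA_MEMCPY);	/* may be NULL */
	if (chan) {
		/* ... prep and submit descriptors on chan ... */
		dma_async_issue_pending(chan);
	}
	dmaengine_put();	/* drops the channel refs again */
}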
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether it reports it uses the generic slave
	 * capabilities, if not, that means it doesn't support any
	 * kind of slave capabilities reporting.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
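
/*
 * Illustrative sketch, not part of the original file: probing slave
 * capabilities before relying on pause/resume support. "chan" is assumed
 * to come from dma_request_chan().
 */
static __maybe_unused bool dmaengine_example_can_pause(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps) < 0)
		return false;	/* no slave capability reporting */

	return caps.cmd_pause;	/* true iff device_pause is implemented */
}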
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}
static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
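
/*
 * Illustrative sketch, not part of the original file: requesting an
 * exclusive DMA_MEMCPY channel through the dma_request_channel() wrapper
 * around __dma_request_channel(). The filter function and its parameter
 * are hypothetical.
 */
static __maybe_unused bool dmaengine_example_filter(struct dma_chan *chan,
						    void *param)
{
	/* e.g. only accept channels of a specific controller */
	return chan->device->dev == param;
}

static __maybe_unused struct dma_chan *
dmaengine_example_request_memcpy(struct device *ctrl_dev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* returns NULL if no free channel satisfies mask and filter */
	return dma_request_channel(mask, dmaengine_example_filter, ctrl_dev);
}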
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR_OR_NULL(chan))
		return chan ? chan : ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
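
/*
 * Illustrative sketch, not part of the original file: the usual request
 * pattern in a client driver's probe path. The channel name "tx" is a
 * placeholder for whatever DT, ACPI, or a dma_slave_map provides.
 */
static __maybe_unused int dmaengine_example_probe(struct device *dev)
{
	struct dma_chan *chan = dma_request_chan(dev, "tx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	/* ... configure and use the channel ... */

	dma_release_channel(chan);
	return 0;
}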
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		chan->slave = NULL;
	}

	if (chan->name) {
		kfree(chan->name);
		chan->name = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}
static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}
static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan)
{
	int rc;

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		return -ENOMEM;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		free_percpu(chan->local);
		chan->local = NULL;
		return -ENOMEM;
	}

	/*
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are static enumerating.
	 */
	mutex_lock(&device->chan_mutex);
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	mutex_unlock(&device->chan_mutex);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		rc = chan->chan_id;
		goto err_free;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

 err_out_ida:
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
 err_free:
	kfree(chan->dev);
	free_percpu(chan->local);
	chan->local = NULL;
	return rc;
}
int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	list_del(&chan->device_node);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
/**
 * dma_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
	int rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}


	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}


	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	mutex_init(&device->chan_mutex);
	ida_init(&device->chan_ida);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dmaengine_debug_register(device);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
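
/*
 * Illustrative sketch, not part of the original file: the minimal
 * provider-side flow. A real driver fills in its channel list and the
 * mandatory callbacks (device_tx_status, device_issue_pending, plus the
 * prep ops matching its cap_mask) before registering.
 */
static __maybe_unused int dmaengine_example_provider(struct dma_device *dd)
{
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	/* fails with -EIO if a claimed capability has no prep op */
	return dma_async_device_register(dd);
}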
/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines, dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);
static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	#endif
	default:
		BUG();
		return NULL;
	}
}
static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}
dmaengine_init_unmap_pool(void)
1459 for (i
= 0; i
< ARRAY_SIZE(unmap_pool
); i
++) {
1460 struct dmaengine_unmap_pool
*p
= &unmap_pool
[i
];
1463 size
= sizeof(struct dmaengine_unmap_data
) +
1464 sizeof(dma_addr_t
) * p
->size
;
1466 p
->cache
= kmem_cache_create(p
->name
, size
, 0,
1467 SLAB_HWCACHE_ALIGN
, NULL
);
1470 p
->pool
= mempool_create_slab_pool(1, p
->cache
);
1475 if (i
== ARRAY_SIZE(unmap_pool
))
1478 dmaengine_destroy_unmap_pool();
struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
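
/*
 * Illustrative sketch, not part of the original file: using the unmap pool
 * for one to-device and one from-device mapping. Error handling for
 * dma_map_page() is elided; a real caller must check each address with
 * dma_mapping_error().
 */
static __maybe_unused struct dmaengine_unmap_data *
dmaengine_example_map(struct device *dev, struct page *src, struct page *dst,
		      size_t len)
{
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	return unmap;	/* drop with dmaengine_unmap_put() when done */
}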
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}
*desc
,
1527 void *data
, size_t len
)
1534 ret
= desc_check_and_set_metadata_mode(desc
, DESC_METADATA_CLIENT
);
1538 if (!desc
->metadata_ops
|| !desc
->metadata_ops
->attach
)
1541 return desc
->metadata_ops
->attach(desc
, data
, len
);
1543 EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata
);
void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/**
 * dma_run_dependencies - process dependent operations on the target channel
 * @tx: transaction with dependencies
 *
 * Helper routine for DMA drivers to process (start) dependent operations
 * on their target channel.
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;

	err = class_register(&dma_devclass);
	if (!err)
		dmaengine_debugfs_init();

	return err;
}
arch_initcall(dma_bus_init);