/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel().  Once a channel is allocated, a reference is
 * taken against its corresponding driver to prevent removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
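
/*
 * Example: a minimal sketch of a client of the public channel pool (the
 * client code below is hypothetical, not part of this file).  A client
 * typically opts in once at init time, issues copies from then on, and
 * opts out at exit time:
 *
 *	dmaengine_get();			// at client init
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	...
 *	dmaengine_put();			// at client exit
 */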

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
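
/*
 * Example: a hypothetical caller pairing a copy submission with
 * dma_sync_wait() (a sketch; real clients should prefer a completion
 * callback over busy-waiting):
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0)
 *		return cookie;
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		return -EIO;
 */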

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
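
/*
 * Example: a sketch of an exclusive-channel request through the
 * dma_request_channel() wrapper; demo_filter and my_dev are hypothetical:
 *
 *	static bool demo_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, demo_filter, my_dev);
 *	if (chan) {
 *		...use the channel exclusively...
 *		dma_release_channel(chan);
 *	}
 */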

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 */
struct dma_chan *dma_request_slave_channel(struct device *dev, const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return NULL;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
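
/*
 * Example: a slave driver typically requests its channel by the name used
 * in its device-tree "dma-names" property or ACPI binding ("tx" here is a
 * hypothetical name):
 *
 *	chan = dma_request_slave_channel(&pdev->dev, "tx");
 *	if (!chan)
 *		return -ENODEV;
 */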

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - register a DMA device and its channels
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
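
/*
 * Example: the driver-side registration flow in sketch form.  The fields
 * set below are the mandatory ones validated above; the my_* callbacks are
 * hypothetical:
 *
 *	dma_cap_set(DMA_MEMCPY, dev->cap_mask);
 *	dev->device_alloc_chan_resources = my_alloc_chan_resources;
 *	dev->device_free_chan_resources = my_free_chan_resources;
 *	dev->device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	dev->device_tx_status = my_tx_status;
 *	dev->device_issue_pending = my_issue_pending;
 *	dev->dev = &pdev->dev;
 *	INIT_LIST_HEAD(&dev->channels);
 *	...add each struct dma_chan to dev->channels...
 *	rc = dma_async_device_register(dev);
 */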

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
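
/*
 * Example: checking completion of the copy above asynchronously (a sketch;
 * dst, src and len are hypothetical):
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	dma_async_issue_pending(chan);
 *	...do other work...
 *	if (dma_async_is_tx_complete(chan, cookie, NULL, NULL) == DMA_SUCCESS)
 *		...the copy is done...
 */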

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
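
/*
 * Example: driver-side descriptor setup in sketch form (struct my_desc and
 * my_tx_submit are hypothetical driver names):
 *
 *	struct my_desc {
 *		struct dma_async_tx_descriptor txd;
 *		...driver-private state...
 *	};
 *
 *	dma_async_tx_descriptor_init(&desc->txd, chan);
 *	desc->txd.tx_submit = my_tx_submit;
 */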

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);