/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is only set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses. See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered. A kref_get is done for each device registered. When the
 * device is released, the corresponding kref_put is done in the release
 * method. Every time one of the device's channels is allocated to a client,
 * a kref_get occurs. When the channel is freed, the corresponding kref_put
 * happens. The device's release function does a completion, so
 * unregister_device does a remove event, device_unregister, and a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t. A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered. A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client. The release function does a kref_put on the device.
 * -ChrisL, DanW
 */
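
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * mem-to-mem client pins the subsystem, looks up a channel per operation
 * type, and drops its reference when it no longer needs offload.  The
 * calls below are the real entry points defined in this file; the
 * surrounding context (dest, src, len) is hypothetical.
 *
 *      dmaengine_get();                        // take a client reference
 *      ...
 *      chan = dma_find_channel(DMA_MEMCPY);    // per-cpu channel lookup
 *      if (chan)
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      ...
 *      dmaengine_put();                        // allow driver modules to unload
 */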

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->memcpy_count;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->bytes_transferred;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);

        return sprintf(buf, "%d\n", chan->client_count);
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
        struct dma_chan *chan = to_dma_chan(dev);

        kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
        .name        = "dma",
        .dev_attrs   = dma_attrs,
        .dev_release = dma_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
        struct dma_device *device;
        struct dma_chan *chan;
        enum dma_state_client ack;

        /* Find a channel */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                if (!dma_device_satisfies_mask(device, client->cap_mask))
                        continue;

                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        ack = client->event_callback(client, chan,
                                                     DMA_RESOURCE_AVAILABLE);

                        /* we are done once this client rejects
                         * an available resource
                         */
                        if (ack == DMA_NAK)
                                return;
                }
        }
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);
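
/*
 * Sketch (illustrative, not formal API documentation): synchronously
 * waiting on a previously submitted cookie.  dma_sync_wait() busy-polls
 * with a five second timeout, so it only suits slow paths that may spin,
 * such as raid recovery.  The fallback label is hypothetical:
 *
 *      cookie = dma_async_memcpy_pg_to_pg(chan, dst, 0, src, 0, len);
 *      if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *              goto fallback;          // synchronous cpu copy instead
 */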

/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
        struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);

        kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
        struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);

        kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
        call_rcu(&chan->rcu, dma_chan_free_rcu);
}

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
subsys_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        struct dma_chan *chan;
        int cpu;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        cpu = get_cpu();
        chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
        put_cpu();

        return chan;
}
EXPORT_SYMBOL(dma_find_channel);
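
/*
 * Sketch of the fast-path contract: the caller must hold a dmaengine
 * reference (see dmaengine_get() below) and must tolerate a NULL return
 * by falling back to a cpu copy.  dst/src/len are hypothetical:
 *
 *      struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *
 *      if (chan)
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *      else
 *              memcpy(dst, src, len);  // synchronous fallback
 */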

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
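
/*
 * Sketch: a client such as NET DMA batches several copy submissions and
 * then kicks every public channel once per pass, rather than calling
 * dma_async_issue_pending() per channel (the loop body is hypothetical):
 *
 *      while (more_work())
 *              dma_async_memcpy_buf_to_pg(...);
 *      dma_issue_pending_all();
 */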

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask,
                                          struct dma_device *dev)
{
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dev_name(&chan->dev));
                        continue;
                }
                ret = chan;
                break;
        }

        return ret;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask,
                                       dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        enum dma_state_client ack;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device);
                if (!chan)
                        continue;

                if (fn)
                        ack = fn(chan, fn_param);
                else
                        ack = DMA_ACK;

                if (ack == DMA_ACK) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it.  We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dev_name(&chan->dev));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dev_name(&chan->dev), err);
                        else
                                break;
                } else if (ack == DMA_DUP) {
                        pr_debug("%s: %s filter said DMA_DUP\n",
                                 __func__, dev_name(&chan->dev));
                } else if (ack == DMA_NAK) {
                        pr_debug("%s: %s filter said DMA_NAK\n",
                                 __func__, dev_name(&chan->dev));
                        break;
                } else
                        WARN_ONCE(1, "filter_fn: unknown response?\n");
                chan = NULL;
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dev_name(&chan->dev) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
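
/*
 * Sketch of the private-channel API above.  The filter function and its
 * match token are hypothetical; DMA_ACK/DMA_NAK are the responses a
 * dma_filter_fn returns in this version of the interface, and
 * dma_request_channel() is the dmaengine.h wrapper around
 * __dma_request_channel():
 *
 *      static enum dma_state_client filter(struct dma_chan *chan, void *param)
 *      {
 *              return chan->device->dev == param ? DMA_ACK : DMA_NAK;
 *      }
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, filter, my_match_data);
 *      if (chan) {
 *              ...exclusive use of chan...
 *              dma_release_channel(chan);
 *      }
 */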

/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
        struct dma_client *client;

        mutex_lock(&dma_list_mutex);

        list_for_each_entry(client, &dma_client_list, global_node)
                dma_client_chan_alloc(client);

        mutex_unlock(&dma_list_mutex);
}

/**
 * dmaengine_get - register interest in dma channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dev_name(&chan->dev), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

/**
 * dma_async_client_chan_request - send all available channels that satisfy
 *      the capability mask to the client
 * @client: requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
        mutex_lock(&dma_list_mutex);
        dma_client_chan_alloc(client);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        static int id;
        int chancnt = 0, rc;
        struct dma_chan *chan;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
                !device->device_prep_dma_zero_sum);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        init_completion(&device->done);
        kref_init(&device->refcount);

        mutex_lock(&dma_list_mutex);
        device->dev_id = id++;
        mutex_unlock(&dma_list_mutex);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;

                chan->chan_id = chancnt++;
                chan->dev.class = &dma_devclass;
                chan->dev.parent = device->dev;
                dev_set_name(&chan->dev, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev);
                if (rc) {
                        chancnt--;
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                /* One for the channel, one for the class device */
                kref_get(&device->refcount);
                kref_get(&device->refcount);
                kref_init(&chan->refcount);
                chan->client_count = 0;
                chan->slow_ref = 0;
                INIT_RCU_HEAD(&chan->rcu);
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        dma_clients_notify_available();

        return 0;

err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                kref_put(&device->refcount, dma_async_device_cleanup);
                device_unregister(&chan->dev);
                chancnt--;
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
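
/*
 * Driver-side sketch (hypothetical driver): before registering, a driver
 * fills in its capability mask and every operation the BUG_ON()s above
 * check for, hangs its channels off device->channels, and points ->dev at
 * its struct device:
 *
 *      dma_cap_set(DMA_MEMCPY, mydev->common.cap_mask);
 *      mydev->common.device_alloc_chan_resources = my_alloc_chan_resources;
 *      mydev->common.device_free_chan_resources = my_free_chan_resources;
 *      mydev->common.device_prep_dma_memcpy = my_prep_memcpy;
 *      mydev->common.device_is_tx_complete = my_is_tx_complete;
 *      mydev->common.device_issue_pending = my_issue_pending;
 *      mydev->common.dev = &pdev->dev;
 *      list_add_tail(&mychan->common.device_node, &mydev->common.channels);
 *      err = dma_async_device_register(&mydev->common);
 */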

/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
        struct dma_device *device;

        device = container_of(kref, struct dma_device, refcount);
        complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                device_unregister(&chan->dev);
                dma_chan_release(chan);
        }

        kref_put(&device->refcount, dma_async_device_cleanup);
        wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
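
/*
 * Sketch of the completion handshake for the cookie returned above
 * (variable names are illustrative): a negative cookie means the prep
 * failed, and completion is polled via dma_async_is_tx_complete() after
 * kicking the channel:
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      if (cookie < 0)
 *              return -ENOMEM;         // real code would fall back to memcpy
 *      dma_async_issue_pending(chan);
 *      while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *             DMA_IN_PROGRESS)
 *              cpu_relax();
 */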

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted).  Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        enum dma_status status;
        struct dma_async_tx_descriptor *iter;
        struct dma_async_tx_descriptor *parent;

        if (!tx)
                return DMA_SUCCESS;

        WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
                  " %s\n", __func__, dev_name(&tx->chan->dev));

        /* poll through the dependency chain, return when tx is complete */
        do {
                iter = tx;

                /* find the root of the unsubmitted dependency chain */
                do {
                        parent = iter->parent;
                        if (!parent)
                                break;
                        else
                                iter = parent;
                } while (parent);

                /* there is a small window for ->parent == NULL and
                 * ->cookie == -EBUSY
                 */
                while (iter->cookie == -EBUSY)
                        cpu_relax();

                status = dma_sync_wait(iter->chan, iter->cookie);
        } while (status == DMA_IN_PROGRESS || (iter != tx));

        return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
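
/*
 * Sketch: a driver's descriptor cleanup path (hypothetical structure and
 * list names) invokes this once per completed descriptor so that queued
 * dependents are submitted on their target channel:
 *
 *      list_for_each_entry_safe(desc, tmp, &mychan->completed, node) {
 *              dma_run_dependencies(&desc->txd);
 *              ...recycle desc...
 *      }
 */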

static int __init dma_bus_init(void)
{
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);