/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated, a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it's just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
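
/*
 * Example (illustrative sketch, not part of this file): the public-channel
 * client flow described above.  Error handling and surrounding context are
 * omitted/hypothetical.
 *
 *	dmaengine_get();			// register interest
 *	chan = dma_find_channel(DMA_MEMCPY);	// fast per-cpu lookup
 *	if (chan)
 *		...submit operations on chan...
 *	dmaengine_put();			// release interest when done
 */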

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};
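
/*
 * With the class and attributes above, each registered channel appears in
 * sysfs as, e.g., /sys/class/dma/dma0chan0/ with read-only memcpy_count,
 * bytes_transferred, and in_use files (the dmaNchanM name is set by
 * dev_set_name() in dma_async_device_register() below).
 */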

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
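
/*
 * Example (sketch only; tx and chan come from a hypothetical earlier
 * prep/submit sequence): synchronously waiting on a single cookie.
 *
 *	cookie = tx->tx_submit(tx);
 *	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		handle_error();	// hypothetical: failed, or timed out after 5s
 */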

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
subsys_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
{
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		ret = chan;
		break;
	}

	return ret;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	bool ack;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device);
		if (!chan)
			continue;

		if (fn)
			ack = fn(chan, fn_param);
		else
			ack = true;

		if (ack) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
			else
				break;
		} else
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
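
/*
 * Example (sketch only): requesting an exclusive channel via the
 * dma_request_channel() wrapper macro with a filter callback.  my_filter
 * and my_dev are hypothetical.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;	// match a specific device
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		...exclusive use of chan...
 *		dma_release_channel(chan);
 *	}
 */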

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			continue;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
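
/*
 * Example (illustrative sketch of a minimal registration from a driver
 * probe routine; the foo_* names are hypothetical):
 *
 *	dma_cap_set(DMA_MEMCPY, foo->common.cap_mask);
 *	INIT_LIST_HEAD(&foo->common.channels);
 *	// ...add the driver's struct dma_chan entries to ->channels...
 *	foo->common.device_alloc_chan_resources = foo_alloc_chan_resources;
 *	foo->common.device_free_chan_resources = foo_free_chan_resources;
 *	foo->common.device_prep_dma_memcpy = foo_prep_memcpy;
 *	foo->common.device_is_tx_complete = foo_is_tx_complete;
 *	foo->common.device_issue_pending = foo_issue_pending;
 *	foo->common.dev = &pdev->dev;
 *	err = dma_async_device_register(&foo->common);
 */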

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
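
/*
 * Example (sketch only; chan, dst, src, and len come from a hypothetical
 * caller that already holds a dmaengine reference):
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (cookie < 0)
 *		return cookie;	// no descriptor was available
 *	// the copy runs asynchronously; poll with dma_sync_wait() if needed
 */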

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
			  unsigned int dest_off, struct page *src_pg,
			  unsigned int src_off, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted).  Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status;
	struct dma_async_tx_descriptor *iter;
	struct dma_async_tx_descriptor *parent;

	if (!tx)
		return DMA_SUCCESS;

	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
		  " %s\n", __func__, dma_chan_name(tx->chan));

	/* poll through the dependency chain, return when tx is complete */
	do {
		iter = tx;

		/* find the root of the unsubmitted dependency chain */
		do {
			parent = iter->parent;
			if (!parent)
				break;
			else
				iter = parent;
		} while (parent);

		/* there is a small window for ->parent == NULL and
		 * ->cookie == -EBUSY
		 */
		while (iter->cookie == -EBUSY)
			cpu_relax();

		status = dma_sync_wait(iter->chan, iter->cookie);
	} while (status == DMA_IN_PROGRESS || (iter != tx));

	return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
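
/*
 * Sketch of the intended call site (hypothetical driver code): a driver's
 * descriptor cleanup path starts whatever operations were chained behind
 * each completed descriptor.
 *
 *	// in a driver's cleanup/completion handler, after desc completes:
 *	dma_run_dependencies(&desc->txd);	// desc and ->txd are hypothetical
 */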

static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);