/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
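
/*
 * Example: an opportunistic (non-exclusive) client, per the model above.
 * A minimal sketch only; dmaengine_get(), dma_find_channel(),
 * dma_async_memcpy_buf_to_buf() and dmaengine_put() below are the real
 * entry points, everything else is hypothetical caller context.
 *
 *        dmaengine_get();                        // take the client reference
 *        chan = dma_find_channel(DMA_MEMCPY);    // per-cpu lookup, may be NULL
 *        if (chan)
 *                cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *        else
 *                memcpy(dest, src, len);         // no offload engine present
 *        ...
 *        dmaengine_put();                        // drop the client reference
 */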

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;
static struct idr dma_idr;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->memcpy_count;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan;
        unsigned long count = 0;
        int i;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan) {
                for_each_possible_cpu(i)
                        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
                err = sprintf(buf, "%lu\n", count);
        } else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        chan = dev_to_dma_chan(dev);
        if (chan)
                err = sprintf(buf, "%d\n", chan->client_count);
        else
                err = -ENODEV;
        mutex_unlock(&dma_list_mutex);

        return err;
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
        struct dma_chan_dev *chan_dev;

        chan_dev = container_of(dev, typeof(*chan_dev), device);
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                mutex_lock(&dma_list_mutex);
                idr_remove(&dma_idr, chan_dev->dev_id);
                mutex_unlock(&dma_list_mutex);
                kfree(chan_dev->idr_ref);
        }
        kfree(chan_dev);
}

static struct class dma_devclass = {
        .name           = "dma",
        .dev_attrs      = dma_attrs,
        .dev_release    = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);
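
/*
 * Usage sketch (hypothetical caller): dma_sync_wait() issues any pending
 * descriptors and then polls the channel, giving up after five seconds.
 *
 *        cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *        status = dma_sync_wait(chan, cookie);
 *        if (status != DMA_SUCCESS)
 *                pr_err("copy failed or timed out\n");
 */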

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        struct dma_chan *chan;
        int cpu;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        cpu = get_cpu();
        chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
        put_cpu();

        return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
                                          dma_filter_fn fn, void *fn_param)
{
        struct dma_chan *chan;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                if (fn && !fn(chan, fn_param)) {
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dma_chan_name(chan));
                        continue;
                }
                return chan;
        }

        return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device, fn, fn_param);
                if (chan) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it.  We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dma_chan_name(chan));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                        else
                                break;
                        chan = NULL;
                }
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dma_chan_name(chan) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
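
/*
 * Example: allocating an exclusive channel through a filter function, a
 * minimal sketch of the dma_request_channel() wrapper around
 * __dma_request_channel(). my_filter() and my_dev are hypothetical; the
 * filter criterion here simply pins the channel to a specific device.
 *
 *        static bool my_filter(struct dma_chan *chan, void *param)
 *        {
 *                return chan->device->dev == param;
 *        }
 *
 *        dma_cap_mask_t mask;
 *
 *        dma_cap_zero(mask);
 *        dma_cap_set(DMA_MEMCPY, mask);
 *        chan = dma_request_channel(mask, my_filter, my_dev);
 *        if (chan) {
 *                ... channel is exclusively ours ...
 *                dma_release_channel(chan);
 *        }
 */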

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dma_chan_name(chan), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        int chancnt = 0, rc;
        struct dma_chan *chan;
        atomic_t *idr_ref;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
                !device->device_prep_dma_zero_sum);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
        if (!idr_ref)
                return -ENOMEM;
        atomic_set(idr_ref, 0);
 idr_retry:
        if (!idr_pre_get(&dma_idr, GFP_KERNEL))
                return -ENOMEM;
        mutex_lock(&dma_list_mutex);
        rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
        mutex_unlock(&dma_list_mutex);
        if (rc == -EAGAIN)
                goto idr_retry;
        else if (rc != 0)
                return rc;

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        continue;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                atomic_inc(idr_ref);
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                mutex_lock(&dma_list_mutex);
                chan->dev->chan = NULL;
                mutex_unlock(&dma_list_mutex);
                device_unregister(&chan->dev->device);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
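
/*
 * Example (illustrative): the cookie returned above is negative when the
 * driver cannot allocate a descriptor, so a caller can fall back to a
 * plain CPU copy:
 *
 *        cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *        if (cookie < 0)
 *                memcpy(dest, src, len);  // engine could not queue the copy
 *        else
 *                status = dma_sync_wait(chan, cookie);
 */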

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
                                  struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted).  Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        enum dma_status status;
        struct dma_async_tx_descriptor *iter;
        struct dma_async_tx_descriptor *parent;

        if (!tx)
                return DMA_SUCCESS;

        WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
                  " %s\n", __func__, dma_chan_name(tx->chan));

        /* poll through the dependency chain, return when tx is complete */
        do {
                iter = tx;

                /* find the root of the unsubmitted dependency chain */
                do {
                        parent = iter->parent;
                        if (!parent)
                                break;
                        else
                                iter = parent;
                } while (parent);

                /* there is a small window for ->parent == NULL and
                 * ->cookie == -EBUSY
                 */
                while (iter->cookie == -EBUSY)
                        cpu_relax();

                status = dma_sync_wait(iter->chan, iter->cookie);
        } while (status == DMA_IN_PROGRESS || (iter != tx));

        return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
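
/*
 * Usage sketch: an async_tx client waiting on an in-flight descriptor.
 * The async_memcpy() call is from the async_tx API (crypto/async_tx);
 * its exact signature is quoted here for illustration only.
 *
 *        tx = async_memcpy(dest_pg, src_pg, 0, 0, len, 0, NULL, NULL, NULL);
 *        if (dma_wait_for_async_tx(tx) == DMA_ERROR)
 *                pr_err("%s: copy timed out\n", __func__);
 */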

/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
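
/*
 * Driver-side sketch (hypothetical my_desc/my_cleanup names): a driver's
 * descriptor-cleanup path hands each completed descriptor to
 * dma_run_dependencies() so dependent operations get submitted:
 *
 *        static void my_cleanup_descriptor(struct my_desc *desc)
 *        {
 *                ...
 *                dma_run_dependencies(&desc->txd);
 *                ...  // then free or recycle desc
 *        }
 */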

static int __init dma_bus_init(void)
{
        idr_init(&dma_idr);
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
