/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it needs an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details.
 */
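
/*
 * Example (sketch): a client of the opportunistic API described above.
 * The client holds one subsystem reference for its lifetime and looks up a
 * public channel per operation, falling back to a cpu copy when none is
 * available; dest, src, and len are placeholders here:
 *
 *      dmaengine_get();
 *      ...
 *      chan = dma_find_channel(DMA_MEMCPY);
 *      if (chan)
 *              cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      else
 *              memcpy(dest, src, len);
 *      ...
 *      dmaengine_put();
 */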

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->memcpy_count;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
                                      char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);
        unsigned long count = 0;
        int i;

        for_each_possible_cpu(i)
                count += per_cpu_ptr(chan->local, i)->bytes_transferred;

        return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct dma_chan *chan = to_dma_chan(dev);

        return sprintf(buf, "%d\n", chan->client_count);
}

static struct device_attribute dma_attrs[] = {
        __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
        __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
        __ATTR(in_use, S_IRUGO, show_in_use, NULL),
        __ATTR_NULL
};

static struct class dma_devclass = {
        .name = "dma",
        .dev_attrs = dma_attrs,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
        __dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
        dma_cap_mask_t has;

        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
        return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
        struct module *owner = dma_chan_to_owner(chan);

        while (chan->client_count < dmaengine_ref_count) {
                __module_get(owner);
                chan->client_count++;
        }
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
        int err = -ENODEV;
        struct module *owner = dma_chan_to_owner(chan);

        if (chan->client_count) {
                __module_get(owner);
                err = 0;
        } else if (try_module_get(owner))
                err = 0;

        if (err == 0)
                chan->client_count++;

        /* allocate upon first client reference */
        if (chan->client_count == 1 && err == 0) {
                int desc_cnt = chan->device->device_alloc_chan_resources(chan);

                if (desc_cnt < 0) {
                        err = desc_cnt;
                        chan->client_count = 0;
                        module_put(owner);
                } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                        balance_ref_count(chan);
        }

        return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
        if (!chan->client_count)
                return; /* this channel failed alloc_chan_resources */
        chan->client_count--;
        module_put(dma_chan_to_owner(chan));
        if (chan->client_count == 0)
                chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
        enum dma_status status;
        unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

        dma_async_issue_pending(chan);
        do {
                status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
                if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
                        printk(KERN_ERR "dma_sync_wait_timeout!\n");
                        return DMA_ERROR;
                }
        } while (status == DMA_IN_PROGRESS);

        return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
        struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
        enum dma_transaction_type cap;
        int err = 0;

        bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

        /* 'interrupt', 'private', and 'slave' are channel capabilities,
         * but are not associated with an operation so they do not need
         * an entry in the channel_table
         */
        clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
        clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
        clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

        for_each_dma_cap_mask(cap, dma_cap_mask_all) {
                channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
                if (!channel_table[cap]) {
                        err = -ENOMEM;
                        break;
                }
        }

        if (err) {
                pr_err("dmaengine: initialization failure\n");
                for_each_dma_cap_mask(cap, dma_cap_mask_all)
                        if (channel_table[cap])
                                free_percpu(channel_table[cap]);
        }

        return err;
}
subsys_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
        struct dma_chan *chan;
        int cpu;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        cpu = get_cpu();
        chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
        put_cpu();

        return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        WARN_ONCE(dmaengine_ref_count == 0,
                  "client called %s without a reference", __func__);

        rcu_read_lock();
        list_for_each_entry_rcu(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        if (chan->client_count)
                                device->device_issue_pending(chan);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
        struct dma_device *device;
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;
        struct dma_chan *min = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (!dma_has_cap(cap, device->cap_mask) ||
                    dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        if (!chan->client_count)
                                continue;
                        if (!min)
                                min = chan;
                        else if (chan->table_count < min->table_count)
                                min = chan;

                        if (n-- == 0) {
                                ret = chan;
                                break; /* done */
                        }
                }
                if (ret)
                        break; /* done */
        }

        if (!ret)
                ret = min;

        if (ret)
                ret->table_count++;

        return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
        struct dma_chan *chan;
        struct dma_device *device;
        int cpu;
        int cap;
        int n;

        /* undo the last distribution */
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_possible_cpu(cpu)
                        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        chan->table_count = 0;
        }

        /* don't populate the channel_table if no clients are available */
        if (!dmaengine_ref_count)
                return;

        /* redistribute available channels */
        n = 0;
        for_each_dma_cap_mask(cap, dma_cap_mask_all)
                for_each_online_cpu(cpu) {
                        if (num_possible_cpus() > 1)
                                chan = nth_chan(cap, n++);
                        else
                                chan = nth_chan(cap, -1);

                        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
                }
}

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
{
        struct dma_chan *chan;
        struct dma_chan *ret = NULL;

        if (!__dma_device_satisfies_mask(dev, mask)) {
                pr_debug("%s: wrong capabilities\n", __func__);
                return NULL;
        }
        /* devices with multiple channels need special handling as we need to
         * ensure that all channels are either private or public.
         */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        /* some channels are already publicly allocated */
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        pr_debug("%s: %s busy\n",
                                 __func__, dev_name(&chan->dev));
                        continue;
                }
                ret = chan;
                break;
        }

        return ret;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan = NULL;
        bool ack;
        int err;

        /* Find a channel */
        mutex_lock(&dma_list_mutex);
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                chan = private_candidate(mask, device);
                if (!chan)
                        continue;

                if (fn)
                        ack = fn(chan, fn_param);
                else
                        ack = true;

                if (ack) {
                        /* Found a suitable channel, try to grab, prep, and
                         * return it.  We first set DMA_PRIVATE to disable
                         * balance_ref_count as this channel will not be
                         * published in the general-purpose allocator
                         */
                        dma_cap_set(DMA_PRIVATE, device->cap_mask);
                        err = dma_chan_get(chan);

                        if (err == -ENODEV) {
                                pr_debug("%s: %s module removed\n", __func__,
                                         dev_name(&chan->dev));
                                list_del_rcu(&device->global_node);
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dev_name(&chan->dev), err);
                        else
                                break;
                } else
                        pr_debug("%s: %s filter said false\n",
                                 __func__, dev_name(&chan->dev));
                chan = NULL;
        }
        mutex_unlock(&dma_list_mutex);

        pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
                 chan ? dev_name(&chan->dev) : NULL);

        return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
        mutex_lock(&dma_list_mutex);
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        dma_chan_put(chan);
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
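
/*
 * Example (sketch): exclusive allocation through the dma_request_channel()
 * wrapper around __dma_request_channel().  The filter callback and the
 * my_dev pointer it compares against are placeholders for this example:
 *
 *      static bool my_filter(struct dma_chan *chan, void *param)
 *      {
 *              return chan->device->dev == param;
 *      }
 *
 *      dma_cap_mask_t mask;
 *
 *      dma_cap_zero(mask);
 *      dma_cap_set(DMA_MEMCPY, mask);
 *      chan = dma_request_channel(mask, my_filter, my_dev);
 *      if (chan) {
 *              ...
 *              dma_release_channel(chan);
 *      }
 */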

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
        struct dma_device *device, *_d;
        struct dma_chan *chan;
        int err;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count++;

        /* try to grab channels */
        list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node) {
                        err = dma_chan_get(chan);
                        if (err == -ENODEV) {
                                /* module removed before we could use it */
                                list_del_rcu(&device->global_node);
                                break;
                        } else if (err)
                                pr_err("dmaengine: failed to get %s: (%d)\n",
                                       dev_name(&chan->dev), err);
                }
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */
        if (dmaengine_ref_count == 1)
                dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
        struct dma_device *device;
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        dmaengine_ref_count--;
        BUG_ON(dmaengine_ref_count < 0);
        /* drop channel references */
        list_for_each_entry(device, &dma_device_list, global_node) {
                if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                        continue;
                list_for_each_entry(chan, &device->channels, device_node)
                        dma_chan_put(chan);
        }
        mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
        static int id;
        int chancnt = 0, rc;
        struct dma_chan *chan;

        if (!device)
                return -ENODEV;

        /* validate device routines */
        BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
                !device->device_prep_dma_memcpy);
        BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
                !device->device_prep_dma_xor);
        BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
                !device->device_prep_dma_zero_sum);
        BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
                !device->device_prep_dma_memset);
        BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_terminate_all);

        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
        BUG_ON(!device->device_is_tx_complete);
        BUG_ON(!device->device_issue_pending);
        BUG_ON(!device->dev);

        mutex_lock(&dma_list_mutex);
        device->dev_id = id++;
        mutex_unlock(&dma_list_mutex);

        /* represent channels in sysfs. Probably want devs too */
        list_for_each_entry(chan, &device->channels, device_node) {
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        continue;

                chan->chan_id = chancnt++;
                chan->dev.class = &dma_devclass;
                chan->dev.parent = device->dev;
                dev_set_name(&chan->dev, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }
                chan->client_count = 0;
        }
        device->chancnt = chancnt;

        mutex_lock(&dma_list_mutex);
        /* take references on public channels */
        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                mutex_unlock(&dma_list_mutex);
                                goto err_out;
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        return 0;

err_out:
        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                device_unregister(&chan->dev);
                free_percpu(chan->local);
        }
        return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
        struct dma_chan *chan;

        mutex_lock(&dma_list_mutex);
        list_del_rcu(&device->global_node);
        dma_channel_rebalance();
        mutex_unlock(&dma_list_mutex);

        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          "%s called while %d clients hold a reference\n",
                          __func__, chan->client_count);
                device_unregister(&chan->dev);
        }
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
                            void *src, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
        dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
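
/*
 * Example (sketch): polling for completion of the copy above with
 * dma_sync_wait().  A negative cookie means the submit itself failed
 * (-ENOMEM above); chan, dest, src, and len are placeholders:
 *
 *      cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *      if (cookie >= 0)
 *              status = dma_sync_wait(chan, cookie);
 */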

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
                           unsigned int offset, void *kdata, size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
        unsigned int dest_off, struct page *src_pg, unsigned int src_off,
        size_t len)
{
        struct dma_device *dev = chan->device;
        struct dma_async_tx_descriptor *tx;
        dma_addr_t dma_dest, dma_src;
        dma_cookie_t cookie;
        int cpu;

        dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
        dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
                                DMA_FROM_DEVICE);
        tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
                                         DMA_CTRL_ACK);

        if (!tx) {
                dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
                dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
                return -ENOMEM;
        }

        tx->callback = NULL;
        cookie = tx->tx_submit(tx);

        cpu = get_cpu();
        per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
        per_cpu_ptr(chan->local, cpu)->memcpy_count++;
        put_cpu();

        return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
        struct dma_chan *chan)
{
        tx->chan = chan;
        spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted). Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
        enum dma_status status;
        struct dma_async_tx_descriptor *iter;
        struct dma_async_tx_descriptor *parent;

        if (!tx)
                return DMA_SUCCESS;

        WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
                  " %s\n", __func__, dev_name(&tx->chan->dev));

        /* poll through the dependency chain, return when tx is complete */
        do {
                iter = tx;

                /* find the root of the unsubmitted dependency chain */
                do {
                        parent = iter->parent;
                        if (!parent)
                                break;
                        else
                                iter = parent;
                } while (parent);

                /* there is a small window for ->parent == NULL and
                 * ->cookie == -EBUSY
                 */
                while (iter->cookie == -EBUSY)
                        cpu_relax();

                status = dma_sync_wait(iter->chan, iter->cookie);
        } while (status == DMA_IN_PROGRESS || (iter != tx));

        return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 * (start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
        struct dma_async_tx_descriptor *dep = tx->next;
        struct dma_async_tx_descriptor *dep_next;
        struct dma_chan *chan;

        if (!dep)
                return;

        chan = dep->chan;

        /* keep submitting up until a channel switch is detected
         * in that case we will be called again as a result of
         * processing the interrupt from async_tx_channel_switch
         */
        for (; dep; dep = dep_next) {
                spin_lock_bh(&dep->lock);
                dep->parent = NULL;
                dep_next = dep->next;
                if (dep_next && dep_next->chan == chan)
                        dep->next = NULL; /* ->next will be submitted */
                else
                        dep_next = NULL; /* submit current dep and terminate */
                spin_unlock_bh(&dep->lock);

                dep->tx_submit(dep);
        }

        chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
        mutex_init(&dma_list_mutex);
        return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);