/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
17 | ||
18 | /* | |
19 | * This code implements the DMA subsystem. It provides a HW-neutral interface | |
20 | * for other kernel code to use asynchronous memory copy capabilities, | |
21 | * if present, and allows different HW DMA drivers to register as providing | |
22 | * this capability. | |
23 | * | |
24 | * Due to the fact we are accelerating what is already a relatively fast | |
25 | * operation, the code goes to great lengths to avoid additional overhead, | |
26 | * such as locking. | |
27 | * | |
28 | * LOCKING: | |
29 | * | |
30 | * The subsystem keeps a global list of dma_device structs it is protected by a | |
31 | * mutex, dma_list_mutex. | |
32 | * | |
33 | * A subsystem can get access to a channel by calling dmaengine_get() followed | |
34 | * by dma_find_channel(), or if it has need for an exclusive channel it can call | |
35 | * dma_request_channel(). Once a channel is allocated a reference is taken | |
36 | * against its corresponding driver to disable removal. | |
37 | * | |
38 | * Each device has a channels list, which runs unlocked but is never modified | |
39 | * once the device is registered, it's just setup by the driver. | |
40 | * | |
41 | * See Documentation/dmaengine.txt for more details | |
42 | */ | |
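
/*
 * Client-side lifecycle sketch (illustrative only, not part of this file):
 * one plausible slave-client sequence built on the interfaces described
 * above. The channel name "rx" and the cfg/buf/len/flags variables are
 * hypothetical placeholders.
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM, flags);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	... wait for the completion callback, then ...
 *	dma_release_channel(chan);
 */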
43 | ||
44 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | |
45 | ||
46 | #include <linux/platform_device.h> | |
47 | #include <linux/dma-mapping.h> | |
48 | #include <linux/init.h> | |
49 | #include <linux/module.h> | |
50 | #include <linux/mm.h> | |
51 | #include <linux/device.h> | |
52 | #include <linux/dmaengine.h> | |
53 | #include <linux/hardirq.h> | |
54 | #include <linux/spinlock.h> | |
55 | #include <linux/percpu.h> | |
56 | #include <linux/rcupdate.h> | |
57 | #include <linux/mutex.h> | |
58 | #include <linux/jiffies.h> | |
59 | #include <linux/rculist.h> | |
60 | #include <linux/idr.h> | |
61 | #include <linux/slab.h> | |
62 | #include <linux/acpi.h> | |
63 | #include <linux/acpi_dma.h> | |
64 | #include <linux/of_dma.h> | |
65 | #include <linux/mempool.h> | |
66 | ||
67 | static DEFINE_MUTEX(dma_list_mutex); | |
68 | static DEFINE_IDA(dma_ida); | |
69 | static LIST_HEAD(dma_device_list); | |
70 | static long dmaengine_ref_count; | |
71 | ||
72 | /* --- sysfs implementation --- */ | |
73 | ||
74 | /** | |
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		ida_remove(&dma_ida, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name = "dma",
	.dev_groups = dma_dev_groups,
	.dev_release = chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	/* This channel is not in use, bail out */
	if (!chan->client_count)
		return;

	chan->client_count--;
	module_put(dma_chan_to_owner(chan));

	/* This channel is not in use anymore, free it */
	if (!chan->client_count && chan->device->device_free_chan_resources) {
		/* Make sure all operations have completed */
		dmaengine_synchronize(chan);
		chan->device->device_free_chan_resources(chan);
	}

	/* If the channel is used via a DMA request router, free the mapping */
	if (chan->router && chan->router->route_free) {
		chan->router->route_free(chan->router->dev, chan->route_data);
		chan->router = NULL;
		chan->route_data = NULL;
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
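
/*
 * Usage sketch for dma_sync_wait() (illustrative only, not part of this
 * file): a client that has prepared and submitted a descriptor can poll for
 * completion synchronously. This assumes the dmaengine_prep_dma_memcpy()
 * helper is available; dst, src and len are hypothetical DMA addresses and
 * length.
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		return -EIO;
 */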
303 | ||
304 | /** | |
305 | * dma_cap_mask_all - enable iteration over all operation types | |
306 | */ | |
307 | static dma_cap_mask_t dma_cap_mask_all; | |
308 | ||
309 | /** | |
310 | * dma_chan_tbl_ent - tracks channel allocations per core/operation | |
311 | * @chan - associated channel for this entry | |
312 | */ | |
313 | struct dma_chan_tbl_ent { | |
314 | struct dma_chan *chan; | |
315 | }; | |
316 | ||
317 | /** | |
318 | * channel_table - percpu lookup table for memory-to-memory offload providers | |
319 | */ | |
320 | static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; | |
321 | ||
322 | static int __init dma_channel_table_init(void) | |
323 | { | |
324 | enum dma_transaction_type cap; | |
325 | int err = 0; | |
326 | ||
327 | bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); | |
328 | ||
329 | /* 'interrupt', 'private', and 'slave' are channel capabilities, | |
330 | * but are not associated with an operation so they do not need | |
331 | * an entry in the channel_table | |
332 | */ | |
333 | clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); | |
334 | clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); | |
335 | clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); | |
336 | ||
337 | for_each_dma_cap_mask(cap, dma_cap_mask_all) { | |
338 | channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); | |
339 | if (!channel_table[cap]) { | |
340 | err = -ENOMEM; | |
341 | break; | |
342 | } | |
343 | } | |
344 | ||
345 | if (err) { | |
346 | pr_err("initialization failure\n"); | |
347 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | |
348 | free_percpu(channel_table[cap]); | |
349 | } | |
350 | ||
351 | return err; | |
352 | } | |
353 | arch_initcall(dma_channel_table_init); | |
354 | ||
355 | /** | |
356 | * dma_find_channel - find a channel to carry out the operation | |
357 | * @tx_type: transaction type | |
358 | */ | |
359 | struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type) | |
360 | { | |
361 | return this_cpu_read(channel_table[tx_type]->chan); | |
362 | } | |
363 | EXPORT_SYMBOL(dma_find_channel); | |
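
/*
 * Usage sketch for opportunistic (non-exclusive) clients such as async_tx
 * (illustrative only, not part of this file): take a subsystem reference,
 * look up the per-cpu channel for an operation type, and drop the reference
 * when the client is done.
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		... prepare and submit a descriptor on chan ...
 *	...
 *	dmaengine_put();
 */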
364 | ||
365 | /** | |
366 | * dma_issue_pending_all - flush all pending operations across all channels | |
367 | */ | |
368 | void dma_issue_pending_all(void) | |
369 | { | |
370 | struct dma_device *device; | |
371 | struct dma_chan *chan; | |
372 | ||
373 | rcu_read_lock(); | |
374 | list_for_each_entry_rcu(device, &dma_device_list, global_node) { | |
375 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
376 | continue; | |
377 | list_for_each_entry(chan, &device->channels, device_node) | |
378 | if (chan->client_count) | |
379 | device->device_issue_pending(chan); | |
380 | } | |
381 | rcu_read_unlock(); | |
382 | } | |
383 | EXPORT_SYMBOL(dma_issue_pending_all); | |
384 | ||
385 | /** | |
386 | * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu | |
387 | */ | |
388 | static bool dma_chan_is_local(struct dma_chan *chan, int cpu) | |
389 | { | |
390 | int node = dev_to_node(chan->device->dev); | |
391 | return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node)); | |
392 | } | |
393 | ||
394 | /** | |
395 | * min_chan - returns the channel with min count and in the same numa-node as the cpu | |
396 | * @cap: capability to match | |
397 | * @cpu: cpu index which the channel should be close to | |
398 | * | |
399 | * If some channels are close to the given cpu, the one with the lowest | |
400 | * reference count is returned. Otherwise, cpu is ignored and only the | |
401 | * reference count is taken into account. | |
402 | * Must be called under dma_list_mutex. | |
403 | */ | |
404 | static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) | |
405 | { | |
406 | struct dma_device *device; | |
407 | struct dma_chan *chan; | |
408 | struct dma_chan *min = NULL; | |
409 | struct dma_chan *localmin = NULL; | |
410 | ||
411 | list_for_each_entry(device, &dma_device_list, global_node) { | |
412 | if (!dma_has_cap(cap, device->cap_mask) || | |
413 | dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
414 | continue; | |
415 | list_for_each_entry(chan, &device->channels, device_node) { | |
416 | if (!chan->client_count) | |
417 | continue; | |
418 | if (!min || chan->table_count < min->table_count) | |
419 | min = chan; | |
420 | ||
421 | if (dma_chan_is_local(chan, cpu)) | |
422 | if (!localmin || | |
423 | chan->table_count < localmin->table_count) | |
424 | localmin = chan; | |
425 | } | |
426 | } | |
427 | ||
428 | chan = localmin ? localmin : min; | |
429 | ||
430 | if (chan) | |
431 | chan->table_count++; | |
432 | ||
433 | return chan; | |
434 | } | |
435 | ||
436 | /** | |
437 | * dma_channel_rebalance - redistribute the available channels | |
438 | * | |
439 | * Optimize for cpu isolation (each cpu gets a dedicated channel for an | |
440 | * operation type) in the SMP case, and operation isolation (avoid | |
441 | * multi-tasking channels) in the non-SMP case. Must be called under | |
442 | * dma_list_mutex. | |
443 | */ | |
444 | static void dma_channel_rebalance(void) | |
445 | { | |
446 | struct dma_chan *chan; | |
447 | struct dma_device *device; | |
448 | int cpu; | |
449 | int cap; | |
450 | ||
451 | /* undo the last distribution */ | |
452 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | |
453 | for_each_possible_cpu(cpu) | |
454 | per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; | |
455 | ||
456 | list_for_each_entry(device, &dma_device_list, global_node) { | |
457 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
458 | continue; | |
459 | list_for_each_entry(chan, &device->channels, device_node) | |
460 | chan->table_count = 0; | |
461 | } | |
462 | ||
463 | /* don't populate the channel_table if no clients are available */ | |
464 | if (!dmaengine_ref_count) | |
465 | return; | |
466 | ||
467 | /* redistribute available channels */ | |
468 | for_each_dma_cap_mask(cap, dma_cap_mask_all) | |
469 | for_each_online_cpu(cpu) { | |
470 | chan = min_chan(cap, cpu); | |
471 | per_cpu_ptr(channel_table[cap], cpu)->chan = chan; | |
472 | } | |
473 | } | |
474 | ||
475 | int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) | |
476 | { | |
477 | struct dma_device *device; | |
478 | ||
479 | if (!chan || !caps) | |
480 | return -EINVAL; | |
481 | ||
482 | device = chan->device; | |
483 | ||
484 | /* check if the channel supports slave transactions */ | |
485 | if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || | |
486 | test_bit(DMA_CYCLIC, device->cap_mask.bits))) | |
487 | return -ENXIO; | |
488 | ||
	/*
	 * If the device does not report the generic slave capabilities,
	 * it does not support any kind of slave capability reporting at
	 * all, so there is nothing to hand back.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;

	/*
	 * Some devices implement only pause (e.g. to read the residue) but
	 * no resume. However, cmd_pause is advertised as pause AND resume.
	 */
	caps->cmd_pause = !!(device->device_pause && device->device_resume);
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
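
/*
 * Usage sketch for dma_get_slave_caps() (illustrative only, not part of
 * this file): a client can check a channel's abilities before issuing a
 * dmaengine_slave_config(). The 4-byte width requirement below is a
 * hypothetical policy of the client.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (dma_get_slave_caps(chan, &caps))
 *		return -ENXIO;
 *	if (!(caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		return -EINVAL;
 *	if (!caps.cmd_terminate)
 *		return -EINVAL;
 */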
514 | ||
515 | static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, | |
516 | struct dma_device *dev, | |
517 | dma_filter_fn fn, void *fn_param) | |
518 | { | |
519 | struct dma_chan *chan; | |
520 | ||
521 | if (mask && !__dma_device_satisfies_mask(dev, mask)) { | |
522 | dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); | |
523 | return NULL; | |
524 | } | |
525 | /* devices with multiple channels need special handling as we need to | |
526 | * ensure that all channels are either private or public. | |
527 | */ | |
528 | if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) | |
529 | list_for_each_entry(chan, &dev->channels, device_node) { | |
530 | /* some channels are already publicly allocated */ | |
531 | if (chan->client_count) | |
532 | return NULL; | |
533 | } | |
534 | ||
535 | list_for_each_entry(chan, &dev->channels, device_node) { | |
536 | if (chan->client_count) { | |
537 | dev_dbg(dev->dev, "%s: %s busy\n", | |
538 | __func__, dma_chan_name(chan)); | |
539 | continue; | |
540 | } | |
541 | if (fn && !fn(chan, fn_param)) { | |
542 | dev_dbg(dev->dev, "%s: %s filter said false\n", | |
543 | __func__, dma_chan_name(chan)); | |
544 | continue; | |
545 | } | |
546 | return chan; | |
547 | } | |
548 | ||
549 | return NULL; | |
550 | } | |
551 | ||
552 | static struct dma_chan *find_candidate(struct dma_device *device, | |
553 | const dma_cap_mask_t *mask, | |
554 | dma_filter_fn fn, void *fn_param) | |
555 | { | |
556 | struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); | |
557 | int err; | |
558 | ||
559 | if (chan) { | |
560 | /* Found a suitable channel, try to grab, prep, and return it. | |
561 | * We first set DMA_PRIVATE to disable balance_ref_count as this | |
562 | * channel will not be published in the general-purpose | |
563 | * allocator | |
564 | */ | |
565 | dma_cap_set(DMA_PRIVATE, device->cap_mask); | |
566 | device->privatecnt++; | |
567 | err = dma_chan_get(chan); | |
568 | ||
569 | if (err) { | |
570 | if (err == -ENODEV) { | |
571 | dev_dbg(device->dev, "%s: %s module removed\n", | |
572 | __func__, dma_chan_name(chan)); | |
573 | list_del_rcu(&device->global_node); | |
574 | } else | |
575 | dev_dbg(device->dev, | |
576 | "%s: failed to get %s: (%d)\n", | |
577 | __func__, dma_chan_name(chan), err); | |
578 | ||
579 | if (--device->privatecnt == 0) | |
580 | dma_cap_clear(DMA_PRIVATE, device->cap_mask); | |
581 | ||
582 | chan = ERR_PTR(err); | |
583 | } | |
584 | } | |
585 | ||
586 | return chan ? chan : ERR_PTR(-EPROBE_DEFER); | |
587 | } | |
588 | ||
589 | /** | |
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);


	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to filter the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
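
/*
 * Usage sketch for the dma_request_channel() wrapper around
 * __dma_request_channel() (illustrative only, not part of this file). The
 * filter function and the my_dma_dev cookie it matches are hypothetical.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dma_dev);
 *	if (!chan)
 *		return -ENODEV;
 */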
676 | ||
677 | static const struct dma_slave_map *dma_filter_match(struct dma_device *device, | |
678 | const char *name, | |
679 | struct device *dev) | |
680 | { | |
681 | int i; | |
682 | ||
683 | if (!device->filter.mapcnt) | |
684 | return NULL; | |
685 | ||
686 | for (i = 0; i < device->filter.mapcnt; i++) { | |
687 | const struct dma_slave_map *map = &device->filter.map[i]; | |
688 | ||
689 | if (!strcmp(map->devname, dev_name(dev)) && | |
690 | !strcmp(map->slave, name)) | |
691 | return map; | |
692 | } | |
693 | ||
694 | return NULL; | |
695 | } | |
696 | ||
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If a device tree node is present, get the slave info from it */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If the device was enumerated by ACPI, get the slave info from ACPI */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or the request needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);
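
/*
 * Usage sketch for dma_request_chan() in a client driver's probe path
 * (illustrative only, not part of this file). "tx" is a hypothetical channel
 * name taken from the client's DT binding or filter map; note that the error
 * pointer may be -EPROBE_DEFER and should be propagated.
 *
 *	chan = dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */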
744 | ||
745 | /** | |
746 | * dma_request_slave_channel - try to allocate an exclusive slave channel | |
747 | * @dev: pointer to client device structure | |
748 | * @name: slave channel name | |
749 | * | |
750 | * Returns pointer to appropriate DMA channel on success or NULL. | |
751 | */ | |
752 | struct dma_chan *dma_request_slave_channel(struct device *dev, | |
753 | const char *name) | |
754 | { | |
755 | struct dma_chan *ch = dma_request_chan(dev, name); | |
756 | if (IS_ERR(ch)) | |
757 | return NULL; | |
758 | ||
759 | return ch; | |
760 | } | |
761 | EXPORT_SYMBOL_GPL(dma_request_slave_channel); | |
762 | ||
763 | /** | |
764 | * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities | |
765 | * @mask: capabilities that the channel must satisfy | |
766 | * | |
767 | * Returns pointer to appropriate DMA channel on success or an error pointer. | |
768 | */ | |
769 | struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask) | |
770 | { | |
771 | struct dma_chan *chan; | |
772 | ||
773 | if (!mask) | |
774 | return ERR_PTR(-ENODEV); | |
775 | ||
776 | chan = __dma_request_channel(mask, NULL, NULL); | |
777 | if (!chan) | |
778 | chan = ERR_PTR(-ENODEV); | |
779 | ||
780 | return chan; | |
781 | } | |
782 | EXPORT_SYMBOL_GPL(dma_request_chan_by_mask); | |
783 | ||
784 | void dma_release_channel(struct dma_chan *chan) | |
785 | { | |
786 | mutex_lock(&dma_list_mutex); | |
787 | WARN_ONCE(chan->client_count != 1, | |
788 | "chan reference count %d != 1\n", chan->client_count); | |
789 | dma_chan_put(chan); | |
790 | /* drop PRIVATE cap enabled by __dma_request_channel() */ | |
791 | if (--chan->device->privatecnt == 0) | |
792 | dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); | |
793 | mutex_unlock(&dma_list_mutex); | |
794 | } | |
795 | EXPORT_SYMBOL_GPL(dma_release_channel); | |
796 | ||
797 | /** | |
798 | * dmaengine_get - register interest in dma_channels | |
799 | */ | |
800 | void dmaengine_get(void) | |
801 | { | |
802 | struct dma_device *device, *_d; | |
803 | struct dma_chan *chan; | |
804 | int err; | |
805 | ||
806 | mutex_lock(&dma_list_mutex); | |
807 | dmaengine_ref_count++; | |
808 | ||
809 | /* try to grab channels */ | |
810 | list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { | |
811 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
812 | continue; | |
813 | list_for_each_entry(chan, &device->channels, device_node) { | |
814 | err = dma_chan_get(chan); | |
815 | if (err == -ENODEV) { | |
816 | /* module removed before we could use it */ | |
817 | list_del_rcu(&device->global_node); | |
818 | break; | |
819 | } else if (err) | |
820 | dev_dbg(chan->device->dev, | |
821 | "%s: failed to get %s: (%d)\n", | |
822 | __func__, dma_chan_name(chan), err); | |
823 | } | |
824 | } | |
825 | ||
	/* if this is the first reference and there were channels waiting,
	 * we need to rebalance to get those channels incorporated into
	 * the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);
835 | ||
836 | /** | |
837 | * dmaengine_put - let dma drivers be removed when ref_count == 0 | |
838 | */ | |
839 | void dmaengine_put(void) | |
840 | { | |
841 | struct dma_device *device; | |
842 | struct dma_chan *chan; | |
843 | ||
844 | mutex_lock(&dma_list_mutex); | |
845 | dmaengine_ref_count--; | |
846 | BUG_ON(dmaengine_ref_count < 0); | |
847 | /* drop channel references */ | |
848 | list_for_each_entry(device, &dma_device_list, global_node) { | |
849 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
850 | continue; | |
851 | list_for_each_entry(chan, &device->channels, device_node) | |
852 | dma_chan_put(chan); | |
853 | } | |
854 | mutex_unlock(&dma_list_mutex); | |
855 | } | |
856 | EXPORT_SYMBOL(dmaengine_put); | |
857 | ||
858 | static bool device_has_all_tx_types(struct dma_device *device) | |
859 | { | |
860 | /* A device that satisfies this test has channels that will never cause | |
861 | * an async_tx channel switch event as all possible operation types can | |
862 | * be handled. | |
863 | */ | |
864 | #ifdef CONFIG_ASYNC_TX_DMA | |
865 | if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) | |
866 | return false; | |
867 | #endif | |
868 | ||
869 | #if IS_ENABLED(CONFIG_ASYNC_MEMCPY) | |
870 | if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) | |
871 | return false; | |
872 | #endif | |
873 | ||
874 | #if IS_ENABLED(CONFIG_ASYNC_XOR) | |
875 | if (!dma_has_cap(DMA_XOR, device->cap_mask)) | |
876 | return false; | |
877 | ||
878 | #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA | |
879 | if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) | |
880 | return false; | |
881 | #endif | |
882 | #endif | |
883 | ||
884 | #if IS_ENABLED(CONFIG_ASYNC_PQ) | |
885 | if (!dma_has_cap(DMA_PQ, device->cap_mask)) | |
886 | return false; | |
887 | ||
888 | #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA | |
889 | if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) | |
890 | return false; | |
891 | #endif | |
892 | #endif | |
893 | ||
894 | return true; | |
895 | } | |
896 | ||
897 | static int get_dma_id(struct dma_device *device) | |
898 | { | |
899 | int rc; | |
900 | ||
901 | do { | |
902 | if (!ida_pre_get(&dma_ida, GFP_KERNEL)) | |
903 | return -ENOMEM; | |
904 | mutex_lock(&dma_list_mutex); | |
905 | rc = ida_get_new(&dma_ida, &device->dev_id); | |
906 | mutex_unlock(&dma_list_mutex); | |
907 | } while (rc == -EAGAIN); | |
908 | ||
909 | return rc; | |
910 | } | |
911 | ||
912 | /** | |
913 | * dma_async_device_register - registers DMA devices found | |
914 | * @device: &dma_device | |
915 | */ | |
916 | int dma_async_device_register(struct dma_device *device) | |
917 | { | |
918 | int chancnt = 0, rc; | |
919 | struct dma_chan* chan; | |
920 | atomic_t *idr_ref; | |
921 | ||
922 | if (!device) | |
923 | return -ENODEV; | |
924 | ||
925 | /* validate device routines */ | |
926 | BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) && | |
927 | !device->device_prep_dma_memcpy); | |
928 | BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) && | |
929 | !device->device_prep_dma_xor); | |
930 | BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) && | |
931 | !device->device_prep_dma_xor_val); | |
932 | BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) && | |
933 | !device->device_prep_dma_pq); | |
934 | BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) && | |
935 | !device->device_prep_dma_pq_val); | |
936 | BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) && | |
937 | !device->device_prep_dma_memset); | |
938 | BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) && | |
939 | !device->device_prep_dma_interrupt); | |
940 | BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) && | |
941 | !device->device_prep_dma_sg); | |
942 | BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) && | |
943 | !device->device_prep_dma_cyclic); | |
944 | BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && | |
945 | !device->device_prep_interleaved_dma); | |
946 | ||
947 | BUG_ON(!device->device_tx_status); | |
948 | BUG_ON(!device->device_issue_pending); | |
949 | BUG_ON(!device->dev); | |
950 | ||
951 | /* note: this only matters in the | |
952 | * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case | |
953 | */ | |
954 | if (device_has_all_tx_types(device)) | |
955 | dma_cap_set(DMA_ASYNC_TX, device->cap_mask); | |
956 | ||
957 | idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); | |
958 | if (!idr_ref) | |
959 | return -ENOMEM; | |
960 | rc = get_dma_id(device); | |
961 | if (rc != 0) { | |
962 | kfree(idr_ref); | |
963 | return rc; | |
964 | } | |
965 | ||
966 | atomic_set(idr_ref, 0); | |
967 | ||
968 | /* represent channels in sysfs. Probably want devs too */ | |
969 | list_for_each_entry(chan, &device->channels, device_node) { | |
970 | rc = -ENOMEM; | |
971 | chan->local = alloc_percpu(typeof(*chan->local)); | |
972 | if (chan->local == NULL) | |
973 | goto err_out; | |
974 | chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); | |
975 | if (chan->dev == NULL) { | |
976 | free_percpu(chan->local); | |
977 | chan->local = NULL; | |
978 | goto err_out; | |
979 | } | |
980 | ||
981 | chan->chan_id = chancnt++; | |
982 | chan->dev->device.class = &dma_devclass; | |
983 | chan->dev->device.parent = device->dev; | |
984 | chan->dev->chan = chan; | |
985 | chan->dev->idr_ref = idr_ref; | |
986 | chan->dev->dev_id = device->dev_id; | |
987 | atomic_inc(idr_ref); | |
988 | dev_set_name(&chan->dev->device, "dma%dchan%d", | |
989 | device->dev_id, chan->chan_id); | |
990 | ||
991 | rc = device_register(&chan->dev->device); | |
992 | if (rc) { | |
993 | free_percpu(chan->local); | |
994 | chan->local = NULL; | |
995 | kfree(chan->dev); | |
996 | atomic_dec(idr_ref); | |
997 | goto err_out; | |
998 | } | |
999 | chan->client_count = 0; | |
1000 | } | |
1001 | ||
1002 | if (!chancnt) { | |
1003 | dev_err(device->dev, "%s: device has no channels!\n", __func__); | |
1004 | rc = -ENODEV; | |
1005 | goto err_out; | |
1006 | } | |
1007 | ||
1008 | device->chancnt = chancnt; | |
1009 | ||
1010 | mutex_lock(&dma_list_mutex); | |
1011 | /* take references on public channels */ | |
1012 | if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
1013 | list_for_each_entry(chan, &device->channels, device_node) { | |
1014 | /* if clients are already waiting for channels we need | |
1015 | * to take references on their behalf | |
1016 | */ | |
1017 | if (dma_chan_get(chan) == -ENODEV) { | |
1018 | /* note we can only get here for the first | |
1019 | * channel as the remaining channels are | |
1020 | * guaranteed to get a reference | |
1021 | */ | |
1022 | rc = -ENODEV; | |
1023 | mutex_unlock(&dma_list_mutex); | |
1024 | goto err_out; | |
1025 | } | |
1026 | } | |
1027 | list_add_tail_rcu(&device->global_node, &dma_device_list); | |
1028 | if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) | |
1029 | device->privatecnt++; /* Always private */ | |
1030 | dma_channel_rebalance(); | |
1031 | mutex_unlock(&dma_list_mutex); | |
1032 | ||
1033 | return 0; | |
1034 | ||
1035 | err_out: | |
1036 | /* if we never registered a channel just release the idr */ | |
1037 | if (atomic_read(idr_ref) == 0) { | |
1038 | mutex_lock(&dma_list_mutex); | |
1039 | ida_remove(&dma_ida, device->dev_id); | |
1040 | mutex_unlock(&dma_list_mutex); | |
1041 | kfree(idr_ref); | |
1042 | return rc; | |
1043 | } | |
1044 | ||
1045 | list_for_each_entry(chan, &device->channels, device_node) { | |
1046 | if (chan->local == NULL) | |
1047 | continue; | |
1048 | mutex_lock(&dma_list_mutex); | |
1049 | chan->dev->chan = NULL; | |
1050 | mutex_unlock(&dma_list_mutex); | |
1051 | device_unregister(&chan->dev->device); | |
1052 | free_percpu(chan->local); | |
1053 | } | |
1054 | return rc; | |
1055 | } | |
1056 | EXPORT_SYMBOL(dma_async_device_register); | |
1057 | ||
1058 | /** | |
1059 | * dma_async_device_unregister - unregister a DMA device | |
1060 | * @device: &dma_device | |
1061 | * | |
1062 | * This routine is called by dma driver exit routines, dmaengine holds module | |
1063 | * references to prevent it being called while channels are in use. | |
1064 | */ | |
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
1086 | ||
1087 | struct dmaengine_unmap_pool { | |
1088 | struct kmem_cache *cache; | |
1089 | const char *name; | |
1090 | mempool_t *pool; | |
1091 | size_t size; | |
1092 | }; | |
1093 | ||
1094 | #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) } | |
1095 | static struct dmaengine_unmap_pool unmap_pool[] = { | |
1096 | __UNMAP_POOL(2), | |
1097 | #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID) | |
1098 | __UNMAP_POOL(16), | |
1099 | __UNMAP_POOL(128), | |
1100 | __UNMAP_POOL(256), | |
1101 | #endif | |
1102 | }; | |
1103 | ||
1104 | static struct dmaengine_unmap_pool *__get_unmap_pool(int nr) | |
1105 | { | |
1106 | int order = get_count_order(nr); | |
1107 | ||
1108 | switch (order) { | |
1109 | case 0 ... 1: | |
1110 | return &unmap_pool[0]; | |
1111 | case 2 ... 4: | |
1112 | return &unmap_pool[1]; | |
1113 | case 5 ... 7: | |
1114 | return &unmap_pool[2]; | |
1115 | case 8: | |
1116 | return &unmap_pool[3]; | |
1117 | default: | |
1118 | BUG(); | |
1119 | return NULL; | |
1120 | } | |
1121 | } | |
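
/*
 * Worked example (illustrative only): a request for nr = 5 addresses has
 * get_count_order(5) == 3 and is served from unmap_pool[1], the 16-entry
 * pool; nr = 200 has order 8 and is served from the 256-entry pool. The
 * pools beyond unmap_pool[0] only exist when CONFIG_DMA_ENGINE_RAID is
 * enabled, which matches the callers that need large address lists.
 */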
1122 | ||
1123 | static void dmaengine_unmap(struct kref *kref) | |
1124 | { | |
1125 | struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref); | |
1126 | struct device *dev = unmap->dev; | |
1127 | int cnt, i; | |
1128 | ||
1129 | cnt = unmap->to_cnt; | |
1130 | for (i = 0; i < cnt; i++) | |
1131 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | |
1132 | DMA_TO_DEVICE); | |
1133 | cnt += unmap->from_cnt; | |
1134 | for (; i < cnt; i++) | |
1135 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | |
1136 | DMA_FROM_DEVICE); | |
1137 | cnt += unmap->bidi_cnt; | |
1138 | for (; i < cnt; i++) { | |
1139 | if (unmap->addr[i] == 0) | |
1140 | continue; | |
1141 | dma_unmap_page(dev, unmap->addr[i], unmap->len, | |
1142 | DMA_BIDIRECTIONAL); | |
1143 | } | |
1144 | cnt = unmap->map_cnt; | |
1145 | mempool_free(unmap, __get_unmap_pool(cnt)->pool); | |
1146 | } | |
1147 | ||
1148 | void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap) | |
1149 | { | |
1150 | if (unmap) | |
1151 | kref_put(&unmap->kref, dmaengine_unmap); | |
1152 | } | |
1153 | EXPORT_SYMBOL_GPL(dmaengine_unmap_put); | |
1154 | ||
1155 | static void dmaengine_destroy_unmap_pool(void) | |
1156 | { | |
1157 | int i; | |
1158 | ||
1159 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { | |
1160 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; | |
1161 | ||
1162 | mempool_destroy(p->pool); | |
1163 | p->pool = NULL; | |
1164 | kmem_cache_destroy(p->cache); | |
1165 | p->cache = NULL; | |
1166 | } | |
1167 | } | |
1168 | ||
1169 | static int __init dmaengine_init_unmap_pool(void) | |
1170 | { | |
1171 | int i; | |
1172 | ||
1173 | for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { | |
1174 | struct dmaengine_unmap_pool *p = &unmap_pool[i]; | |
1175 | size_t size; | |
1176 | ||
1177 | size = sizeof(struct dmaengine_unmap_data) + | |
1178 | sizeof(dma_addr_t) * p->size; | |
1179 | ||
1180 | p->cache = kmem_cache_create(p->name, size, 0, | |
1181 | SLAB_HWCACHE_ALIGN, NULL); | |
1182 | if (!p->cache) | |
1183 | break; | |
1184 | p->pool = mempool_create_slab_pool(1, p->cache); | |
1185 | if (!p->pool) | |
1186 | break; | |
1187 | } | |
1188 | ||
1189 | if (i == ARRAY_SIZE(unmap_pool)) | |
1190 | return 0; | |
1191 | ||
1192 | dmaengine_destroy_unmap_pool(); | |
1193 | return -ENOMEM; | |
1194 | } | |
1195 | ||
1196 | struct dmaengine_unmap_data * | |
1197 | dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags) | |
1198 | { | |
1199 | struct dmaengine_unmap_data *unmap; | |
1200 | ||
1201 | unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); | |
1202 | if (!unmap) | |
1203 | return NULL; | |
1204 | ||
1205 | memset(unmap, 0, sizeof(*unmap)); | |
1206 | kref_init(&unmap->kref); | |
1207 | unmap->dev = dev; | |
1208 | unmap->map_cnt = nr; | |
1209 | ||
1210 | return unmap; | |
1211 | } | |
1212 | EXPORT_SYMBOL(dmaengine_get_unmap_data); | |
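
/*
 * Usage sketch for the unmap-data helpers (illustrative only, not part of
 * this file): an offload client maps its pages, records them in the unmap
 * structure, ties it to the descriptor, and then drops its own reference.
 * The page/offset/len variables are hypothetical.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, src_off, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, dst_off, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	... set up the descriptor and call dma_set_unmap(tx, unmap) ...
 *	dmaengine_unmap_put(unmap);
 */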
1213 | ||
1214 | void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, | |
1215 | struct dma_chan *chan) | |
1216 | { | |
1217 | tx->chan = chan; | |
1218 | #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH | |
1219 | spin_lock_init(&tx->lock); | |
1220 | #endif | |
1221 | } | |
1222 | EXPORT_SYMBOL(dma_async_tx_descriptor_init); | |
1223 | ||
1224 | /* dma_wait_for_async_tx - spin wait for a transaction to complete | |
1225 | * @tx: in-flight transaction to wait on | |
1226 | */ | |
1227 | enum dma_status | |
1228 | dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) | |
1229 | { | |
1230 | unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000); | |
1231 | ||
1232 | if (!tx) | |
1233 | return DMA_COMPLETE; | |
1234 | ||
1235 | while (tx->cookie == -EBUSY) { | |
1236 | if (time_after_eq(jiffies, dma_sync_wait_timeout)) { | |
1237 | dev_err(tx->chan->device->dev, | |
1238 | "%s timeout waiting for descriptor submission\n", | |
1239 | __func__); | |
1240 | return DMA_ERROR; | |
1241 | } | |
1242 | cpu_relax(); | |
1243 | } | |
1244 | return dma_sync_wait(tx->chan, tx->cookie); | |
1245 | } | |
1246 | EXPORT_SYMBOL_GPL(dma_wait_for_async_tx); | |
1247 | ||
1248 | /* dma_run_dependencies - helper routine for dma drivers to process | |
1249 | * (start) dependent operations on their target channel | |
1250 | * @tx: transaction with dependencies | |
1251 | */ | |
1252 | void dma_run_dependencies(struct dma_async_tx_descriptor *tx) | |
1253 | { | |
1254 | struct dma_async_tx_descriptor *dep = txd_next(tx); | |
1255 | struct dma_async_tx_descriptor *dep_next; | |
1256 | struct dma_chan *chan; | |
1257 | ||
1258 | if (!dep) | |
1259 | return; | |
1260 | ||
1261 | /* we'll submit tx->next now, so clear the link */ | |
1262 | txd_clear_next(tx); | |
1263 | chan = dep->chan; | |
1264 | ||
	/* keep submitting up until a channel switch is detected; in that
	 * case we will be called again as a result of processing the
	 * interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
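
/*
 * Typical call-site sketch (illustrative only, not part of this file): a
 * driver's descriptor-completion handler finalizes the cookie, runs the
 * client callback, and then kicks any dependent descriptors. "desc" and its
 * embedded txd field are hypothetical driver-private names.
 *
 *	dma_cookie_complete(&desc->txd);
 *	if (desc->txd.callback)
 *		desc->txd.callback(desc->txd.callback_param);
 *	dma_run_dependencies(&desc->txd);
 */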
1285 | ||
1286 | static int __init dma_bus_init(void) | |
1287 | { | |
1288 | int err = dmaengine_init_unmap_pool(); | |
1289 | ||
1290 | if (err) | |
1291 | return err; | |
1292 | return class_register(&dma_devclass); | |
1293 | } | |
1294 | arch_initcall(dma_bus_init); | |
1295 | ||
1296 |