// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/ccp.h>

#include "ccp-dev.h"
#include "../../dma/dmaengine.h"

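/*
 * Derive the DMA address width, in bits, to advertise for a device DMA
 * mask: a mask of all ones wraps to zero when incremented, indicating
 * the full 64-bit width; otherwise the width is fls64(mask + 1).
 */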
#define CCP_DMA_WIDTH(_mask)		\
({					\
	u64 mask = _mask + 1;		\
	(mask == 0) ? 64 : fls64(mask);	\
})

/* The CCP as a DMA provider can be configured for public or private
 * channels. Default is specified in the vdata for the device (PCI ID).
 * This module parameter will override for all channels on all devices:
 *	dma_chan_attr = 0x2 to force all channels public
 *		      = 0x1 to force all channels private
 *		      = 0x0 to defer to the vdata setting
 *		      = any other value: warning, revert to 0x0
 */
static unsigned int dma_chan_attr = CCP_DMA_DFLT;
module_param(dma_chan_attr, uint, 0444);
MODULE_PARM_DESC(dma_chan_attr, "Set DMA channel visibility: 0 (default) = device defaults, 1 = make private, 2 = make public");

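/*
 * Resolve the effective channel attribute: a recognized module
 * parameter value overrides the per-device vdata default. For example
 * (assuming the driver is built as the "ccp" module), all channels
 * could be forced public at load time with "modprobe ccp
 * dma_chan_attr=0x2", or via "ccp.dma_chan_attr=0x2" on the kernel
 * command line.
 */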
static unsigned int ccp_get_dma_chan_attr(struct ccp_device *ccp)
{
	switch (dma_chan_attr) {
	case CCP_DMA_DFLT:
		return ccp->vdata->dma_chan_attr;

	case CCP_DMA_PRIV:
		return DMA_PRIVATE;

	case CCP_DMA_PUB:
		return 0;

	default:
		dev_info_once(ccp->dev, "Invalid value for dma_chan_attr: %d\n",
			      dma_chan_attr);
		return ccp->vdata->dma_chan_attr;
	}
}

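/* Return every queued DMA command on @list to the command cache */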
static void ccp_free_cmd_resources(struct ccp_device *ccp,
				   struct list_head *list)
{
	struct ccp_dma_cmd *cmd, *ctmp;

	list_for_each_entry_safe(cmd, ctmp, list, entry) {
		list_del(&cmd->entry);
		kmem_cache_free(ccp->dma_cmd_cache, cmd);
	}
}

static void ccp_free_desc_resources(struct ccp_device *ccp,
				    struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe(desc, dtmp, list, entry) {
		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

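/*
 * dmaengine device_free_chan_resources callback: release all
 * descriptors still held by the channel, whatever their state.
 */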
static void ccp_free_chan_resources(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%p\n", __func__, chan);

	spin_lock_irqsave(&chan->lock, flags);

	ccp_free_desc_resources(chan->ccp, &chan->complete);
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);
}

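/*
 * Free any descriptor on @list whose transaction the client has
 * acknowledged (async_tx ACK), scanning from newest to oldest.
 */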
static void ccp_cleanup_desc_resources(struct ccp_device *ccp,
				       struct list_head *list)
{
	struct ccp_dma_desc *desc, *dtmp;

	list_for_each_entry_safe_reverse(desc, dtmp, list, entry) {
		if (!async_tx_test_ack(&desc->tx_desc))
			continue;

		dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

		ccp_free_cmd_resources(ccp, &desc->active);
		ccp_free_cmd_resources(ccp, &desc->pending);

		list_del(&desc->entry);
		kmem_cache_free(ccp->dma_desc_cache, desc);
	}
}

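/*
 * Cleanup tasklet: reclaim acknowledged descriptors on the channel's
 * complete list outside of the command-callback path.
 */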
static void ccp_do_cleanup(unsigned long data)
{
	struct ccp_dma_chan *chan = (struct ccp_dma_chan *)data;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s - chan=%s\n", __func__,
		dma_chan_name(&chan->dma_chan));

	spin_lock_irqsave(&chan->lock, flags);

	ccp_cleanup_desc_resources(chan->ccp, &chan->complete);

	spin_unlock_irqrestore(&chan->lock, flags);
}

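/*
 * Move the descriptor's next pending command to its active list and
 * submit it to the CCP. -EINPROGRESS and -EBUSY are treated as
 * success: the command was accepted (possibly backlogged).
 */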
static int ccp_issue_next_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;
	int ret;

	cmd = list_first_entry(&desc->pending, struct ccp_dma_cmd, entry);
	list_move(&cmd->entry, &desc->active);

	dev_dbg(desc->ccp->dev, "%s - tx %d, cmd=%p\n", __func__,
		desc->tx_desc.cookie, cmd);

	ret = ccp_enqueue_cmd(&cmd->ccp_cmd);
	if (!ret || (ret == -EINPROGRESS) || (ret == -EBUSY))
		return 0;

	dev_dbg(desc->ccp->dev, "%s - error: ret=%d, tx %d, cmd=%p\n", __func__,
		ret, desc->tx_desc.cookie, cmd);

	return ret;
}

static void ccp_free_active_cmd(struct ccp_dma_desc *desc)
{
	struct ccp_dma_cmd *cmd;

	cmd = list_first_entry_or_null(&desc->active, struct ccp_dma_cmd,
				       entry);
	if (!cmd)
		return;

	dev_dbg(desc->ccp->dev, "%s - freeing tx %d cmd=%p\n",
		__func__, desc->tx_desc.cookie, cmd);

	list_del(&cmd->entry);
	kmem_cache_free(desc->ccp->dma_cmd_cache, cmd);
}

static struct ccp_dma_desc *__ccp_next_dma_desc(struct ccp_dma_chan *chan,
						struct ccp_dma_desc *desc)
{
	/* Move current DMA descriptor to the complete list */
	if (desc)
		list_move(&desc->entry, &chan->complete);

	/* Get the next DMA descriptor on the active list */
	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	return desc;
}

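/*
 * Retire the command that just finished on @desc, then walk the active
 * list until a descriptor with commands still pending is found. Each
 * completed descriptor has its cookie completed, is unmapped, is moved
 * to the complete list, and has its callback and dependencies run with
 * the channel lock dropped. Returns the next descriptor to process, or
 * NULL when the active list is empty.
 */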
static struct ccp_dma_desc *ccp_handle_active_desc(struct ccp_dma_chan *chan,
						   struct ccp_dma_desc *desc)
{
	struct dma_async_tx_descriptor *tx_desc;
	unsigned long flags;

	/* Loop over descriptors until one is found with commands */
	do {
		if (desc) {
			/* Remove the DMA command from the list and free it */
			ccp_free_active_cmd(desc);

			if (!list_empty(&desc->pending)) {
				/* No errors, keep going */
				if (desc->status != DMA_ERROR)
					return desc;

				/* Error, free remaining commands and move on */
				ccp_free_cmd_resources(desc->ccp,
						       &desc->pending);
			}

			tx_desc = &desc->tx_desc;
		} else {
			tx_desc = NULL;
		}

		spin_lock_irqsave(&chan->lock, flags);

		if (desc) {
			if (desc->status != DMA_ERROR)
				desc->status = DMA_COMPLETE;

			dev_dbg(desc->ccp->dev,
				"%s - tx %d complete, status=%u\n", __func__,
				desc->tx_desc.cookie, desc->status);

			dma_cookie_complete(tx_desc);
			dma_descriptor_unmap(tx_desc);
		}

		desc = __ccp_next_dma_desc(chan, desc);

		spin_unlock_irqrestore(&chan->lock, flags);

		if (tx_desc) {
			dmaengine_desc_get_callback_invoke(tx_desc, NULL);

			dma_run_dependencies(tx_desc);
		}
	} while (desc);

	return NULL;
}

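/*
 * Splice all pending descriptors onto the tail of the active list
 * (called with chan->lock held). Returns the first newly activated
 * descriptor if the active list was empty, signalling that processing
 * must be kicked off; NULL if work is already in flight.
 */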
static struct ccp_dma_desc *__ccp_pending_to_active(struct ccp_dma_chan *chan)
{
	struct ccp_dma_desc *desc;

	if (list_empty(&chan->pending))
		return NULL;

	desc = list_empty(&chan->active)
		? list_first_entry(&chan->pending, struct ccp_dma_desc, entry)
		: NULL;

	list_splice_tail_init(&chan->pending, &chan->active);

	return desc;
}

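/*
 * Completion callback attached to every CCP command issued by this
 * driver; also invoked directly with err == 0 to (re)start a channel.
 * Retires finished descriptors and keeps issuing commands until the
 * channel runs dry, is paused, or a submission fails.
 */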
static void ccp_cmd_callback(void *data, int err)
{
	struct ccp_dma_desc *desc = data;
	struct ccp_dma_chan *chan;
	int ret;

	if (err == -EINPROGRESS)
		return;

	chan = container_of(desc->tx_desc.chan, struct ccp_dma_chan,
			    dma_chan);

	dev_dbg(chan->ccp->dev, "%s - tx %d callback, err=%d\n",
		__func__, desc->tx_desc.cookie, err);

	if (err)
		desc->status = DMA_ERROR;

	while (true) {
		/* Check for DMA descriptor completion */
		desc = ccp_handle_active_desc(chan, desc);

		/* Don't submit cmd if no descriptor or DMA is paused */
		if (!desc || (chan->status == DMA_PAUSED))
			break;

		ret = ccp_issue_next_cmd(desc);
		if (!ret)
			break;

		desc->status = DMA_ERROR;
	}

	tasklet_schedule(&chan->cleanup_tasklet);
}

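/*
 * dmaengine tx_submit hook: assign a cookie and move the descriptor
 * from the channel's created list to its pending list.
 */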
static dma_cookie_t ccp_tx_submit(struct dma_async_tx_descriptor *tx_desc)
{
	struct ccp_dma_desc *desc = container_of(tx_desc, struct ccp_dma_desc,
						 tx_desc);
	struct ccp_dma_chan *chan;
	dma_cookie_t cookie;
	unsigned long flags;

	chan = container_of(tx_desc->chan, struct ccp_dma_chan, dma_chan);

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx_desc);
	list_del(&desc->entry);
	list_add_tail(&desc->entry, &chan->pending);

	spin_unlock_irqrestore(&chan->lock, flags);

	dev_dbg(chan->ccp->dev, "%s - added tx descriptor %d to pending list\n",
		__func__, cookie);

	return cookie;
}

static struct ccp_dma_cmd *ccp_alloc_dma_cmd(struct ccp_dma_chan *chan)
{
	struct ccp_dma_cmd *cmd;

	cmd = kmem_cache_alloc(chan->ccp->dma_cmd_cache, GFP_NOWAIT);
	if (cmd)
		memset(cmd, 0, sizeof(*cmd));

	return cmd;
}

static struct ccp_dma_desc *ccp_alloc_dma_desc(struct ccp_dma_chan *chan,
					       unsigned long flags)
{
	struct ccp_dma_desc *desc;

	desc = kmem_cache_zalloc(chan->ccp->dma_desc_cache, GFP_NOWAIT);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->tx_desc, &chan->dma_chan);
	desc->tx_desc.flags = flags;
	desc->tx_desc.tx_submit = ccp_tx_submit;
	desc->ccp = chan->ccp;
	INIT_LIST_HEAD(&desc->pending);
	INIT_LIST_HEAD(&desc->active);
	desc->status = DMA_IN_PROGRESS;

	return desc;
}

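/*
 * Build a DMA descriptor that copies between two scatterlists, walking
 * both in lockstep and emitting one CCP passthrough (no-op transform)
 * command per min(src, dst) segment. The descriptor is parked on the
 * channel's created list until it is submitted.
 */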
static struct ccp_dma_desc *ccp_create_desc(struct dma_chan *dma_chan,
					    struct scatterlist *dst_sg,
					    unsigned int dst_nents,
					    struct scatterlist *src_sg,
					    unsigned int src_nents,
					    unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_device *ccp = chan->ccp;
	struct ccp_dma_desc *desc;
	struct ccp_dma_cmd *cmd;
	struct ccp_cmd *ccp_cmd;
	struct ccp_passthru_nomap_engine *ccp_pt;
	unsigned int src_offset, src_len;
	unsigned int dst_offset, dst_len;
	unsigned int len;
	unsigned long sflags;
	size_t total_len;

	if (!dst_sg || !src_sg)
		return NULL;

	if (!dst_nents || !src_nents)
		return NULL;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	total_len = 0;

	src_len = sg_dma_len(src_sg);
	src_offset = 0;

	dst_len = sg_dma_len(dst_sg);
	dst_offset = 0;

	while (true) {
		if (!src_len) {
			src_nents--;
			if (!src_nents)
				break;

			src_sg = sg_next(src_sg);
			if (!src_sg)
				break;

			src_len = sg_dma_len(src_sg);
			src_offset = 0;
			continue;
		}

		if (!dst_len) {
			dst_nents--;
			if (!dst_nents)
				break;

			dst_sg = sg_next(dst_sg);
			if (!dst_sg)
				break;

			dst_len = sg_dma_len(dst_sg);
			dst_offset = 0;
			continue;
		}

		len = min(dst_len, src_len);

		cmd = ccp_alloc_dma_cmd(chan);
		if (!cmd)
			goto err;

		ccp_cmd = &cmd->ccp_cmd;
		ccp_cmd->ccp = chan->ccp;
		ccp_pt = &ccp_cmd->u.passthru_nomap;
		ccp_cmd->flags = CCP_CMD_MAY_BACKLOG;
		ccp_cmd->flags |= CCP_CMD_PASSTHRU_NO_DMA_MAP;
		ccp_cmd->engine = CCP_ENGINE_PASSTHRU;
		ccp_pt->bit_mod = CCP_PASSTHRU_BITWISE_NOOP;
		ccp_pt->byte_swap = CCP_PASSTHRU_BYTESWAP_NOOP;
		ccp_pt->src_dma = sg_dma_address(src_sg) + src_offset;
		ccp_pt->dst_dma = sg_dma_address(dst_sg) + dst_offset;
		ccp_pt->src_len = len;
		ccp_pt->final = 1;
		ccp_cmd->callback = ccp_cmd_callback;
		ccp_cmd->data = desc;

		list_add_tail(&cmd->entry, &desc->pending);

		dev_dbg(ccp->dev,
			"%s - cmd=%p, src=%pad, dst=%pad, len=%llu\n", __func__,
			cmd, &ccp_pt->src_dma,
			&ccp_pt->dst_dma, ccp_pt->src_len);

		total_len += len;

		src_len -= len;
		src_offset += len;

		dst_len -= len;
		dst_offset += len;
	}

	desc->len = total_len;

	if (list_empty(&desc->pending))
		goto err;

	dev_dbg(ccp->dev, "%s - desc=%p\n", __func__, desc);

	spin_lock_irqsave(&chan->lock, sflags);

	list_add_tail(&desc->entry, &chan->created);

	spin_unlock_irqrestore(&chan->lock, sflags);

	return desc;

err:
	ccp_free_cmd_resources(ccp, &desc->pending);
	kmem_cache_free(ccp->dma_desc_cache, desc);

	return NULL;
}

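/*
 * dmaengine device_prep_dma_memcpy hook: wrap the src/dst pair in
 * single-entry scatterlists and reuse the common descriptor builder.
 */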
static struct dma_async_tx_descriptor *ccp_prep_dma_memcpy(
	struct dma_chan *dma_chan, dma_addr_t dst, dma_addr_t src, size_t len,
	unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	struct scatterlist dst_sg, src_sg;

	dev_dbg(chan->ccp->dev,
		"%s - src=%pad, dst=%pad, len=%zu, flags=%#lx\n",
		__func__, &src, &dst, len, flags);

	sg_init_table(&dst_sg, 1);
	sg_dma_address(&dst_sg) = dst;
	sg_dma_len(&dst_sg) = len;

	sg_init_table(&src_sg, 1);
	sg_dma_address(&src_sg) = src;
	sg_dma_len(&src_sg) = len;

	desc = ccp_create_desc(dma_chan, &dst_sg, 1, &src_sg, 1, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

static struct dma_async_tx_descriptor *ccp_prep_dma_interrupt(
	struct dma_chan *dma_chan, unsigned long flags)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;

	desc = ccp_alloc_dma_desc(chan, flags);
	if (!desc)
		return NULL;

	return &desc->tx_desc;
}

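/*
 * dmaengine device_issue_pending hook: activate queued descriptors
 * and, if the channel was idle, start processing the first one.
 */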
static void ccp_issue_pending(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	spin_lock_irqsave(&chan->lock, flags);

	desc = __ccp_pending_to_active(chan);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* If there was nothing active, start processing */
	if (desc)
		ccp_cmd_callback(desc, 0);
}

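/*
 * dmaengine device_tx_status hook: report DMA_PAUSED while the channel
 * is paused; otherwise take the cookie status, refined by the
 * per-descriptor status (e.g. DMA_ERROR) if the descriptor is still on
 * the complete list.
 */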
static enum dma_status ccp_tx_status(struct dma_chan *dma_chan,
				     dma_cookie_t cookie,
				     struct dma_tx_state *state)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	enum dma_status ret;
	unsigned long flags;

	if (chan->status == DMA_PAUSED) {
		ret = DMA_PAUSED;
		goto out;
	}

	ret = dma_cookie_status(dma_chan, cookie, state);
	if (ret == DMA_COMPLETE) {
		spin_lock_irqsave(&chan->lock, flags);

		/* Get status from complete chain, if still there */
		list_for_each_entry(desc, &chan->complete, entry) {
			if (desc->tx_desc.cookie != cookie)
				continue;

			ret = desc->status;
			break;
		}

		spin_unlock_irqrestore(&chan->lock, flags);
	}

out:
	dev_dbg(chan->ccp->dev, "%s - %u\n", __func__, ret);

	return ret;
}

static int ccp_pause(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);

	chan->status = DMA_PAUSED;

	/*TODO: Wait for active DMA to complete before returning? */

	return 0;
}

static int ccp_resume(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	struct ccp_dma_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	desc = list_first_entry_or_null(&chan->active, struct ccp_dma_desc,
					entry);

	spin_unlock_irqrestore(&chan->lock, flags);

	/* Indicate the channel is running again */
	chan->status = DMA_IN_PROGRESS;

	/* If there was something active, re-start */
	if (desc)
		ccp_cmd_callback(desc, 0);

	return 0;
}

static int ccp_terminate_all(struct dma_chan *dma_chan)
{
	struct ccp_dma_chan *chan = container_of(dma_chan, struct ccp_dma_chan,
						 dma_chan);
	unsigned long flags;

	dev_dbg(chan->ccp->dev, "%s\n", __func__);

	/*TODO: Wait for active DMA to complete before continuing */

	spin_lock_irqsave(&chan->lock, flags);

	/*TODO: Purge the complete list? */
	ccp_free_desc_resources(chan->ccp, &chan->active);
	ccp_free_desc_resources(chan->ccp, &chan->pending);
	ccp_free_desc_resources(chan->ccp, &chan->created);

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

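/*
 * Register the CCP as a dmaengine provider: allocate per-queue channel
 * structures and the command/descriptor caches, advertise MEMCPY and
 * INTERRUPT capability, honor the channel visibility setting, and wire
 * up the device operations.
 */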
int ccp_dmaengine_register(struct ccp_device *ccp)
{
	struct ccp_dma_chan *chan;
	struct dma_device *dma_dev = &ccp->dma_dev;
	struct dma_chan *dma_chan;
	char *dma_cmd_cache_name;
	char *dma_desc_cache_name;
	unsigned int i;
	int ret;

	ccp->ccp_dma_chan = devm_kcalloc(ccp->dev, ccp->cmd_q_count,
					 sizeof(*(ccp->ccp_dma_chan)),
					 GFP_KERNEL);
	if (!ccp->ccp_dma_chan)
		return -ENOMEM;

	dma_cmd_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					    "%s-dmaengine-cmd-cache",
					    ccp->name);
	if (!dma_cmd_cache_name)
		return -ENOMEM;

	ccp->dma_cmd_cache = kmem_cache_create(dma_cmd_cache_name,
					       sizeof(struct ccp_dma_cmd),
					       sizeof(void *),
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_cmd_cache)
		return -ENOMEM;

	dma_desc_cache_name = devm_kasprintf(ccp->dev, GFP_KERNEL,
					     "%s-dmaengine-desc-cache",
					     ccp->name);
	if (!dma_desc_cache_name) {
		ret = -ENOMEM;
		goto err_cache;
	}

	ccp->dma_desc_cache = kmem_cache_create(dma_desc_cache_name,
						sizeof(struct ccp_dma_desc),
						sizeof(void *),
						SLAB_HWCACHE_ALIGN, NULL);
	if (!ccp->dma_desc_cache) {
		ret = -ENOMEM;
		goto err_cache;
	}

	dma_dev->dev = ccp->dev;
	dma_dev->src_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->dst_addr_widths = CCP_DMA_WIDTH(dma_get_mask(ccp->dev));
	dma_dev->directions = DMA_MEM_TO_MEM;
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);

	/* The DMA channels for this device can be set to public or private,
	 * and overridden by the module parameter dma_chan_attr.
	 * Default: according to the value in vdata (dma_chan_attr=0)
	 * dma_chan_attr=0x1: all channels private (override vdata)
	 * dma_chan_attr=0x2: all channels public (override vdata)
	 */
	if (ccp_get_dma_chan_attr(ccp) == DMA_PRIVATE)
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < ccp->cmd_q_count; i++) {
		chan = ccp->ccp_dma_chan + i;
		dma_chan = &chan->dma_chan;

		chan->ccp = ccp;

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->created);
		INIT_LIST_HEAD(&chan->pending);
		INIT_LIST_HEAD(&chan->active);
		INIT_LIST_HEAD(&chan->complete);

		tasklet_init(&chan->cleanup_tasklet, ccp_do_cleanup,
			     (unsigned long)chan);

		dma_chan->device = dma_dev;
		dma_cookie_init(dma_chan);

		list_add_tail(&dma_chan->device_node, &dma_dev->channels);
	}

	dma_dev->device_free_chan_resources = ccp_free_chan_resources;
	dma_dev->device_prep_dma_memcpy = ccp_prep_dma_memcpy;
	dma_dev->device_prep_dma_interrupt = ccp_prep_dma_interrupt;
	dma_dev->device_issue_pending = ccp_issue_pending;
	dma_dev->device_tx_status = ccp_tx_status;
	dma_dev->device_pause = ccp_pause;
	dma_dev->device_resume = ccp_resume;
	dma_dev->device_terminate_all = ccp_terminate_all;

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto err_reg;

	return 0;

err_reg:
	kmem_cache_destroy(ccp->dma_desc_cache);

err_cache:
	kmem_cache_destroy(ccp->dma_cmd_cache);

	return ret;
}

void ccp_dmaengine_unregister(struct ccp_device *ccp)
{
	struct dma_device *dma_dev = &ccp->dma_dev;

	dma_async_device_unregister(dma_dev);

	kmem_cache_destroy(ccp->dma_desc_cache);
	kmem_cache_destroy(ccp->dma_cmd_cache);
}