/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller.
 *
 * The driver has currently been tested with the Atmel AT91SAM9RL
 * and AT91SAM9G45 series.
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"

/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_ / atchan	: Atmel DMA Channel entity related
 */

#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLA	(0)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))

/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during dma usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");


/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);


/*----------------------------------------------------------------------*/

static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}

/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc	*desc = NULL;
	struct at_dma	*atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}

/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;
	LIST_HEAD(tmp_list);

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
				"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		"scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
					"not enough descriptors available\n");
		}
	}

	return ret;
}

/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
					"moving child desc %p to freelist\n",
					child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}

/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
				&(*first)->tx_list);
	}
	*prev = desc;
}

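/*
 * Illustrative sketch (mirrors the prep_* functions below): a transfer
 * chain is typically built as
 *
 *	for each chunk {
 *		desc = atc_desc_get(atchan);
 *		...fill desc->lli (saddr, daddr, ctrla, ctrlb)...
 *		atc_desc_chain(&first, &prev, desc);
 *	}
 *	set_desc_eol(prev);	// terminate the hardware linked list
 *
 * so that desc->lli.dscr links each LLI to the next one in memory.
 */
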
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

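	/*
	 * Added note: in descriptor-fetch mode the controller reloads
	 * SADDR/DADDR/CTRLA/CTRLB from the LLI pointed to by DSCR, which
	 * is why only DSCR is programmed with a real value here.
	 */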
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}

/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled
 */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		"descriptor %u complete\n", txd->cookie);

	dma_cookie_complete(txd);

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	/* unmap dma addresses (not on slave channels) */
	if (!atchan->chan_common.private) {
		struct device *parent = chan2parent(&atchan->chan_common);
		if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
			if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.daddr,
						desc->len, DMA_FROM_DEVICE);
		}
		if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
			if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
				dma_unmap_single(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
			else
				dma_unmap_page(parent,
						desc->lli.saddr,
						desc->len, DMA_TO_DEVICE);
		}
	}

	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		dma_async_tx_callback	callback = txd->callback;
		void			*param = txd->callback_param;

		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		if (callback)
			callback(param);
	}

	dma_run_dependencies(txd);
}

/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Eventually submit queued descriptors if any
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	BUG_ON(atc_chan_is_enabled(atchan));

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now that it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}

/**
 * atc_cleanup_descriptors - clean up finished descriptors in active_list
 * @atchan: channel to be cleaned up
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
{
	struct at_desc	*desc, *_desc;
	struct at_desc	*child;

	dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (!(desc->lli.ctrla & ATC_DONE))
			/* This one is currently in progress */
			return;

		list_for_each_entry(child, &desc->tx_list, desc_node)
			if (!(child->lli.ctrla & ATC_DONE))
				/* Currently in progress */
				return;

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this chain must be done.
		 */
		atc_chain_complete(atchan, desc);
	}
}

/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}


/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take this opportunity to push queued
	 * descriptors in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRIT may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
			"Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
			"  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}

/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc			*first = atc_first_active(atchan);
	struct dma_async_tx_descriptor	*txd = &first->txd;
	dma_async_tx_callback		callback = txd->callback;
	void				*param = txd->callback_param;

	dev_vdbg(chan2dev(&atchan->chan_common),
			"new cyclic period llp 0x%08x\n",
			channel_readl(atchan, DSCR));

	if (callback)
		callback(param);
}

/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}

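/*
 * Added note: interrupt handling is split in two halves. at_dma_interrupt()
 * below only latches per-channel state (e.g. ATC_IS_ERROR) and schedules
 * atc_tasklet(), which then dispatches to the error, cyclic or normal
 * completion path above.
 */
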
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma		*atdma = (struct at_dma *)dev_id;
	struct at_dma_chan	*atchan;
	int			i;
	u32			status, pending, imr;
	int			ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			"interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}


/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc		*desc = txd_to_at_desc(tx);
	struct at_dma_chan	*atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
				desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
				desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}

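/*
 * Illustrative client flow (standard dmaengine usage, not part of this
 * driver): a client never calls atc_tx_submit() directly, it goes through
 * the generic API, roughly
 *
 *	txd = chan->device->device_prep_dma_memcpy(chan, dst, src, len, flags);
 *	cookie = txd->tx_submit(txd);		// lands in atc_tx_submit()
 *	dma_async_issue_pending(chan);
 *
 * The first submission on an idle channel starts the hardware immediately;
 * later ones are parked on atchan->queue until the tasklet advances work.
 */
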
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_desc		*desc = NULL;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	size_t			xfer_count;
	size_t			offset;
	unsigned int		src_width;
	unsigned int		dst_width;
	u32			ctrla;
	u32			ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
			dest, src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrla =   ATC_DEFAULT_CTRLA;
	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	if (!((src | dest | len) & 3)) {
		ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
		src_width = dst_width = 2;
	} else if (!((src | dest | len) & 1)) {
		ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
		src_width = dst_width = 1;
	} else {
		ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
		src_width = dst_width = 0;
	}
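
	/*
	 * Worked example (illustrative): for a 4 KiB copy with word-aligned
	 * src/dest/len, src_width = dst_width = 2, so each LLI moves up to
	 * ATC_BTSIZE_MAX 32-bit words; this transfer needs 4096 >> 2 = 1024
	 * words, i.e. a single descriptor (assuming ATC_BTSIZE_MAX >= 1024).
	 */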

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = len;

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}


/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	u32			ctrla;
	u32			ctrlb;
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	reg_width = atslave->reg_width;

	ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
		reg = atslave->tx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);

		reg = atslave->rx_reg;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc	*desc;
			u32		len;
			u32		mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> reg_width;
			desc->lli.ctrlb = ctrlb;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of the list */
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = total_len;

	/* first link descriptor of list is responsible for flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
	return NULL;
}

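/*
 * Added note on the mem_width choice above: the memory side uses 32-bit
 * accesses only when both the scatterlist segment address and its length
 * are word-aligned, otherwise it falls back to byte accesses; the
 * peripheral side always uses the slave's reg_width.
 */
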
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}

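/*
 * Worked example (illustrative): with reg_width = 2 (32-bit registers),
 * each period must be a multiple of 4 bytes, start on a 4-byte boundary
 * and not exceed ATC_BTSIZE_MAX << 2 bytes, since one period maps to a
 * single LLI whose transfer count is expressed in reg_width-sized items.
 */
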
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		size_t period_len, enum dma_transfer_direction direction)
{
	u32			ctrla;
	unsigned int		reg_width = atslave->reg_width;

	/* prepare common CTRLA value */
	ctrla =   ATC_DEFAULT_CTRLA | atslave->ctrla
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = atslave->tx_reg;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(AT_DMA_MEM_IF)
				| ATC_DIF(AT_DMA_PER_IF);
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = atslave->rx_reg;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(AT_DMA_PER_IF)
				| ATC_DIF(AT_DMA_MEM_IF);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma_slave	*atslave = chan->private;
	struct at_desc		*first = NULL;
	struct at_desc		*prev = NULL;
	unsigned long		was_cyclic;
	unsigned int		periods = buf_len / period_len;
	unsigned int		i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
			period_len, direction))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc	*desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
						period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* make the list cyclic: link the last descriptor back to the first */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}


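/*
 * Illustrative client sketch (assumed audio-style use, not from this file):
 * a cyclic transfer is set up once and runs until terminated, with
 * txd->callback invoked at each period boundary, roughly
 *
 *	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
 *						    period_len, DMA_MEM_TO_DEV);
 *	txd->callback = period_elapsed;		// hypothetical helper
 *	txd->tx_submit(txd);
 */
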
static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		       unsigned long arg)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	int			chan_id = atchan->chan_common.chan_id;
	unsigned long		flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);

	if (cmd == DMA_PAUSE) {
		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
		set_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_RESUME) {
		if (!atc_chan_is_paused(atchan))
			return 0;

		spin_lock_irqsave(&atchan->lock, flags);

		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
		clear_bit(ATC_IS_PAUSED, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else if (cmd == DMA_TERMINATE_ALL) {
		struct at_desc	*desc, *_desc;
		/*
		 * This is only called when something went wrong elsewhere, so
		 * we don't really care about the data. Just disable the
		 * channel. We still have to poll the channel enable bit due
		 * to AHB/HSB limitations.
		 */
		spin_lock_irqsave(&atchan->lock, flags);

		/* disabling channel: must also remove suspend state */
		dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

		/* confirm that this channel is disabled */
		while (dma_readl(atdma, CHSR) & atchan->mask)
			cpu_relax();

		/* active_list entries will end up before queued entries */
		list_splice_init(&atchan->queue, &list);
		list_splice_init(&atchan->active_list, &list);

		/* Flush all pending and queued descriptors */
		list_for_each_entry_safe(desc, _desc, &list, desc_node)
			atc_chain_complete(atchan, desc);

		clear_bit(ATC_IS_PAUSED, &atchan->status);
		/* if channel dedicated to cyclic operations, free it */
		clear_bit(ATC_IS_CYCLIC, &atchan->status);

		spin_unlock_irqrestore(&atchan->lock, flags);
	} else {
		return -ENXIO;
	}

	return 0;
}

/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	dma_cookie_t		last_used;
	dma_cookie_t		last_complete;
	unsigned long		flags;
	enum dma_status		ret;

	spin_lock_irqsave(&atchan->lock, flags);

	last_complete = chan->completed_cookie;
	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, last_complete, last_used);
	if (ret != DMA_SUCCESS) {
		atc_cleanup_descriptors(atchan);

		last_complete = chan->completed_cookie;
		last_used = chan->cookie;

		ret = dma_async_is_complete(cookie, last_complete, last_used);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (ret != DMA_SUCCESS)
		dma_set_tx_state(txstate, last_complete, last_used,
			atc_first_active(atchan)->len);
	else
		dma_set_tx_state(txstate, last_complete, last_used, 0);

	if (atc_chan_is_paused(atchan))
		ret = DMA_PAUSED;

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
		 ret, cookie, last_complete ? last_complete : 0,
		 last_used ? last_used : 0);

	return ret;
}

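/*
 * Added note: for an in-flight transaction the residue reported above is
 * only a coarse approximation; it is the total length of the first active
 * chain, not the number of bytes actually left in the hardware.
 */
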
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	unsigned long		flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	if (!atc_chan_is_enabled(atchan))
		atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}

/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * return - the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc;
	struct at_dma_slave	*atslave;
	unsigned long		flags;
	int			i;
	u32			cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT: channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	chan->completed_cookie = chan->cookie = 1;
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}

/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan	*atchan = to_at_dma_chan(chan);
	struct at_dma		*atdma = to_at_dma(chan->device);
	struct at_desc		*desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}


/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}

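/*
 * Illustrative device tree node matched by atmel_dma_dt_ids above; the
 * reg/interrupt values here are placeholders (assumptions, not taken from
 * this file), check the SoC's dtsi for the real ones:
 *
 *	dma: dma-controller@ffffec00 {
 *		compatible = "atmel,at91sam9g45-dma";
 *		reg = <0xffffec00 0x200>;
 *		interrupts = <21>;
 *	};
 */
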
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}

static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource		*io;
	struct at_dma		*atdma;
	size_t			size;
	int			irq;
	int			err;
	int			i;
	struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	clk_enable(atdma->clk);

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan	*atchan = &atdma->chan[i];

		atchan->chan_common.device = &atdma->dma_common;
		atchan->chan_common.cookie = atchan->chan_common.completed_cookie = 1;
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_control = atc_control;
	}

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	return 0;

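	/*
	 * Error unwinding below releases resources in the reverse order of
	 * their acquisition above.
	 */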
err_pool_create:
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable(atdma->clk);
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}

static int __exit at_dma_remove(struct platform_device *pdev)
{
	struct at_dma		*atdma = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;
	struct resource		*io;

	at_dma_off(atdma);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->dma_desc_pool);
	platform_set_drvdata(pdev, NULL);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan	*atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);
		tasklet_disable(&atchan->tasklet);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}

static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma	*atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable(atdma->clk);
}

static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}

static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan	*chan = &atchan->chan_common;

	/* Channel should already be paused by the user;
	 * do it anyway if it has not been done yet */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_control(chan, DMA_PAUSE, 0);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}

static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable(atdma->clk);
	return 0;
}

static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma	*atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}

static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}

static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};

static struct platform_driver at_dma_driver = {
	.remove		= __exit_p(at_dma_remove),
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};

static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);

MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");