/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This is the initial version of the MPC5121 DMA driver. Only memory to
 * memory transfers are supported (tested using the dmatest module).
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <linux/random.h>

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_CHANNELS	64
#define MPC_DMA_TCD_OFFSET	0x1000

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err)	(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};
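
/*
 * Register-usage note (an informal summary added for clarity, not an
 * exhaustive programming model): for the mem-to-mem transfers implemented
 * here a channel is started purely in software by writing its number to
 * dmassrt, roughly
 *
 *	out_8(&mdma->regs->dmassrt, chan_id);
 *
 * while dmaerqh/dmaerql (and the dmaserq/dmacerq set/clear registers) gate
 * hardware-triggered requests, which this version of the driver leaves
 * disabled in mpc_dma_probe().
 */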

struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
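
/*
 * TCD programming sketch (illustrative only; it mirrors what
 * mpc_dma_prep_memcpy() below actually does): a simple mem-to-mem copy uses
 * a single major iteration whose minor loop moves the whole buffer.  For a
 * 32-byte aligned transfer this amounts to
 *
 *	tcd->ssize = MPC_DMA_TSIZE_32;	source read size
 *	tcd->dsize = MPC_DMA_TSIZE_32;	destination write size
 *	tcd->soff = 32;			source address increment per read
 *	tcd->doff = 32;			destination address increment per write
 *	tcd->saddr = src;
 *	tcd->daddr = dst;
 *	tcd->nbytes = len;
 *	tcd->biter = tcd->citer = 1;
 */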

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * The following requirements must be met while calling mpc_dma_execute():
 * a) mchan->lock is acquired,
 * b) mchan->active list is empty,
 * c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	/* Move all queued descriptors to active list */
	list_splice_tail_init(&mchan->queued, &mchan->active);

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	out_8(&mdma->regs->dmassrt, cid);
}
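
/*
 * Chaining note (an informal summary of mpc_dma_execute() above): descriptors
 * are linked through the controller's scatter/gather feature.  Each TCD's
 * dlast_sga holds the physical address of the next descriptor's TCD and e_sg
 * is set, so on major loop completion the hardware loads the next TCD by
 * itself; start is pre-set in the linked TCDs so they run immediately, and
 * only the last TCD in the chain requests an interrupt via int_maj.
 */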

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Move finished descriptors to the completed list */
		list_splice_tail_init(&mchan->active, &mchan->completed);

		/* Execute queued descriptors */
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* Process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported the following error(s) on channel %u:\n",
					MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev,
				"- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev,
				"- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev,
				"- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev,
				"- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev,
				"- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = mchan->chan.cookie + 1;
	if (cookie <= 0)
		cookie = 1;

	mchan->chan.cookie = cookie;
	mdesc->desc.cookie = cookie;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
						&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n",
				i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	spin_lock_irqsave(&mchan->lock, flags);
	last_used = mchan->chan.cookie;
	last_complete = mchan->chan.completed_cookie;
	spin_unlock_irqrestore(&mchan->lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}
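
/*
 * Note: clients normally reach the status check above through the generic
 * dmaengine helpers such as dma_async_is_tx_complete(chan, cookie, NULL,
 * NULL) rather than calling device_tx_status directly.
 */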

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* Try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
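
/*
 * Illustrative client usage (a sketch of the generic dmaengine API, not part
 * of this driver): a mem-to-mem user would typically do something along the
 * lines of
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (!chan)
 *		return -ENODEV;
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *							DMA_CTRL_ACK);
 *	cookie = tx->tx_submit(tx);
 *	dma_async_issue_pending(chan);
 *	... poll with dma_async_is_tx_complete(chan, cookie, NULL, NULL) ...
 *	dma_release_channel(chan);
 *
 * where dst and src are DMA (bus) addresses obtained from the DMA mapping
 * API.
 */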

static int __devinit mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			return -EINVAL;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		return retval;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		return -EBUSY;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		return -ENOMEM;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = devm_request_irq(dev, mdma->irq, &mpc_dma_irq, 0, DRV_NAME,
									mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		return -EINVAL;
	}

	if (mdma->is_mpc8308) {
		retval = devm_request_irq(dev, mdma->irq2, &mpc_dma_irq, 0,
				DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			return -EINVAL;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	if (!mdma->is_mpc8308)
		dma->chancnt = MPC_DMA_CHANNELS;
	else
		dma->chancnt = 16; /* MPC8308 DMA has only 16 channels */
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		mchan->chan.cookie = 1;
		mchan->chan.completed_cookie = mchan->chan.cookie;

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (!mdma->is_mpc8308) {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
				MPC_DMA_DMACR_ERGA | MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	} else {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* Enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval) {
		devm_free_irq(dev, mdma->irq, mdma);
		irq_dispose_mapping(mdma->irq);
	}

	return retval;
}

static int __devexit mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&mdma->dma);
	devm_free_irq(dev, mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);

	return 0;
}

static struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{},
};
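
/*
 * For reference, a matching device tree node would look roughly like the
 * hypothetical example below (the "reg" and "interrupts" values are
 * placeholders; consult the board's dts/dtsi for the real ones):
 *
 *	dma@14000 {
 *		compatible = "fsl,mpc5121-dma";
 *		reg = <0x14000 0x1800>;
 *		interrupts = <65 0x8>;
 *	};
 */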

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= __devexit_p(mpc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");