/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#define TSI721_DMA_TX_QUEUE_SZ	16	/* number of transaction descriptors */

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
				struct tsi721_bdma_chan *bdma_chan)
{
	return list_first_entry(&bdma_chan->active_list,
				struct tsi721_tx_desc, desc_node);
}

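/*
 * tsi721_bdma_ch_init - one-time hardware setup for a BDMA channel.
 * Allocates the buffer descriptor (BD) ring and the descriptor status FIFO
 * in coherent memory, points the channel registers at them and, in MSI-X
 * mode, requests the per-channel DONE and INT interrupt vectors.
 */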
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
				bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
				bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

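/*
 * tsi721_bdma_ch_free - undo tsi721_bdma_ch_init.
 * Refuses to tear down a channel that is still running; otherwise returns
 * the channel to its init state, releases the MSI-X vectors (if used) and
 * frees the BD ring and the descriptor status FIFO.
 */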
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
		bdma_chan->wr_count_next);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

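/*
 * tsi721_desc_fill_init/tsi721_desc_fill_end - build one Type1 (data block)
 * hardware descriptor in two steps: _init programs the transfer type,
 * RapidIO destination ID/address and the local buffer pointer taken from the
 * SG entry; _end is called once the total byte count of the (possibly
 * merged) block is known and can additionally request an interrupt on
 * completion of this descriptor.
 */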
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
				(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

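/*
 * tsi721_dma_tx_err - return a failed transaction descriptor to the channel's
 * free list and run its completion callback (if any) so the client is
 * notified.
 */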
static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

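/*
 * tsi721_clr_stat - drain the descriptor status FIFO.
 * Completed descriptors post non-zero entries into the status FIFO; clear
 * them in groups of eight and advance the hardware read pointer (DSRP) so
 * the FIFO does not overflow.
 */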
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

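/*
 * tsi721_submit_sg - translate a transaction's scatter-gather list into
 * hardware buffer descriptors.  Physically contiguous SG entries are merged
 * into a single descriptor when possible.  If the BD ring fills up before
 * the whole list is consumed, the remaining entries are left in
 * desc->sg/sg_len so a later pass (after completions free ring space) can
 * continue the transfer.
 */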
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to use non-idle channel\n");
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
		__func__, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
			i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			dev_err(dchan->device->dev,
				"%s: SG entry %d is too large\n", __func__, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: prev desc final len: %d\n",
				__func__, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			dev_dbg(dchan->device->dev,
				"%s: HW descriptor ring is full @ %d\n",
				__func__, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: err=%d\n", err);
			break;
		}

		dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
			bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: last desc final len: %d\n",
				__func__, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

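/*
 * tsi721_advance_work - move newly queued transactions onto the active list
 * and, if a transaction is available, program its next chunk of buffer
 * descriptors and kick the channel.  Called with the channel spinlock held.
 */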
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *desc;
	int err;

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);

	/*
	 * If there are any new transactions in the queue add them
	 * into the processing list
	 */
	if (!list_empty(&bdma_chan->queue))
		list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

	/* Start new transaction (if available) */
	if (!list_empty(&bdma_chan->active_list)) {
		desc = tsi721_dma_first_active(bdma_chan);
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			dev_dbg(bdma_chan->dchan.device->dev,
				"ERR: tsi721_submit_sg failed with err=%d\n",
				err);
		}
	}

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
}

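/*
 * tsi721_dma_tasklet - deferred interrupt handling for one BDMA channel.
 * Reports error and status-FIFO-full conditions, completes the current
 * transaction once its whole SG list has been consumed (invoking the client
 * callback with the channel lock dropped), advances the work queue, and
 * finally re-enables the channel interrupts masked by the hard IRQ handler.
 */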
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = tsi721_dma_first_active(bdma_chan);

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_move(&desc->desc_node, &bdma_chan->free_list);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
			spin_lock(&bdma_chan->lock);
		}

		tsi721_advance_work(bdma_chan);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

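/*
 * tsi721_tx_submit - dmaengine .tx_submit hook.  Assigns a cookie to the
 * prepared descriptor and places it on the channel's pending queue; the
 * transfer itself is started later by tsi721_issue_pending() or by the
 * completion tasklet.
 */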
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: wrong state of descriptor %p\n", __func__, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

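/*
 * tsi721_alloc_chan_resources - dmaengine .device_alloc_chan_resources hook.
 * Brings the BDMA channel hardware up (BD ring sized by the
 * dma_desc_per_channel module parameter), allocates the fixed pool of
 * TSI721_DMA_TX_QUEUE_SZ software transaction descriptors and enables the
 * channel interrupts.
 */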
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base)
		return TSI721_DMA_TX_QUEUE_SZ;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		dev_err(dchan->device->dev,
			"Unable to initialize data DMA channel %d, aborting\n",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return TSI721_DMA_TX_QUEUE_SZ;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

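/*
 * tsi721_free_chan_resources - dmaengine .device_free_chan_resources hook.
 * The channel must be quiescent (no active or queued transactions).  Disables
 * and synchronizes channel interrupts, kills the tasklet, then releases the
 * software descriptor pool and the hardware BD ring and status FIFO.
 */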
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	BUG_ON(!list_empty(&bdma_chan->active_list));
	BUG_ON(!list_empty(&bdma_chan->queue));

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);

	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		spin_lock_bh(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock_bh(&bdma_chan->lock);
	}
}

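/*
 * tsi721_prep_rio_sg - dmaengine .device_prep_slave_sg hook for RapidIO
 * transfers.  The opaque context (tinfo) is a struct rio_dma_ext supplying
 * the RapidIO destination ID, target address and write type; the routine
 * maps the requested direction/write type onto a Tsi721 request type and
 * grabs an ACKed transaction descriptor from the channel's free list.
 */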
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
		(dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	spin_lock_bh(&bdma_chan->lock);

	list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->desc_node);
			desc->destid = rext->destid;
			desc->rio_addr = rext->rio_addr;
			desc->rio_addr_u = 0;
			desc->rtype = rtype;
			desc->sg_len = sg_len;
			desc->sg = sgl;
			txd = &desc->txd;
			txd->flags = flags;
			break;
		}
	}

	spin_unlock_bh(&bdma_chan->lock);

	return txd;
}

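/*
 * tsi721_terminate_all - dmaengine .device_terminate_all hook.  Marks the
 * channel inactive, suspends a running transfer and busy-waits for the
 * suspend to take effect, then fails every active and queued transaction
 * back to the client.
 */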
static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	u32 dmac_int;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
	}

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

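/*
 * tsi721_register_dma - register the Tsi721 BDMA channels with the dmaengine
 * core.  Every hardware channel except the one reserved for maintenance
 * transactions (TSI721_DMACH_MAINT) is exposed as a DMA_SLAVE/DMA_PRIVATE
 * channel on the RapidIO mport's dma_device.
 */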
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		INIT_LIST_HEAD(&bdma_chan->active_list);
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}