]>
Commit | Line | Data |
---|---|---|
9eaa3d9b AB |
1 | /* |
2 | * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge | |
3 | * | |
50835e97 | 4 | * Copyright (c) 2011-2014 Integrated Device Technology, Inc. |
9eaa3d9b AB |
5 | * Alexandre Bounine <alexandre.bounine@idt.com> |
6 | * | |
7 | * This program is free software; you can redistribute it and/or modify it | |
8 | * under the terms of the GNU General Public License as published by the Free | |
9 | * Software Foundation; either version 2 of the License, or (at your option) | |
10 | * any later version. | |
11 | * | |
12 | * This program is distributed in the hope that it will be useful, but WITHOUT | |
13 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
15 | * more details. | |
16 | * | |
50835e97 AB |
17 | * The full GNU General Public License is included in this distribution in the |
18 | * file called COPYING. | |
9eaa3d9b AB |
19 | */ |
20 | ||
21 | #include <linux/io.h> | |
22 | #include <linux/errno.h> | |
23 | #include <linux/init.h> | |
24 | #include <linux/ioport.h> | |
25 | #include <linux/kernel.h> | |
26 | #include <linux/module.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/rio.h> | |
29 | #include <linux/rio_drv.h> | |
30 | #include <linux/dma-mapping.h> | |
31 | #include <linux/interrupt.h> | |
32 | #include <linux/kfifo.h> | |
72d8a0d2 | 33 | #include <linux/sched.h> |
9eaa3d9b | 34 | #include <linux/delay.h> |
50835e97 | 35 | #include "../../dma/dmaengine.h" |
9eaa3d9b AB |
36 | |
37 | #include "tsi721.h" | |
38 | ||
50835e97 AB |
/* MSI-X handler is only referenced when PCI MSI support is compiled in */
#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

/* Number of hardware buffer descriptors allocated per DMA channel */
static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

/* Number of software transaction descriptors queued per channel */
static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

/* Bitmask selecting which of the BDMA channels may be used */
static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");
58 | ||
9eaa3d9b AB |
/* Convert a generic dmaengine channel to the Tsi721 BDMA channel wrapping it */
static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}
63 | ||
/*
 * Convert a dma_device to its owning tsi721_device: the dma_device is
 * embedded in a rio_mport whose ->priv points back at the Tsi721 device.
 */
static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}
68 | ||
/* Convert a generic async TX descriptor to the Tsi721 descriptor wrapping it */
static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}
74 | ||
/*
 * tsi721_bdma_ch_init - allocate and initialize one BDMA channel
 * @bdma_chan: channel to initialize
 * @bd_num: number of usable hardware buffer descriptors to allocate
 *
 * Allocates the coherent BD ring (bd_num + 1 entries; the extra entry is a
 * DTYPE3 link descriptor that wraps the ring back to its start) and the
 * descriptor status FIFO, programs their addresses into the channel
 * registers, optionally requests the two per-channel MSI-X vectors
 * (DONE and INT), and toggles the channel into init state.
 *
 * Returns 0 on success, -ENOMEM if either coherent allocation fails, or
 * -EIO if an MSI-X vector cannot be requested.
 */
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	/* FIFO must be at least TSI721_DMA_MINSTSSZ entries and a power of 2 */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		"DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	/* Read back to flush posted write before proceeding */
	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			/* Undo the successful -DONE request above */
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				(void *)bdma_chan);
		}

		/*
		 * Common failure exit for both request_irq calls: on any
		 * rc != 0 release both coherent allocations and bail out.
		 */
err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	/* brief settle delay after channel init toggle */
	udelay(10);

	return 0;
}
217 | ||
/*
 * tsi721_bdma_ch_free - release resources held by one BDMA channel
 * @bdma_chan: channel to tear down
 *
 * Refuses to free (-EFAULT) while the hardware still reports the channel as
 * running.  Otherwise puts the channel back into init state, releases the
 * per-channel MSI-X vectors (if in use), and frees the coherent BD ring and
 * status FIFO.  Returns 0 if the channel was already freed (bd_base NULL)
 * or on successful teardown.
 */
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	/* Nothing allocated - nothing to do */
	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}
258 | ||
/*
 * tsi721_bdma_interrupt_enable - enable or disable channel interrupts
 * @bdma_chan: target channel
 * @enable: non-zero to enable, zero to disable
 *
 * The write order is deliberate: when enabling, stale pending interrupts
 * are cleared (and the posted write flushed by a read-back) before the
 * enable mask is set; when disabling, the mask is cleared first so no new
 * interrupt can fire while pending bits are being acknowledged.
 */
static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}

}
279 | ||
280 | static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan) | |
281 | { | |
282 | u32 sts; | |
283 | ||
284 | sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); | |
285 | return ((sts & TSI721_DMAC_STS_RUN) == 0); | |
286 | } | |
287 | ||
/*
 * tsi721_bdma_handler - top-half dispatch for a BDMA channel interrupt
 * @bdma_chan: channel that signaled
 *
 * Masks further channel interrupts and defers processing to the channel
 * tasklet (only while the channel is marked active; interrupts are
 * re-enabled by the tasklet itself).
 */
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}
295 | ||
#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 * (NOTE: kernel-doc previously misnamed this as tsi721_omsg_msix.)
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	/* Defer all work to the channel tasklet, as in tsi721_bdma_handler */
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */
313 | ||
/*
 * tsi721_start_dma - kick the hardware to process newly queued descriptors
 * @bdma_chan: channel to start
 *
 * Must be called with the spinlock held.  Bails out (with an error log)
 * if the channel is not idle or if no new buffer descriptors were added
 * since the last start (wr_count == wr_count_next).  Writing the new
 * descriptor write count acts as the doorbell; the read-back flushes the
 * posted write before the cached count is updated.
 */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}
341 | ||
/*
 * tsi721_desc_fill_init - fill the fixed part of a hardware DMA descriptor
 * @desc: software transaction descriptor supplying rtype/destid/rio_addr
 * @bd_ptr: hardware buffer descriptor to initialize (DTYPE1 layout)
 * @sg: scatterlist entry providing the local bus address
 * @sys_size: RapidIO system size field packed into the byte-count word
 *
 * Packs the request type, destination ID, RapidIO target address and local
 * buffer address into the little-endian hardware descriptor.  The byte
 * count itself is set later by tsi721_desc_fill_end().
 * Returns 0, or -EINVAL if bd_ptr is NULL.
 */
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	/* Low 2 address bits ride in the top of the bcount word */
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	/* Remaining 64-bit RapidIO address, shifted right by those 2 bits */
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
				(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}
369 | ||
40f847ba | 370 | static int |
50835e97 | 371 | tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt) |
40f847ba | 372 | { |
50835e97 AB |
373 | if (bd_ptr == NULL) |
374 | return -EINVAL; | |
40f847ba AB |
375 | |
376 | /* Update DMA descriptor */ | |
50835e97 | 377 | if (interrupt) |
40f847ba | 378 | bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); |
50835e97 | 379 | bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1); |
40f847ba AB |
380 | |
381 | return 0; | |
382 | } | |
383 | ||
50835e97 AB |
/*
 * tsi721_dma_tx_err - fail a transaction and notify its submitter
 * @bdma_chan: owning channel
 * @desc: transaction that could not be submitted
 *
 * Returns the descriptor to the channel free list and invokes the client's
 * completion callback (if one was set).  The callback snapshot is taken
 * before the list move so the descriptor may be reused immediately.
 */
static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}
396 | ||
9eaa3d9b AB |
/*
 * tsi721_clr_stat - drain the descriptor status FIFO
 * @bdma_chan: channel whose FIFO is processed
 *
 * Walks the status FIFO from the cached read pointer, zeroing every
 * non-empty 8-entry line, then publishes the new read pointer to the
 * hardware (DSRP register).  A zero entry marks the end of valid data,
 * which is why both loops stop on the first zero word.
 */
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;	/* 8 x u64 entries per FIFO line */
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;	/* wrap around FIFO */
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}
419 | ||
50835e97 AB |
/*
 * tsi721_submit_sg - translate a transaction's scatterlist into HW descriptors
 * @desc: transaction whose remaining sg entries are to be programmed
 *
 * Must be called with the channel spinlock held and the channel idle.
 * Walks desc->sg, merging physically contiguous entries into a single
 * hardware descriptor where the combined length fits, and writes DTYPE1
 * descriptors into the BD ring.  If the ring fills before the scatterlist
 * is exhausted, desc->sg/sg_len are updated so a later call resumes where
 * this one stopped (sg_len == 0 means the transaction is fully programmed).
 * On success, wr_count_next is advanced by the number of ring slots
 * consumed (including wrap-around link-descriptor slots); the caller is
 * expected to ring the doorbell via tsi721_start_dma().
 *
 * Returns 0 on success, -EIO if the channel is busy, -EINVAL for an
 * oversized sg entry or descriptor build failure.
 */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;		/* sentinel: no previous sg entry yet */
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	/* Hardware read index tells us how far the DMA engine has consumed */
	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			bdma_chan->id, i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		/* Remember restart address in case the ring fills below */
		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			/* Ring full: save remaining sg list for a later call */
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			/* Last entry: close out the open descriptor */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}
543 | ||
d2a321f3 AB |
/*
 * tsi721_advance_work - push the next transaction into the hardware
 * @bdma_chan: channel to advance
 * @desc: transaction to (re)submit, or NULL to fetch one from the queue
 *
 * Caller must hold the channel lock (callers use spin_lock / spin_lock_bh).
 * Does nothing unless the channel is idle.  When @desc is NULL and no
 * transaction is currently active, the head of the pending queue becomes
 * the new active_tx.  The chosen transaction is programmed via
 * tsi721_submit_sg() and started; on submit failure it is failed through
 * tsi721_dma_tx_err().
 */
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */

	if (desc == NULL && bdma_chan->active_tx == NULL &&
	    !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				"DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}
582 | ||
/*
 * tsi721_dma_tasklet - bottom-half processing for BDMA channel interrupts
 * @data: channel pointer cast to unsigned long (tasklet convention)
 *
 * Reads and acknowledges the channel interrupt status, then handles three
 * conditions: DMA error (attempts a full channel re-init and fails the
 * active transaction with DMA_ERROR), status-FIFO-full (logged only), and
 * transfer done (completes the active transaction when its scatterlist is
 * exhausted, otherwise resubmits the remainder).  Channel interrupts were
 * masked by the top half and are re-enabled on exit.
 *
 * NOTE(review): the ERR branch dereferences bdma_chan->active_tx
 * (desc->destid / desc->rio_addr) without a NULL check; presumably an
 * error interrupt can only fire while a transfer is active - verify.
 */
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;	/* bounded wait for ABORT to clear (~10ms) */
		struct tsi721_tx_desc *desc;

		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */

		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		/* Fail the transaction that was in flight, then move on */
		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			/* Entire scatterlist was programmed: complete it */
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			spin_unlock(&bdma_chan->lock);
			/* Callback runs outside the lock */
			if (callback)
				callback(param);
		} else {
			/* Partially programmed: resubmit the remainder */
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}
704 | ||
/*
 * tsi721_tx_submit - dmaengine tx_submit hook: queue a prepared transaction
 * @txd: async TX descriptor previously returned by the prep routine
 *
 * Assigns a cookie, marks the transaction DMA_IN_PROGRESS and appends it
 * to the channel pending queue.  Rejects descriptors that are still on a
 * list (-EIO) and submission to an inactive channel (-ENODEV).
 */
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}
733 | ||
/*
 * tsi721_alloc_chan_resources - dmaengine hook: set up a channel for use
 * @dchan: channel being opened by a client
 *
 * Idempotent: if the BD ring already exists, just reports the queue size.
 * Otherwise initializes the hardware channel, allocates dma_txqueue_sz
 * software transaction descriptors (all placed on the free list), marks
 * the channel active and enables its interrupts.
 *
 * Returns the number of available transaction slots, -ENODEV if hardware
 * init fails, or -ENOMEM if descriptor allocation fails.
 */
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	/* Already initialized - nothing more to allocate */
	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
			GFP_ATOMIC);
	if (!desc) {
		tsi_err(&dchan->dev->device,
			"DMAC%d Failed to allocate logical descriptors",
			bdma_chan->id);
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}
779 | ||
/*
 * tsi721_sync_dma_irq - wait for any in-flight channel IRQ handlers
 * @bdma_chan: channel whose interrupt sources are synchronized
 *
 * In MSI-X mode both per-channel vectors (DONE and INT) are synchronized;
 * otherwise the shared PCI device interrupt line is.  Used during channel
 * teardown before killing the tasklet.
 */
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}
04379dff | 794 | |
50835e97 AB |
/*
 * tsi721_free_chan_resources - dmaengine hook: release a channel
 * @dchan: channel being closed
 *
 * Teardown order matters: interrupts are disabled and the channel marked
 * inactive first, then in-flight IRQ handlers are drained and the tasklet
 * killed before the software descriptors and hardware resources are freed.
 * No-op if the channel was never initialized.
 */
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}
812 | ||
813 | static | |
814 | enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, | |
815 | struct dma_tx_state *txstate) | |
816 | { | |
e680b672 AB |
817 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); |
818 | enum dma_status status; | |
819 | ||
820 | spin_lock_bh(&bdma_chan->lock); | |
821 | status = dma_cookie_status(dchan, cookie, txstate); | |
822 | spin_unlock_bh(&bdma_chan->lock); | |
823 | return status; | |
9eaa3d9b AB |
824 | } |
825 | ||
/*
 * tsi721_issue_pending - dmaengine hook: start processing queued transactions
 * @dchan: channel to kick
 *
 * Advances work only when the channel is both idle and active.  The
 * condition order is kept as-is: tsi721_dma_is_idle() performs an MMIO
 * status read before the active flag is consulted.
 */
static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		tsi721_advance_work(bdma_chan, NULL);
	}
	spin_unlock_bh(&bdma_chan->lock);
}
838 | ||
839 | static | |
840 | struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, | |
841 | struct scatterlist *sgl, unsigned int sg_len, | |
842 | enum dma_transfer_direction dir, unsigned long flags, | |
843 | void *tinfo) | |
844 | { | |
845 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | |
83472457 | 846 | struct tsi721_tx_desc *desc; |
9eaa3d9b | 847 | struct rio_dma_ext *rext = tinfo; |
9eaa3d9b | 848 | enum dma_rtype rtype; |
50835e97 | 849 | struct dma_async_tx_descriptor *txd = NULL; |
9eaa3d9b AB |
850 | |
851 | if (!sgl || !sg_len) { | |
72d8a0d2 AB |
852 | tsi_err(&dchan->dev->device, "DMAC%d No SG list", |
853 | bdma_chan->id); | |
83472457 | 854 | return ERR_PTR(-EINVAL); |
9eaa3d9b AB |
855 | } |
856 | ||
72d8a0d2 AB |
857 | tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id, |
858 | (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); | |
50835e97 | 859 | |
9eaa3d9b AB |
860 | if (dir == DMA_DEV_TO_MEM) |
861 | rtype = NREAD; | |
862 | else if (dir == DMA_MEM_TO_DEV) { | |
863 | switch (rext->wr_type) { | |
864 | case RDW_ALL_NWRITE: | |
865 | rtype = ALL_NWRITE; | |
866 | break; | |
867 | case RDW_ALL_NWRITE_R: | |
868 | rtype = ALL_NWRITE_R; | |
869 | break; | |
870 | case RDW_LAST_NWRITE_R: | |
871 | default: | |
872 | rtype = LAST_NWRITE_R; | |
873 | break; | |
874 | } | |
875 | } else { | |
72d8a0d2 AB |
876 | tsi_err(&dchan->dev->device, |
877 | "DMAC%d Unsupported DMA direction option", | |
878 | bdma_chan->id); | |
83472457 | 879 | return ERR_PTR(-EINVAL); |
9eaa3d9b AB |
880 | } |
881 | ||
50835e97 | 882 | spin_lock_bh(&bdma_chan->lock); |
40f847ba | 883 | |
83472457 AB |
884 | if (!list_empty(&bdma_chan->free_list)) { |
885 | desc = list_first_entry(&bdma_chan->free_list, | |
886 | struct tsi721_tx_desc, desc_node); | |
887 | list_del_init(&desc->desc_node); | |
888 | desc->destid = rext->destid; | |
889 | desc->rio_addr = rext->rio_addr; | |
890 | desc->rio_addr_u = 0; | |
891 | desc->rtype = rtype; | |
892 | desc->sg_len = sg_len; | |
893 | desc->sg = sgl; | |
894 | txd = &desc->txd; | |
895 | txd->flags = flags; | |
9eaa3d9b AB |
896 | } |
897 | ||
50835e97 | 898 | spin_unlock_bh(&bdma_chan->lock); |
9eaa3d9b | 899 | |
83472457 AB |
900 | if (!txd) { |
901 | tsi_debug(DMA, &dchan->dev->device, | |
902 | "DMAC%d free TXD is not available", bdma_chan->id); | |
903 | return ERR_PTR(-EBUSY); | |
904 | } | |
905 | ||
50835e97 | 906 | return txd; |
9eaa3d9b AB |
907 | } |
908 | ||
7664cfe0 | 909 | static int tsi721_terminate_all(struct dma_chan *dchan) |
9eaa3d9b AB |
910 | { |
911 | struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); | |
912 | struct tsi721_tx_desc *desc, *_d; | |
913 | LIST_HEAD(list); | |
914 | ||
72d8a0d2 | 915 | tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id); |
9eaa3d9b | 916 | |
9eaa3d9b AB |
917 | spin_lock_bh(&bdma_chan->lock); |
918 | ||
50835e97 AB |
919 | bdma_chan->active = false; |
920 | ||
458bdf6e AB |
921 | while (!tsi721_dma_is_idle(bdma_chan)) { |
922 | ||
923 | udelay(5); | |
924 | #if (0) | |
50835e97 AB |
925 | /* make sure to stop the transfer */ |
926 | iowrite32(TSI721_DMAC_CTL_SUSP, | |
927 | bdma_chan->regs + TSI721_DMAC_CTL); | |
928 | ||
929 | /* Wait until DMA channel stops */ | |
930 | do { | |
931 | dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); | |
932 | } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); | |
458bdf6e | 933 | #endif |
50835e97 | 934 | } |
9eaa3d9b | 935 | |
d2a321f3 AB |
936 | if (bdma_chan->active_tx) |
937 | list_add(&bdma_chan->active_tx->desc_node, &list); | |
9eaa3d9b AB |
938 | list_splice_init(&bdma_chan->queue, &list); |
939 | ||
940 | list_for_each_entry_safe(desc, _d, &list, desc_node) | |
50835e97 | 941 | tsi721_dma_tx_err(bdma_chan, desc); |
9eaa3d9b AB |
942 | |
943 | spin_unlock_bh(&bdma_chan->lock); | |
944 | ||
945 | return 0; | |
946 | } | |
947 | ||
e3dd8cd4 AB |
/*
 * Suspend a single BDMA channel's hardware engine.
 *
 * Writes TSI721_DMAC_CTL_SUSP to stop any transfer in progress, then
 * polls (up to ~100 ms total: 100000 * 1 us) for the channel to go idle.
 * If the timeout expires the function returns anyway with the channel
 * possibly still busy — callers get no error indication.
 *
 * NOTE(review): bdma_chan->active is read before taking the lock;
 * presumably callers guarantee no concurrent activation — verify.
 */
static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
	if (!bdma_chan->active)
		return;
	spin_lock_bh(&bdma_chan->lock);
	if (!tsi721_dma_is_idle(bdma_chan)) {
		int timeout = 100000;

		/* stop the transfer in progress */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
			udelay(1);
	}

	spin_unlock_bh(&bdma_chan->lock);
}
967 | ||
968 | void tsi721_dma_stop_all(struct tsi721_device *priv) | |
969 | { | |
970 | int i; | |
971 | ||
972 | for (i = 0; i < TSI721_DMA_MAXCH; i++) { | |
4498c31a | 973 | if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i))) |
e3dd8cd4 AB |
974 | tsi721_dma_stop(&priv->bdma[i]); |
975 | } | |
976 | } | |
977 | ||
/*
 * Register the Tsi721 mport as a DMA-engine provider.
 *
 * Initializes every BDMA channel enabled by the dma_sel bitmask (the
 * maintenance channel is reserved and never exported), wires up the
 * dmaengine callbacks on mport->dma, and registers the device with the
 * DMA-engine core.  Per-channel state (lock, tasklet, queue/free lists)
 * is fully initialized before the channel is made visible on the
 * mport->dma.channels list.
 *
 * Returns 0 on success or the dma_async_device_register() error code.
 */
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = &priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		/* Skip the maintenance channel and de-selected channels */
		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		tsi_err(&priv->pdev->dev, "Failed to register DMA device");

	return err;
}
748353cc AB |
1033 | |
/*
 * Unregister the mport's DMA engine and release channel resources.
 *
 * Stops all hardware channels, unregisters from the DMA-engine core,
 * then walks the channel list tearing down any channel still active
 * (same sequence as tsi721_free_chan_resources(): disable IRQs, sync,
 * kill the tasklet, free descriptor memory) before unlinking it.
 */
void tsi721_unregister_dma(struct tsi721_device *priv)
{
	struct rio_mport *mport = &priv->mport;
	struct dma_chan *chan, *_c;
	struct tsi721_bdma_chan *bdma_chan;

	tsi721_dma_stop_all(priv);
	dma_async_device_unregister(&mport->dma);

	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
					device_node) {
		bdma_chan = to_tsi721_chan(chan);
		if (bdma_chan->active) {
			/* Quiesce IRQs before killing the tasklet, so no
			 * completion handler can run on freed state.
			 */
			tsi721_bdma_interrupt_enable(bdma_chan, 0);
			bdma_chan->active = false;
			tsi721_sync_dma_irq(bdma_chan);
			tasklet_kill(&bdma_chan->tasklet);
			INIT_LIST_HEAD(&bdma_chan->free_list);
			kfree(bdma_chan->tx_desc);
			tsi721_bdma_ch_free(bdma_chan);
		}

		list_del(&chan->device_node);
	}
}