/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
/**
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
        struct list_head node;

        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        u32 size;
};
/**
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
        u32 sar;
        u32 dar;
        u32 tcr;
        u32 reserved;
} __attribute__((__packed__));
/**
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
        struct dma_async_tx_descriptor async_tx;
        enum dma_transfer_direction direction;
        unsigned int xfer_shift;
        u32 chcr;

        struct list_head node;
        struct list_head chunks;
        struct rcar_dmac_xfer_chunk *running;
        unsigned int nchunks;

        struct {
                bool use;
                struct rcar_dmac_hw_desc *mem;
                dma_addr_t dma;
                size_t size;
        } hwdescs;

        unsigned int size;
        bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
/**
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
        struct list_head node;

        union {
                struct rcar_dmac_desc descs[0];
                struct rcar_dmac_xfer_chunk chunks[0];
        };
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
        ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
        sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
        ((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
        sizeof(struct rcar_dmac_xfer_chunk))
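/*
 * Illustrative sizing note (not part of the original source): with 4 KiB
 * pages the two macros above yield on the order of a few dozen
 * struct rcar_dmac_desc entries per page and considerably more of the much
 * smaller struct rcar_dmac_xfer_chunk. The exact counts depend on PAGE_SIZE
 * and on the structure layouts of the target configuration.
 */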
/**
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
        phys_addr_t slave_addr;
        unsigned int xfer_size;
};
/**
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
 * @addr: slave dma address
 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
        dma_addr_t addr;
        enum dma_data_direction dir;
        struct rcar_dmac_chan_slave slave;
};
/**
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
        struct dma_chan chan;
        void __iomem *iomem;
        unsigned int index;

        struct rcar_dmac_chan_slave src;
        struct rcar_dmac_chan_slave dst;
        struct rcar_dmac_chan_map map;

        int mid_rid;

        spinlock_t lock;

        struct {
                struct list_head free;
                struct list_head pending;
                struct list_head active;
                struct list_head done;
                struct list_head wait;
                struct rcar_dmac_desc *running;

                struct list_head chunks_free;

                struct list_head pages;
        } desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)
/**
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
        struct dma_device engine;
        struct device *dev;
        void __iomem *iomem;

        unsigned int n_channels;
        struct rcar_dmac_chan *channels;

        DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)	container_of(d, struct rcar_dmac, engine)
/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4
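/*
 * Reading note on the RCAR_DMACHCR_TS_* macros above: the transfer-size
 * selector is a field split across CHCR bits [21:20] and [4:3]. For example,
 * RCAR_DMACHCR_TS_8B = (1 << 20) | (3 << 3) programs [21:20] = 01 and
 * [4:3] = 11, which is why the 8-byte setting does not follow the otherwise
 * increasing order of the other TS values.
 */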
/* -----------------------------------------------------------------------------
 * Device access
 */

static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
        if (reg == RCAR_DMAOR)
                writew(data, dmac->iomem + reg);
        else
                writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
        if (reg == RCAR_DMAOR)
                return readw(dmac->iomem + reg);
        else
                return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
        if (reg == RCAR_DMARS)
                return readw(chan->iomem + reg);
        else
                return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
        if (reg == RCAR_DMARS)
                writew(data, chan->iomem + reg);
        else
                writel(data, chan->iomem + reg);
}
/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
        u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

        return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
        struct rcar_dmac_desc *desc = chan->desc.running;
        u32 chcr = desc->chcr;

        WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

        if (chan->mid_rid >= 0)
                rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

        if (desc->hwdescs.use) {
                struct rcar_dmac_xfer_chunk *chunk;

                dev_dbg(chan->chan.device->dev,
                        "chan%u: queue desc %p: %u@%pad\n",
                        chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
                                     desc->hwdescs.dma >> 32);
#endif
                rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
                                     (desc->hwdescs.dma & 0xfffffff0) |
                                     RCAR_DMADPBASE_SEL);
                rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
                                     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
                                     RCAR_DMACHCRB_DRST);

                /*
                 * Errata: When descriptor memory is accessed through an IOMMU
                 * the DMADAR register isn't initialized automatically from the
                 * first descriptor at beginning of transfer by the DMAC like it
                 * should. Initialize it manually with the destination address
                 * of the first chunk.
                 */
                chunk = list_first_entry(&desc->chunks,
                                         struct rcar_dmac_xfer_chunk, node);
                rcar_dmac_chan_write(chan, RCAR_DMADAR,
                                     chunk->dst_addr & 0xffffffff);

                /*
                 * Program the descriptor stage interrupt to occur after the end
                 * of the first stage.
                 */
                rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

                chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
                     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

                /*
                 * If the descriptor isn't cyclic enable normal descriptor mode
                 * and the transfer completion interrupt.
                 */
                if (!desc->cyclic)
                        chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
                /*
                 * If the descriptor is cyclic and has a callback enable the
                 * descriptor stage interrupt in infinite repeat mode.
                 */
                else if (desc->async_tx.callback)
                        chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
                /*
                 * Otherwise just select infinite repeat mode without any
                 * interrupt.
                 */
                else
                        chcr |= RCAR_DMACHCR_DPM_INFINITE;
        } else {
                struct rcar_dmac_xfer_chunk *chunk = desc->running;

                dev_dbg(chan->chan.device->dev,
                        "chan%u: queue chunk %p: %u@%pad -> %pad\n",
                        chan->index, chunk, chunk->size, &chunk->src_addr,
                        &chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
                                     chunk->src_addr >> 32);
                rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
                                     chunk->dst_addr >> 32);
#endif
                rcar_dmac_chan_write(chan, RCAR_DMASAR,
                                     chunk->src_addr & 0xffffffff);
                rcar_dmac_chan_write(chan, RCAR_DMADAR,
                                     chunk->dst_addr & 0xffffffff);
                rcar_dmac_chan_write(chan, RCAR_DMATCR,
                                     chunk->size >> desc->xfer_shift);

                chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
        }

        rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
}
static int rcar_dmac_init(struct rcar_dmac *dmac)
{
        u16 dmaor;

        /* Clear all channels and enable the DMAC globally. */
        rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
        rcar_dmac_write(dmac, RCAR_DMAOR,
                        RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

        dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
        if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
                dev_warn(dmac->dev, "DMAOR initialization failed.\n");
                return -EIO;
        }

        return 0;
}
/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
        struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
        struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
        unsigned long flags;
        dma_cookie_t cookie;

        spin_lock_irqsave(&chan->lock, flags);

        cookie = dma_cookie_assign(tx);

        dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
                chan->index, tx->cookie, desc);

        list_add_tail(&desc->node, &chan->desc.pending);
        desc->running = list_first_entry(&desc->chunks,
                                         struct rcar_dmac_xfer_chunk, node);

        spin_unlock_irqrestore(&chan->lock, flags);

        return cookie;
}
/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
        struct rcar_dmac_desc_page *page;
        unsigned long flags;
        LIST_HEAD(list);
        unsigned int i;

        page = (void *)get_zeroed_page(gfp);
        if (!page)
                return -ENOMEM;

        for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
                struct rcar_dmac_desc *desc = &page->descs[i];

                dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
                desc->async_tx.tx_submit = rcar_dmac_tx_submit;
                INIT_LIST_HEAD(&desc->chunks);

                list_add_tail(&desc->node, &list);
        }

        spin_lock_irqsave(&chan->lock, flags);
        list_splice_tail(&list, &chan->desc.free);
        list_add_tail(&page->node, &chan->desc.pages);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}
/*
 * rcar_dmac_desc_put - Release a DMA transfer descriptor
 * @chan: the DMA channel
 * @desc: the descriptor
 *
 * Put the descriptor and its transfer chunk descriptors back in the channel's
 * free descriptors lists. The descriptor's chunks list will be reinitialized to
 * an empty list as a result.
 *
 * The descriptor must have been removed from the channel's lists before calling
 * this function.
 */
static void rcar_dmac_desc_put(struct rcar_dmac_chan *chan,
                               struct rcar_dmac_desc *desc)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free);
        list_add(&desc->node, &chan->desc.free);
        spin_unlock_irqrestore(&chan->lock, flags);
}
static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan *chan)
{
        struct rcar_dmac_desc *desc, *_desc;
        unsigned long flags;
        LIST_HEAD(list);

        /*
         * We have to temporarily move all descriptors from the wait list to a
         * local list as iterating over the wait list, even with
         * list_for_each_entry_safe, isn't safe if we release the channel lock
         * around the rcar_dmac_desc_put() call.
         */
        spin_lock_irqsave(&chan->lock, flags);
        list_splice_init(&chan->desc.wait, &list);
        spin_unlock_irqrestore(&chan->lock, flags);

        list_for_each_entry_safe(desc, _desc, &list, node) {
                if (async_tx_test_ack(&desc->async_tx)) {
                        list_del(&desc->node);
                        rcar_dmac_desc_put(chan, desc);
                }
        }

        if (list_empty(&list))
                return;

        /* Put the remaining descriptors back in the wait list. */
        spin_lock_irqsave(&chan->lock, flags);
        list_splice(&list, &chan->desc.wait);
        spin_unlock_irqrestore(&chan->lock, flags);
}
/*
 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
 * be allocated.
 */
static struct rcar_dmac_desc *rcar_dmac_desc_get(struct rcar_dmac_chan *chan)
{
        struct rcar_dmac_desc *desc;
        unsigned long flags;
        int ret;

        /* Recycle acked descriptors before attempting allocation. */
        rcar_dmac_desc_recycle_acked(chan);

        spin_lock_irqsave(&chan->lock, flags);

        while (list_empty(&chan->desc.free)) {
                /*
                 * No free descriptors, allocate a page worth of them and try
                 * again, as someone else could race us to get the newly
                 * allocated descriptors. If the allocation fails return an
                 * error.
                 */
                spin_unlock_irqrestore(&chan->lock, flags);
                ret = rcar_dmac_desc_alloc(chan, GFP_NOWAIT);
                if (ret < 0)
                        return NULL;
                spin_lock_irqsave(&chan->lock, flags);
        }

        desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node);
        list_del(&desc->node);

        spin_unlock_irqrestore(&chan->lock, flags);

        return desc;
}
/*
 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
        struct rcar_dmac_desc_page *page;
        unsigned long flags;
        LIST_HEAD(list);
        unsigned int i;

        page = (void *)get_zeroed_page(gfp);
        if (!page)
                return -ENOMEM;

        for (i = 0; i < RCAR_DMAC_XFER_CHUNKS_PER_PAGE; ++i) {
                struct rcar_dmac_xfer_chunk *chunk = &page->chunks[i];

                list_add_tail(&chunk->node, &list);
        }

        spin_lock_irqsave(&chan->lock, flags);
        list_splice_tail(&list, &chan->desc.chunks_free);
        list_add_tail(&page->node, &chan->desc.pages);
        spin_unlock_irqrestore(&chan->lock, flags);

        return 0;
}
/*
 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
 * @chan: the DMA channel
 *
 * Locking: This function must be called in a non-atomic context.
 *
 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
 * descriptor can be allocated.
 */
static struct rcar_dmac_xfer_chunk *
rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan *chan)
{
        struct rcar_dmac_xfer_chunk *chunk;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&chan->lock, flags);

        while (list_empty(&chan->desc.chunks_free)) {
                /*
                 * No free descriptors, allocate a page worth of them and try
                 * again, as someone else could race us to get the newly
                 * allocated descriptors. If the allocation fails return an
                 * error.
                 */
                spin_unlock_irqrestore(&chan->lock, flags);
                ret = rcar_dmac_xfer_chunk_alloc(chan, GFP_NOWAIT);
                if (ret < 0)
                        return NULL;
                spin_lock_irqsave(&chan->lock, flags);
        }

        chunk = list_first_entry(&chan->desc.chunks_free,
                                 struct rcar_dmac_xfer_chunk, node);
        list_del(&chunk->node);

        spin_unlock_irqrestore(&chan->lock, flags);

        return chunk;
}
static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan,
                                     struct rcar_dmac_desc *desc, size_t size)
{
        /*
         * dma_alloc_coherent() allocates memory in page size increments. To
         * avoid reallocating the hardware descriptors when the allocated size
         * wouldn't change align the requested size to a multiple of the page
         * size.
         */
        size = PAGE_ALIGN(size);

        if (desc->hwdescs.size == size)
                return;

        if (desc->hwdescs.mem) {
                dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size,
                                  desc->hwdescs.mem, desc->hwdescs.dma);
                desc->hwdescs.mem = NULL;
                desc->hwdescs.size = 0;
        }

        if (!size)
                return;

        desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size,
                                               &desc->hwdescs.dma, GFP_NOWAIT);
        if (!desc->hwdescs.mem)
                return;

        desc->hwdescs.size = size;
}
static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan,
                                 struct rcar_dmac_desc *desc)
{
        struct rcar_dmac_xfer_chunk *chunk;
        struct rcar_dmac_hw_desc *hwdesc;

        rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc));

        hwdesc = desc->hwdescs.mem;
        if (!hwdesc)
                return -ENOMEM;

        list_for_each_entry(chunk, &desc->chunks, node) {
                hwdesc->sar = chunk->src_addr;
                hwdesc->dar = chunk->dst_addr;
                hwdesc->tcr = chunk->size >> desc->xfer_shift;
                hwdesc++;
        }

        return 0;
}
/* -----------------------------------------------------------------------------
 * Stop and halt operations
 */

static void rcar_dmac_chan_halt(struct rcar_dmac_chan *chan)
{
        u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

        chcr &= ~(RCAR_DMACHCR_DSE | RCAR_DMACHCR_DSIE | RCAR_DMACHCR_IE |
                  RCAR_DMACHCR_TE | RCAR_DMACHCR_DE);
        rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr);
}
static void rcar_dmac_chan_reinit(struct rcar_dmac_chan *chan)
{
        struct rcar_dmac_desc *desc, *_desc;
        unsigned long flags;
        LIST_HEAD(descs);

        spin_lock_irqsave(&chan->lock, flags);

        /* Move all non-free descriptors to the local lists. */
        list_splice_init(&chan->desc.pending, &descs);
        list_splice_init(&chan->desc.active, &descs);
        list_splice_init(&chan->desc.done, &descs);
        list_splice_init(&chan->desc.wait, &descs);

        chan->desc.running = NULL;

        spin_unlock_irqrestore(&chan->lock, flags);

        list_for_each_entry_safe(desc, _desc, &descs, node) {
                list_del(&desc->node);
                rcar_dmac_desc_put(chan, desc);
        }
}
static void rcar_dmac_stop(struct rcar_dmac *dmac)
{
        rcar_dmac_write(dmac, RCAR_DMAOR, 0);
}
static void rcar_dmac_abort(struct rcar_dmac *dmac)
{
        unsigned int i;

        /* Stop all channels. */
        for (i = 0; i < dmac->n_channels; ++i) {
                struct rcar_dmac_chan *chan = &dmac->channels[i];

                /* Stop and reinitialize the channel. */
                spin_lock(&chan->lock);
                rcar_dmac_chan_halt(chan);
                spin_unlock(&chan->lock);

                rcar_dmac_chan_reinit(chan);
        }
}
/* -----------------------------------------------------------------------------
 * Descriptors preparation
 */

static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan,
                                          struct rcar_dmac_desc *desc)
{
        static const u32 chcr_ts[] = {
                RCAR_DMACHCR_TS_1B, RCAR_DMACHCR_TS_2B,
                RCAR_DMACHCR_TS_4B, RCAR_DMACHCR_TS_8B,
                RCAR_DMACHCR_TS_16B, RCAR_DMACHCR_TS_32B,
                RCAR_DMACHCR_TS_64B,
        };

        unsigned int xfer_size;
        u32 chcr;

        switch (desc->direction) {
        case DMA_DEV_TO_MEM:
                chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_FIXED
                     | RCAR_DMACHCR_RS_DMARS;
                xfer_size = chan->src.xfer_size;
                break;

        case DMA_MEM_TO_DEV:
                chcr = RCAR_DMACHCR_DM_FIXED | RCAR_DMACHCR_SM_INC
                     | RCAR_DMACHCR_RS_DMARS;
                xfer_size = chan->dst.xfer_size;
                break;

        case DMA_MEM_TO_MEM:
        default:
                chcr = RCAR_DMACHCR_DM_INC | RCAR_DMACHCR_SM_INC
                     | RCAR_DMACHCR_RS_AUTO;
                xfer_size = RCAR_DMAC_MEMCPY_XFER_SIZE;
                break;
        }

        desc->xfer_shift = ilog2(xfer_size);
        desc->chcr = chcr | chcr_ts[desc->xfer_shift];
}
/*
 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
 *
 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
 * converted to scatter-gather to guarantee consistent locking and a correct
 * list manipulation. For slave DMA direction carries the usual meaning, and,
 * logically, the SG list is RAM and the addr variable contains slave address,
 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
 * and the SG list contains only one element and points at the source buffer.
 */
static struct dma_async_tx_descriptor *
rcar_dmac_chan_prep_sg(struct rcar_dmac_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, dma_addr_t dev_addr,
                       enum dma_transfer_direction dir, unsigned long dma_flags,
                       bool cyclic)
{
        struct rcar_dmac_xfer_chunk *chunk;
        struct rcar_dmac_desc *desc;
        struct scatterlist *sg;
        unsigned int nchunks = 0;
        unsigned int max_chunk_size;
        unsigned int full_size = 0;
        bool highmem = false;
        unsigned int i;

        desc = rcar_dmac_desc_get(chan);
        if (!desc)
                return NULL;

        desc->async_tx.flags = dma_flags;
        desc->async_tx.cookie = -EBUSY;

        desc->cyclic = cyclic;
        desc->direction = dir;

        rcar_dmac_chan_configure_desc(chan, desc);

        max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift;

        /*
         * Allocate and fill the transfer chunk descriptors. We own the only
         * reference to the DMA descriptor, there's no need for locking.
         */
        for_each_sg(sgl, sg, sg_len, i) {
                dma_addr_t mem_addr = sg_dma_address(sg);
                unsigned int len = sg_dma_len(sg);

                full_size += len;

                while (len) {
                        unsigned int size = min(len, max_chunk_size);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
                        /*
                         * Prevent individual transfers from crossing 4GB
                         * boundaries.
                         */
                        if (dev_addr >> 32 != (dev_addr + size - 1) >> 32)
                                size = ALIGN(dev_addr, 1ULL << 32) - dev_addr;
                        if (mem_addr >> 32 != (mem_addr + size - 1) >> 32)
                                size = ALIGN(mem_addr, 1ULL << 32) - mem_addr;

                        /*
                         * Check if either of the source or destination address
                         * can't be expressed in 32 bits. If so we can't use
                         * hardware descriptor lists.
                         */
                        if (dev_addr >> 32 || mem_addr >> 32)
                                highmem = true;
#endif

                        chunk = rcar_dmac_xfer_chunk_get(chan);
                        if (!chunk) {
                                rcar_dmac_desc_put(chan, desc);
                                return NULL;
                        }

                        if (dir == DMA_DEV_TO_MEM) {
                                chunk->src_addr = dev_addr;
                                chunk->dst_addr = mem_addr;
                        } else {
                                chunk->src_addr = mem_addr;
                                chunk->dst_addr = dev_addr;
                        }

                        chunk->size = size;

                        dev_dbg(chan->chan.device->dev,
                                "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
                                chan->index, chunk, desc, i, sg, size, len,
                                &chunk->src_addr, &chunk->dst_addr);

                        mem_addr += size;
                        if (dir == DMA_MEM_TO_MEM)
                                dev_addr += size;

                        len -= size;

                        list_add_tail(&chunk->node, &desc->chunks);
                        nchunks++;
                }
        }

        desc->nchunks = nchunks;
        desc->size = full_size;

        /*
         * Use hardware descriptor lists if possible when more than one chunk
         * needs to be transferred (otherwise they don't make much sense).
         *
         * The highmem check currently covers the whole transfer. As an
         * optimization we could use descriptor lists for consecutive lowmem
         * chunks and direct manual mode for highmem chunks. Whether the
         * performance improvement would be significant enough compared to the
         * additional complexity remains to be investigated.
         */
        desc->hwdescs.use = !highmem && nchunks > 1;
        if (desc->hwdescs.use) {
                if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
                        desc->hwdescs.use = false;
        }

        return &desc->async_tx;
}
/* -----------------------------------------------------------------------------
 * DMA engine operations
 */

static int rcar_dmac_alloc_chan_resources(struct dma_chan *chan)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        int ret;

        INIT_LIST_HEAD(&rchan->desc.chunks_free);
        INIT_LIST_HEAD(&rchan->desc.pages);

        /* Preallocate descriptors. */
        ret = rcar_dmac_xfer_chunk_alloc(rchan, GFP_KERNEL);
        if (ret < 0)
                return -ENOMEM;

        ret = rcar_dmac_desc_alloc(rchan, GFP_KERNEL);
        if (ret < 0)
                return -ENOMEM;

        return pm_runtime_get_sync(chan->device->dev);
}
static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
        struct rcar_dmac_chan_map *map = &rchan->map;
        struct rcar_dmac_desc_page *page, *_page;
        struct rcar_dmac_desc *desc;
        LIST_HEAD(list);

        /* Protect against ISR */
        spin_lock_irq(&rchan->lock);
        rcar_dmac_chan_halt(rchan);
        spin_unlock_irq(&rchan->lock);

        /* Now no new interrupts will occur */

        if (rchan->mid_rid >= 0) {
                /* The caller is holding dma_list_mutex */
                clear_bit(rchan->mid_rid, dmac->modules);
                rchan->mid_rid = -EINVAL;
        }

        list_splice_init(&rchan->desc.free, &list);
        list_splice_init(&rchan->desc.pending, &list);
        list_splice_init(&rchan->desc.active, &list);
        list_splice_init(&rchan->desc.done, &list);
        list_splice_init(&rchan->desc.wait, &list);

        rchan->desc.running = NULL;

        list_for_each_entry(desc, &list, node)
                rcar_dmac_realloc_hwdesc(rchan, desc, 0);

        list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) {
                list_del(&page->node);
                free_page((unsigned long)page);
        }

        /* Remove slave mapping if present. */
        if (map->slave.xfer_size) {
                dma_unmap_resource(chan->device->dev, map->addr,
                                   map->slave.xfer_size, map->dir, 0);
                map->slave.xfer_size = 0;
        }

        pm_runtime_put(chan->device->dev);
}
static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dma_dest,
                          dma_addr_t dma_src, size_t len, unsigned long flags)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct scatterlist sgl;

        if (!len)
                return NULL;

        sg_init_table(&sgl, 1);
        sg_set_page(&sgl, pfn_to_page(PFN_DOWN(dma_src)), len,
                    offset_in_page(dma_src));
        sg_dma_address(&sgl) = dma_src;
        sg_dma_len(&sgl) = len;

        return rcar_dmac_chan_prep_sg(rchan, &sgl, 1, dma_dest,
                                      DMA_MEM_TO_MEM, flags, false);
}
static int rcar_dmac_map_slave_addr(struct dma_chan *chan,
                                    enum dma_transfer_direction dir)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct rcar_dmac_chan_map *map = &rchan->map;
        phys_addr_t dev_addr;
        size_t dev_size;
        enum dma_data_direction dev_dir;

        if (dir == DMA_DEV_TO_MEM) {
                dev_addr = rchan->src.slave_addr;
                dev_size = rchan->src.xfer_size;
                dev_dir = DMA_TO_DEVICE;
        } else {
                dev_addr = rchan->dst.slave_addr;
                dev_size = rchan->dst.xfer_size;
                dev_dir = DMA_FROM_DEVICE;
        }

        /* Reuse current map if possible. */
        if (dev_addr == map->slave.slave_addr &&
            dev_size == map->slave.xfer_size &&
            dev_dir == map->dir)
                return 0;

        /* Remove old mapping if present. */
        if (map->slave.xfer_size)
                dma_unmap_resource(chan->device->dev, map->addr,
                                   map->slave.xfer_size, map->dir, 0);
        map->slave.xfer_size = 0;

        /* Create new slave address map. */
        map->addr = dma_map_resource(chan->device->dev, dev_addr, dev_size,
                                     dev_dir, 0);

        if (dma_mapping_error(chan->device->dev, map->addr)) {
                dev_err(chan->device->dev,
                        "chan%u: failed to map %zx@%pap", rchan->index,
                        dev_size, &dev_addr);
                return -EIO;
        }

        dev_dbg(chan->device->dev, "chan%u: map %zx@%pap to %pad dir: %s\n",
                rchan->index, dev_size, &dev_addr, &map->addr,
                dev_dir == DMA_TO_DEVICE ? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");

        map->slave.slave_addr = dev_addr;
        map->slave.xfer_size = dev_size;
        map->dir = dev_dir;

        return 0;
}
static struct dma_async_tx_descriptor *
rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sg_len, enum dma_transfer_direction dir,
                        unsigned long flags, void *context)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

        /* Someone calling slave DMA on a generic channel? */
        if (rchan->mid_rid < 0 || !sg_len) {
                dev_warn(chan->device->dev,
                         "%s: bad parameter: len=%d, id=%d\n",
                         __func__, sg_len, rchan->mid_rid);
                return NULL;
        }

        if (rcar_dmac_map_slave_addr(chan, dir))
                return NULL;

        return rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
                                      dir, flags, false);
}
#define RCAR_DMAC_MAX_SG_LEN	32

static struct dma_async_tx_descriptor *
rcar_dmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                          size_t buf_len, size_t period_len,
                          enum dma_transfer_direction dir, unsigned long flags)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        struct dma_async_tx_descriptor *desc;
        struct scatterlist *sgl;
        unsigned int sg_len;
        unsigned int i;

        /* Someone calling slave DMA on a generic channel? */
        if (rchan->mid_rid < 0 || buf_len < period_len) {
                dev_warn(chan->device->dev,
                         "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
                         __func__, buf_len, period_len, rchan->mid_rid);
                return NULL;
        }

        if (rcar_dmac_map_slave_addr(chan, dir))
                return NULL;

        sg_len = buf_len / period_len;
        if (sg_len > RCAR_DMAC_MAX_SG_LEN) {
                dev_err(chan->device->dev,
                        "chan%u: sg length %d exceeds limit %d",
                        rchan->index, sg_len, RCAR_DMAC_MAX_SG_LEN);
                return NULL;
        }

        /*
         * Allocate the sg list dynamically as it would consume too much stack
         * space.
         */
        sgl = kcalloc(sg_len, sizeof(*sgl), GFP_NOWAIT);
        if (!sgl)
                return NULL;

        sg_init_table(sgl, sg_len);

        for (i = 0; i < sg_len; ++i) {
                dma_addr_t src = buf_addr + (period_len * i);

                sg_set_page(&sgl[i], pfn_to_page(PFN_DOWN(src)), period_len,
                            offset_in_page(src));
                sg_dma_address(&sgl[i]) = src;
                sg_dma_len(&sgl[i]) = period_len;
        }

        desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, rchan->map.addr,
                                      dir, flags, true);

        kfree(sgl);
        return desc;
}
static int rcar_dmac_device_config(struct dma_chan *chan,
                                   struct dma_slave_config *cfg)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);

        /*
         * We could lock this, but you shouldn't be configuring the
         * channel, while using it...
         */
        rchan->src.slave_addr = cfg->src_addr;
        rchan->dst.slave_addr = cfg->dst_addr;
        rchan->src.xfer_size = cfg->src_addr_width;
        rchan->dst.xfer_size = cfg->dst_addr_width;

        return 0;
}
static int rcar_dmac_chan_terminate_all(struct dma_chan *chan)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&rchan->lock, flags);
        rcar_dmac_chan_halt(rchan);
        spin_unlock_irqrestore(&rchan->lock, flags);

        /*
         * FIXME: No new interrupt can occur now, but the IRQ thread might still
         * be queued. Wait for it to complete before reinitializing the channel.
         */
        rcar_dmac_chan_reinit(rchan);

        return 0;
}
static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan *chan,
                                               dma_cookie_t cookie)
{
        struct rcar_dmac_desc *desc = chan->desc.running;
        struct rcar_dmac_xfer_chunk *running = NULL;
        struct rcar_dmac_xfer_chunk *chunk;
        enum dma_status status;
        unsigned int residue = 0;
        unsigned int dptr = 0;

        if (!desc)
                return 0;

        /*
         * If the cookie corresponds to a descriptor that has been completed
         * there is no residue. The same check has already been performed by the
         * caller but without holding the channel lock, so the descriptor could
         * now be complete.
         */
        status = dma_cookie_status(&chan->chan, cookie, NULL);
        if (status == DMA_COMPLETE)
                return 0;

        /*
         * If the cookie doesn't correspond to the currently running transfer
         * then the descriptor hasn't been processed yet, and the residue is
         * equal to the full descriptor size.
         */
        if (cookie != desc->async_tx.cookie) {
                list_for_each_entry(desc, &chan->desc.pending, node) {
                        if (cookie == desc->async_tx.cookie)
                                return desc->size;
                }
                list_for_each_entry(desc, &chan->desc.active, node) {
                        if (cookie == desc->async_tx.cookie)
                                return desc->size;
                }

                /*
                 * No descriptor found for the cookie, there's thus no residue.
                 * This shouldn't happen if the calling driver passes a correct
                 * cookie value.
                 */
                WARN(1, "No descriptor for cookie!");
                return 0;
        }

        /*
         * In descriptor mode the descriptor running pointer is not maintained
         * by the interrupt handler, find the running descriptor from the
         * descriptor pointer field in the CHCRB register. In non-descriptor
         * mode just use the running descriptor pointer.
         */
        if (desc->hwdescs.use) {
                dptr = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
                        RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
                WARN_ON(dptr >= desc->nchunks);
        } else {
                running = desc->running;
        }

        /* Compute the size of all chunks still to be transferred. */
        list_for_each_entry_reverse(chunk, &desc->chunks, node) {
                if (chunk == running || ++dptr == desc->nchunks)
                        break;

                residue += chunk->size;
        }

        /* Add the residue for the current chunk. */
        residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift;

        return residue;
}
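/*
 * Worked example (illustrative, assuming a three-chunk transfer in
 * non-descriptor mode): if the second chunk is currently running, the reverse
 * walk above accumulates the size of chunk 3, stops at the running chunk, and
 * the final TCR read adds the bytes still pending in chunk 2. TCR counts
 * transfer units, hence the shift by desc->xfer_shift to convert to bytes.
 */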
static enum dma_status rcar_dmac_tx_status(struct dma_chan *chan,
                                           dma_cookie_t cookie,
                                           struct dma_tx_state *txstate)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        enum dma_status status;
        unsigned long flags;
        unsigned int residue;

        status = dma_cookie_status(chan, cookie, txstate);
        if (status == DMA_COMPLETE || !txstate)
                return status;

        spin_lock_irqsave(&rchan->lock, flags);
        residue = rcar_dmac_chan_get_residue(rchan, cookie);
        spin_unlock_irqrestore(&rchan->lock, flags);

        /* if there's no residue, the cookie is complete */
        if (!residue)
                return DMA_COMPLETE;

        dma_set_residue(txstate, residue);

        return status;
}
static void rcar_dmac_issue_pending(struct dma_chan *chan)
{
        struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
        unsigned long flags;

        spin_lock_irqsave(&rchan->lock, flags);

        if (list_empty(&rchan->desc.pending))
                goto done;

        /* Append the pending list to the active list. */
        list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active);

        /*
         * If no transfer is running pick the first descriptor from the active
         * list and start the transfer.
         */
        if (!rchan->desc.running) {
                struct rcar_dmac_desc *desc;

                desc = list_first_entry(&rchan->desc.active,
                                        struct rcar_dmac_desc, node);
                rchan->desc.running = desc;

                rcar_dmac_chan_start_xfer(rchan);
        }

done:
        spin_unlock_irqrestore(&rchan->lock, flags);
}
/* -----------------------------------------------------------------------------
 * IRQ handling
 */

static irqreturn_t rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan *chan)
{
        struct rcar_dmac_desc *desc = chan->desc.running;
        unsigned int stage;

        if (WARN_ON(!desc || !desc->cyclic)) {
                /*
                 * This should never happen, there should always be a running
                 * cyclic descriptor when a descriptor stage end interrupt is
                 * triggered. Warn and return.
                 */
                return IRQ_NONE;
        }

        /* Program the interrupt pointer to the next stage. */
        stage = (rcar_dmac_chan_read(chan, RCAR_DMACHCRB) &
                 RCAR_DMACHCRB_DPTR_MASK) >> RCAR_DMACHCRB_DPTR_SHIFT;
        rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(stage));

        return IRQ_WAKE_THREAD;
}
static irqreturn_t rcar_dmac_isr_transfer_end(struct rcar_dmac_chan *chan)
{
        struct rcar_dmac_desc *desc = chan->desc.running;
        irqreturn_t ret = IRQ_WAKE_THREAD;

        if (WARN_ON_ONCE(!desc)) {
                /*
                 * This should never happen, there should always be a running
                 * descriptor when a transfer end interrupt is triggered. Warn
                 * and return.
                 */
                return IRQ_NONE;
        }

        /*
         * The transfer end interrupt isn't generated for each chunk when using
         * descriptor mode. Only update the running chunk pointer in
         * non-descriptor mode.
         */
        if (!desc->hwdescs.use) {
                /*
                 * If we haven't completed the last transfer chunk simply move
                 * to the next one. Only wake the IRQ thread if the transfer is
                 * cyclic.
                 */
                if (!list_is_last(&desc->running->node, &desc->chunks)) {
                        desc->running = list_next_entry(desc->running, node);
                        if (!desc->cyclic)
                                ret = IRQ_HANDLED;
                        goto done;
                }

                /*
                 * We've completed the last transfer chunk. If the transfer is
                 * cyclic, move back to the first one.
                 */
                if (desc->cyclic) {
                        desc->running =
                                list_first_entry(&desc->chunks,
                                                 struct rcar_dmac_xfer_chunk,
                                                 node);
                        goto done;
                }
        }

        /* The descriptor is complete, move it to the done list. */
        list_move_tail(&desc->node, &chan->desc.done);

        /* Queue the next descriptor, if any. */
        if (!list_empty(&chan->desc.active))
                chan->desc.running = list_first_entry(&chan->desc.active,
                                                      struct rcar_dmac_desc,
                                                      node);
        else
                chan->desc.running = NULL;

done:
        if (chan->desc.running)
                rcar_dmac_chan_start_xfer(chan);

        return ret;
}
static irqreturn_t rcar_dmac_isr_channel(int irq, void *dev)
{
        u32 mask = RCAR_DMACHCR_DSE | RCAR_DMACHCR_TE;
        struct rcar_dmac_chan *chan = dev;
        irqreturn_t ret = IRQ_NONE;
        u32 chcr;

        spin_lock(&chan->lock);

        chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);
        if (chcr & RCAR_DMACHCR_TE)
                mask |= RCAR_DMACHCR_DE;
        rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr & ~mask);

        if (chcr & RCAR_DMACHCR_DSE)
                ret |= rcar_dmac_isr_desc_stage_end(chan);

        if (chcr & RCAR_DMACHCR_TE)
                ret |= rcar_dmac_isr_transfer_end(chan);

        spin_unlock(&chan->lock);

        return ret;
}
static irqreturn_t rcar_dmac_isr_channel_thread(int irq, void *dev)
{
        struct rcar_dmac_chan *chan = dev;
        struct rcar_dmac_desc *desc;
        struct dmaengine_desc_callback cb;

        spin_lock_irq(&chan->lock);

        /* For cyclic transfers notify the user after every chunk. */
        if (chan->desc.running && chan->desc.running->cyclic) {
                desc = chan->desc.running;
                dmaengine_desc_get_callback(&desc->async_tx, &cb);

                if (dmaengine_desc_callback_valid(&cb)) {
                        spin_unlock_irq(&chan->lock);
                        dmaengine_desc_callback_invoke(&cb, NULL);
                        spin_lock_irq(&chan->lock);
                }
        }

        /*
         * Call the callback function for all descriptors on the done list and
         * move them to the ack wait list.
         */
        while (!list_empty(&chan->desc.done)) {
                desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc,
                                        node);
                dma_cookie_complete(&desc->async_tx);
                list_del(&desc->node);

                dmaengine_desc_get_callback(&desc->async_tx, &cb);
                if (dmaengine_desc_callback_valid(&cb)) {
                        spin_unlock_irq(&chan->lock);
                        /*
                         * We own the only reference to this descriptor, we can
                         * safely dereference it without holding the channel
                         * lock.
                         */
                        dmaengine_desc_callback_invoke(&cb, NULL);
                        spin_lock_irq(&chan->lock);
                }

                list_add_tail(&desc->node, &chan->desc.wait);
        }

        spin_unlock_irq(&chan->lock);

        /* Recycle all acked descriptors. */
        rcar_dmac_desc_recycle_acked(chan);

        return IRQ_HANDLED;
}
static irqreturn_t rcar_dmac_isr_error(int irq, void *data)
{
        struct rcar_dmac *dmac = data;

        if (!(rcar_dmac_read(dmac, RCAR_DMAOR) & RCAR_DMAOR_AE))
                return IRQ_NONE;

        /*
         * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
         * abort transfers on all channels, and reinitialize the DMAC.
         */
        rcar_dmac_stop(dmac);
        rcar_dmac_abort(dmac);
        rcar_dmac_init(dmac);

        return IRQ_HANDLED;
}
/* -----------------------------------------------------------------------------
 * OF xlate and channel filter
 */

static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
{
        struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
        struct of_phandle_args *dma_spec = arg;

        /*
         * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate
         * function knows from which device it wants to allocate a channel from,
         * and would be perfectly capable of selecting the channel it wants.
         * Forcing it to call dma_request_channel() and iterate through all
         * channels from all controllers is just pointless.
         */
        if (chan->device->device_config != rcar_dmac_device_config ||
            dma_spec->np != chan->device->dev->of_node)
                return false;

        return !test_and_set_bit(dma_spec->args[0], dmac->modules);
}
static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
                                           struct of_dma *ofdma)
{
        struct rcar_dmac_chan *rchan;
        struct dma_chan *chan;
        dma_cap_mask_t mask;

        if (dma_spec->args_count != 1)
                return NULL;

        /* Only slave DMA channels can be allocated via DT */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);

        chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
        if (!chan)
                return NULL;

        rchan = to_rcar_dmac_chan(chan);
        rchan->mid_rid = dma_spec->args[0];

        return chan;
}
/* -----------------------------------------------------------------------------
 * Power management
 */

#ifdef CONFIG_PM_SLEEP
static int rcar_dmac_sleep_suspend(struct device *dev)
{
        /*
         * TODO: Wait for the current transfer to complete and stop the device.
         */
        return 0;
}

static int rcar_dmac_sleep_resume(struct device *dev)
{
        /* TODO: Resume transfers, if any. */
        return 0;
}
#endif

static int rcar_dmac_runtime_suspend(struct device *dev)
{
        return 0;
}

static int rcar_dmac_runtime_resume(struct device *dev)
{
        struct rcar_dmac *dmac = dev_get_drvdata(dev);

        return rcar_dmac_init(dmac);
}

static const struct dev_pm_ops rcar_dmac_pm = {
        SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume)
        SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume,
                           NULL)
};
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

static int rcar_dmac_chan_probe(struct rcar_dmac *dmac,
                                struct rcar_dmac_chan *rchan,
                                unsigned int index)
{
        struct platform_device *pdev = to_platform_device(dmac->dev);
        struct dma_chan *chan = &rchan->chan;
        char pdev_irqname[5];
        char *irqname;
        int irq;
        int ret;

        rchan->index = index;
        rchan->iomem = dmac->iomem + RCAR_DMAC_CHAN_OFFSET(index);
        rchan->mid_rid = -EINVAL;

        spin_lock_init(&rchan->lock);

        INIT_LIST_HEAD(&rchan->desc.free);
        INIT_LIST_HEAD(&rchan->desc.pending);
        INIT_LIST_HEAD(&rchan->desc.active);
        INIT_LIST_HEAD(&rchan->desc.done);
        INIT_LIST_HEAD(&rchan->desc.wait);

        /* Request the channel interrupt. */
        sprintf(pdev_irqname, "ch%u", index);
        irq = platform_get_irq_byname(pdev, pdev_irqname);
        if (irq < 0) {
                dev_err(dmac->dev, "no IRQ specified for channel %u\n", index);
                return -ENODEV;
        }

        irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:%u",
                                 dev_name(dmac->dev), index);
        if (!irqname)
                return -ENOMEM;

        ret = devm_request_threaded_irq(dmac->dev, irq, rcar_dmac_isr_channel,
                                        rcar_dmac_isr_channel_thread, 0,
                                        irqname, rchan);
        if (ret) {
                dev_err(dmac->dev, "failed to request IRQ %u (%d)\n", irq, ret);
                return ret;
        }

        /*
         * Initialize the DMA engine channel and add it to the DMA engine
         * channels list.
         */
        chan->device = &dmac->engine;
        dma_cookie_init(chan);

        list_add_tail(&chan->device_node, &dmac->engine.channels);

        return 0;
}
static int rcar_dmac_parse_of(struct device *dev, struct rcar_dmac *dmac)
{
        struct device_node *np = dev->of_node;
        int ret;

        ret = of_property_read_u32(np, "dma-channels", &dmac->n_channels);
        if (ret < 0) {
                dev_err(dev, "unable to read dma-channels property\n");
                return ret;
        }

        if (dmac->n_channels <= 0 || dmac->n_channels >= 100) {
                dev_err(dev, "invalid number of channels %u\n",
                        dmac->n_channels);
                return -EINVAL;
        }

        return 0;
}
static int rcar_dmac_probe(struct platform_device *pdev)
{
        const enum dma_slave_buswidth widths = DMA_SLAVE_BUSWIDTH_1_BYTE |
                DMA_SLAVE_BUSWIDTH_2_BYTES | DMA_SLAVE_BUSWIDTH_4_BYTES |
                DMA_SLAVE_BUSWIDTH_8_BYTES | DMA_SLAVE_BUSWIDTH_16_BYTES |
                DMA_SLAVE_BUSWIDTH_32_BYTES | DMA_SLAVE_BUSWIDTH_64_BYTES;
        unsigned int channels_offset = 0;
        struct dma_device *engine;
        struct rcar_dmac *dmac;
        struct resource *mem;
        unsigned int i;
        char *irqname;
        int irq;
        int ret;

        dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL);
        if (!dmac)
                return -ENOMEM;

        dmac->dev = &pdev->dev;
        platform_set_drvdata(pdev, dmac);

        ret = rcar_dmac_parse_of(&pdev->dev, dmac);
        if (ret < 0)
                return ret;

        /*
         * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
         * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
         * is connected to microTLB 0 on currently supported platforms, so we
         * can't use it with the IPMMU. As the IOMMU API operates at the device
         * level we can't disable it selectively, so ignore channel 0 for now if
         * the device is part of an IOMMU group.
         */
        if (pdev->dev.iommu_group) {
                dmac->n_channels--;
                channels_offset = 1;
        }

        dmac->channels = devm_kcalloc(&pdev->dev, dmac->n_channels,
                                      sizeof(*dmac->channels), GFP_KERNEL);
        if (!dmac->channels)
                return -ENOMEM;

        /* Request resources. */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dmac->iomem = devm_ioremap_resource(&pdev->dev, mem);
        if (IS_ERR(dmac->iomem))
                return PTR_ERR(dmac->iomem);

        irq = platform_get_irq_byname(pdev, "error");
        if (irq < 0) {
                dev_err(&pdev->dev, "no error IRQ specified\n");
                return -ENODEV;
        }

        irqname = devm_kasprintf(dmac->dev, GFP_KERNEL, "%s:error",
                                 dev_name(dmac->dev));
        if (!irqname)
                return -ENOMEM;

        ret = devm_request_irq(&pdev->dev, irq, rcar_dmac_isr_error, 0,
                               irqname, dmac);
        if (ret) {
                dev_err(&pdev->dev, "failed to request IRQ %u (%d)\n",
                        irq, ret);
                return ret;
        }

        /* Enable runtime PM and initialize the device. */
        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0) {
                dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
                return ret;
        }

        ret = rcar_dmac_init(dmac);
        pm_runtime_put(&pdev->dev);

        if (ret) {
                dev_err(&pdev->dev, "failed to reset device\n");
                goto error;
        }

        /* Initialize the channels. */
        INIT_LIST_HEAD(&dmac->engine.channels);

        for (i = 0; i < dmac->n_channels; ++i) {
                ret = rcar_dmac_chan_probe(dmac, &dmac->channels[i],
                                           i + channels_offset);
                if (ret < 0)
                        goto error;
        }

        /* Register the DMAC as a DMA provider for DT. */
        ret = of_dma_controller_register(pdev->dev.of_node, rcar_dmac_of_xlate,
                                         NULL);
        if (ret < 0)
                goto error;

        /*
         * Register the DMA engine device.
         *
         * Default transfer size of 32 bytes requires 32-byte alignment.
         */
        engine = &dmac->engine;
        dma_cap_set(DMA_MEMCPY, engine->cap_mask);
        dma_cap_set(DMA_SLAVE, engine->cap_mask);

        engine->dev = &pdev->dev;
        engine->copy_align = ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE);

        engine->src_addr_widths = widths;
        engine->dst_addr_widths = widths;
        engine->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
        engine->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

        engine->device_alloc_chan_resources = rcar_dmac_alloc_chan_resources;
        engine->device_free_chan_resources = rcar_dmac_free_chan_resources;
        engine->device_prep_dma_memcpy = rcar_dmac_prep_dma_memcpy;
        engine->device_prep_slave_sg = rcar_dmac_prep_slave_sg;
        engine->device_prep_dma_cyclic = rcar_dmac_prep_dma_cyclic;
        engine->device_config = rcar_dmac_device_config;
        engine->device_terminate_all = rcar_dmac_chan_terminate_all;
        engine->device_tx_status = rcar_dmac_tx_status;
        engine->device_issue_pending = rcar_dmac_issue_pending;

        ret = dma_async_device_register(engine);
        if (ret < 0)
                goto error;

        return 0;

error:
        of_dma_controller_free(pdev->dev.of_node);
        pm_runtime_disable(&pdev->dev);
        return ret;
}
static int rcar_dmac_remove(struct platform_device *pdev)
{
        struct rcar_dmac *dmac = platform_get_drvdata(pdev);

        of_dma_controller_free(pdev->dev.of_node);
        dma_async_device_unregister(&dmac->engine);

        pm_runtime_disable(&pdev->dev);

        return 0;
}
static void rcar_dmac_shutdown(struct platform_device *pdev)
{
        struct rcar_dmac *dmac = platform_get_drvdata(pdev);

        rcar_dmac_stop(dmac);
}
static const struct of_device_id rcar_dmac_of_ids[] = {
        { .compatible = "renesas,rcar-dmac", },
        { /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);
static struct platform_driver rcar_dmac_driver = {
        .driver		= {
                .pm	= &rcar_dmac_pm,
                .name	= "rcar-dmac",
                .of_match_table = rcar_dmac_of_ids,
        },
        .probe		= rcar_dmac_probe,
        .remove		= rcar_dmac_remove,
        .shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);
MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");