/*
 * Renesas R-Car Gen2 DMA Controller Driver
 *
 * Copyright (C) 2014 Renesas Electronics Inc.
 *
 * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include "../dmaengine.h"
/**
 * struct rcar_dmac_xfer_chunk - Descriptor for a hardware transfer
 * @node: entry in the parent's chunks list
 * @src_addr: device source address
 * @dst_addr: device destination address
 * @size: transfer size in bytes
 */
struct rcar_dmac_xfer_chunk {
	struct list_head node;

	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	u32 size;
};
/**
 * struct rcar_dmac_hw_desc - Hardware descriptor for a transfer chunk
 * @sar: value of the SAR register (source address)
 * @dar: value of the DAR register (destination address)
 * @tcr: value of the TCR register (transfer count)
 */
struct rcar_dmac_hw_desc {
	u32 sar;
	u32 dar;
	u32 tcr;
	u32 reserved;
} __attribute__((__packed__));
/**
 * struct rcar_dmac_desc - R-Car Gen2 DMA Transfer Descriptor
 * @async_tx: base DMA asynchronous transaction descriptor
 * @direction: direction of the DMA transfer
 * @xfer_shift: log2 of the transfer size
 * @chcr: value of the channel configuration register for this transfer
 * @node: entry in the channel's descriptors lists
 * @chunks: list of transfer chunks for this transfer
 * @running: the transfer chunk being currently processed
 * @nchunks: number of transfer chunks for this transfer
 * @hwdescs.use: whether the transfer descriptor uses hardware descriptors
 * @hwdescs.mem: hardware descriptors memory for the transfer
 * @hwdescs.dma: device address of the hardware descriptors memory
 * @hwdescs.size: size of the hardware descriptors in bytes
 * @size: transfer size in bytes
 * @cyclic: when set indicates that the DMA transfer is cyclic
 */
struct rcar_dmac_desc {
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	unsigned int xfer_shift;
	u32 chcr;

	struct list_head node;
	struct list_head chunks;
	struct rcar_dmac_xfer_chunk *running;
	unsigned int nchunks;

	struct {
		bool use;
		struct rcar_dmac_hw_desc *mem;
		dma_addr_t dma;
		size_t size;
	} hwdescs;

	unsigned int size;
	bool cyclic;
};

#define to_rcar_dmac_desc(d)	container_of(d, struct rcar_dmac_desc, async_tx)
/*
 * struct rcar_dmac_desc_page - One page worth of descriptors
 * @node: entry in the channel's pages list
 * @descs: array of DMA descriptors
 * @chunks: array of transfer chunk descriptors
 */
struct rcar_dmac_desc_page {
	struct list_head node;

	union {
		struct rcar_dmac_desc descs[0];
		struct rcar_dmac_xfer_chunk chunks[0];
	};
};

#define RCAR_DMAC_DESCS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, descs)) /	\
	sizeof(struct rcar_dmac_desc))
#define RCAR_DMAC_XFER_CHUNKS_PER_PAGE					\
	((PAGE_SIZE - offsetof(struct rcar_dmac_desc_page, chunks)) /	\
	sizeof(struct rcar_dmac_xfer_chunk))
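
/*
 * Descriptors and transfer chunks are never allocated individually: whole
 * pages are requested at once and carved into as many objects as fit after
 * the struct rcar_dmac_desc_page header, which is what the two macros above
 * compute. The resulting counts depend on the architecture and kernel
 * configuration; as a purely illustrative figure, a 4 KiB page holds on the
 * order of a few tens of descriptors or roughly a hundred transfer chunks.
 */
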
/**
 * struct rcar_dmac_chan_slave - Slave configuration
 * @slave_addr: slave memory address
 * @xfer_size: size (in bytes) of hardware transfers
 */
struct rcar_dmac_chan_slave {
	phys_addr_t slave_addr;
	unsigned int xfer_size;
};
/**
 * struct rcar_dmac_chan_map - Map of slave device phys to dma address
 * @addr: slave dma address
 * @dir: direction of mapping
 * @slave: slave configuration that is mapped
 */
struct rcar_dmac_chan_map {
	dma_addr_t addr;
	enum dma_data_direction dir;
	struct rcar_dmac_chan_slave slave;
};
/**
 * struct rcar_dmac_chan - R-Car Gen2 DMA Controller Channel
 * @chan: base DMA channel object
 * @iomem: channel I/O memory base
 * @index: index of this channel in the controller
 * @irq: channel IRQ
 * @src: slave memory address and size on the source side
 * @dst: slave memory address and size on the destination side
 * @mid_rid: hardware MID/RID for the DMA client using this channel
 * @lock: protects the channel CHCR register and the desc members
 * @desc.free: list of free descriptors
 * @desc.pending: list of pending descriptors (submitted with tx_submit)
 * @desc.active: list of active descriptors (activated with issue_pending)
 * @desc.done: list of completed descriptors
 * @desc.wait: list of descriptors waiting for an ack
 * @desc.running: the descriptor being processed (a member of the active list)
 * @desc.chunks_free: list of free transfer chunk descriptors
 * @desc.pages: list of pages used by allocated descriptors
 */
struct rcar_dmac_chan {
	struct dma_chan chan;
	void __iomem *iomem;
	unsigned int index;
	int irq;

	struct rcar_dmac_chan_slave src;
	struct rcar_dmac_chan_slave dst;
	struct rcar_dmac_chan_map map;

	int mid_rid;

	spinlock_t lock;

	struct {
		struct list_head free;
		struct list_head pending;
		struct list_head active;
		struct list_head done;
		struct list_head wait;
		struct rcar_dmac_desc *running;

		struct list_head chunks_free;

		struct list_head pages;
	} desc;
};

#define to_rcar_dmac_chan(c)	container_of(c, struct rcar_dmac_chan, chan)
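
/*
 * Descriptor lifecycle: a descriptor starts on desc.free, moves to
 * desc.pending in rcar_dmac_tx_submit(), to desc.active in
 * rcar_dmac_issue_pending(), to desc.done when its transfer completes, to
 * desc.wait once its completion callback has run, and finally back to
 * desc.free after the client acknowledges it.
 */
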
/**
 * struct rcar_dmac - R-Car Gen2 DMA Controller
 * @engine: base DMA engine object
 * @dev: the hardware device
 * @iomem: remapped I/O memory base
 * @n_channels: number of available channels
 * @channels: array of DMAC channels
 * @modules: bitmask of client modules in use
 */
struct rcar_dmac {
	struct dma_device engine;
	struct device *dev;
	void __iomem *iomem;

	unsigned int n_channels;
	struct rcar_dmac_chan *channels;

	DECLARE_BITMAP(modules, 256);
};

#define to_rcar_dmac(d)		container_of(d, struct rcar_dmac, engine)
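
/*
 * The modules bitmap tracks which MID/RID values are currently claimed: a bit
 * is set in rcar_dmac_chan_filter() when a slave channel is requested through
 * DT and cleared again in rcar_dmac_free_chan_resources().
 */
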
/* -----------------------------------------------------------------------------
 * Registers
 */

#define RCAR_DMAC_CHAN_OFFSET(i)	(0x8000 + 0x80 * (i))

#define RCAR_DMAISTA			0x0020
#define RCAR_DMASEC			0x0030
#define RCAR_DMAOR			0x0060
#define RCAR_DMAOR_PRI_FIXED		(0 << 8)
#define RCAR_DMAOR_PRI_ROUND_ROBIN	(3 << 8)
#define RCAR_DMAOR_AE			(1 << 2)
#define RCAR_DMAOR_DME			(1 << 0)
#define RCAR_DMACHCLR			0x0080
#define RCAR_DMADPSEC			0x00a0

#define RCAR_DMASAR			0x0000
#define RCAR_DMADAR			0x0004
#define RCAR_DMATCR			0x0008
#define RCAR_DMATCR_MASK		0x00ffffff
#define RCAR_DMATSR			0x0028
#define RCAR_DMACHCR			0x000c
#define RCAR_DMACHCR_CAE		(1 << 31)
#define RCAR_DMACHCR_CAIE		(1 << 30)
#define RCAR_DMACHCR_DPM_DISABLED	(0 << 28)
#define RCAR_DMACHCR_DPM_ENABLED	(1 << 28)
#define RCAR_DMACHCR_DPM_REPEAT		(2 << 28)
#define RCAR_DMACHCR_DPM_INFINITE	(3 << 28)
#define RCAR_DMACHCR_RPT_SAR		(1 << 27)
#define RCAR_DMACHCR_RPT_DAR		(1 << 26)
#define RCAR_DMACHCR_RPT_TCR		(1 << 25)
#define RCAR_DMACHCR_DPB		(1 << 22)
#define RCAR_DMACHCR_DSE		(1 << 19)
#define RCAR_DMACHCR_DSIE		(1 << 18)
#define RCAR_DMACHCR_TS_1B		((0 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_2B		((0 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_4B		((0 << 20) | (2 << 3))
#define RCAR_DMACHCR_TS_16B		((0 << 20) | (3 << 3))
#define RCAR_DMACHCR_TS_32B		((1 << 20) | (0 << 3))
#define RCAR_DMACHCR_TS_64B		((1 << 20) | (1 << 3))
#define RCAR_DMACHCR_TS_8B		((1 << 20) | (3 << 3))
#define RCAR_DMACHCR_DM_FIXED		(0 << 14)
#define RCAR_DMACHCR_DM_INC		(1 << 14)
#define RCAR_DMACHCR_DM_DEC		(2 << 14)
#define RCAR_DMACHCR_SM_FIXED		(0 << 12)
#define RCAR_DMACHCR_SM_INC		(1 << 12)
#define RCAR_DMACHCR_SM_DEC		(2 << 12)
#define RCAR_DMACHCR_RS_AUTO		(4 << 8)
#define RCAR_DMACHCR_RS_DMARS		(8 << 8)
#define RCAR_DMACHCR_IE			(1 << 2)
#define RCAR_DMACHCR_TE			(1 << 1)
#define RCAR_DMACHCR_DE			(1 << 0)
#define RCAR_DMATCRB			0x0018
#define RCAR_DMATSRB			0x0038
#define RCAR_DMACHCRB			0x001c
#define RCAR_DMACHCRB_DCNT(n)		((n) << 24)
#define RCAR_DMACHCRB_DPTR_MASK		(0xff << 16)
#define RCAR_DMACHCRB_DPTR_SHIFT	16
#define RCAR_DMACHCRB_DRST		(1 << 15)
#define RCAR_DMACHCRB_DTS		(1 << 8)
#define RCAR_DMACHCRB_SLM_NORMAL	(0 << 4)
#define RCAR_DMACHCRB_SLM_CLK(n)	((8 | (n)) << 4)
#define RCAR_DMACHCRB_PRI(n)		((n) << 0)
#define RCAR_DMARS			0x0040
#define RCAR_DMABUFCR			0x0048
#define RCAR_DMABUFCR_MBU(n)		((n) << 16)
#define RCAR_DMABUFCR_ULB(n)		((n) << 0)
#define RCAR_DMADPBASE			0x0050
#define RCAR_DMADPBASE_MASK		0xfffffff0
#define RCAR_DMADPBASE_SEL		(1 << 0)
#define RCAR_DMADPCR			0x0054
#define RCAR_DMADPCR_DIPT(n)		((n) << 24)
#define RCAR_DMAFIXSAR			0x0010
#define RCAR_DMAFIXDAR			0x0014
#define RCAR_DMAFIXDPBASE		0x0060

/* Hardcode the MEMCPY transfer size to 4 bytes. */
#define RCAR_DMAC_MEMCPY_XFER_SIZE	4
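
/*
 * Note that the TCR registers count transfer units rather than bytes: sizes
 * are always programmed as a byte count shifted right by the descriptor's
 * xfer_shift, the log2 of the transfer unit selected by the CHCR TS field.
 */
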
/* -----------------------------------------------------------------------------
 * Device access
 */

static void rcar_dmac_write(struct rcar_dmac *dmac, u32 reg, u32 data)
{
	if (reg == RCAR_DMAOR)
		writew(data, dmac->iomem + reg);
	else
		writel(data, dmac->iomem + reg);
}

static u32 rcar_dmac_read(struct rcar_dmac *dmac, u32 reg)
{
	if (reg == RCAR_DMAOR)
		return readw(dmac->iomem + reg);
	else
		return readl(dmac->iomem + reg);
}

static u32 rcar_dmac_chan_read(struct rcar_dmac_chan *chan, u32 reg)
{
	if (reg == RCAR_DMARS)
		return readw(chan->iomem + reg);
	else
		return readl(chan->iomem + reg);
}

static void rcar_dmac_chan_write(struct rcar_dmac_chan *chan, u32 reg, u32 data)
{
	if (reg == RCAR_DMARS)
		writew(data, chan->iomem + reg);
	else
		writel(data, chan->iomem + reg);
}
/* -----------------------------------------------------------------------------
 * Initialization and configuration
 */

static bool rcar_dmac_chan_is_busy(struct rcar_dmac_chan *chan)
{
	u32 chcr = rcar_dmac_chan_read(chan, RCAR_DMACHCR);

	return !!(chcr & (RCAR_DMACHCR_DE | RCAR_DMACHCR_TE));
}
static void rcar_dmac_chan_start_xfer(struct rcar_dmac_chan *chan)
{
	struct rcar_dmac_desc *desc = chan->desc.running;
	u32 chcr = desc->chcr;

	WARN_ON_ONCE(rcar_dmac_chan_is_busy(chan));

	if (chan->mid_rid >= 0)
		rcar_dmac_chan_write(chan, RCAR_DMARS, chan->mid_rid);

	if (desc->hwdescs.use) {
		struct rcar_dmac_xfer_chunk *chunk =
			list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue desc %p: %u@%pad\n",
			chan->index, desc, desc->nchunks, &desc->hwdescs.dma);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDPBASE,
				     desc->hwdescs.dma >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMADPBASE,
				     (desc->hwdescs.dma & 0xfffffff0) |
				     RCAR_DMADPBASE_SEL);
		rcar_dmac_chan_write(chan, RCAR_DMACHCRB,
				     RCAR_DMACHCRB_DCNT(desc->nchunks - 1) |
				     RCAR_DMACHCRB_DRST);

		/*
		 * Errata: When descriptor memory is accessed through an IOMMU
		 * the DMADAR register isn't initialized automatically from the
		 * first descriptor at beginning of transfer by the DMAC like it
		 * should. Initialize it manually with the destination address
		 * of the first chunk.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);

		/*
		 * Program the descriptor stage interrupt to occur after the end
		 * of the first stage.
		 */
		rcar_dmac_chan_write(chan, RCAR_DMADPCR, RCAR_DMADPCR_DIPT(1));

		chcr |= RCAR_DMACHCR_RPT_SAR | RCAR_DMACHCR_RPT_DAR
		     |  RCAR_DMACHCR_RPT_TCR | RCAR_DMACHCR_DPB;

		/*
		 * If the descriptor isn't cyclic enable normal descriptor mode
		 * and the transfer completion interrupt.
		 */
		if (!desc->cyclic)
			chcr |= RCAR_DMACHCR_DPM_ENABLED | RCAR_DMACHCR_IE;
		/*
		 * If the descriptor is cyclic and has a callback enable the
		 * descriptor stage interrupt in infinite repeat mode.
		 */
		else if (desc->async_tx.callback)
			chcr |= RCAR_DMACHCR_DPM_INFINITE | RCAR_DMACHCR_DSIE;
		/*
		 * Otherwise just select infinite repeat mode without any
		 * interrupt.
		 */
		else
			chcr |= RCAR_DMACHCR_DPM_INFINITE;
	} else {
		struct rcar_dmac_xfer_chunk *chunk = desc->running;

		dev_dbg(chan->chan.device->dev,
			"chan%u: queue chunk %p: %u@%pad -> %pad\n",
			chan->index, chunk, chunk->size, &chunk->src_addr,
			&chunk->dst_addr);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		rcar_dmac_chan_write(chan, RCAR_DMAFIXSAR,
				     chunk->src_addr >> 32);
		rcar_dmac_chan_write(chan, RCAR_DMAFIXDAR,
				     chunk->dst_addr >> 32);
#endif
		rcar_dmac_chan_write(chan, RCAR_DMASAR,
				     chunk->src_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMADAR,
				     chunk->dst_addr & 0xffffffff);
		rcar_dmac_chan_write(chan, RCAR_DMATCR,
				     chunk->size >> desc->xfer_shift);

		chcr |= RCAR_DMACHCR_DPM_DISABLED | RCAR_DMACHCR_IE;
	}

	rcar_dmac_chan_write(chan, RCAR_DMACHCR, chcr | RCAR_DMACHCR_DE);
}
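
/*
 * In both modes above the transfer only actually starts with the final CHCR
 * write that sets the DE bit; the preceding writes merely program addresses,
 * counts and interrupt sources.
 */
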
static int rcar_dmac_init(struct rcar_dmac *dmac)
{
	u16 dmaor;

	/* Clear all channels and enable the DMAC globally. */
	rcar_dmac_write(dmac, RCAR_DMACHCLR, GENMASK(dmac->n_channels - 1, 0));
	rcar_dmac_write(dmac, RCAR_DMAOR,
			RCAR_DMAOR_PRI_FIXED | RCAR_DMAOR_DME);

	dmaor = rcar_dmac_read(dmac, RCAR_DMAOR);
	if ((dmaor & (RCAR_DMAOR_AE | RCAR_DMAOR_DME)) != RCAR_DMAOR_DME) {
		dev_warn(dmac->dev, "DMAOR initialization failed.\n");
		return -EIO;
	}

	return 0;
}
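
/*
 * rcar_dmac_init() is used both at probe time and from runtime PM resume, and
 * it is run again by the error interrupt handler after an address error has
 * forced a full stop and abort of the controller.
 */
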
/* -----------------------------------------------------------------------------
 * Descriptors submission
 */

static dma_cookie_t rcar_dmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct rcar_dmac_chan *chan = to_rcar_dmac_chan(tx->chan);
	struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	dev_dbg(chan->chan.device->dev, "chan%u: submit #%d@%p\n",
		chan->index, tx->cookie, desc);

	list_add_tail(&desc->node, &chan->desc.pending);
	desc->running = list_first_entry(&desc->chunks,
					 struct rcar_dmac_xfer_chunk, node);

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
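
/*
 * Note that tx_submit only assigns a cookie and queues the descriptor on the
 * pending list: nothing touches the hardware until the client calls
 * dma_async_issue_pending(), which splices pending descriptors onto the
 * active list and starts the channel.
 */
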
/* -----------------------------------------------------------------------------
 * Descriptors allocation and free
 */

/*
 * rcar_dmac_desc_alloc - Allocate a page worth of DMA descriptors
 * @chan: the DMA channel
 * @gfp: allocation flags
 */
static int rcar_dmac_desc_alloc(struct rcar_dmac_chan *chan, gfp_t gfp)
{
	struct rcar_dmac_desc_page *page;
	unsigned long flags;
	LIST_HEAD(list);
	unsigned int i;

	page = (void *)get_zeroed_page(gfp);
	if (!page)
		return -ENOMEM;

	for (i = 0; i < RCAR_DMAC_DESCS_PER_PAGE; ++i) {
		struct rcar_dmac_desc *desc = &page->descs[i];

		dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
		desc->async_tx.tx_submit = rcar_dmac_tx_submit;
		INIT_LIST_HEAD(&desc->chunks);

		list_add_tail(&desc->node, &list);
	}

	spin_lock_irqsave(&chan->lock, flags);
	list_splice_tail(&list, &chan->desc.free);
	list_add_tail(&page->node, &chan->desc.pages);
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}
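
/*
 * rcar_dmac_alloc_chan_resources() preallocates one page of descriptors and
 * one page of transfer chunks with GFP_KERNEL; the _get() helpers below fall
 * back to additional GFP_NOWAIT page allocations whenever a free list runs
 * dry.
 */
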
520 * rcar_dmac_desc_put - Release a DMA transfer descriptor
521 * @chan: the DMA channel
522 * @desc: the descriptor
524 * Put the descriptor and its transfer chunk descriptors back in the channel's
525 * free descriptors lists. The descriptor's chunks list will be reinitialized to
526 * an empty list as a result.
528 * The descriptor must have been removed from the channel's lists before calling
531 static void rcar_dmac_desc_put(struct rcar_dmac_chan
*chan
,
532 struct rcar_dmac_desc
*desc
)
536 spin_lock_irqsave(&chan
->lock
, flags
);
537 list_splice_tail_init(&desc
->chunks
, &chan
->desc
.chunks_free
);
538 list_add(&desc
->node
, &chan
->desc
.free
);
539 spin_unlock_irqrestore(&chan
->lock
, flags
);
542 static void rcar_dmac_desc_recycle_acked(struct rcar_dmac_chan
*chan
)
544 struct rcar_dmac_desc
*desc
, *_desc
;
549 * We have to temporarily move all descriptors from the wait list to a
550 * local list as iterating over the wait list, even with
551 * list_for_each_entry_safe, isn't safe if we release the channel lock
552 * around the rcar_dmac_desc_put() call.
554 spin_lock_irqsave(&chan
->lock
, flags
);
555 list_splice_init(&chan
->desc
.wait
, &list
);
556 spin_unlock_irqrestore(&chan
->lock
, flags
);
558 list_for_each_entry_safe(desc
, _desc
, &list
, node
) {
559 if (async_tx_test_ack(&desc
->async_tx
)) {
560 list_del(&desc
->node
);
561 rcar_dmac_desc_put(chan
, desc
);
565 if (list_empty(&list
))
568 /* Put the remaining descriptors back in the wait list. */
569 spin_lock_irqsave(&chan
->lock
, flags
);
570 list_splice(&list
, &chan
->desc
.wait
);
571 spin_unlock_irqrestore(&chan
->lock
, flags
);
575 * rcar_dmac_desc_get - Allocate a descriptor for a DMA transfer
576 * @chan: the DMA channel
578 * Locking: This function must be called in a non-atomic context.
580 * Return: A pointer to the allocated descriptor or NULL if no descriptor can
583 static struct rcar_dmac_desc
*rcar_dmac_desc_get(struct rcar_dmac_chan
*chan
)
585 struct rcar_dmac_desc
*desc
;
589 /* Recycle acked descriptors before attempting allocation. */
590 rcar_dmac_desc_recycle_acked(chan
);
592 spin_lock_irqsave(&chan
->lock
, flags
);
594 while (list_empty(&chan
->desc
.free
)) {
596 * No free descriptors, allocate a page worth of them and try
597 * again, as someone else could race us to get the newly
598 * allocated descriptors. If the allocation fails return an
601 spin_unlock_irqrestore(&chan
->lock
, flags
);
602 ret
= rcar_dmac_desc_alloc(chan
, GFP_NOWAIT
);
605 spin_lock_irqsave(&chan
->lock
, flags
);
608 desc
= list_first_entry(&chan
->desc
.free
, struct rcar_dmac_desc
, node
);
609 list_del(&desc
->node
);
611 spin_unlock_irqrestore(&chan
->lock
, flags
);
617 * rcar_dmac_xfer_chunk_alloc - Allocate a page worth of transfer chunks
618 * @chan: the DMA channel
619 * @gfp: allocation flags
621 static int rcar_dmac_xfer_chunk_alloc(struct rcar_dmac_chan
*chan
, gfp_t gfp
)
623 struct rcar_dmac_desc_page
*page
;
628 page
= (void *)get_zeroed_page(gfp
);
632 for (i
= 0; i
< RCAR_DMAC_XFER_CHUNKS_PER_PAGE
; ++i
) {
633 struct rcar_dmac_xfer_chunk
*chunk
= &page
->chunks
[i
];
635 list_add_tail(&chunk
->node
, &list
);
638 spin_lock_irqsave(&chan
->lock
, flags
);
639 list_splice_tail(&list
, &chan
->desc
.chunks_free
);
640 list_add_tail(&page
->node
, &chan
->desc
.pages
);
641 spin_unlock_irqrestore(&chan
->lock
, flags
);
647 * rcar_dmac_xfer_chunk_get - Allocate a transfer chunk for a DMA transfer
648 * @chan: the DMA channel
650 * Locking: This function must be called in a non-atomic context.
652 * Return: A pointer to the allocated transfer chunk descriptor or NULL if no
653 * descriptor can be allocated.
655 static struct rcar_dmac_xfer_chunk
*
656 rcar_dmac_xfer_chunk_get(struct rcar_dmac_chan
*chan
)
658 struct rcar_dmac_xfer_chunk
*chunk
;
662 spin_lock_irqsave(&chan
->lock
, flags
);
664 while (list_empty(&chan
->desc
.chunks_free
)) {
666 * No free descriptors, allocate a page worth of them and try
667 * again, as someone else could race us to get the newly
668 * allocated descriptors. If the allocation fails return an
671 spin_unlock_irqrestore(&chan
->lock
, flags
);
672 ret
= rcar_dmac_xfer_chunk_alloc(chan
, GFP_NOWAIT
);
675 spin_lock_irqsave(&chan
->lock
, flags
);
678 chunk
= list_first_entry(&chan
->desc
.chunks_free
,
679 struct rcar_dmac_xfer_chunk
, node
);
680 list_del(&chunk
->node
);
682 spin_unlock_irqrestore(&chan
->lock
, flags
);
687 static void rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan
*chan
,
688 struct rcar_dmac_desc
*desc
, size_t size
)
691 * dma_alloc_coherent() allocates memory in page size increments. To
692 * avoid reallocating the hardware descriptors when the allocated size
693 * wouldn't change align the requested size to a multiple of the page
696 size
= PAGE_ALIGN(size
);
698 if (desc
->hwdescs
.size
== size
)
701 if (desc
->hwdescs
.mem
) {
702 dma_free_coherent(chan
->chan
.device
->dev
, desc
->hwdescs
.size
,
703 desc
->hwdescs
.mem
, desc
->hwdescs
.dma
);
704 desc
->hwdescs
.mem
= NULL
;
705 desc
->hwdescs
.size
= 0;
711 desc
->hwdescs
.mem
= dma_alloc_coherent(chan
->chan
.device
->dev
, size
,
712 &desc
->hwdescs
.dma
, GFP_NOWAIT
);
713 if (!desc
->hwdescs
.mem
)
716 desc
->hwdescs
.size
= size
;
719 static int rcar_dmac_fill_hwdesc(struct rcar_dmac_chan
*chan
,
720 struct rcar_dmac_desc
*desc
)
722 struct rcar_dmac_xfer_chunk
*chunk
;
723 struct rcar_dmac_hw_desc
*hwdesc
;
725 rcar_dmac_realloc_hwdesc(chan
, desc
, desc
->nchunks
* sizeof(*hwdesc
));
727 hwdesc
= desc
->hwdescs
.mem
;
731 list_for_each_entry(chunk
, &desc
->chunks
, node
) {
732 hwdesc
->sar
= chunk
->src_addr
;
733 hwdesc
->dar
= chunk
->dst_addr
;
734 hwdesc
->tcr
= chunk
->size
>> desc
->xfer_shift
;
741 /* -----------------------------------------------------------------------------
745 static void rcar_dmac_chan_halt(struct rcar_dmac_chan
*chan
)
747 u32 chcr
= rcar_dmac_chan_read(chan
, RCAR_DMACHCR
);
749 chcr
&= ~(RCAR_DMACHCR_DSE
| RCAR_DMACHCR_DSIE
| RCAR_DMACHCR_IE
|
750 RCAR_DMACHCR_TE
| RCAR_DMACHCR_DE
);
751 rcar_dmac_chan_write(chan
, RCAR_DMACHCR
, chcr
);
754 static void rcar_dmac_chan_reinit(struct rcar_dmac_chan
*chan
)
756 struct rcar_dmac_desc
*desc
, *_desc
;
760 spin_lock_irqsave(&chan
->lock
, flags
);
762 /* Move all non-free descriptors to the local lists. */
763 list_splice_init(&chan
->desc
.pending
, &descs
);
764 list_splice_init(&chan
->desc
.active
, &descs
);
765 list_splice_init(&chan
->desc
.done
, &descs
);
766 list_splice_init(&chan
->desc
.wait
, &descs
);
768 chan
->desc
.running
= NULL
;
770 spin_unlock_irqrestore(&chan
->lock
, flags
);
772 list_for_each_entry_safe(desc
, _desc
, &descs
, node
) {
773 list_del(&desc
->node
);
774 rcar_dmac_desc_put(chan
, desc
);
778 static void rcar_dmac_stop(struct rcar_dmac
*dmac
)
780 rcar_dmac_write(dmac
, RCAR_DMAOR
, 0);
783 static void rcar_dmac_abort(struct rcar_dmac
*dmac
)
787 /* Stop all channels. */
788 for (i
= 0; i
< dmac
->n_channels
; ++i
) {
789 struct rcar_dmac_chan
*chan
= &dmac
->channels
[i
];
791 /* Stop and reinitialize the channel. */
792 spin_lock(&chan
->lock
);
793 rcar_dmac_chan_halt(chan
);
794 spin_unlock(&chan
->lock
);
796 rcar_dmac_chan_reinit(chan
);
800 /* -----------------------------------------------------------------------------
801 * Descriptors preparation
804 static void rcar_dmac_chan_configure_desc(struct rcar_dmac_chan
*chan
,
805 struct rcar_dmac_desc
*desc
)
807 static const u32 chcr_ts
[] = {
808 RCAR_DMACHCR_TS_1B
, RCAR_DMACHCR_TS_2B
,
809 RCAR_DMACHCR_TS_4B
, RCAR_DMACHCR_TS_8B
,
810 RCAR_DMACHCR_TS_16B
, RCAR_DMACHCR_TS_32B
,
814 unsigned int xfer_size
;
817 switch (desc
->direction
) {
819 chcr
= RCAR_DMACHCR_DM_INC
| RCAR_DMACHCR_SM_FIXED
820 | RCAR_DMACHCR_RS_DMARS
;
821 xfer_size
= chan
->src
.xfer_size
;
825 chcr
= RCAR_DMACHCR_DM_FIXED
| RCAR_DMACHCR_SM_INC
826 | RCAR_DMACHCR_RS_DMARS
;
827 xfer_size
= chan
->dst
.xfer_size
;
832 chcr
= RCAR_DMACHCR_DM_INC
| RCAR_DMACHCR_SM_INC
833 | RCAR_DMACHCR_RS_AUTO
;
834 xfer_size
= RCAR_DMAC_MEMCPY_XFER_SIZE
;
838 desc
->xfer_shift
= ilog2(xfer_size
);
839 desc
->chcr
= chcr
| chcr_ts
[desc
->xfer_shift
];
843 * rcar_dmac_chan_prep_sg - prepare transfer descriptors from an SG list
845 * Common routine for public (MEMCPY) and slave DMA. The MEMCPY case is also
846 * converted to scatter-gather to guarantee consistent locking and a correct
847 * list manipulation. For slave DMA direction carries the usual meaning, and,
848 * logically, the SG list is RAM and the addr variable contains slave address,
849 * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
850 * and the SG list contains only one element and points at the source buffer.
852 static struct dma_async_tx_descriptor
*
853 rcar_dmac_chan_prep_sg(struct rcar_dmac_chan
*chan
, struct scatterlist
*sgl
,
854 unsigned int sg_len
, dma_addr_t dev_addr
,
855 enum dma_transfer_direction dir
, unsigned long dma_flags
,
858 struct rcar_dmac_xfer_chunk
*chunk
;
859 struct rcar_dmac_desc
*desc
;
860 struct scatterlist
*sg
;
861 unsigned int nchunks
= 0;
862 unsigned int max_chunk_size
;
863 unsigned int full_size
= 0;
864 bool cross_boundary
= false;
866 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
871 desc
= rcar_dmac_desc_get(chan
);
875 desc
->async_tx
.flags
= dma_flags
;
876 desc
->async_tx
.cookie
= -EBUSY
;
878 desc
->cyclic
= cyclic
;
879 desc
->direction
= dir
;
881 rcar_dmac_chan_configure_desc(chan
, desc
);
883 max_chunk_size
= (RCAR_DMATCR_MASK
+ 1) << desc
->xfer_shift
;
886 * Allocate and fill the transfer chunk descriptors. We own the only
887 * reference to the DMA descriptor, there's no need for locking.
889 for_each_sg(sgl
, sg
, sg_len
, i
) {
890 dma_addr_t mem_addr
= sg_dma_address(sg
);
891 unsigned int len
= sg_dma_len(sg
);
895 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
897 high_dev_addr
= dev_addr
>> 32;
898 high_mem_addr
= mem_addr
>> 32;
901 if ((dev_addr
>> 32 != high_dev_addr
) ||
902 (mem_addr
>> 32 != high_mem_addr
))
903 cross_boundary
= true;
906 unsigned int size
= min(len
, max_chunk_size
);
908 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
910 * Prevent individual transfers from crossing 4GB
913 if (dev_addr
>> 32 != (dev_addr
+ size
- 1) >> 32) {
914 size
= ALIGN(dev_addr
, 1ULL << 32) - dev_addr
;
915 cross_boundary
= true;
917 if (mem_addr
>> 32 != (mem_addr
+ size
- 1) >> 32) {
918 size
= ALIGN(mem_addr
, 1ULL << 32) - mem_addr
;
919 cross_boundary
= true;
923 chunk
= rcar_dmac_xfer_chunk_get(chan
);
925 rcar_dmac_desc_put(chan
, desc
);
929 if (dir
== DMA_DEV_TO_MEM
) {
930 chunk
->src_addr
= dev_addr
;
931 chunk
->dst_addr
= mem_addr
;
933 chunk
->src_addr
= mem_addr
;
934 chunk
->dst_addr
= dev_addr
;
939 dev_dbg(chan
->chan
.device
->dev
,
940 "chan%u: chunk %p/%p sgl %u@%p, %u/%u %pad -> %pad\n",
941 chan
->index
, chunk
, desc
, i
, sg
, size
, len
,
942 &chunk
->src_addr
, &chunk
->dst_addr
);
945 if (dir
== DMA_MEM_TO_MEM
)
950 list_add_tail(&chunk
->node
, &desc
->chunks
);
955 desc
->nchunks
= nchunks
;
956 desc
->size
= full_size
;
959 * Use hardware descriptor lists if possible when more than one chunk
960 * needs to be transferred (otherwise they don't make much sense).
962 * Source/Destination address should be located in same 4GiB region
963 * in the 40bit address space when it uses Hardware descriptor,
964 * and cross_boundary is checking it.
966 desc
->hwdescs
.use
= !cross_boundary
&& nchunks
> 1;
967 if (desc
->hwdescs
.use
) {
968 if (rcar_dmac_fill_hwdesc(chan
, desc
) < 0)
969 desc
->hwdescs
.use
= false;
972 return &desc
->async_tx
;
975 /* -----------------------------------------------------------------------------
976 * DMA engine operations
979 static int rcar_dmac_alloc_chan_resources(struct dma_chan
*chan
)
981 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
984 INIT_LIST_HEAD(&rchan
->desc
.chunks_free
);
985 INIT_LIST_HEAD(&rchan
->desc
.pages
);
987 /* Preallocate descriptors. */
988 ret
= rcar_dmac_xfer_chunk_alloc(rchan
, GFP_KERNEL
);
992 ret
= rcar_dmac_desc_alloc(rchan
, GFP_KERNEL
);
996 return pm_runtime_get_sync(chan
->device
->dev
);
999 static void rcar_dmac_free_chan_resources(struct dma_chan
*chan
)
1001 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1002 struct rcar_dmac
*dmac
= to_rcar_dmac(chan
->device
);
1003 struct rcar_dmac_chan_map
*map
= &rchan
->map
;
1004 struct rcar_dmac_desc_page
*page
, *_page
;
1005 struct rcar_dmac_desc
*desc
;
1008 /* Protect against ISR */
1009 spin_lock_irq(&rchan
->lock
);
1010 rcar_dmac_chan_halt(rchan
);
1011 spin_unlock_irq(&rchan
->lock
);
1014 * Now no new interrupts will occur, but one might already be
1015 * running. Wait for it to finish before freeing resources.
1017 synchronize_irq(rchan
->irq
);
1019 if (rchan
->mid_rid
>= 0) {
1020 /* The caller is holding dma_list_mutex */
1021 clear_bit(rchan
->mid_rid
, dmac
->modules
);
1022 rchan
->mid_rid
= -EINVAL
;
1025 list_splice_init(&rchan
->desc
.free
, &list
);
1026 list_splice_init(&rchan
->desc
.pending
, &list
);
1027 list_splice_init(&rchan
->desc
.active
, &list
);
1028 list_splice_init(&rchan
->desc
.done
, &list
);
1029 list_splice_init(&rchan
->desc
.wait
, &list
);
1031 rchan
->desc
.running
= NULL
;
1033 list_for_each_entry(desc
, &list
, node
)
1034 rcar_dmac_realloc_hwdesc(rchan
, desc
, 0);
1036 list_for_each_entry_safe(page
, _page
, &rchan
->desc
.pages
, node
) {
1037 list_del(&page
->node
);
1038 free_page((unsigned long)page
);
1041 /* Remove slave mapping if present. */
1042 if (map
->slave
.xfer_size
) {
1043 dma_unmap_resource(chan
->device
->dev
, map
->addr
,
1044 map
->slave
.xfer_size
, map
->dir
, 0);
1045 map
->slave
.xfer_size
= 0;
1048 pm_runtime_put(chan
->device
->dev
);
1051 static struct dma_async_tx_descriptor
*
1052 rcar_dmac_prep_dma_memcpy(struct dma_chan
*chan
, dma_addr_t dma_dest
,
1053 dma_addr_t dma_src
, size_t len
, unsigned long flags
)
1055 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1056 struct scatterlist sgl
;
1061 sg_init_table(&sgl
, 1);
1062 sg_set_page(&sgl
, pfn_to_page(PFN_DOWN(dma_src
)), len
,
1063 offset_in_page(dma_src
));
1064 sg_dma_address(&sgl
) = dma_src
;
1065 sg_dma_len(&sgl
) = len
;
1067 return rcar_dmac_chan_prep_sg(rchan
, &sgl
, 1, dma_dest
,
1068 DMA_MEM_TO_MEM
, flags
, false);
1071 static int rcar_dmac_map_slave_addr(struct dma_chan
*chan
,
1072 enum dma_transfer_direction dir
)
1074 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1075 struct rcar_dmac_chan_map
*map
= &rchan
->map
;
1076 phys_addr_t dev_addr
;
1078 enum dma_data_direction dev_dir
;
1080 if (dir
== DMA_DEV_TO_MEM
) {
1081 dev_addr
= rchan
->src
.slave_addr
;
1082 dev_size
= rchan
->src
.xfer_size
;
1083 dev_dir
= DMA_TO_DEVICE
;
1085 dev_addr
= rchan
->dst
.slave_addr
;
1086 dev_size
= rchan
->dst
.xfer_size
;
1087 dev_dir
= DMA_FROM_DEVICE
;
1090 /* Reuse current map if possible. */
1091 if (dev_addr
== map
->slave
.slave_addr
&&
1092 dev_size
== map
->slave
.xfer_size
&&
1093 dev_dir
== map
->dir
)
1096 /* Remove old mapping if present. */
1097 if (map
->slave
.xfer_size
)
1098 dma_unmap_resource(chan
->device
->dev
, map
->addr
,
1099 map
->slave
.xfer_size
, map
->dir
, 0);
1100 map
->slave
.xfer_size
= 0;
1102 /* Create new slave address map. */
1103 map
->addr
= dma_map_resource(chan
->device
->dev
, dev_addr
, dev_size
,
1106 if (dma_mapping_error(chan
->device
->dev
, map
->addr
)) {
1107 dev_err(chan
->device
->dev
,
1108 "chan%u: failed to map %zx@%pap", rchan
->index
,
1109 dev_size
, &dev_addr
);
1113 dev_dbg(chan
->device
->dev
, "chan%u: map %zx@%pap to %pad dir: %s\n",
1114 rchan
->index
, dev_size
, &dev_addr
, &map
->addr
,
1115 dev_dir
== DMA_TO_DEVICE
? "DMA_TO_DEVICE" : "DMA_FROM_DEVICE");
1117 map
->slave
.slave_addr
= dev_addr
;
1118 map
->slave
.xfer_size
= dev_size
;
1124 static struct dma_async_tx_descriptor
*
1125 rcar_dmac_prep_slave_sg(struct dma_chan
*chan
, struct scatterlist
*sgl
,
1126 unsigned int sg_len
, enum dma_transfer_direction dir
,
1127 unsigned long flags
, void *context
)
1129 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1131 /* Someone calling slave DMA on a generic channel? */
1132 if (rchan
->mid_rid
< 0 || !sg_len
) {
1133 dev_warn(chan
->device
->dev
,
1134 "%s: bad parameter: len=%d, id=%d\n",
1135 __func__
, sg_len
, rchan
->mid_rid
);
1139 if (rcar_dmac_map_slave_addr(chan
, dir
))
1142 return rcar_dmac_chan_prep_sg(rchan
, sgl
, sg_len
, rchan
->map
.addr
,
1146 #define RCAR_DMAC_MAX_SG_LEN 32
1148 static struct dma_async_tx_descriptor
*
1149 rcar_dmac_prep_dma_cyclic(struct dma_chan
*chan
, dma_addr_t buf_addr
,
1150 size_t buf_len
, size_t period_len
,
1151 enum dma_transfer_direction dir
, unsigned long flags
)
1153 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1154 struct dma_async_tx_descriptor
*desc
;
1155 struct scatterlist
*sgl
;
1156 unsigned int sg_len
;
1159 /* Someone calling slave DMA on a generic channel? */
1160 if (rchan
->mid_rid
< 0 || buf_len
< period_len
) {
1161 dev_warn(chan
->device
->dev
,
1162 "%s: bad parameter: buf_len=%zu, period_len=%zu, id=%d\n",
1163 __func__
, buf_len
, period_len
, rchan
->mid_rid
);
1167 if (rcar_dmac_map_slave_addr(chan
, dir
))
1170 sg_len
= buf_len
/ period_len
;
1171 if (sg_len
> RCAR_DMAC_MAX_SG_LEN
) {
1172 dev_err(chan
->device
->dev
,
1173 "chan%u: sg length %d exceds limit %d",
1174 rchan
->index
, sg_len
, RCAR_DMAC_MAX_SG_LEN
);
1179 * Allocate the sg list dynamically as it would consume too much stack
1182 sgl
= kcalloc(sg_len
, sizeof(*sgl
), GFP_NOWAIT
);
1186 sg_init_table(sgl
, sg_len
);
1188 for (i
= 0; i
< sg_len
; ++i
) {
1189 dma_addr_t src
= buf_addr
+ (period_len
* i
);
1191 sg_set_page(&sgl
[i
], pfn_to_page(PFN_DOWN(src
)), period_len
,
1192 offset_in_page(src
));
1193 sg_dma_address(&sgl
[i
]) = src
;
1194 sg_dma_len(&sgl
[i
]) = period_len
;
1197 desc
= rcar_dmac_chan_prep_sg(rchan
, sgl
, sg_len
, rchan
->map
.addr
,
1204 static int rcar_dmac_device_config(struct dma_chan
*chan
,
1205 struct dma_slave_config
*cfg
)
1207 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1210 * We could lock this, but you shouldn't be configuring the
1211 * channel, while using it...
1213 rchan
->src
.slave_addr
= cfg
->src_addr
;
1214 rchan
->dst
.slave_addr
= cfg
->dst_addr
;
1215 rchan
->src
.xfer_size
= cfg
->src_addr_width
;
1216 rchan
->dst
.xfer_size
= cfg
->dst_addr_width
;
1221 static int rcar_dmac_chan_terminate_all(struct dma_chan
*chan
)
1223 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1224 unsigned long flags
;
1226 spin_lock_irqsave(&rchan
->lock
, flags
);
1227 rcar_dmac_chan_halt(rchan
);
1228 spin_unlock_irqrestore(&rchan
->lock
, flags
);
1231 * FIXME: No new interrupt can occur now, but the IRQ thread might still
1235 rcar_dmac_chan_reinit(rchan
);
1240 static unsigned int rcar_dmac_chan_get_residue(struct rcar_dmac_chan
*chan
,
1241 dma_cookie_t cookie
)
1243 struct rcar_dmac_desc
*desc
= chan
->desc
.running
;
1244 struct rcar_dmac_xfer_chunk
*running
= NULL
;
1245 struct rcar_dmac_xfer_chunk
*chunk
;
1246 enum dma_status status
;
1247 unsigned int residue
= 0;
1248 unsigned int dptr
= 0;
1254 * If the cookie corresponds to a descriptor that has been completed
1255 * there is no residue. The same check has already been performed by the
1256 * caller but without holding the channel lock, so the descriptor could
1259 status
= dma_cookie_status(&chan
->chan
, cookie
, NULL
);
1260 if (status
== DMA_COMPLETE
)
1264 * If the cookie doesn't correspond to the currently running transfer
1265 * then the descriptor hasn't been processed yet, and the residue is
1266 * equal to the full descriptor size.
1268 if (cookie
!= desc
->async_tx
.cookie
) {
1269 list_for_each_entry(desc
, &chan
->desc
.pending
, node
) {
1270 if (cookie
== desc
->async_tx
.cookie
)
1273 list_for_each_entry(desc
, &chan
->desc
.active
, node
) {
1274 if (cookie
== desc
->async_tx
.cookie
)
1279 * No descriptor found for the cookie, there's thus no residue.
1280 * This shouldn't happen if the calling driver passes a correct
1283 WARN(1, "No descriptor for cookie!");
1288 * In descriptor mode the descriptor running pointer is not maintained
1289 * by the interrupt handler, find the running descriptor from the
1290 * descriptor pointer field in the CHCRB register. In non-descriptor
1291 * mode just use the running descriptor pointer.
1293 if (desc
->hwdescs
.use
) {
1294 dptr
= (rcar_dmac_chan_read(chan
, RCAR_DMACHCRB
) &
1295 RCAR_DMACHCRB_DPTR_MASK
) >> RCAR_DMACHCRB_DPTR_SHIFT
;
1296 WARN_ON(dptr
>= desc
->nchunks
);
1298 running
= desc
->running
;
1301 /* Compute the size of all chunks still to be transferred. */
1302 list_for_each_entry_reverse(chunk
, &desc
->chunks
, node
) {
1303 if (chunk
== running
|| ++dptr
== desc
->nchunks
)
1306 residue
+= chunk
->size
;
1309 /* Add the residue for the current chunk. */
1310 residue
+= rcar_dmac_chan_read(chan
, RCAR_DMATCR
) << desc
->xfer_shift
;
1315 static enum dma_status
rcar_dmac_tx_status(struct dma_chan
*chan
,
1316 dma_cookie_t cookie
,
1317 struct dma_tx_state
*txstate
)
1319 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1320 enum dma_status status
;
1321 unsigned long flags
;
1322 unsigned int residue
;
1324 status
= dma_cookie_status(chan
, cookie
, txstate
);
1325 if (status
== DMA_COMPLETE
|| !txstate
)
1328 spin_lock_irqsave(&rchan
->lock
, flags
);
1329 residue
= rcar_dmac_chan_get_residue(rchan
, cookie
);
1330 spin_unlock_irqrestore(&rchan
->lock
, flags
);
1332 /* if there's no residue, the cookie is complete */
1334 return DMA_COMPLETE
;
1336 dma_set_residue(txstate
, residue
);
1341 static void rcar_dmac_issue_pending(struct dma_chan
*chan
)
1343 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1344 unsigned long flags
;
1346 spin_lock_irqsave(&rchan
->lock
, flags
);
1348 if (list_empty(&rchan
->desc
.pending
))
1351 /* Append the pending list to the active list. */
1352 list_splice_tail_init(&rchan
->desc
.pending
, &rchan
->desc
.active
);
1355 * If no transfer is running pick the first descriptor from the active
1356 * list and start the transfer.
1358 if (!rchan
->desc
.running
) {
1359 struct rcar_dmac_desc
*desc
;
1361 desc
= list_first_entry(&rchan
->desc
.active
,
1362 struct rcar_dmac_desc
, node
);
1363 rchan
->desc
.running
= desc
;
1365 rcar_dmac_chan_start_xfer(rchan
);
1369 spin_unlock_irqrestore(&rchan
->lock
, flags
);
1372 static void rcar_dmac_device_synchronize(struct dma_chan
*chan
)
1374 struct rcar_dmac_chan
*rchan
= to_rcar_dmac_chan(chan
);
1376 synchronize_irq(rchan
->irq
);
1379 /* -----------------------------------------------------------------------------
1383 static irqreturn_t
rcar_dmac_isr_desc_stage_end(struct rcar_dmac_chan
*chan
)
1385 struct rcar_dmac_desc
*desc
= chan
->desc
.running
;
1388 if (WARN_ON(!desc
|| !desc
->cyclic
)) {
1390 * This should never happen, there should always be a running
1391 * cyclic descriptor when a descriptor stage end interrupt is
1392 * triggered. Warn and return.
1397 /* Program the interrupt pointer to the next stage. */
1398 stage
= (rcar_dmac_chan_read(chan
, RCAR_DMACHCRB
) &
1399 RCAR_DMACHCRB_DPTR_MASK
) >> RCAR_DMACHCRB_DPTR_SHIFT
;
1400 rcar_dmac_chan_write(chan
, RCAR_DMADPCR
, RCAR_DMADPCR_DIPT(stage
));
1402 return IRQ_WAKE_THREAD
;
1405 static irqreturn_t
rcar_dmac_isr_transfer_end(struct rcar_dmac_chan
*chan
)
1407 struct rcar_dmac_desc
*desc
= chan
->desc
.running
;
1408 irqreturn_t ret
= IRQ_WAKE_THREAD
;
1410 if (WARN_ON_ONCE(!desc
)) {
1412 * This should never happen, there should always be a running
1413 * descriptor when a transfer end interrupt is triggered. Warn
1420 * The transfer end interrupt isn't generated for each chunk when using
1421 * descriptor mode. Only update the running chunk pointer in
1422 * non-descriptor mode.
1424 if (!desc
->hwdescs
.use
) {
1426 * If we haven't completed the last transfer chunk simply move
1427 * to the next one. Only wake the IRQ thread if the transfer is
1430 if (!list_is_last(&desc
->running
->node
, &desc
->chunks
)) {
1431 desc
->running
= list_next_entry(desc
->running
, node
);
1438 * We've completed the last transfer chunk. If the transfer is
1439 * cyclic, move back to the first one.
1443 list_first_entry(&desc
->chunks
,
1444 struct rcar_dmac_xfer_chunk
,
1450 /* The descriptor is complete, move it to the done list. */
1451 list_move_tail(&desc
->node
, &chan
->desc
.done
);
1453 /* Queue the next descriptor, if any. */
1454 if (!list_empty(&chan
->desc
.active
))
1455 chan
->desc
.running
= list_first_entry(&chan
->desc
.active
,
1456 struct rcar_dmac_desc
,
1459 chan
->desc
.running
= NULL
;
1462 if (chan
->desc
.running
)
1463 rcar_dmac_chan_start_xfer(chan
);
1468 static irqreturn_t
rcar_dmac_isr_channel(int irq
, void *dev
)
1470 u32 mask
= RCAR_DMACHCR_DSE
| RCAR_DMACHCR_TE
;
1471 struct rcar_dmac_chan
*chan
= dev
;
1472 irqreturn_t ret
= IRQ_NONE
;
1475 spin_lock(&chan
->lock
);
1477 chcr
= rcar_dmac_chan_read(chan
, RCAR_DMACHCR
);
1478 if (chcr
& RCAR_DMACHCR_TE
)
1479 mask
|= RCAR_DMACHCR_DE
;
1480 rcar_dmac_chan_write(chan
, RCAR_DMACHCR
, chcr
& ~mask
);
1482 if (chcr
& RCAR_DMACHCR_DSE
)
1483 ret
|= rcar_dmac_isr_desc_stage_end(chan
);
1485 if (chcr
& RCAR_DMACHCR_TE
)
1486 ret
|= rcar_dmac_isr_transfer_end(chan
);
1488 spin_unlock(&chan
->lock
);
1493 static irqreturn_t
rcar_dmac_isr_channel_thread(int irq
, void *dev
)
1495 struct rcar_dmac_chan
*chan
= dev
;
1496 struct rcar_dmac_desc
*desc
;
1497 struct dmaengine_desc_callback cb
;
1499 spin_lock_irq(&chan
->lock
);
1501 /* For cyclic transfers notify the user after every chunk. */
1502 if (chan
->desc
.running
&& chan
->desc
.running
->cyclic
) {
1503 desc
= chan
->desc
.running
;
1504 dmaengine_desc_get_callback(&desc
->async_tx
, &cb
);
1506 if (dmaengine_desc_callback_valid(&cb
)) {
1507 spin_unlock_irq(&chan
->lock
);
1508 dmaengine_desc_callback_invoke(&cb
, NULL
);
1509 spin_lock_irq(&chan
->lock
);
1514 * Call the callback function for all descriptors on the done list and
1515 * move them to the ack wait list.
1517 while (!list_empty(&chan
->desc
.done
)) {
1518 desc
= list_first_entry(&chan
->desc
.done
, struct rcar_dmac_desc
,
1520 dma_cookie_complete(&desc
->async_tx
);
1521 list_del(&desc
->node
);
1523 dmaengine_desc_get_callback(&desc
->async_tx
, &cb
);
1524 if (dmaengine_desc_callback_valid(&cb
)) {
1525 spin_unlock_irq(&chan
->lock
);
1527 * We own the only reference to this descriptor, we can
1528 * safely dereference it without holding the channel
1531 dmaengine_desc_callback_invoke(&cb
, NULL
);
1532 spin_lock_irq(&chan
->lock
);
1535 list_add_tail(&desc
->node
, &chan
->desc
.wait
);
1538 spin_unlock_irq(&chan
->lock
);
1540 /* Recycle all acked descriptors. */
1541 rcar_dmac_desc_recycle_acked(chan
);
1546 static irqreturn_t
rcar_dmac_isr_error(int irq
, void *data
)
1548 struct rcar_dmac
*dmac
= data
;
1550 if (!(rcar_dmac_read(dmac
, RCAR_DMAOR
) & RCAR_DMAOR_AE
))
1554 * An unrecoverable error occurred on an unknown channel. Halt the DMAC,
1555 * abort transfers on all channels, and reinitialize the DMAC.
1557 rcar_dmac_stop(dmac
);
1558 rcar_dmac_abort(dmac
);
1559 rcar_dmac_init(dmac
);
1564 /* -----------------------------------------------------------------------------
1565 * OF xlate and channel filter
1568 static bool rcar_dmac_chan_filter(struct dma_chan
*chan
, void *arg
)
1570 struct rcar_dmac
*dmac
= to_rcar_dmac(chan
->device
);
1571 struct of_phandle_args
*dma_spec
= arg
;
1574 * FIXME: Using a filter on OF platforms is a nonsense. The OF xlate
1575 * function knows from which device it wants to allocate a channel from,
1576 * and would be perfectly capable of selecting the channel it wants.
1577 * Forcing it to call dma_request_channel() and iterate through all
1578 * channels from all controllers is just pointless.
1580 if (chan
->device
->device_config
!= rcar_dmac_device_config
||
1581 dma_spec
->np
!= chan
->device
->dev
->of_node
)
1584 return !test_and_set_bit(dma_spec
->args
[0], dmac
->modules
);
1587 static struct dma_chan
*rcar_dmac_of_xlate(struct of_phandle_args
*dma_spec
,
1588 struct of_dma
*ofdma
)
1590 struct rcar_dmac_chan
*rchan
;
1591 struct dma_chan
*chan
;
1592 dma_cap_mask_t mask
;
1594 if (dma_spec
->args_count
!= 1)
1597 /* Only slave DMA channels can be allocated via DT */
1599 dma_cap_set(DMA_SLAVE
, mask
);
1601 chan
= dma_request_channel(mask
, rcar_dmac_chan_filter
, dma_spec
);
1605 rchan
= to_rcar_dmac_chan(chan
);
1606 rchan
->mid_rid
= dma_spec
->args
[0];
1611 /* -----------------------------------------------------------------------------
1615 #ifdef CONFIG_PM_SLEEP
1616 static int rcar_dmac_sleep_suspend(struct device
*dev
)
1619 * TODO: Wait for the current transfer to complete and stop the device.
1624 static int rcar_dmac_sleep_resume(struct device
*dev
)
1626 /* TODO: Resume transfers, if any. */
1632 static int rcar_dmac_runtime_suspend(struct device
*dev
)
1637 static int rcar_dmac_runtime_resume(struct device
*dev
)
1639 struct rcar_dmac
*dmac
= dev_get_drvdata(dev
);
1641 return rcar_dmac_init(dmac
);
1645 static const struct dev_pm_ops rcar_dmac_pm
= {
1646 SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend
, rcar_dmac_sleep_resume
)
1647 SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend
, rcar_dmac_runtime_resume
,
1651 /* -----------------------------------------------------------------------------
1655 static int rcar_dmac_chan_probe(struct rcar_dmac
*dmac
,
1656 struct rcar_dmac_chan
*rchan
,
1659 struct platform_device
*pdev
= to_platform_device(dmac
->dev
);
1660 struct dma_chan
*chan
= &rchan
->chan
;
1661 char pdev_irqname
[5];
1665 rchan
->index
= index
;
1666 rchan
->iomem
= dmac
->iomem
+ RCAR_DMAC_CHAN_OFFSET(index
);
1667 rchan
->mid_rid
= -EINVAL
;
1669 spin_lock_init(&rchan
->lock
);
1671 INIT_LIST_HEAD(&rchan
->desc
.free
);
1672 INIT_LIST_HEAD(&rchan
->desc
.pending
);
1673 INIT_LIST_HEAD(&rchan
->desc
.active
);
1674 INIT_LIST_HEAD(&rchan
->desc
.done
);
1675 INIT_LIST_HEAD(&rchan
->desc
.wait
);
1677 /* Request the channel interrupt. */
1678 sprintf(pdev_irqname
, "ch%u", index
);
1679 rchan
->irq
= platform_get_irq_byname(pdev
, pdev_irqname
);
1680 if (rchan
->irq
< 0) {
1681 dev_err(dmac
->dev
, "no IRQ specified for channel %u\n", index
);
1685 irqname
= devm_kasprintf(dmac
->dev
, GFP_KERNEL
, "%s:%u",
1686 dev_name(dmac
->dev
), index
);
1690 ret
= devm_request_threaded_irq(dmac
->dev
, rchan
->irq
,
1691 rcar_dmac_isr_channel
,
1692 rcar_dmac_isr_channel_thread
, 0,
1695 dev_err(dmac
->dev
, "failed to request IRQ %u (%d)\n",
1701 * Initialize the DMA engine channel and add it to the DMA engine
1704 chan
->device
= &dmac
->engine
;
1705 dma_cookie_init(chan
);
1707 list_add_tail(&chan
->device_node
, &dmac
->engine
.channels
);
1712 static int rcar_dmac_parse_of(struct device
*dev
, struct rcar_dmac
*dmac
)
1714 struct device_node
*np
= dev
->of_node
;
1717 ret
= of_property_read_u32(np
, "dma-channels", &dmac
->n_channels
);
1719 dev_err(dev
, "unable to read dma-channels property\n");
1723 if (dmac
->n_channels
<= 0 || dmac
->n_channels
>= 100) {
1724 dev_err(dev
, "invalid number of channels %u\n",
1732 static int rcar_dmac_probe(struct platform_device
*pdev
)
1734 const enum dma_slave_buswidth widths
= DMA_SLAVE_BUSWIDTH_1_BYTE
|
1735 DMA_SLAVE_BUSWIDTH_2_BYTES
| DMA_SLAVE_BUSWIDTH_4_BYTES
|
1736 DMA_SLAVE_BUSWIDTH_8_BYTES
| DMA_SLAVE_BUSWIDTH_16_BYTES
|
1737 DMA_SLAVE_BUSWIDTH_32_BYTES
| DMA_SLAVE_BUSWIDTH_64_BYTES
;
1738 unsigned int channels_offset
= 0;
1739 struct dma_device
*engine
;
1740 struct rcar_dmac
*dmac
;
1741 struct resource
*mem
;
1747 dmac
= devm_kzalloc(&pdev
->dev
, sizeof(*dmac
), GFP_KERNEL
);
1751 dmac
->dev
= &pdev
->dev
;
1752 platform_set_drvdata(pdev
, dmac
);
1753 dma_set_mask_and_coherent(dmac
->dev
, DMA_BIT_MASK(40));
1755 ret
= rcar_dmac_parse_of(&pdev
->dev
, dmac
);
1760 * A still unconfirmed hardware bug prevents the IPMMU microTLB 0 to be
1761 * flushed correctly, resulting in memory corruption. DMAC 0 channel 0
1762 * is connected to microTLB 0 on currently supported platforms, so we
1763 * can't use it with the IPMMU. As the IOMMU API operates at the device
1764 * level we can't disable it selectively, so ignore channel 0 for now if
1765 * the device is part of an IOMMU group.
1767 if (pdev
->dev
.iommu_group
) {
1769 channels_offset
= 1;
1772 dmac
->channels
= devm_kcalloc(&pdev
->dev
, dmac
->n_channels
,
1773 sizeof(*dmac
->channels
), GFP_KERNEL
);
1774 if (!dmac
->channels
)
1777 /* Request resources. */
1778 mem
= platform_get_resource(pdev
, IORESOURCE_MEM
, 0);
1779 dmac
->iomem
= devm_ioremap_resource(&pdev
->dev
, mem
);
1780 if (IS_ERR(dmac
->iomem
))
1781 return PTR_ERR(dmac
->iomem
);
1783 irq
= platform_get_irq_byname(pdev
, "error");
1785 dev_err(&pdev
->dev
, "no error IRQ specified\n");
1789 irqname
= devm_kasprintf(dmac
->dev
, GFP_KERNEL
, "%s:error",
1790 dev_name(dmac
->dev
));
1794 ret
= devm_request_irq(&pdev
->dev
, irq
, rcar_dmac_isr_error
, 0,
1797 dev_err(&pdev
->dev
, "failed to request IRQ %u (%d)\n",
1802 /* Enable runtime PM and initialize the device. */
1803 pm_runtime_enable(&pdev
->dev
);
1804 ret
= pm_runtime_get_sync(&pdev
->dev
);
1806 dev_err(&pdev
->dev
, "runtime PM get sync failed (%d)\n", ret
);
1810 ret
= rcar_dmac_init(dmac
);
1811 pm_runtime_put(&pdev
->dev
);
1814 dev_err(&pdev
->dev
, "failed to reset device\n");
1818 /* Initialize the channels. */
1819 INIT_LIST_HEAD(&dmac
->engine
.channels
);
1821 for (i
= 0; i
< dmac
->n_channels
; ++i
) {
1822 ret
= rcar_dmac_chan_probe(dmac
, &dmac
->channels
[i
],
1823 i
+ channels_offset
);
1828 /* Register the DMAC as a DMA provider for DT. */
1829 ret
= of_dma_controller_register(pdev
->dev
.of_node
, rcar_dmac_of_xlate
,
1835 * Register the DMA engine device.
1837 * Default transfer size of 32 bytes requires 32-byte alignment.
1839 engine
= &dmac
->engine
;
1840 dma_cap_set(DMA_MEMCPY
, engine
->cap_mask
);
1841 dma_cap_set(DMA_SLAVE
, engine
->cap_mask
);
1843 engine
->dev
= &pdev
->dev
;
1844 engine
->copy_align
= ilog2(RCAR_DMAC_MEMCPY_XFER_SIZE
);
1846 engine
->src_addr_widths
= widths
;
1847 engine
->dst_addr_widths
= widths
;
1848 engine
->directions
= BIT(DMA_MEM_TO_DEV
) | BIT(DMA_DEV_TO_MEM
);
1849 engine
->residue_granularity
= DMA_RESIDUE_GRANULARITY_BURST
;
1851 engine
->device_alloc_chan_resources
= rcar_dmac_alloc_chan_resources
;
1852 engine
->device_free_chan_resources
= rcar_dmac_free_chan_resources
;
1853 engine
->device_prep_dma_memcpy
= rcar_dmac_prep_dma_memcpy
;
1854 engine
->device_prep_slave_sg
= rcar_dmac_prep_slave_sg
;
1855 engine
->device_prep_dma_cyclic
= rcar_dmac_prep_dma_cyclic
;
1856 engine
->device_config
= rcar_dmac_device_config
;
1857 engine
->device_terminate_all
= rcar_dmac_chan_terminate_all
;
1858 engine
->device_tx_status
= rcar_dmac_tx_status
;
1859 engine
->device_issue_pending
= rcar_dmac_issue_pending
;
1860 engine
->device_synchronize
= rcar_dmac_device_synchronize
;
1862 ret
= dma_async_device_register(engine
);
1869 of_dma_controller_free(pdev
->dev
.of_node
);
1870 pm_runtime_disable(&pdev
->dev
);
1874 static int rcar_dmac_remove(struct platform_device
*pdev
)
1876 struct rcar_dmac
*dmac
= platform_get_drvdata(pdev
);
1878 of_dma_controller_free(pdev
->dev
.of_node
);
1879 dma_async_device_unregister(&dmac
->engine
);
1881 pm_runtime_disable(&pdev
->dev
);
1886 static void rcar_dmac_shutdown(struct platform_device
*pdev
)
1888 struct rcar_dmac
*dmac
= platform_get_drvdata(pdev
);
1890 rcar_dmac_stop(dmac
);
static const struct of_device_id rcar_dmac_of_ids[] = {
	{ .compatible = "renesas,rcar-dmac", },
	{ /* Sentinel */ }
};
MODULE_DEVICE_TABLE(of, rcar_dmac_of_ids);

static struct platform_driver rcar_dmac_driver = {
	.driver		= {
		.pm	= &rcar_dmac_pm,
		.name	= "rcar-dmac",
		.of_match_table = rcar_dmac_of_ids,
	},
	.probe		= rcar_dmac_probe,
	.remove		= rcar_dmac_remove,
	.shutdown	= rcar_dmac_shutdown,
};

module_platform_driver(rcar_dmac_driver);

MODULE_DESCRIPTION("R-Car Gen2 DMA Controller Driver");
MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
MODULE_LICENSE("GPL v2");