]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/dma/sun6i-dma.c
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[mirror_ubuntu-jammy-kernel.git] / drivers / dma / sun6i-dma.c
CommitLineData
2874c5fd 1// SPDX-License-Identifier: GPL-2.0-or-later
55585930
MR
2/*
3 * Copyright (C) 2013-2014 Allwinner Tech Co., Ltd
4 * Author: Sugar <shuge@allwinnertech.com>
5 *
6 * Copyright (C) 2014 Maxime Ripard
7 * Maxime Ripard <maxime.ripard@free-electrons.com>
55585930
MR
8 */
9
10#include <linux/clk.h>
11#include <linux/delay.h>
12#include <linux/dmaengine.h>
13#include <linux/dmapool.h>
14#include <linux/interrupt.h>
15#include <linux/module.h>
16#include <linux/of_dma.h>
25a37c2f 17#include <linux/of_device.h>
55585930
MR
18#include <linux/platform_device.h>
19#include <linux/reset.h>
20#include <linux/slab.h>
21#include <linux/types.h>
22
23#include "virt-dma.h"
24
55585930
MR
25/*
26 * Common registers
27 */
28#define DMA_IRQ_EN(x) ((x) * 0x04)
29#define DMA_IRQ_HALF BIT(0)
30#define DMA_IRQ_PKG BIT(1)
31#define DMA_IRQ_QUEUE BIT(2)
32
33#define DMA_IRQ_CHAN_NR 8
34#define DMA_IRQ_CHAN_WIDTH 4
35
36
37#define DMA_IRQ_STAT(x) ((x) * 0x04 + 0x10)
38
39#define DMA_STAT 0x30
40
464aa6f5
SB
41/* Offset between DMA_IRQ_EN and DMA_IRQ_STAT limits number of channels */
42#define DMA_MAX_CHANNELS (DMA_IRQ_CHAN_NR * 0x10 / 4)
43
0b04ddf8
CYT
44/*
45 * sun8i specific registers
46 */
47#define SUN8I_DMA_GATE 0x20
48#define SUN8I_DMA_GATE_ENABLE 0x4
49
50b12497
SB
50#define SUNXI_H3_SECURE_REG 0x20
51#define SUNXI_H3_DMA_GATE 0x28
52#define SUNXI_H3_DMA_GATE_ENABLE 0x4
55585930
MR
53/*
54 * Channels specific registers
55 */
56#define DMA_CHAN_ENABLE 0x00
57#define DMA_CHAN_ENABLE_START BIT(0)
58#define DMA_CHAN_ENABLE_STOP 0
59
60#define DMA_CHAN_PAUSE 0x04
61#define DMA_CHAN_PAUSE_PAUSE BIT(1)
62#define DMA_CHAN_PAUSE_RESUME 0
63
64#define DMA_CHAN_LLI_ADDR 0x08
65
66#define DMA_CHAN_CUR_CFG 0x0c
464aa6f5
SB
67#define DMA_CHAN_MAX_DRQ 0x1f
68#define DMA_CHAN_CFG_SRC_DRQ(x) ((x) & DMA_CHAN_MAX_DRQ)
55585930
MR
69#define DMA_CHAN_CFG_SRC_IO_MODE BIT(5)
70#define DMA_CHAN_CFG_SRC_LINEAR_MODE (0 << 5)
5a6a6202
SB
71#define DMA_CHAN_CFG_SRC_BURST_A31(x) (((x) & 0x3) << 7)
72#define DMA_CHAN_CFG_SRC_BURST_H3(x) (((x) & 0x3) << 6)
55585930
MR
73#define DMA_CHAN_CFG_SRC_WIDTH(x) (((x) & 0x3) << 9)
74
75#define DMA_CHAN_CFG_DST_DRQ(x) (DMA_CHAN_CFG_SRC_DRQ(x) << 16)
76#define DMA_CHAN_CFG_DST_IO_MODE (DMA_CHAN_CFG_SRC_IO_MODE << 16)
77#define DMA_CHAN_CFG_DST_LINEAR_MODE (DMA_CHAN_CFG_SRC_LINEAR_MODE << 16)
5a6a6202
SB
78#define DMA_CHAN_CFG_DST_BURST_A31(x) (DMA_CHAN_CFG_SRC_BURST_A31(x) << 16)
79#define DMA_CHAN_CFG_DST_BURST_H3(x) (DMA_CHAN_CFG_SRC_BURST_H3(x) << 16)
55585930
MR
80#define DMA_CHAN_CFG_DST_WIDTH(x) (DMA_CHAN_CFG_SRC_WIDTH(x) << 16)
81
82#define DMA_CHAN_CUR_SRC 0x10
83
84#define DMA_CHAN_CUR_DST 0x14
85
86#define DMA_CHAN_CUR_CNT 0x18
87
88#define DMA_CHAN_CUR_PARA 0x1c
89
90
91/*
92 * Various hardware related defines
93 */
94#define LLI_LAST_ITEM 0xfffff800
95#define NORMAL_WAIT 8
96#define DRQ_SDRAM 1
97
50b12497
SB
98/* forward declaration */
99struct sun6i_dma_dev;
100
25a37c2f
CYT
101/*
102 * Hardware channels / ports representation
103 *
104 * The hardware is used in several SoCs, with differing numbers
105 * of channels and endpoints. This structure ties those numbers
106 * to a certain compatible string.
107 */
108struct sun6i_dma_config {
109 u32 nr_max_channels;
110 u32 nr_max_requests;
111 u32 nr_max_vchans;
0430a7c7
IZ
112 /*
113 * In the datasheets/user manuals of newer Allwinner SoCs, a special
114 * bit (bit 2 at register 0x20) is present.
115 * It's named "DMA MCLK interface circuit auto gating bit" in the
116 * documents, and the footnote of this register says that this bit
117 * should be set up when initializing the DMA controller.
118 * Allwinner A23/A33 user manuals do not have this bit documented,
119 * however these SoCs really have and need this bit, as seen in the
120 * BSP kernel source code.
121 */
50b12497 122 void (*clock_autogate_enable)(struct sun6i_dma_dev *);
5a6a6202 123 void (*set_burst_length)(u32 *p_cfg, s8 src_burst, s8 dst_burst);
88d8622c
SB
124 u32 src_burst_lengths;
125 u32 dst_burst_lengths;
d5f6d8cf
SB
126 u32 src_addr_widths;
127 u32 dst_addr_widths;
25a37c2f
CYT
128};
129
55585930
MR
130/*
131 * Hardware representation of the LLI
132 *
133 * The hardware will be fed the physical address of this structure,
134 * and read its content in order to start the transfer.
135 */
136struct sun6i_dma_lli {
137 u32 cfg;
138 u32 src;
139 u32 dst;
140 u32 len;
141 u32 para;
142 u32 p_lli_next;
143
144 /*
145 * This field is not used by the DMA controller, but will be
146 * used by the CPU to go through the list (mostly for dumping
147 * or freeing it).
148 */
149 struct sun6i_dma_lli *v_lli_next;
150};
151
152
153struct sun6i_desc {
154 struct virt_dma_desc vd;
155 dma_addr_t p_lli;
156 struct sun6i_dma_lli *v_lli;
157};
158
159struct sun6i_pchan {
160 u32 idx;
161 void __iomem *base;
162 struct sun6i_vchan *vchan;
163 struct sun6i_desc *desc;
164 struct sun6i_desc *done;
165};
166
167struct sun6i_vchan {
168 struct virt_dma_chan vc;
169 struct list_head node;
170 struct dma_slave_config cfg;
171 struct sun6i_pchan *phy;
172 u8 port;
a90e173f
JFM
173 u8 irq_type;
174 bool cyclic;
55585930
MR
175};
176
177struct sun6i_dma_dev {
178 struct dma_device slave;
179 void __iomem *base;
180 struct clk *clk;
181 int irq;
182 spinlock_t lock;
183 struct reset_control *rstc;
184 struct tasklet_struct task;
185 atomic_t tasklet_shutdown;
186 struct list_head pending;
187 struct dma_pool *pool;
188 struct sun6i_pchan *pchans;
189 struct sun6i_vchan *vchans;
25a37c2f 190 const struct sun6i_dma_config *cfg;
500fa9e7
SB
191 u32 num_pchans;
192 u32 num_vchans;
193 u32 max_request;
55585930
MR
194};
195
196static struct device *chan2dev(struct dma_chan *chan)
197{
198 return &chan->dev->device;
199}
200
/* Map the generic dma_device embedded in sun6i_dma_dev back to its container. */
static inline struct sun6i_dma_dev *to_sun6i_dma_dev(struct dma_device *d)
{
	return container_of(d, struct sun6i_dma_dev, slave);
}
205
/* Map a generic dma_chan (embedded via vc.chan) back to its sun6i_vchan. */
static inline struct sun6i_vchan *to_sun6i_vchan(struct dma_chan *chan)
{
	return container_of(chan, struct sun6i_vchan, vc.chan);
}
210
/* Map an async tx descriptor (embedded via vd.tx) back to its sun6i_desc. */
static inline struct sun6i_desc *
to_sun6i_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct sun6i_desc, vd.tx);
}
216
/* Debug dump of the controller-wide IRQ enable/status and DMA status regs. */
static inline void sun6i_dma_dump_com_regs(struct sun6i_dma_dev *sdev)
{
	dev_dbg(sdev->slave.dev, "Common register:\n"
		"\tmask0(%04x): 0x%08x\n"
		"\tmask1(%04x): 0x%08x\n"
		"\tpend0(%04x): 0x%08x\n"
		"\tpend1(%04x): 0x%08x\n"
		"\tstats(%04x): 0x%08x\n",
		DMA_IRQ_EN(0), readl(sdev->base + DMA_IRQ_EN(0)),
		DMA_IRQ_EN(1), readl(sdev->base + DMA_IRQ_EN(1)),
		DMA_IRQ_STAT(0), readl(sdev->base + DMA_IRQ_STAT(0)),
		DMA_IRQ_STAT(1), readl(sdev->base + DMA_IRQ_STAT(1)),
		DMA_STAT, readl(sdev->base + DMA_STAT));
}
231
/* Debug dump of one physical channel's register window. */
static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
					    struct sun6i_pchan *pchan)
{
	/* Physical address of the channel window, printed for reference only. */
	phys_addr_t reg = virt_to_phys(pchan->base);

	dev_dbg(sdev->slave.dev, "Chan %d reg: %pa\n"
		"\t___en(%04x): \t0x%08x\n"
		"\tpause(%04x): \t0x%08x\n"
		"\tstart(%04x): \t0x%08x\n"
		"\t__cfg(%04x): \t0x%08x\n"
		"\t__src(%04x): \t0x%08x\n"
		"\t__dst(%04x): \t0x%08x\n"
		"\tcount(%04x): \t0x%08x\n"
		"\t_para(%04x): \t0x%08x\n\n",
		pchan->idx, &reg,
		DMA_CHAN_ENABLE,
		readl(pchan->base + DMA_CHAN_ENABLE),
		DMA_CHAN_PAUSE,
		readl(pchan->base + DMA_CHAN_PAUSE),
		DMA_CHAN_LLI_ADDR,
		readl(pchan->base + DMA_CHAN_LLI_ADDR),
		DMA_CHAN_CUR_CFG,
		readl(pchan->base + DMA_CHAN_CUR_CFG),
		DMA_CHAN_CUR_SRC,
		readl(pchan->base + DMA_CHAN_CUR_SRC),
		DMA_CHAN_CUR_DST,
		readl(pchan->base + DMA_CHAN_CUR_DST),
		DMA_CHAN_CUR_CNT,
		readl(pchan->base + DMA_CHAN_CUR_CNT),
		DMA_CHAN_CUR_PARA,
		readl(pchan->base + DMA_CHAN_CUR_PARA));
}
264
1f9cd915 265static inline s8 convert_burst(u32 maxburst)
55585930
MR
266{
267 switch (maxburst) {
268 case 1:
1f9cd915 269 return 0;
d5f6d8cf
SB
270 case 4:
271 return 1;
55585930 272 case 8:
1f9cd915 273 return 2;
d5f6d8cf
SB
274 case 16:
275 return 3;
55585930
MR
276 default:
277 return -EINVAL;
278 }
55585930
MR
279}
280
1f9cd915 281static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
55585930 282{
d5f6d8cf 283 return ilog2(addr_width);
55585930
MR
284}
285
50b12497
SB
/* Enable the MCLK auto-gating bit at the A23/A33 register offset (0x20). */
static void sun6i_enable_clock_autogate_a23(struct sun6i_dma_dev *sdev)
{
	writel(SUN8I_DMA_GATE_ENABLE, sdev->base + SUN8I_DMA_GATE);
}
290
/* Enable the MCLK auto-gating bit at the H3 register offset (0x28). */
static void sun6i_enable_clock_autogate_h3(struct sun6i_dma_dev *sdev)
{
	writel(SUNXI_H3_DMA_GATE_ENABLE, sdev->base + SUNXI_H3_DMA_GATE);
}
295
5a6a6202
SB
296static void sun6i_set_burst_length_a31(u32 *p_cfg, s8 src_burst, s8 dst_burst)
297{
298 *p_cfg |= DMA_CHAN_CFG_SRC_BURST_A31(src_burst) |
299 DMA_CHAN_CFG_DST_BURST_A31(dst_burst);
300}
301
302static void sun6i_set_burst_length_h3(u32 *p_cfg, s8 src_burst, s8 dst_burst)
303{
304 *p_cfg |= DMA_CHAN_CFG_SRC_BURST_H3(src_burst) |
305 DMA_CHAN_CFG_DST_BURST_H3(dst_burst);
306}
307
a90e173f
JFM
/*
 * Compute the residue (bytes not yet transferred) of the descriptor
 * currently running on @pchan.
 *
 * Reads the in-flight LLI pointer and remaining-byte counter from the
 * channel registers, then adds the full length of every LLI that has not
 * been started yet (those after the one the hardware is executing).
 */
static size_t sun6i_get_chan_size(struct sun6i_pchan *pchan)
{
	struct sun6i_desc *txd = pchan->desc;
	struct sun6i_dma_lli *lli;
	size_t bytes;
	dma_addr_t pos;

	pos = readl(pchan->base + DMA_CHAN_LLI_ADDR);
	bytes = readl(pchan->base + DMA_CHAN_CUR_CNT);

	/* LLI_LAST_ITEM means the hardware is on the final link already. */
	if (pos == LLI_LAST_ITEM)
		return bytes;

	/* Find the LLI whose successor the hardware points at, then sum
	 * the lengths of all links from that successor onward. */
	for (lli = txd->v_lli; lli; lli = lli->v_lli_next) {
		if (lli->p_lli_next == pos) {
			for (lli = lli->v_lli_next; lli; lli = lli->v_lli_next)
				bytes += lli->len;
			break;
		}
	}

	return bytes;
}
331
55585930
MR
332static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
333 struct sun6i_dma_lli *next,
334 dma_addr_t next_phy,
335 struct sun6i_desc *txd)
336{
337 if ((!prev && !txd) || !next)
338 return NULL;
339
340 if (!prev) {
341 txd->p_lli = next_phy;
342 txd->v_lli = next;
343 } else {
344 prev->p_lli_next = next_phy;
345 prev->v_lli_next = next;
346 }
347
348 next->p_lli_next = LLI_LAST_ITEM;
349 next->v_lli_next = NULL;
350
351 return next;
352}
353
55585930
MR
/* Debug dump of a single LLI: its physical/virtual address and all fields. */
static inline void sun6i_dma_dump_lli(struct sun6i_vchan *vchan,
				      struct sun6i_dma_lli *lli)
{
	phys_addr_t p_lli = virt_to_phys(lli);

	dev_dbg(chan2dev(&vchan->vc.chan),
		"\n\tdesc: p - %pa v - 0x%p\n"
		"\t\tc - 0x%08x s - 0x%08x d - 0x%08x\n"
		"\t\tl - 0x%08x p - 0x%08x n - 0x%08x\n",
		&p_lli, lli,
		lli->cfg, lli->src, lli->dst,
		lli->len, lli->para, lli->p_lli_next);
}
367
368static void sun6i_dma_free_desc(struct virt_dma_desc *vd)
369{
370 struct sun6i_desc *txd = to_sun6i_desc(&vd->tx);
371 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vd->tx.chan->device);
372 struct sun6i_dma_lli *v_lli, *v_next;
373 dma_addr_t p_lli, p_next;
374
375 if (unlikely(!txd))
376 return;
377
378 p_lli = txd->p_lli;
379 v_lli = txd->v_lli;
380
381 while (v_lli) {
382 v_next = v_lli->v_lli_next;
383 p_next = v_lli->p_lli_next;
384
385 dma_pool_free(sdev->pool, v_lli, p_lli);
386
387 v_lli = v_next;
388 p_lli = p_next;
389 }
390
391 kfree(txd);
392}
393
55585930
MR
/*
 * Take the next issued descriptor off @vchan and start it on the physical
 * channel bound to it.
 *
 * Must be called with vchan->vc.lock held.  Returns -EAGAIN when there is
 * no physical channel or no descriptor to run, 0 on success.
 */
static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(vchan->vc.chan.device);
	struct virt_dma_desc *desc = vchan_next_desc(&vchan->vc);
	struct sun6i_pchan *pchan = vchan->phy;
	u32 irq_val, irq_reg, irq_offset;

	if (!pchan)
		return -EAGAIN;

	if (!desc) {
		pchan->desc = NULL;
		pchan->done = NULL;
		return -EAGAIN;
	}

	list_del(&desc->node);

	pchan->desc = to_sun6i_desc(&desc->tx);
	pchan->done = NULL;

	sun6i_dma_dump_lli(vchan, pchan->desc->v_lli);

	/* Each IRQ register covers DMA_IRQ_CHAN_NR channels, 4 bits each. */
	irq_reg = pchan->idx / DMA_IRQ_CHAN_NR;
	irq_offset = pchan->idx % DMA_IRQ_CHAN_NR;

	/* Cyclic transfers complete per package; others per whole queue. */
	vchan->irq_type = vchan->cyclic ? DMA_IRQ_PKG : DMA_IRQ_QUEUE;

	/* Replace this channel's 4-bit IRQ field with the wanted type. */
	irq_val = readl(sdev->base + DMA_IRQ_EN(irq_reg));
	irq_val &= ~((DMA_IRQ_HALF | DMA_IRQ_PKG | DMA_IRQ_QUEUE) <<
		     (irq_offset * DMA_IRQ_CHAN_WIDTH));
	irq_val |= vchan->irq_type << (irq_offset * DMA_IRQ_CHAN_WIDTH);
	writel(irq_val, sdev->base + DMA_IRQ_EN(irq_reg));

	/* Feed the hardware the head LLI and kick off the transfer. */
	writel(pchan->desc->p_lli, pchan->base + DMA_CHAN_LLI_ADDR);
	writel(DMA_CHAN_ENABLE_START, pchan->base + DMA_CHAN_ENABLE);

	sun6i_dma_dump_com_regs(sdev);
	sun6i_dma_dump_chan_regs(sdev, pchan);

	return 0;
}
436
/*
 * Deferred-work handler: recycle physical channels whose descriptor has
 * completed, then hand free physical channels to pending virtual channels
 * and start their descriptors.
 */
static void sun6i_dma_tasklet(unsigned long data)
{
	struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	unsigned int pchan_alloc = 0;
	unsigned int pchan_idx;

	/* Pass 1: for each vchan whose pchan finished, start the next
	 * descriptor, or release the pchan if nothing is queued. */
	list_for_each_entry(vchan, &sdev->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&vchan->vc.lock);

		pchan = vchan->phy;

		if (pchan && pchan->done) {
			if (sun6i_dma_start_desc(vchan)) {
				/*
				 * No current txd associated with this channel
				 */
				dev_dbg(sdev->slave.dev, "pchan %u: free\n",
					pchan->idx);

				/* Mark this channel free */
				vchan->phy = NULL;
				pchan->vchan = NULL;
			}
		}
		spin_unlock_irq(&vchan->vc.lock);
	}

	/* Pass 2: under the device lock, pair each free pchan with the
	 * oldest pending vchan; remember which pairs were made. */
	spin_lock_irq(&sdev->lock);
	for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
		pchan = &sdev->pchans[pchan_idx];

		if (pchan->vchan || list_empty(&sdev->pending))
			continue;

		vchan = list_first_entry(&sdev->pending,
					 struct sun6i_vchan, node);

		/* Remove from pending channels */
		list_del_init(&vchan->node);
		pchan_alloc |= BIT(pchan_idx);

		/* Mark this channel allocated */
		pchan->vchan = vchan;
		vchan->phy = pchan;
		dev_dbg(sdev->slave.dev, "pchan %u: alloc vchan %p\n",
			pchan->idx, &vchan->vc);
	}
	spin_unlock_irq(&sdev->lock);

	/* Pass 3: start descriptors on the freshly paired channels, taking
	 * the per-vchan lock (device lock is dropped first: lock ordering). */
	for (pchan_idx = 0; pchan_idx < sdev->num_pchans; pchan_idx++) {
		if (!(pchan_alloc & BIT(pchan_idx)))
			continue;

		pchan = sdev->pchans + pchan_idx;
		vchan = pchan->vchan;
		if (vchan) {
			spin_lock_irq(&vchan->vc.lock);
			sun6i_dma_start_desc(vchan);
			spin_unlock_irq(&vchan->vc.lock);
		}
	}
}
501
/*
 * IRQ handler: ack pending per-channel status bits, complete or advance
 * cyclic descriptors, and schedule the tasklet to rotate channels.
 */
static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
{
	struct sun6i_dma_dev *sdev = dev_id;
	struct sun6i_vchan *vchan;
	struct sun6i_pchan *pchan;
	int i, j, ret = IRQ_NONE;
	u32 status;

	/* One status register per DMA_IRQ_CHAN_NR (8) channels. */
	for (i = 0; i < sdev->num_pchans / DMA_IRQ_CHAN_NR; i++) {
		status = readl(sdev->base + DMA_IRQ_STAT(i));
		if (!status)
			continue;

		dev_dbg(sdev->slave.dev, "DMA irq status %s: 0x%x\n",
			i ? "high" : "low", status);

		/* Write-1-to-clear the bits we are about to service. */
		writel(status, sdev->base + DMA_IRQ_STAT(i));

		/*
		 * NOTE(review): pchan is indexed by j alone, so for the
		 * second status register (i == 1) this still addresses
		 * pchans[0..7] rather than [8..15]; looks suspicious for
		 * controllers with more than 8 channels — confirm against
		 * upstream before relying on it.
		 */
		for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
			pchan = sdev->pchans + j;
			vchan = pchan->vchan;
			if (vchan && (status & vchan->irq_type)) {
				if (vchan->cyclic) {
					vchan_cyclic_callback(&pchan->desc->vd);
				} else {
					spin_lock(&vchan->vc.lock);
					vchan_cookie_complete(&pchan->desc->vd);
					pchan->done = pchan->desc;
					spin_unlock(&vchan->vc.lock);
				}
			}

			/* Shift to the next channel's 4-bit field. */
			status = status >> DMA_IRQ_CHAN_WIDTH;
		}

		if (!atomic_read(&sdev->tasklet_shutdown))
			tasklet_schedule(&sdev->task);
		ret = IRQ_HANDLED;
	}

	return ret;
}
544
52c87179
JFM
/*
 * Validate a slave config for @direction and build the width/burst part of
 * the channel CFG word into *p_cfg.
 *
 * The memory side gets defaults (4-byte width, 8-beat burst) when the
 * client left them undefined; both sides are then checked against the
 * per-SoC capability masks.  Returns 0 or -EINVAL.
 */
static int set_config(struct sun6i_dma_dev *sdev,
			struct dma_slave_config *sconfig,
			enum dma_transfer_direction direction,
			u32 *p_cfg)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	u32 src_maxburst, dst_maxburst;
	s8 src_width, dst_width, src_burst, dst_burst;

	src_addr_width = sconfig->src_addr_width;
	dst_addr_width = sconfig->dst_addr_width;
	src_maxburst = sconfig->src_maxburst;
	dst_maxburst = sconfig->dst_maxburst;

	/* Default the memory-side parameters; the device side must be set
	 * explicitly by the client. */
	switch (direction) {
	case DMA_MEM_TO_DEV:
		if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		src_maxburst = src_maxburst ? src_maxburst : 8;
		break;
	case DMA_DEV_TO_MEM:
		if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
			dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dst_maxburst = dst_maxburst ? dst_maxburst : 8;
		break;
	default:
		return -EINVAL;
	}

	/* Reject widths/bursts the SoC cannot do (capability bitmaps). */
	if (!(BIT(src_addr_width) & sdev->slave.src_addr_widths))
		return -EINVAL;
	if (!(BIT(dst_addr_width) & sdev->slave.dst_addr_widths))
		return -EINVAL;
	if (!(BIT(src_maxburst) & sdev->cfg->src_burst_lengths))
		return -EINVAL;
	if (!(BIT(dst_maxburst) & sdev->cfg->dst_burst_lengths))
		return -EINVAL;

	src_width = convert_buswidth(src_addr_width);
	dst_width = convert_buswidth(dst_addr_width);
	dst_burst = convert_burst(dst_maxburst);
	src_burst = convert_burst(src_maxburst);

	*p_cfg = DMA_CHAN_CFG_SRC_WIDTH(src_width) |
		DMA_CHAN_CFG_DST_WIDTH(dst_width);

	/* Burst field offsets differ per SoC generation; use the hook. */
	sdev->cfg->set_burst_length(p_cfg, src_burst, dst_burst);

	return 0;
}
595
55585930
MR
/*
 * Prepare a single-LLI memory-to-memory copy descriptor.
 *
 * Uses fixed parameters for DRAM-to-DRAM: linear addressing on both sides,
 * DRQ_SDRAM ports, 8-beat bursts and 4-byte bus width.
 * Returns NULL on allocation failure or zero length.
 */
static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_dma_lli *v_lli;
	struct sun6i_desc *txd;
	dma_addr_t p_lli;
	s8 burst, width;

	dev_dbg(chan2dev(chan),
		"%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
		__func__, vchan->vc.chan.chan_id, &dest, &src, len, flags);

	if (!len)
		return NULL;

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
	if (!v_lli) {
		dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
		goto err_txd_free;
	}

	v_lli->src = src;
	v_lli->dst = dest;
	v_lli->len = len;
	v_lli->para = NORMAL_WAIT;

	burst = convert_burst(8);
	width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
	v_lli->cfg = DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
		DMA_CHAN_CFG_DST_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_LINEAR_MODE |
		DMA_CHAN_CFG_SRC_WIDTH(width) |
		DMA_CHAN_CFG_DST_WIDTH(width);

	/* Burst field position is SoC-specific. */
	sdev->cfg->set_burst_length(&v_lli->cfg, burst, burst);

	/* Single-entry chain: install v_lli as the descriptor head. */
	sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);

	sun6i_dma_dump_lli(vchan, v_lli);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_txd_free:
	kfree(txd);
	return NULL;
}
650
/*
 * Prepare a scatter-gather slave transfer: one LLI per sg entry, linked
 * into a chain.  The device-side address comes from the channel's slave
 * config; the memory side uses linear mode on DRQ_SDRAM.
 * Returns NULL on invalid config or allocation failure.
 */
static struct dma_async_tx_descriptor *sun6i_dma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction dir,
		unsigned long flags, void *context)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct dma_slave_config *sconfig = &vchan->cfg;
	struct sun6i_dma_lli *v_lli, *prev = NULL;
	struct sun6i_desc *txd;
	struct scatterlist *sg;
	dma_addr_t p_lli;
	u32 lli_cfg;
	int i, ret;

	if (!sgl)
		return NULL;

	/* Build the width/burst part of the CFG word once, reused per LLI. */
	ret = set_config(sdev, sconfig, dir, &lli_cfg);
	if (ret) {
		dev_err(chan2dev(chan), "Invalid DMA configuration\n");
		return NULL;
	}

	txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
	if (!txd)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
		if (!v_lli)
			goto err_lli_free;

		v_lli->len = sg_dma_len(sg);
		v_lli->para = NORMAL_WAIT;

		if (dir == DMA_MEM_TO_DEV) {
			/* Memory (linear) -> device FIFO (IO mode). */
			v_lli->src = sg_dma_address(sg);
			v_lli->dst = sconfig->dst_addr;
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_IO_MODE |
				DMA_CHAN_CFG_SRC_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_DST_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sconfig->dst_addr, &sg_dma_address(sg),
				sg_dma_len(sg), flags);

		} else {
			/* Device FIFO (IO mode) -> memory (linear). */
			v_lli->src = sconfig->src_addr;
			v_lli->dst = sg_dma_address(sg);
			v_lli->cfg = lli_cfg |
				DMA_CHAN_CFG_DST_LINEAR_MODE |
				DMA_CHAN_CFG_SRC_IO_MODE |
				DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
				DMA_CHAN_CFG_SRC_DRQ(vchan->port);

			dev_dbg(chan2dev(chan),
				"%s; chan: %d, dest: %pad, src: %pad, len: %u. flags: 0x%08lx\n",
				__func__, vchan->vc.chan.chan_id,
				&sg_dma_address(sg), &sconfig->src_addr,
				sg_dma_len(sg), flags);
		}

		prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
	}

	dev_dbg(chan2dev(chan), "First: %pad\n", &txd->p_lli);
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		sun6i_dma_dump_lli(vchan, prev);

	return vchan_tx_prep(&vchan->vc, &txd->vd, flags);

err_lli_free:
	/* Unwind: free every LLI allocated so far, then the descriptor. */
	for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
		dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
	kfree(txd);
	return NULL;
}
733
a90e173f
JFM
734static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_cyclic(
735 struct dma_chan *chan,
736 dma_addr_t buf_addr,
737 size_t buf_len,
738 size_t period_len,
739 enum dma_transfer_direction dir,
740 unsigned long flags)
741{
742 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
743 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
744 struct dma_slave_config *sconfig = &vchan->cfg;
745 struct sun6i_dma_lli *v_lli, *prev = NULL;
746 struct sun6i_desc *txd;
747 dma_addr_t p_lli;
748 u32 lli_cfg;
749 unsigned int i, periods = buf_len / period_len;
750 int ret;
751
752 ret = set_config(sdev, sconfig, dir, &lli_cfg);
753 if (ret) {
754 dev_err(chan2dev(chan), "Invalid DMA configuration\n");
755 return NULL;
756 }
757
758 txd = kzalloc(sizeof(*txd), GFP_NOWAIT);
759 if (!txd)
760 return NULL;
761
762 for (i = 0; i < periods; i++) {
763 v_lli = dma_pool_alloc(sdev->pool, GFP_NOWAIT, &p_lli);
764 if (!v_lli) {
765 dev_err(sdev->slave.dev, "Failed to alloc lli memory\n");
766 goto err_lli_free;
767 }
768
769 v_lli->len = period_len;
770 v_lli->para = NORMAL_WAIT;
771
772 if (dir == DMA_MEM_TO_DEV) {
773 v_lli->src = buf_addr + period_len * i;
774 v_lli->dst = sconfig->dst_addr;
775 v_lli->cfg = lli_cfg |
776 DMA_CHAN_CFG_DST_IO_MODE |
777 DMA_CHAN_CFG_SRC_LINEAR_MODE |
778 DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
779 DMA_CHAN_CFG_DST_DRQ(vchan->port);
780 } else {
781 v_lli->src = sconfig->src_addr;
782 v_lli->dst = buf_addr + period_len * i;
783 v_lli->cfg = lli_cfg |
784 DMA_CHAN_CFG_DST_LINEAR_MODE |
785 DMA_CHAN_CFG_SRC_IO_MODE |
786 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
787 DMA_CHAN_CFG_SRC_DRQ(vchan->port);
788 }
789
790 prev = sun6i_dma_lli_add(prev, v_lli, p_lli, txd);
791 }
792
793 prev->p_lli_next = txd->p_lli; /* cyclic list */
794
795 vchan->cyclic = true;
796
797 return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
798
799err_lli_free:
800 for (prev = txd->v_lli; prev; prev = prev->v_lli_next)
801 dma_pool_free(sdev->pool, prev, virt_to_phys(prev));
802 kfree(txd);
803 return NULL;
804}
805
826b15a7
MR
806static int sun6i_dma_config(struct dma_chan *chan,
807 struct dma_slave_config *config)
808{
809 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
810
811 memcpy(&vchan->cfg, config, sizeof(*config));
812
813 return 0;
814}
815
816static int sun6i_dma_pause(struct dma_chan *chan)
817{
818 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
819 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
820 struct sun6i_pchan *pchan = vchan->phy;
821
822 dev_dbg(chan2dev(chan), "vchan %p: pause\n", &vchan->vc);
823
824 if (pchan) {
825 writel(DMA_CHAN_PAUSE_PAUSE,
826 pchan->base + DMA_CHAN_PAUSE);
827 } else {
828 spin_lock(&sdev->lock);
829 list_del_init(&vchan->node);
830 spin_unlock(&sdev->lock);
831 }
832
833 return 0;
834}
835
/*
 * dmaengine device_resume hook: un-pause a running channel, or re-queue a
 * paused-while-waiting channel if it still has issued descriptors.
 */
static int sun6i_dma_resume(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;

	dev_dbg(chan2dev(chan), "vchan %p: resume\n", &vchan->vc);

	/* vc.lock taken first, sdev->lock nested inside (lock ordering). */
	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (pchan) {
		writel(DMA_CHAN_PAUSE_RESUME,
		       pchan->base + DMA_CHAN_PAUSE);
	} else if (!list_empty(&vchan->vc.desc_issued)) {
		spin_lock(&sdev->lock);
		list_add_tail(&vchan->node, &sdev->pending);
		spin_unlock(&sdev->lock);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	return 0;
}
55585930 860
826b15a7
MR
/*
 * dmaengine device_terminate_all hook: abort everything on this virtual
 * channel — stop the hardware if running, collect all descriptors and
 * free them, and release the physical channel.
 */
static int sun6i_dma_terminate_all(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	struct sun6i_pchan *pchan = vchan->phy;
	unsigned long flags;
	LIST_HEAD(head);

	/* Make sure the vchan can no longer be picked by the tasklet. */
	spin_lock(&sdev->lock);
	list_del_init(&vchan->node);
	spin_unlock(&sdev->lock);

	spin_lock_irqsave(&vchan->vc.lock, flags);

	/* A cyclic descriptor never completes on its own; move it to the
	 * completed list so vchan_get_all_descriptors() picks it up too. */
	if (vchan->cyclic) {
		vchan->cyclic = false;
		if (pchan && pchan->desc) {
			struct virt_dma_desc *vd = &pchan->desc->vd;
			struct virt_dma_chan *vc = &vchan->vc;

			list_add_tail(&vd->node, &vc->desc_completed);
		}
	}

	vchan_get_all_descriptors(&vchan->vc, &head);

	if (pchan) {
		/* Stop the hardware and clear any latched pause. */
		writel(DMA_CHAN_ENABLE_STOP, pchan->base + DMA_CHAN_ENABLE);
		writel(DMA_CHAN_PAUSE_RESUME, pchan->base + DMA_CHAN_PAUSE);

		vchan->phy = NULL;
		pchan->vchan = NULL;
		pchan->desc = NULL;
		pchan->done = NULL;
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);

	/* Free outside the lock: desc_free may sleep-free pool memory. */
	vchan_dma_desc_free_list(&vchan->vc, &head);

	return 0;
}
903
904static enum dma_status sun6i_dma_tx_status(struct dma_chan *chan,
905 dma_cookie_t cookie,
906 struct dma_tx_state *state)
907{
908 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
909 struct sun6i_pchan *pchan = vchan->phy;
910 struct sun6i_dma_lli *lli;
911 struct virt_dma_desc *vd;
912 struct sun6i_desc *txd;
913 enum dma_status ret;
914 unsigned long flags;
915 size_t bytes = 0;
916
917 ret = dma_cookie_status(chan, cookie, state);
b9ab9d10 918 if (ret == DMA_COMPLETE || !state)
55585930
MR
919 return ret;
920
921 spin_lock_irqsave(&vchan->vc.lock, flags);
922
923 vd = vchan_find_desc(&vchan->vc, cookie);
924 txd = to_sun6i_desc(&vd->tx);
925
926 if (vd) {
927 for (lli = txd->v_lli; lli != NULL; lli = lli->v_lli_next)
928 bytes += lli->len;
929 } else if (!pchan || !pchan->desc) {
930 bytes = 0;
931 } else {
a90e173f 932 bytes = sun6i_get_chan_size(pchan);
55585930
MR
933 }
934
935 spin_unlock_irqrestore(&vchan->vc.lock, flags);
936
937 dma_set_residue(state, bytes);
938
939 return ret;
940}
941
/*
 * dmaengine device_issue_pending hook: move submitted descriptors to the
 * issued list and, if the vchan has no physical channel yet, queue it on
 * the device pending list and kick the tasklet to assign one.
 */
static void sun6i_dma_issue_pending(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&vchan->vc.lock, flags);

	if (vchan_issue_pending(&vchan->vc)) {
		/* sdev->lock nested inside vc.lock (consistent ordering). */
		spin_lock(&sdev->lock);

		/* list_empty(&vchan->node) means it is not already queued. */
		if (!vchan->phy && list_empty(&vchan->node)) {
			list_add_tail(&vchan->node, &sdev->pending);
			tasklet_schedule(&sdev->task);
			dev_dbg(chan2dev(chan), "vchan %p: issued\n",
				&vchan->vc);
		}

		spin_unlock(&sdev->lock);
	} else {
		dev_dbg(chan2dev(chan), "vchan %p: nothing to issue\n",
			&vchan->vc);
	}

	spin_unlock_irqrestore(&vchan->vc.lock, flags);
}
968
55585930
MR
/*
 * dmaengine device_free_chan_resources hook: detach the vchan from the
 * device pending list and let virt-dma release its descriptors.
 */
static void sun6i_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
	struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&sdev->lock, flags);
	list_del_init(&vchan->node);
	spin_unlock_irqrestore(&sdev->lock, flags);

	vchan_free_chan_resources(&vchan->vc);
}
981
/*
 * Device-tree translation: map a one-cell DMA specifier (the DRQ port
 * number) to a free channel, recording the port on the vchan.
 * Returns NULL for an out-of-range port or if no channel is available.
 */
static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct sun6i_dma_dev *sdev = ofdma->of_dma_data;
	struct sun6i_vchan *vchan;
	struct dma_chan *chan;
	u8 port = dma_spec->args[0];

	if (port > sdev->max_request)
		return NULL;

	chan = dma_get_any_slave_channel(&sdev->slave);
	if (!chan)
		return NULL;

	vchan = to_sun6i_vchan(chan);
	vchan->port = port;

	return chan;
}
1002
/*
 * Teardown helper: quiesce interrupts and guarantee the tasklet can never
 * run again.  Order matters: mask IRQs, flag shutdown so a racing handler
 * won't reschedule, free the IRQ line, then kill the tasklet.
 */
static inline void sun6i_kill_tasklet(struct sun6i_dma_dev *sdev)
{
	/* Disable all interrupts from DMA */
	writel(0, sdev->base + DMA_IRQ_EN(0));
	writel(0, sdev->base + DMA_IRQ_EN(1));

	/* Prevent spurious interrupts from scheduling the tasklet */
	atomic_inc(&sdev->tasklet_shutdown);

	/* Make sure we won't have any further interrupts */
	devm_free_irq(sdev->slave.dev, sdev->irq, sdev);

	/* Actually prevent the tasklet from being scheduled */
	tasklet_kill(&sdev->task);
}
1018
1019static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
1020{
1021 int i;
1022
500fa9e7 1023 for (i = 0; i < sdev->num_vchans; i++) {
55585930
MR
1024 struct sun6i_vchan *vchan = &sdev->vchans[i];
1025
1026 list_del(&vchan->vc.chan.device_node);
1027 tasklet_kill(&vchan->vc.task);
1028 }
1029}
1030
25a37c2f
CYT
1031/*
1032 * For A31:
1033 *
1034 * There's 16 physical channels that can work in parallel.
1035 *
1036 * However we have 30 different endpoints for our requests.
1037 *
1038 * Since the channels are able to handle only an unidirectional
1039 * transfer, we need to allocate more virtual channels so that
1040 * everyone can grab one channel.
1041 *
1042 * Some devices can't work in both direction (mostly because it
1043 * wouldn't make sense), so we have a bit fewer virtual channels than
1044 * 2 channels per endpoints.
1045 */
1046
/* A31: 16 channels, DRQ ports up to 30, 53 vchans; 1/8-beat bursts only. */
static struct sun6i_dma_config sun6i_a31_dma_cfg = {
	.nr_max_channels = 16,
	.nr_max_requests = 30,
	.nr_max_vchans   = 53,
	.set_burst_length = sun6i_set_burst_length_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
1061
0b04ddf8
CYT
1062/*
1063 * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
1064 * and a total of 37 usable source and destination endpoints.
1065 */
1066
/* A23: like A31 but 8 channels, ports up to 24, and clock auto-gating. */
static struct sun6i_dma_config sun8i_a23_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 24,
	.nr_max_vchans   = 37,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
1082
/*
 * The A83t has 8 physical channels, a maximum DRQ port id of 28,
 * and a total of 39 usable source and destination endpoints.
 * It shares the A23-style clock gate and the A31-style burst
 * length field layout.
 */
static struct sun6i_dma_config sun8i_a83t_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 28,
	.nr_max_vchans   = 39,
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
1098
/*
 * The H3 has 12 physical channels, a maximum DRQ port id of 27,
 * and a total of 34 usable source and destination endpoints.
 * It also supports additional burst lengths and bus widths,
 * and the burst length fields have different offsets.
 */

static struct sun6i_dma_config sun8i_h3_dma_cfg = {
	.nr_max_channels = 12,
	.nr_max_requests = 27,
	.nr_max_vchans   = 34,
	/* H3-style gate register (SUNXI_H3_DMA_GATE) for clock autogating */
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	/* H3-specific burst length field offsets */
	.set_burst_length = sun6i_set_burst_length_h3,
	/* Wider burst (1/4/8/16) and bus width (up to 8 bytes) support */
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
1123
/*
 * The A64 binding uses the number of dma channels from the
 * device tree node.  nr_max_channels/requests/vchans are therefore
 * deliberately left at zero here and filled in from the DT (or
 * derived from it) in sun6i_dma_probe().
 */
static struct sun6i_dma_config sun50i_a64_dma_cfg = {
	.clock_autogate_enable = sun6i_enable_clock_autogate_h3,
	.set_burst_length = sun6i_set_burst_length_h3,
	.src_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.dst_burst_lengths = BIT(1) | BIT(4) | BIT(8) | BIT(16),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_8_BYTES),
};
1142
/*
 * The V3s have only 8 physical channels, a maximum DRQ port id of 23,
 * and a total of 24 usable source and destination endpoints.
 */

static struct sun6i_dma_config sun8i_v3s_dma_cfg = {
	.nr_max_channels = 8,
	.nr_max_requests = 23,
	.nr_max_vchans   = 24,
	/* A23-style gate register, A31-style burst length fields */
	.clock_autogate_enable = sun6i_enable_clock_autogate_a23,
	.set_burst_length = sun6i_set_burst_length_a31,
	.src_burst_lengths = BIT(1) | BIT(8),
	.dst_burst_lengths = BIT(1) | BIT(8),
	.src_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
	.dst_addr_widths   = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
			     BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
			     BIT(DMA_SLAVE_BUSWIDTH_4_BYTES),
};
1163
57c03422 1164static const struct of_device_id sun6i_dma_match[] = {
25a37c2f 1165 { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
0b04ddf8 1166 { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
3a03ea76 1167 { .compatible = "allwinner,sun8i-a83t-dma", .data = &sun8i_a83t_dma_cfg },
f008db8c 1168 { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg },
a702e47e 1169 { .compatible = "allwinner,sun8i-v3s-dma", .data = &sun8i_v3s_dma_cfg },
12e01770 1170 { .compatible = "allwinner,sun50i-a64-dma", .data = &sun50i_a64_dma_cfg },
25a37c2f
CYT
1171 { /* sentinel */ }
1172};
c719d7fa 1173MODULE_DEVICE_TABLE(of, sun6i_dma_match);
25a37c2f 1174
55585930
MR
1175static int sun6i_dma_probe(struct platform_device *pdev)
1176{
464aa6f5 1177 struct device_node *np = pdev->dev.of_node;
55585930
MR
1178 struct sun6i_dma_dev *sdc;
1179 struct resource *res;
55585930
MR
1180 int ret, i;
1181
1182 sdc = devm_kzalloc(&pdev->dev, sizeof(*sdc), GFP_KERNEL);
1183 if (!sdc)
1184 return -ENOMEM;
1185
8f3b0034
CL
1186 sdc->cfg = of_device_get_match_data(&pdev->dev);
1187 if (!sdc->cfg)
25a37c2f 1188 return -ENODEV;
25a37c2f 1189
55585930
MR
1190 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1191 sdc->base = devm_ioremap_resource(&pdev->dev, res);
1192 if (IS_ERR(sdc->base))
1193 return PTR_ERR(sdc->base);
1194
1195 sdc->irq = platform_get_irq(pdev, 0);
1196 if (sdc->irq < 0) {
1197 dev_err(&pdev->dev, "Cannot claim IRQ\n");
1198 return sdc->irq;
1199 }
1200
1201 sdc->clk = devm_clk_get(&pdev->dev, NULL);
1202 if (IS_ERR(sdc->clk)) {
1203 dev_err(&pdev->dev, "No clock specified\n");
1204 return PTR_ERR(sdc->clk);
1205 }
1206
55585930
MR
1207 sdc->rstc = devm_reset_control_get(&pdev->dev, NULL);
1208 if (IS_ERR(sdc->rstc)) {
1209 dev_err(&pdev->dev, "No reset controller specified\n");
1210 return PTR_ERR(sdc->rstc);
1211 }
1212
1213 sdc->pool = dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
1214 sizeof(struct sun6i_dma_lli), 4, 0);
1215 if (!sdc->pool) {
1216 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1217 return -ENOMEM;
1218 }
1219
1220 platform_set_drvdata(pdev, sdc);
1221 INIT_LIST_HEAD(&sdc->pending);
1222 spin_lock_init(&sdc->lock);
1223
1224 dma_cap_set(DMA_PRIVATE, sdc->slave.cap_mask);
1225 dma_cap_set(DMA_MEMCPY, sdc->slave.cap_mask);
1226 dma_cap_set(DMA_SLAVE, sdc->slave.cap_mask);
a90e173f 1227 dma_cap_set(DMA_CYCLIC, sdc->slave.cap_mask);
55585930
MR
1228
1229 INIT_LIST_HEAD(&sdc->slave.channels);
55585930
MR
1230 sdc->slave.device_free_chan_resources = sun6i_dma_free_chan_resources;
1231 sdc->slave.device_tx_status = sun6i_dma_tx_status;
1232 sdc->slave.device_issue_pending = sun6i_dma_issue_pending;
1233 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
1234 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
a90e173f 1235 sdc->slave.device_prep_dma_cyclic = sun6i_dma_prep_dma_cyclic;
77a68e56 1236 sdc->slave.copy_align = DMAENGINE_ALIGN_4_BYTES;
826b15a7
MR
1237 sdc->slave.device_config = sun6i_dma_config;
1238 sdc->slave.device_pause = sun6i_dma_pause;
1239 sdc->slave.device_resume = sun6i_dma_resume;
1240 sdc->slave.device_terminate_all = sun6i_dma_terminate_all;
d5f6d8cf
SB
1241 sdc->slave.src_addr_widths = sdc->cfg->src_addr_widths;
1242 sdc->slave.dst_addr_widths = sdc->cfg->dst_addr_widths;
1cac81b4
MR
1243 sdc->slave.directions = BIT(DMA_DEV_TO_MEM) |
1244 BIT(DMA_MEM_TO_DEV);
1245 sdc->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
55585930
MR
1246 sdc->slave.dev = &pdev->dev;
1247
500fa9e7
SB
1248 sdc->num_pchans = sdc->cfg->nr_max_channels;
1249 sdc->num_vchans = sdc->cfg->nr_max_vchans;
1250 sdc->max_request = sdc->cfg->nr_max_requests;
1251
464aa6f5
SB
1252 ret = of_property_read_u32(np, "dma-channels", &sdc->num_pchans);
1253 if (ret && !sdc->num_pchans) {
1254 dev_err(&pdev->dev, "Can't get dma-channels.\n");
1255 return ret;
1256 }
1257
1258 ret = of_property_read_u32(np, "dma-requests", &sdc->max_request);
1259 if (ret && !sdc->max_request) {
1260 dev_info(&pdev->dev, "Missing dma-requests, using %u.\n",
1261 DMA_CHAN_MAX_DRQ);
1262 sdc->max_request = DMA_CHAN_MAX_DRQ;
1263 }
1264
1265 /*
1266 * If the number of vchans is not specified, derive it from the
1267 * highest port number, at most one channel per port and direction.
1268 */
1269 if (!sdc->num_vchans)
1270 sdc->num_vchans = 2 * (sdc->max_request + 1);
1271
500fa9e7 1272 sdc->pchans = devm_kcalloc(&pdev->dev, sdc->num_pchans,
55585930
MR
1273 sizeof(struct sun6i_pchan), GFP_KERNEL);
1274 if (!sdc->pchans)
1275 return -ENOMEM;
1276
500fa9e7 1277 sdc->vchans = devm_kcalloc(&pdev->dev, sdc->num_vchans,
55585930
MR
1278 sizeof(struct sun6i_vchan), GFP_KERNEL);
1279 if (!sdc->vchans)
1280 return -ENOMEM;
1281
1282 tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
1283
500fa9e7 1284 for (i = 0; i < sdc->num_pchans; i++) {
55585930
MR
1285 struct sun6i_pchan *pchan = &sdc->pchans[i];
1286
1287 pchan->idx = i;
1288 pchan->base = sdc->base + 0x100 + i * 0x40;
1289 }
1290
500fa9e7 1291 for (i = 0; i < sdc->num_vchans; i++) {
55585930
MR
1292 struct sun6i_vchan *vchan = &sdc->vchans[i];
1293
1294 INIT_LIST_HEAD(&vchan->node);
1295 vchan->vc.desc_free = sun6i_dma_free_desc;
1296 vchan_init(&vchan->vc, &sdc->slave);
1297 }
1298
1299 ret = reset_control_deassert(sdc->rstc);
1300 if (ret) {
1301 dev_err(&pdev->dev, "Couldn't deassert the device from reset\n");
1302 goto err_chan_free;
1303 }
1304
1305 ret = clk_prepare_enable(sdc->clk);
1306 if (ret) {
1307 dev_err(&pdev->dev, "Couldn't enable the clock\n");
1308 goto err_reset_assert;
1309 }
1310
1311 ret = devm_request_irq(&pdev->dev, sdc->irq, sun6i_dma_interrupt, 0,
1312 dev_name(&pdev->dev), sdc);
1313 if (ret) {
1314 dev_err(&pdev->dev, "Cannot request IRQ\n");
1315 goto err_clk_disable;
1316 }
1317
1318 ret = dma_async_device_register(&sdc->slave);
1319 if (ret) {
1320 dev_warn(&pdev->dev, "Failed to register DMA engine device\n");
1321 goto err_irq_disable;
1322 }
1323
1324 ret = of_dma_controller_register(pdev->dev.of_node, sun6i_dma_of_xlate,
1325 sdc);
1326 if (ret) {
1327 dev_err(&pdev->dev, "of_dma_controller_register failed\n");
1328 goto err_dma_unregister;
1329 }
1330
50b12497
SB
1331 if (sdc->cfg->clock_autogate_enable)
1332 sdc->cfg->clock_autogate_enable(sdc);
0b04ddf8 1333
55585930
MR
1334 return 0;
1335
1336err_dma_unregister:
1337 dma_async_device_unregister(&sdc->slave);
1338err_irq_disable:
1339 sun6i_kill_tasklet(sdc);
1340err_clk_disable:
1341 clk_disable_unprepare(sdc->clk);
1342err_reset_assert:
1343 reset_control_assert(sdc->rstc);
1344err_chan_free:
1345 sun6i_dma_free(sdc);
1346 return ret;
1347}
1348
/*
 * Remove: tear down in reverse order of probe — unhook from the OF DMA
 * framework, unregister the dmaengine device, stop the driver tasklets,
 * then gate the clock and put the controller back into reset.
 * devm-managed resources (regs, clk handle, IRQ, pool) are released by
 * the driver core afterwards.
 */
static int sun6i_dma_remove(struct platform_device *pdev)
{
	struct sun6i_dma_dev *sdc = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdc->slave);

	/* No new work can arrive now; quiesce any scheduled tasklets */
	sun6i_kill_tasklet(sdc);

	clk_disable_unprepare(sdc->clk);
	reset_control_assert(sdc->rstc);

	sun6i_dma_free(sdc);

	return 0;
}
1365
55585930
MR
/* Platform driver glue; matching is done via the OF table above. */
static struct platform_driver sun6i_dma_driver = {
	.probe		= sun6i_dma_probe,
	.remove		= sun6i_dma_remove,
	.driver = {
		.name		= "sun6i-dma",
		.of_match_table	= sun6i_dma_match,
	},
};
module_platform_driver(sun6i_dma_driver);

MODULE_DESCRIPTION("Allwinner A31 DMA Controller Driver");
MODULE_AUTHOR("Sugar <shuge@allwinnertech.com>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_LICENSE("GPL");