/*
 * Copyright (C) 2017 Spreadtrum Communications Inc.
 *
 * SPDX-License-Identifier: GPL-2.0
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define SPRD_DMA_CHN_REG_OFFSET		0x1000
#define SPRD_DMA_CHN_REG_LENGTH		0x40
#define SPRD_DMA_MEMCPY_MIN_SIZE	64

/* DMA global registers definition */
#define SPRD_DMA_GLB_PAUSE		0x0
#define SPRD_DMA_GLB_FRAG_WAIT		0x4
#define SPRD_DMA_GLB_REQ_PEND0_EN	0x8
#define SPRD_DMA_GLB_REQ_PEND1_EN	0xc
#define SPRD_DMA_GLB_INT_RAW_STS	0x10
#define SPRD_DMA_GLB_INT_MSK_STS	0x14
#define SPRD_DMA_GLB_REQ_STS		0x18
#define SPRD_DMA_GLB_CHN_EN_STS		0x1c
#define SPRD_DMA_GLB_DEBUG_STS		0x20
#define SPRD_DMA_GLB_ARB_SEL_STS	0x24
#define SPRD_DMA_GLB_2STAGE_GRP1	0x28
#define SPRD_DMA_GLB_2STAGE_GRP2	0x2c
#define SPRD_DMA_GLB_REQ_UID(uid)	(0x4 * ((uid) - 1))
#define SPRD_DMA_GLB_REQ_UID_OFFSET	0x2000

/* DMA channel registers definition */
#define SPRD_DMA_CHN_PAUSE		0x0
#define SPRD_DMA_CHN_REQ		0x4
#define SPRD_DMA_CHN_CFG		0x8
#define SPRD_DMA_CHN_INTC		0xc
#define SPRD_DMA_CHN_SRC_ADDR		0x10
#define SPRD_DMA_CHN_DES_ADDR		0x14
#define SPRD_DMA_CHN_FRG_LEN		0x18
#define SPRD_DMA_CHN_BLK_LEN		0x1c
#define SPRD_DMA_CHN_TRSC_LEN		0x20
#define SPRD_DMA_CHN_TRSF_STEP		0x24
#define SPRD_DMA_CHN_WARP_PTR		0x28
#define SPRD_DMA_CHN_WARP_TO		0x2c
#define SPRD_DMA_CHN_LLIST_PTR		0x30
#define SPRD_DMA_CHN_FRAG_STEP		0x34
#define SPRD_DMA_CHN_SRC_BLK_STEP	0x38
#define SPRD_DMA_CHN_DES_BLK_STEP	0x3c

/* SPRD_DMA_GLB_2STAGE_GRP register definition */
#define SPRD_DMA_GLB_2STAGE_EN		BIT(24)
#define SPRD_DMA_GLB_CHN_INT_MASK	GENMASK(23, 20)
#define SPRD_DMA_GLB_DEST_INT		BIT(22)
#define SPRD_DMA_GLB_SRC_INT		BIT(20)
#define SPRD_DMA_GLB_LIST_DONE_TRG	BIT(19)
#define SPRD_DMA_GLB_TRANS_DONE_TRG	BIT(18)
#define SPRD_DMA_GLB_BLOCK_DONE_TRG	BIT(17)
#define SPRD_DMA_GLB_FRAG_DONE_TRG	BIT(16)
#define SPRD_DMA_GLB_TRG_OFFSET		16
#define SPRD_DMA_GLB_DEST_CHN_MASK	GENMASK(13, 8)
#define SPRD_DMA_GLB_DEST_CHN_OFFSET	8
#define SPRD_DMA_GLB_SRC_CHN_MASK	GENMASK(5, 0)

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_INT_MASK		GENMASK(4, 0)
#define SPRD_DMA_INT_CLR_OFFSET		24
#define SPRD_DMA_FRAG_INT_EN		BIT(0)
#define SPRD_DMA_BLK_INT_EN		BIT(1)
#define SPRD_DMA_TRANS_INT_EN		BIT(2)
#define SPRD_DMA_LIST_INT_EN		BIT(3)
#define SPRD_DMA_CFG_ERR_INT_EN		BIT(4)

/* SPRD_DMA_CHN_CFG register definition */
#define SPRD_DMA_CHN_EN			BIT(0)
#define SPRD_DMA_LINKLIST_EN		BIT(4)
#define SPRD_DMA_WAIT_BDONE_OFFSET	24
#define SPRD_DMA_DONOT_WAIT_BDONE	1

/* SPRD_DMA_CHN_REQ register definition */
#define SPRD_DMA_REQ_EN			BIT(0)

/* SPRD_DMA_CHN_PAUSE register definition */
#define SPRD_DMA_PAUSE_EN		BIT(0)
#define SPRD_DMA_PAUSE_STS		BIT(2)
#define SPRD_DMA_PAUSE_CNT		0x2000

/* DMA_CHN_WARP_* register definition */
#define SPRD_DMA_HIGH_ADDR_MASK		GENMASK(31, 28)
#define SPRD_DMA_LOW_ADDR_MASK		GENMASK(31, 0)
#define SPRD_DMA_HIGH_ADDR_OFFSET	4

/* SPRD_DMA_CHN_INTC register definition */
#define SPRD_DMA_FRAG_INT_STS		BIT(16)
#define SPRD_DMA_BLK_INT_STS		BIT(17)
#define SPRD_DMA_TRSC_INT_STS		BIT(18)
#define SPRD_DMA_LIST_INT_STS		BIT(19)
#define SPRD_DMA_CFGERR_INT_STS		BIT(20)
#define SPRD_DMA_CHN_INT_STS					\
	(SPRD_DMA_FRAG_INT_STS | SPRD_DMA_BLK_INT_STS |		\
	 SPRD_DMA_TRSC_INT_STS | SPRD_DMA_LIST_INT_STS |	\
	 SPRD_DMA_CFGERR_INT_STS)

/* SPRD_DMA_CHN_FRG_LEN register definition */
#define SPRD_DMA_SRC_DATAWIDTH_OFFSET	30
#define SPRD_DMA_DES_DATAWIDTH_OFFSET	28
#define SPRD_DMA_SWT_MODE_OFFSET	26
#define SPRD_DMA_REQ_MODE_OFFSET	24
#define SPRD_DMA_REQ_MODE_MASK		GENMASK(1, 0)
#define SPRD_DMA_FIX_SEL_OFFSET		21
#define SPRD_DMA_FIX_EN_OFFSET		20
#define SPRD_DMA_LLIST_END		BIT(19)
#define SPRD_DMA_FRG_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_BLK_LEN register definition */
#define SPRD_DMA_BLK_LEN_MASK		GENMASK(16, 0)

/* SPRD_DMA_CHN_TRSC_LEN register definition */
#define SPRD_DMA_TRSC_LEN_MASK		GENMASK(27, 0)

/* SPRD_DMA_CHN_TRSF_STEP register definition */
#define SPRD_DMA_DEST_TRSF_STEP_OFFSET	16
#define SPRD_DMA_SRC_TRSF_STEP_OFFSET	0
#define SPRD_DMA_TRSF_STEP_MASK		GENMASK(15, 0)

/* SPRD DMA_SRC_BLK_STEP register definition */
#define SPRD_DMA_LLIST_HIGH_MASK	GENMASK(31, 28)
#define SPRD_DMA_LLIST_HIGH_SHIFT	28

/* define DMA channel mode & trigger mode mask */
#define SPRD_DMA_CHN_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_TRG_MODE_MASK		GENMASK(7, 0)
#define SPRD_DMA_INT_TYPE_MASK		GENMASK(7, 0)

/* define the DMA transfer step type */
#define SPRD_DMA_NONE_STEP		0
#define SPRD_DMA_BYTE_STEP		1
#define SPRD_DMA_SHORT_STEP		2
#define SPRD_DMA_WORD_STEP		4
#define SPRD_DMA_DWORD_STEP		8

#define SPRD_DMA_SOFTWARE_UID		0

/* dma data width values */
enum sprd_dma_datawidth {
	SPRD_DMA_DATAWIDTH_1_BYTE,
	SPRD_DMA_DATAWIDTH_2_BYTES,
	SPRD_DMA_DATAWIDTH_4_BYTES,
	SPRD_DMA_DATAWIDTH_8_BYTES,
};

/* dma channel hardware configuration */
struct sprd_dma_chn_hw {
	u32 pause;
	u32 req;
	u32 cfg;
	u32 intc;
	u32 src_addr;
	u32 des_addr;
	u32 frg_len;
	u32 blk_len;
	u32 trsc_len;
	u32 trsf_step;
	u32 wrap_ptr;
	u32 wrap_to;
	u32 llist_ptr;
	u32 frg_step;
	u32 src_blk_step;
	u32 des_blk_step;
};

/* dma request description */
struct sprd_dma_desc {
	struct virt_dma_desc	vd;
	struct sprd_dma_chn_hw	chn_hw;
	enum dma_transfer_direction dir;
};

/* dma channel description */
struct sprd_dma_chn {
	struct virt_dma_chan	vc;
	void __iomem		*chn_base;
	struct sprd_dma_linklist	linklist;
	struct dma_slave_config	slave_cfg;
	u32			chn_num;
	u32			dev_id;
	enum sprd_dma_chn_mode	chn_mode;
	enum sprd_dma_trg_mode	trg_mode;
	enum sprd_dma_int_type	int_type;
	struct sprd_dma_desc	*cur_desc;
};

/* SPRD dma device */
struct sprd_dma_dev {
	struct dma_device	dma_dev;
	void __iomem		*glb_base;
	struct clk		*clk;
	struct clk		*ashb_clk;
	int			irq;
	u32			total_chns;
	struct sprd_dma_chn	channels[0];
};

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param);
static struct of_dma_filter_info sprd_dma_info = {
	.filter_fn = sprd_dma_filter_fn,
};

static inline struct sprd_dma_chn *to_sprd_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sprd_dma_chn, vc.chan);
}

static inline struct sprd_dma_dev *to_sprd_dma_dev(struct dma_chan *c)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(c);

	return container_of(schan, struct sprd_dma_dev, channels[c->chan_id]);
}

static inline struct sprd_dma_desc *to_sprd_dma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct sprd_dma_desc, vd);
}

static void sprd_dma_glb_update(struct sprd_dma_dev *sdev, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(sdev->glb_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, sdev->glb_base + reg);
}

static void sprd_dma_chn_update(struct sprd_dma_chn *schan, u32 reg,
				u32 mask, u32 val)
{
	u32 orig = readl(schan->chn_base + reg);
	u32 tmp;

	tmp = (orig & ~mask) | val;
	writel(tmp, schan->chn_base + reg);
}

static int sprd_dma_enable(struct sprd_dma_dev *sdev)
{
	int ret;

	ret = clk_prepare_enable(sdev->clk);
	if (ret)
		return ret;

	/*
	 * The ashb_clk is optional and only used by the AGCP DMA controller,
	 * so check whether it needs to be enabled.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		ret = clk_prepare_enable(sdev->ashb_clk);

	return ret;
}

static void sprd_dma_disable(struct sprd_dma_dev *sdev)
{
	clk_disable_unprepare(sdev->clk);

	/*
	 * Check whether the optional ashb_clk for the AGCP DMA needs to be
	 * disabled as well.
	 */
	if (!IS_ERR(sdev->ashb_clk))
		clk_disable_unprepare(sdev->ashb_clk);
}

static void sprd_dma_set_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

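		/*
		 * The request-UID map uses 1-based channel numbers (writing 0
		 * unbinds the UID, as sprd_dma_unset_uid() below does), hence
		 * chn_num + 1.
		 */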
		writel(schan->chn_num + 1, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_unset_uid(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 dev_id = schan->dev_id;

	if (dev_id != SPRD_DMA_SOFTWARE_UID) {
		u32 uid_offset = SPRD_DMA_GLB_REQ_UID_OFFSET +
				 SPRD_DMA_GLB_REQ_UID(dev_id);

		writel(0, sdev->glb_base + uid_offset);
	}
}

static void sprd_dma_clear_int(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_INTC,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET,
			    SPRD_DMA_INT_MASK << SPRD_DMA_INT_CLR_OFFSET);
}

static void sprd_dma_enable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN,
			    SPRD_DMA_CHN_EN);
}

static void sprd_dma_disable_chn(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_CFG, SPRD_DMA_CHN_EN, 0);
}

static void sprd_dma_soft_request(struct sprd_dma_chn *schan)
{
	sprd_dma_chn_update(schan, SPRD_DMA_CHN_REQ, SPRD_DMA_REQ_EN,
			    SPRD_DMA_REQ_EN);
}

static void sprd_dma_pause_resume(struct sprd_dma_chn *schan, bool enable)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 pause, timeout = SPRD_DMA_PAUSE_CNT;

	if (enable) {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, SPRD_DMA_PAUSE_EN);

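		/*
		 * Busy-wait (bounded by SPRD_DMA_PAUSE_CNT reads) until the
		 * hardware reports that the pause has taken effect.
		 */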
		do {
			pause = readl(schan->chn_base + SPRD_DMA_CHN_PAUSE);
			if (pause & SPRD_DMA_PAUSE_STS)
				break;

			cpu_relax();
		} while (--timeout > 0);

		if (!timeout)
			dev_warn(sdev->dma_dev.dev,
				 "pause dma controller timeout\n");
	} else {
		sprd_dma_chn_update(schan, SPRD_DMA_CHN_PAUSE,
				    SPRD_DMA_PAUSE_EN, 0);
	}
}

static void sprd_dma_stop_and_disable(struct sprd_dma_chn *schan)
{
	u32 cfg = readl(schan->chn_base + SPRD_DMA_CHN_CFG);

	if (!(cfg & SPRD_DMA_CHN_EN))
		return;

	sprd_dma_pause_resume(schan, true);
	sprd_dma_disable_chn(schan);
}

static unsigned long sprd_dma_get_src_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_PTR) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static unsigned long sprd_dma_get_dst_addr(struct sprd_dma_chn *schan)
{
	unsigned long addr, addr_high;

	addr = readl(schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	addr_high = readl(schan->chn_base + SPRD_DMA_CHN_WARP_TO) &
		    SPRD_DMA_HIGH_ADDR_MASK;

	return addr | (addr_high << SPRD_DMA_HIGH_ADDR_OFFSET);
}

static enum sprd_dma_int_type sprd_dma_get_int_type(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 intc_sts = readl(schan->chn_base + SPRD_DMA_CHN_INTC) &
		       SPRD_DMA_CHN_INT_STS;

	switch (intc_sts) {
	case SPRD_DMA_CFGERR_INT_STS:
		return SPRD_DMA_CFGERR_INT;

	case SPRD_DMA_LIST_INT_STS:
		return SPRD_DMA_LIST_INT;

	case SPRD_DMA_TRSC_INT_STS:
		return SPRD_DMA_TRANS_INT;

	case SPRD_DMA_BLK_INT_STS:
		return SPRD_DMA_BLK_INT;

	case SPRD_DMA_FRAG_INT_STS:
		return SPRD_DMA_FRAG_INT;

	default:
		dev_warn(sdev->dma_dev.dev, "incorrect dma interrupt type\n");
		return SPRD_DMA_NO_INT;
	}
}

static enum sprd_dma_req_mode sprd_dma_get_req_type(struct sprd_dma_chn *schan)
{
	u32 frag_reg = readl(schan->chn_base + SPRD_DMA_CHN_FRG_LEN);

	return (frag_reg >> SPRD_DMA_REQ_MODE_OFFSET) & SPRD_DMA_REQ_MODE_MASK;
}

static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(&schan->vc.chan);
	u32 val, chn = schan->chn_num + 1;

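	/*
	 * Each 2-stage group register encodes the source channel in its low
	 * bits, the destination channel at SPRD_DMA_GLB_DEST_CHN_OFFSET, the
	 * trigger selection and the 2-stage enable bit; channel numbers are
	 * written 1-based.
	 */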
	switch (schan->chn_mode) {
	case SPRD_DMA_SRC_CHN0:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_SRC_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_SRC_CHN1:
		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_SRC_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	case SPRD_DMA_DST_CHN0:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
			SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_DEST_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
		break;

	case SPRD_DMA_DST_CHN1:
		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
			SPRD_DMA_GLB_DEST_CHN_MASK;
		val |= SPRD_DMA_GLB_2STAGE_EN;
		if (schan->int_type != SPRD_DMA_NO_INT)
			val |= SPRD_DMA_GLB_DEST_INT;

		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
		break;

	default:
		dev_err(sdev->dma_dev.dev, "invalid channel mode setting %d\n",
			schan->chn_mode);
		return -EINVAL;
	}

	return 0;
}

static void sprd_dma_set_chn_config(struct sprd_dma_chn *schan,
				    struct sprd_dma_desc *sdesc)
{
	struct sprd_dma_chn_hw *cfg = &sdesc->chn_hw;

	writel(cfg->pause, schan->chn_base + SPRD_DMA_CHN_PAUSE);
	writel(cfg->cfg, schan->chn_base + SPRD_DMA_CHN_CFG);
	writel(cfg->intc, schan->chn_base + SPRD_DMA_CHN_INTC);
	writel(cfg->src_addr, schan->chn_base + SPRD_DMA_CHN_SRC_ADDR);
	writel(cfg->des_addr, schan->chn_base + SPRD_DMA_CHN_DES_ADDR);
	writel(cfg->frg_len, schan->chn_base + SPRD_DMA_CHN_FRG_LEN);
	writel(cfg->blk_len, schan->chn_base + SPRD_DMA_CHN_BLK_LEN);
	writel(cfg->trsc_len, schan->chn_base + SPRD_DMA_CHN_TRSC_LEN);
	writel(cfg->trsf_step, schan->chn_base + SPRD_DMA_CHN_TRSF_STEP);
	writel(cfg->wrap_ptr, schan->chn_base + SPRD_DMA_CHN_WARP_PTR);
	writel(cfg->wrap_to, schan->chn_base + SPRD_DMA_CHN_WARP_TO);
	writel(cfg->llist_ptr, schan->chn_base + SPRD_DMA_CHN_LLIST_PTR);
	writel(cfg->frg_step, schan->chn_base + SPRD_DMA_CHN_FRAG_STEP);
	writel(cfg->src_blk_step, schan->chn_base + SPRD_DMA_CHN_SRC_BLK_STEP);
	writel(cfg->des_blk_step, schan->chn_base + SPRD_DMA_CHN_DES_BLK_STEP);
	writel(cfg->req, schan->chn_base + SPRD_DMA_CHN_REQ);
}

static void sprd_dma_start(struct sprd_dma_chn *schan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&schan->vc);

	if (!vd)
		return;

	list_del(&vd->node);
	schan->cur_desc = to_sprd_dma_desc(vd);

	/*
	 * Set the 2-stage configuration if the channel starts a 2-stage
	 * transfer.
	 */
	if (schan->chn_mode && sprd_dma_set_2stage_config(schan))
		return;

	/*
	 * Copy the DMA configuration from the DMA descriptor to this hardware
	 * channel.
	 */
	sprd_dma_set_chn_config(schan, schan->cur_desc);
	sprd_dma_set_uid(schan);
	sprd_dma_enable_chn(schan);

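	/*
	 * Only software-requested channels need a manual kick; the
	 * destination channel of a 2-stage transfer is presumably triggered
	 * by the hardware hand-off from its source channel instead.
	 */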
	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
	    schan->chn_mode != SPRD_DMA_DST_CHN0 &&
	    schan->chn_mode != SPRD_DMA_DST_CHN1)
		sprd_dma_soft_request(schan);
}

static void sprd_dma_stop(struct sprd_dma_chn *schan)
{
	sprd_dma_stop_and_disable(schan);
	sprd_dma_unset_uid(schan);
	sprd_dma_clear_int(schan);
	schan->cur_desc = NULL;
}

static bool sprd_dma_check_trans_done(struct sprd_dma_desc *sdesc,
				      enum sprd_dma_int_type int_type,
				      enum sprd_dma_req_mode req_mode)
{
	if (int_type == SPRD_DMA_NO_INT)
		return false;

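	/*
	 * The interrupt-type enum is ordered by transfer granularity, so any
	 * interrupt at or above the level implied by the request mode
	 * (req_mode + 1) means the requested unit of transfer has completed.
	 */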
	if (int_type >= req_mode + 1)
		return true;
	else
		return false;
}

static irqreturn_t dma_irq_handle(int irq, void *dev_id)
{
	struct sprd_dma_dev *sdev = (struct sprd_dma_dev *)dev_id;
	u32 irq_status = readl(sdev->glb_base + SPRD_DMA_GLB_INT_MSK_STS);
	struct sprd_dma_chn *schan;
	struct sprd_dma_desc *sdesc;
	enum sprd_dma_req_mode req_type;
	enum sprd_dma_int_type int_type;
	bool trans_done = false, cyclic = false;
	u32 i;

	while (irq_status) {
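		/*
		 * Service channels starting from the lowest pending interrupt
		 * bit; irq_status &= (irq_status - 1) clears that lowest set
		 * bit.
		 */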
		i = __ffs(irq_status);
		irq_status &= (irq_status - 1);
		schan = &sdev->channels[i];

		spin_lock(&schan->vc.lock);

		sdesc = schan->cur_desc;
		if (!sdesc) {
			spin_unlock(&schan->vc.lock);
			return IRQ_HANDLED;
		}

		int_type = sprd_dma_get_int_type(schan);
		req_type = sprd_dma_get_req_type(schan);
		sprd_dma_clear_int(schan);

		/* cyclic mode schedule callback */
		cyclic = schan->linklist.phy_addr ? true : false;
		if (cyclic == true) {
			vchan_cyclic_callback(&sdesc->vd);
		} else {
			/* Check if the dma request descriptor is done. */
			trans_done = sprd_dma_check_trans_done(sdesc, int_type,
							       req_type);
			if (trans_done == true) {
				vchan_cookie_complete(&sdesc->vd);
				schan->cur_desc = NULL;
				sprd_dma_start(schan);
			}
		}
		spin_unlock(&schan->vc.lock);
	}

	return IRQ_HANDLED;
}

static int sprd_dma_alloc_chan_resources(struct dma_chan *chan)
{
	return pm_runtime_get_sync(chan->device->dev);
}

static void sprd_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_free_chan_resources(&schan->vc);
	pm_runtime_put(chan->device->dev);
}

static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 pos;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&schan->vc.lock, flags);
	vd = vchan_find_desc(&schan->vc, cookie);
	if (vd) {
		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
		struct sprd_dma_chn_hw *hw = &sdesc->chn_hw;

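		/*
		 * The descriptor is still queued: report the full programmed
		 * length (transaction, block or fragment) as the residue.
		 */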
		if (hw->trsc_len > 0)
			pos = hw->trsc_len;
		else if (hw->blk_len > 0)
			pos = hw->blk_len;
		else if (hw->frg_len > 0)
			pos = hw->frg_len;
		else
			pos = 0;
	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
		struct sprd_dma_desc *sdesc = schan->cur_desc;

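		/*
		 * For the descriptor currently in flight, read back the
		 * hardware address pointer on the memory side of the
		 * transfer.
		 */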
		if (sdesc->dir == DMA_DEV_TO_MEM)
			pos = sprd_dma_get_dst_addr(schan);
		else
			pos = sprd_dma_get_src_addr(schan);
	} else {
		pos = 0;
	}
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	dma_set_residue(txstate, pos);
	return ret;
}

static void sprd_dma_issue_pending(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	if (vchan_issue_pending(&schan->vc) && !schan->cur_desc)
		sprd_dma_start(schan);
	spin_unlock_irqrestore(&schan->vc.lock, flags);
}

static int sprd_dma_get_datawidth(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(buswidth) - 1;

	default:
		return -EINVAL;
	}
}

static int sprd_dma_get_step(enum dma_slave_buswidth buswidth)
{
	switch (buswidth) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return buswidth;

	default:
		return -EINVAL;
	}
}

static int sprd_dma_fill_desc(struct dma_chan *chan,
			      struct sprd_dma_chn_hw *hw,
			      unsigned int sglen, int sg_index,
			      dma_addr_t src, dma_addr_t dst, u32 len,
			      enum dma_transfer_direction dir,
			      unsigned long flags,
			      struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_dev *sdev = to_sprd_dma_dev(chan);
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	enum sprd_dma_chn_mode chn_mode = schan->chn_mode;
	u32 req_mode = (flags >> SPRD_DMA_REQ_SHIFT) & SPRD_DMA_REQ_MODE_MASK;
	u32 int_mode = flags & SPRD_DMA_INT_MASK;
	int src_datawidth, dst_datawidth, src_step, dst_step;
	u32 temp, fix_mode = 0, fix_en = 0;
	phys_addr_t llist_ptr;

	if (dir == DMA_MEM_TO_DEV) {
		src_step = sprd_dma_get_step(slave_cfg->src_addr_width);
		if (src_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid source step\n");
			return src_step;
		}

		/*
		 * For a 2-stage transfer, the destination channel step cannot
		 * be 0, since the destination device is the AON IRAM.
		 */
		if (chn_mode == SPRD_DMA_DST_CHN0 ||
		    chn_mode == SPRD_DMA_DST_CHN1)
			dst_step = src_step;
		else
			dst_step = SPRD_DMA_NONE_STEP;
	} else {
		dst_step = sprd_dma_get_step(slave_cfg->dst_addr_width);
		if (dst_step < 0) {
			dev_err(sdev->dma_dev.dev, "invalid destination step\n");
			return dst_step;
		}
		src_step = SPRD_DMA_NONE_STEP;
	}

	src_datawidth = sprd_dma_get_datawidth(slave_cfg->src_addr_width);
	if (src_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid source datawidth\n");
		return src_datawidth;
	}

	dst_datawidth = sprd_dma_get_datawidth(slave_cfg->dst_addr_width);
	if (dst_datawidth < 0) {
		dev_err(sdev->dma_dev.dev, "invalid destination datawidth\n");
		return dst_datawidth;
	}

	if (slave_cfg->slave_id)
		schan->dev_id = slave_cfg->slave_id;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;

	/*
	 * wrap_ptr and wrap_to hold the high 4 bits of the source and
	 * destination addresses.
	 */
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dst >> SPRD_DMA_HIGH_ADDR_OFFSET) & SPRD_DMA_HIGH_ADDR_MASK;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dst & SPRD_DMA_LOW_ADDR_MASK;

	/*
	 * If the src and dst steps are both 0 or both non-zero, fix mode
	 * cannot be enabled; if exactly one of them is 0, it can.
	 */
	if ((src_step != 0 && dst_step != 0) || (src_step | dst_step) == 0) {
		fix_en = 0;
	} else {
		fix_en = 1;
		if (src_step)
			fix_mode = 1;
		else
			fix_mode = 0;
	}

	hw->intc = int_mode | SPRD_DMA_CFG_ERR_INT_EN;

	temp = src_datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= dst_datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= req_mode << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= fix_mode << SPRD_DMA_FIX_SEL_OFFSET;
	temp |= fix_en << SPRD_DMA_FIX_EN_OFFSET;
	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (src_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	/* link-list configuration */
	if (schan->linklist.phy_addr) {
		hw->cfg |= SPRD_DMA_LINKLIST_EN;

		/*
		 * Link-list index: wrap from the last scatterlist entry back
		 * to the first, which makes the configuration list cyclic.
		 */
		temp = sglen ? (sg_index + 1) % sglen : 0;

		/* Next link-list configuration's physical address offset */
		temp = temp * sizeof(*hw) + SPRD_DMA_CHN_SRC_ADDR;
		/*
		 * Make the link-list pointer point to the next link-list
		 * configuration's physical address.
		 */
		llist_ptr = schan->linklist.phy_addr + temp;
		hw->llist_ptr = lower_32_bits(llist_ptr);
		hw->src_blk_step = (upper_32_bits(llist_ptr) << SPRD_DMA_LLIST_HIGH_SHIFT) &
			SPRD_DMA_LLIST_HIGH_MASK;
	} else {
		hw->llist_ptr = 0;
		hw->src_blk_step = 0;
	}

	hw->frg_step = 0;
	hw->des_blk_step = 0;
	return 0;
}

static int sprd_dma_fill_linklist_desc(struct dma_chan *chan,
				       unsigned int sglen, int sg_index,
				       dma_addr_t src, dma_addr_t dst, u32 len,
				       enum dma_transfer_direction dir,
				       unsigned long flags,
				       struct dma_slave_config *slave_cfg)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_chn_hw *hw;

	if (!schan->linklist.virt_addr)
		return -EINVAL;

	hw = (struct sprd_dma_chn_hw *)(schan->linklist.virt_addr +
					sg_index * sizeof(*hw));

	return sprd_dma_fill_desc(chan, hw, sglen, sg_index, src, dst, len,
				  dir, flags, slave_cfg);
}

static struct dma_async_tx_descriptor *
sprd_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct sprd_dma_desc *sdesc;
	struct sprd_dma_chn_hw *hw;
	enum sprd_dma_datawidth datawidth;
	u32 step, temp;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	hw = &sdesc->chn_hw;

	hw->cfg = SPRD_DMA_DONOT_WAIT_BDONE << SPRD_DMA_WAIT_BDONE_OFFSET;
	hw->intc = SPRD_DMA_TRANS_INT | SPRD_DMA_CFG_ERR_INT_EN;
	hw->src_addr = src & SPRD_DMA_LOW_ADDR_MASK;
	hw->des_addr = dest & SPRD_DMA_LOW_ADDR_MASK;
	hw->wrap_ptr = (src >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		       SPRD_DMA_HIGH_ADDR_MASK;
	hw->wrap_to = (dest >> SPRD_DMA_HIGH_ADDR_OFFSET) &
		      SPRD_DMA_HIGH_ADDR_MASK;

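	/* Pick the widest data width and step the length is aligned to. */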
	if (IS_ALIGNED(len, 8)) {
		datawidth = SPRD_DMA_DATAWIDTH_8_BYTES;
		step = SPRD_DMA_DWORD_STEP;
	} else if (IS_ALIGNED(len, 4)) {
		datawidth = SPRD_DMA_DATAWIDTH_4_BYTES;
		step = SPRD_DMA_WORD_STEP;
	} else if (IS_ALIGNED(len, 2)) {
		datawidth = SPRD_DMA_DATAWIDTH_2_BYTES;
		step = SPRD_DMA_SHORT_STEP;
	} else {
		datawidth = SPRD_DMA_DATAWIDTH_1_BYTE;
		step = SPRD_DMA_BYTE_STEP;
	}

	temp = datawidth << SPRD_DMA_SRC_DATAWIDTH_OFFSET;
	temp |= datawidth << SPRD_DMA_DES_DATAWIDTH_OFFSET;
	temp |= SPRD_DMA_TRANS_REQ << SPRD_DMA_REQ_MODE_OFFSET;
	temp |= len & SPRD_DMA_FRG_LEN_MASK;
	hw->frg_len = temp;

	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;

	temp = (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
	temp |= (step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_SRC_TRSF_STEP_OFFSET;
	hw->trsf_step = temp;

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static struct dma_async_tx_descriptor *
sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sglen, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;
	dma_addr_t src = 0, dst = 0;
	dma_addr_t start_src = 0, start_dst = 0;
	struct sprd_dma_desc *sdesc;
	struct scatterlist *sg;
	u32 len = 0;
	int ret, i;

	if (!is_slave_direction(dir))
		return NULL;

	if (context) {
		struct sprd_dma_linklist *ll_cfg =
			(struct sprd_dma_linklist *)context;

		schan->linklist.phy_addr = ll_cfg->phy_addr;
		schan->linklist.virt_addr = ll_cfg->virt_addr;
	} else {
		schan->linklist.phy_addr = 0;
		schan->linklist.virt_addr = 0;
	}

	/*
	 * Set channel mode, interrupt mode and trigger mode for 2-stage
	 * transfer.
	 */
	schan->chn_mode =
		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
	schan->trg_mode =
		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
	schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;

	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
	if (!sdesc)
		return NULL;

	sdesc->dir = dir;

	for_each_sg(sgl, sg, sglen, i) {
		len = sg_dma_len(sg);

		if (dir == DMA_MEM_TO_DEV) {
			src = sg_dma_address(sg);
			dst = slave_cfg->dst_addr;
		} else {
			src = slave_cfg->src_addr;
			dst = sg_dma_address(sg);
		}

		if (!i) {
			start_src = src;
			start_dst = dst;
		}

		/*
		 * The link-list mode needs at least 2 link-list
		 * configurations, so with a single sg there is no link-list
		 * configuration to fill in.
		 */
		if (sglen < 2)
			break;

		ret = sprd_dma_fill_linklist_desc(chan, sglen, i, src, dst, len,
						  dir, flags, slave_cfg);
		if (ret) {
			kfree(sdesc);
			return NULL;
		}
	}

	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, start_src,
				 start_dst, len, dir, flags, slave_cfg);
	if (ret) {
		kfree(sdesc);
		return NULL;
	}

	return vchan_tx_prep(&schan->vc, &sdesc->vd, flags);
}

static int sprd_dma_slave_config(struct dma_chan *chan,
				 struct dma_slave_config *config)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	struct dma_slave_config *slave_cfg = &schan->slave_cfg;

	memcpy(slave_cfg, config, sizeof(*config));
	return 0;
}

static int sprd_dma_pause(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, true);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_resume(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_pause_resume(schan, false);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	return 0;
}

static int sprd_dma_terminate_all(struct dma_chan *chan)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&schan->vc.lock, flags);
	sprd_dma_stop(schan);

	vchan_get_all_descriptors(&schan->vc, &head);
	spin_unlock_irqrestore(&schan->vc.lock, flags);

	vchan_dma_desc_free_list(&schan->vc, &head);
	return 0;
}

static void sprd_dma_free_desc(struct virt_dma_desc *vd)
{
	struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);

	kfree(sdesc);
}

static bool sprd_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct sprd_dma_chn *schan = to_sprd_dma_chan(chan);
	u32 slave_id = *(u32 *)param;

	schan->dev_id = slave_id;
	return true;
}

static int sprd_dma_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct sprd_dma_dev *sdev;
	struct sprd_dma_chn *dma_chn;
	struct resource *res;
	u32 chn_count;
	int ret, i;

	ret = device_property_read_u32(&pdev->dev, "#dma-channels", &chn_count);
	if (ret) {
		dev_err(&pdev->dev, "get dma channels count failed\n");
		return ret;
	}

	sdev = devm_kzalloc(&pdev->dev,
			    struct_size(sdev, channels, chn_count),
			    GFP_KERNEL);
	if (!sdev)
		return -ENOMEM;

	sdev->clk = devm_clk_get(&pdev->dev, "enable");
	if (IS_ERR(sdev->clk)) {
		dev_err(&pdev->dev, "get enable clock failed\n");
		return PTR_ERR(sdev->clk);
	}

	/* ashb clock is optional for AGCP DMA */
	sdev->ashb_clk = devm_clk_get(&pdev->dev, "ashb_eb");
	if (IS_ERR(sdev->ashb_clk))
		dev_warn(&pdev->dev, "no optional ashb eb clock\n");

	/*
	 * We have three DMA controllers: AP DMA, AON DMA and AGCP DMA. The
	 * AGCP DMA controller may or may not request the irq; if it does
	 * not, the system saves power by not being resumed by DMA
	 * interrupts. Thus the DMA interrupts property is optional.
	 */
	sdev->irq = platform_get_irq(pdev, 0);
	if (sdev->irq > 0) {
		ret = devm_request_irq(&pdev->dev, sdev->irq, dma_irq_handle,
				       0, "sprd_dma", (void *)sdev);
		if (ret < 0) {
			dev_err(&pdev->dev, "request dma irq failed\n");
			return ret;
		}
	} else {
		dev_warn(&pdev->dev, "no interrupts for the dma controller\n");
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	sdev->glb_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(sdev->glb_base))
		return PTR_ERR(sdev->glb_base);

	dma_cap_set(DMA_MEMCPY, sdev->dma_dev.cap_mask);
	sdev->total_chns = chn_count;
	sdev->dma_dev.chancnt = chn_count;
	INIT_LIST_HEAD(&sdev->dma_dev.channels);
	INIT_LIST_HEAD(&sdev->dma_dev.global_node);
	sdev->dma_dev.dev = &pdev->dev;
	sdev->dma_dev.device_alloc_chan_resources = sprd_dma_alloc_chan_resources;
	sdev->dma_dev.device_free_chan_resources = sprd_dma_free_chan_resources;
	sdev->dma_dev.device_tx_status = sprd_dma_tx_status;
	sdev->dma_dev.device_issue_pending = sprd_dma_issue_pending;
	sdev->dma_dev.device_prep_dma_memcpy = sprd_dma_prep_dma_memcpy;
	sdev->dma_dev.device_prep_slave_sg = sprd_dma_prep_slave_sg;
	sdev->dma_dev.device_config = sprd_dma_slave_config;
	sdev->dma_dev.device_pause = sprd_dma_pause;
	sdev->dma_dev.device_resume = sprd_dma_resume;
	sdev->dma_dev.device_terminate_all = sprd_dma_terminate_all;

	for (i = 0; i < chn_count; i++) {
		dma_chn = &sdev->channels[i];
		dma_chn->chn_num = i;
		dma_chn->cur_desc = NULL;
		/* Get each channel's register base address. */
		dma_chn->chn_base = sdev->glb_base + SPRD_DMA_CHN_REG_OFFSET +
				    SPRD_DMA_CHN_REG_LENGTH * i;

		dma_chn->vc.desc_free = sprd_dma_free_desc;
		vchan_init(&dma_chn->vc, &sdev->dma_dev);
	}

	platform_set_drvdata(pdev, sdev);
	ret = sprd_dma_enable(sdev);
	if (ret)
		return ret;

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		goto err_rpm;

	ret = dma_async_device_register(&sdev->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "register dma device failed:%d\n", ret);
		goto err_register;
	}

	sprd_dma_info.dma_cap = sdev->dma_dev.cap_mask;
	ret = of_dma_controller_register(np, of_dma_simple_xlate,
					 &sprd_dma_info);
	if (ret)
		goto err_of_register;

	pm_runtime_put(&pdev->dev);
	return 0;

err_of_register:
	dma_async_device_unregister(&sdev->dma_dev);
err_register:
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
err_rpm:
	sprd_dma_disable(sdev);
	return ret;
}

static int sprd_dma_remove(struct platform_device *pdev)
{
	struct sprd_dma_dev *sdev = platform_get_drvdata(pdev);
	struct sprd_dma_chn *c, *cn;
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0)
		return ret;

	/* explicitly free the irq */
	if (sdev->irq > 0)
		devm_free_irq(&pdev->dev, sdev->irq, sdev);

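	/*
	 * With the irq freed, no new completion tasklets can be scheduled,
	 * so the channel tasklets can be killed safely.
	 */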
	list_for_each_entry_safe(c, cn, &sdev->dma_dev.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&sdev->dma_dev);
	sprd_dma_disable(sdev);

	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

static const struct of_device_id sprd_dma_match[] = {
	{ .compatible = "sprd,sc9860-dma", },
	{},
};

static int __maybe_unused sprd_dma_runtime_suspend(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);

	sprd_dma_disable(sdev);
	return 0;
}

static int __maybe_unused sprd_dma_runtime_resume(struct device *dev)
{
	struct sprd_dma_dev *sdev = dev_get_drvdata(dev);
	int ret;

	ret = sprd_dma_enable(sdev);
	if (ret)
		dev_err(sdev->dma_dev.dev, "enable dma failed\n");

	return ret;
}

static const struct dev_pm_ops sprd_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(sprd_dma_runtime_suspend,
			   sprd_dma_runtime_resume,
			   NULL)
};

static struct platform_driver sprd_dma_driver = {
	.probe = sprd_dma_probe,
	.remove = sprd_dma_remove,
	.driver = {
		.name = "sprd-dma",
		.of_match_table = sprd_dma_match,
		.pm = &sprd_dma_pm_ops,
	},
};
module_platform_driver(sprd_dma_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("DMA driver for Spreadtrum");
MODULE_AUTHOR("Baolin Wang <baolin.wang@spreadtrum.com>");
MODULE_AUTHOR("Eric Long <eric.long@spreadtrum.com>");
MODULE_ALIAS("platform:sprd-dma");