drivers/dma/k3dma.c
/*
 * Copyright (c) 2013 - 2015 Linaro Ltd.
 * Copyright (c) 2013 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"k3-dma"
#define DMA_MAX_SIZE		0x1ffc
#define DMA_CYCLIC_MAX_PERIOD	0x1000
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define INT_STAT		0x00
#define INT_TC1			0x04
#define INT_TC2			0x08
#define INT_ERR1		0x0c
#define INT_ERR2		0x10
#define INT_TC1_MASK		0x18
#define INT_TC2_MASK		0x1c
#define INT_ERR1_MASK		0x20
#define INT_ERR2_MASK		0x24
#define INT_TC1_RAW		0x600
#define INT_TC2_RAW		0x608
#define INT_ERR1_RAW		0x610
#define INT_ERR2_RAW		0x618
#define CH_PRI			0x688
#define CH_STAT			0x690
#define CX_CUR_CNT		0x704
#define CX_LLI			0x800
#define CX_CNT1			0x80c
#define CX_CNT0			0x810
#define CX_SRC			0x814
#define CX_DST			0x818
#define CX_CFG			0x81c
#define AXI_CFG			0x820
#define AXI_CFG_DEFAULT		0x201201

#define CX_LLI_CHAIN_EN		0x2
#define CX_CFG_EN		0x1
#define CX_CFG_NODEIRQ		BIT(1)
#define CX_CFG_MEM2PER		(0x1 << 2)
#define CX_CFG_PER2MEM		(0x2 << 2)
#define CX_CFG_SRCINCR		(0x1 << 31)
#define CX_CFG_DSTINCR		(0x1 << 30)

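/*
 * In-memory layout of one hardware link-list item (LLI); __aligned(32)
 * matches the 32-byte alignment of the LLI dma_pool created in probe.
 */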
struct k3_desc_hw {
	u32 lli;
	u32 reserved[3];
	u32 count;
	u32 saddr;
	u32 daddr;
	u32 config;
} __aligned(32);

struct k3_dma_desc_sw {
	struct virt_dma_desc	vd;
	dma_addr_t		desc_hw_lli;
	size_t			desc_num;
	size_t			size;
	struct k3_desc_hw	*desc_hw;
};

struct k3_dma_phy;

struct k3_dma_chan {
	u32			ccfg;
	struct virt_dma_chan	vc;
	struct k3_dma_phy	*phy;
	struct list_head	node;
	enum dma_transfer_direction dir;
	dma_addr_t		dev_addr;
	enum dma_status		status;
	bool			cyclic;
};

struct k3_dma_phy {
	u32			idx;
	void __iomem		*base;
	struct k3_dma_chan	*vchan;
	struct k3_dma_desc_sw	*ds_run;
	struct k3_dma_desc_sw	*ds_done;
};

struct k3_dma_dev {
	struct dma_device	slave;
	void __iomem		*base;
	struct tasklet_struct	task;
	spinlock_t		lock;
	struct list_head	chan_pending;
	struct k3_dma_phy	*phy;
	struct k3_dma_chan	*chans;
	struct clk		*clk;
	struct dma_pool		*pool;
	u32			dma_channels;
	u32			dma_requests;
	unsigned int		irq;
};

#define to_k3_dma(dmadev) container_of(dmadev, struct k3_dma_dev, slave)

static struct k3_dma_chan *to_k3_chan(struct dma_chan *chan)
{
	return container_of(chan, struct k3_dma_chan, vc.chan);
}

static void k3_dma_pause_dma(struct k3_dma_phy *phy, bool on)
{
	u32 val = 0;

	if (on) {
		val = readl_relaxed(phy->base + CX_CFG);
		val |= CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	} else {
		val = readl_relaxed(phy->base + CX_CFG);
		val &= ~CX_CFG_EN;
		writel_relaxed(val, phy->base + CX_CFG);
	}
}

static void k3_dma_terminate_chan(struct k3_dma_phy *phy, struct k3_dma_dev *d)
{
	u32 val = 0;

	k3_dma_pause_dma(phy, false);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + INT_TC1_RAW);
	writel_relaxed(val, d->base + INT_TC2_RAW);
	writel_relaxed(val, d->base + INT_ERR1_RAW);
	writel_relaxed(val, d->base + INT_ERR2_RAW);
}

static void k3_dma_set_desc(struct k3_dma_phy *phy, struct k3_desc_hw *hw)
{
	writel_relaxed(hw->lli, phy->base + CX_LLI);
	writel_relaxed(hw->count, phy->base + CX_CNT0);
	writel_relaxed(hw->saddr, phy->base + CX_SRC);
	writel_relaxed(hw->daddr, phy->base + CX_DST);
	writel_relaxed(AXI_CFG_DEFAULT, phy->base + AXI_CFG);
	writel_relaxed(hw->config, phy->base + CX_CFG);
}

static u32 k3_dma_get_curr_cnt(struct k3_dma_dev *d, struct k3_dma_phy *phy)
{
	u32 cnt = 0;

	cnt = readl_relaxed(d->base + CX_CUR_CNT + phy->idx * 0x10);
	cnt &= 0xffff;
	return cnt;
}

static u32 k3_dma_get_curr_lli(struct k3_dma_phy *phy)
{
	return readl_relaxed(phy->base + CX_LLI);
}

static u32 k3_dma_get_chan_stat(struct k3_dma_dev *d)
{
	return readl_relaxed(d->base + CH_STAT);
}

static void k3_dma_enable_dma(struct k3_dma_dev *d, bool on)
{
	if (on) {
		/* set same priority */
		writel_relaxed(0x0, d->base + CH_PRI);

		/* unmask irq */
		writel_relaxed(0xffff, d->base + INT_TC1_MASK);
		writel_relaxed(0xffff, d->base + INT_TC2_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR1_MASK);
		writel_relaxed(0xffff, d->base + INT_ERR2_MASK);
	} else {
		/* mask irq */
		writel_relaxed(0x0, d->base + INT_TC1_MASK);
		writel_relaxed(0x0, d->base + INT_TC2_MASK);
		writel_relaxed(0x0, d->base + INT_ERR1_MASK);
		writel_relaxed(0x0, d->base + INT_ERR2_MASK);
	}
}

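/*
 * Interrupt handler: TC1 fires when a whole transfer chain completes, TC2
 * fires per period on cyclic transfers, ERR1/ERR2 report errors. Completed
 * channels are collected in irq_chan and the tasklet is scheduled to start
 * any queued work.
 */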
static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)dev_id;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c;
	u32 stat = readl_relaxed(d->base + INT_STAT);
	u32 tc1  = readl_relaxed(d->base + INT_TC1);
	u32 tc2  = readl_relaxed(d->base + INT_TC2);
	u32 err1 = readl_relaxed(d->base + INT_ERR1);
	u32 err2 = readl_relaxed(d->base + INT_ERR2);
	u32 i, irq_chan = 0;

	while (stat) {
		i = __ffs(stat);
		stat &= ~BIT(i);
		if (likely(tc1 & BIT(i)) || (tc2 & BIT(i))) {
			unsigned long flags;

			p = &d->phy[i];
			c = p->vchan;
			if (c && (tc1 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				vchan_cookie_complete(&p->ds_run->vd);
				p->ds_done = p->ds_run;
				p->ds_run = NULL;
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			if (c && (tc2 & BIT(i))) {
				spin_lock_irqsave(&c->vc.lock, flags);
				if (p->ds_run != NULL)
					vchan_cyclic_callback(&p->ds_run->vd);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
			irq_chan |= BIT(i);
		}
		if (unlikely((err1 & BIT(i)) || (err2 & BIT(i))))
			dev_warn(d->slave.dev, "DMA ERR\n");
	}

	writel_relaxed(irq_chan, d->base + INT_TC1_RAW);
	writel_relaxed(irq_chan, d->base + INT_TC2_RAW);
	writel_relaxed(err1, d->base + INT_ERR1_RAW);
	writel_relaxed(err2, d->base + INT_ERR2_RAW);

	if (irq_chan)
		tasklet_schedule(&d->task);

	if (irq_chan || err1 || err2)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

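/*
 * Fetch the next issued descriptor of a virtual channel and program it into
 * its physical channel. Returns -EAGAIN if no physical channel is attached,
 * the channel is still busy, or there is nothing left to issue.
 */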
static int k3_dma_start_txd(struct k3_dma_chan *c)
{
	struct k3_dma_dev *d = to_k3_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct k3_dma_desc_sw *ds =
			container_of(vd, struct k3_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);

		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		k3_dma_set_desc(c->phy, &ds->desc_hw[0]);
		return 0;
	}
	c->phy->ds_run = NULL;
	c->phy->ds_done = NULL;
	return -EAGAIN;
}

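/*
 * Deferred work scheduled from the interrupt handler: restart physical
 * channels whose descriptor completed (releasing them if their virtual
 * channel has nothing more to run), then hand free physical channels to
 * virtual channels waiting on d->chan_pending and start their first
 * descriptor.
 */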
static void k3_dma_tasklet(unsigned long arg)
{
	struct k3_dma_dev *d = (struct k3_dma_dev *)arg;
	struct k3_dma_phy *p;
	struct k3_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		spin_lock_irq(&c->vc.lock);
		p = c->phy;
		if (p && p->ds_done) {
			if (k3_dma_start_txd(c)) {
				/* No current txd associated with this channel */
				dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
				/* Mark this channel free */
				c->phy = NULL;
				p->vchan = NULL;
			}
		}
		spin_unlock_irq(&c->vc.lock);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irq(&d->lock);
	for (pch = 0; pch < d->dma_channels; pch++) {
		p = &d->phy[pch];

		if (p->vchan == NULL && !list_empty(&d->chan_pending)) {
			c = list_first_entry(&d->chan_pending,
				struct k3_dma_chan, node);
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << pch;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
			dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc);
		}
	}
	spin_unlock_irq(&d->lock);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irq(&c->vc.lock);
				k3_dma_start_txd(c);
				spin_unlock_irq(&c->vc.lock);
			}
		}
	}
}

static void k3_dma_free_chan_resources(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

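/*
 * Report cookie status and residue: a descriptor still on the issued list
 * counts in full; for the running descriptor the residue is the remaining
 * count of the current LLI plus the counts of all LLIs not yet processed.
 */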
static enum dma_status k3_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd && !c->cyclic) {
		bytes = container_of(vd, struct k3_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct k3_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = k3_dma_get_curr_cnt(d, p);
		clli = k3_dma_get_curr_lli(p);
		index = ((clli - ds->desc_hw_lli) /
				sizeof(struct k3_desc_hw)) + 1;
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].count;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

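/*
 * Move cookies from the submitted to the issued list and, if the channel
 * has no physical channel yet, queue it on d->chan_pending and schedule the
 * tasklet to allocate one.
 */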
static void k3_dma_issue_pending(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy) {
			if (list_empty(&c->node)) {
				/* if new channel, add chan_pending */
				list_add_tail(&c->node, &d->chan_pending);
				/* check in tasklet */
				tasklet_schedule(&d->task);
				dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
			}
		}
		spin_unlock(&d->lock);
	} else
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

static void k3_dma_fill_desc(struct k3_dma_desc_sw *ds, dma_addr_t dst,
			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if (num != ds->desc_num - 1)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct k3_desc_hw);

	ds->desc_hw[num].lli |= CX_LLI_CHAIN_EN;
	ds->desc_hw[num].count = len;
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].config = ccfg;
}

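/*
 * Allocate a software descriptor together with a block of 'num' hardware
 * LLIs from the per-device dma_pool; the pool block size caps the number of
 * LLIs per descriptor at LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw).
 */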
static struct k3_dma_desc_sw *k3_dma_alloc_desc_resource(int num,
					struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct k3_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_NOWAIT);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

static struct dma_async_tx_descriptor *k3_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	ds->size = len;
	num = 0;

	if (!c->ccfg) {
		/* default is memtomem, without calling device_config */
		c->ccfg = CX_CFG_SRCINCR | CX_CFG_DSTINCR | CX_CFG_EN;
		c->ccfg |= (0xf << 20) | (0xf << 24);	/* burst = 16 */
		c->ccfg |= (0x3 << 12) | (0x3 << 16);	/* width = 64 bit */
	}

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		k3_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		if (c->dir == DMA_MEM_TO_DEV) {
			src += copy;
		} else if (c->dir == DMA_DEV_TO_MEM) {
			dst += copy;
		} else {
			src += copy;
			dst += copy;
		}
		len -= copy;
	} while (len);

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

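/*
 * Build a slave transfer from a scatterlist: each sg entry is split into
 * LLIs of at most DMA_MAX_SIZE bytes, with the device address taken from
 * the slave configuration stored by k3_dma_config().
 */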
static struct dma_async_tx_descriptor *k3_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (sgl == NULL)
		return NULL;

	c->cyclic = 0;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;
	num = 0;

	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num-1].lli = 0;	/* end of link */
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

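/*
 * Build a cyclic transfer: the buffer is split into LLIs no longer than the
 * period (capped at DMA_CYCLIC_MAX_PERIOD), the LLI that ends each period
 * requests a TC2 interrupt, and the final LLI points back to the first so
 * the chain loops until the channel is terminated.
 */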
static struct dma_async_tx_descriptor *
k3_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
		       size_t buf_len, size_t period_len,
		       enum dma_transfer_direction dir,
		       unsigned long flags)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	dma_addr_t addr, src = 0, dst = 0;
	int num = 1, since = 0;
	size_t modulo = DMA_CYCLIC_MAX_PERIOD;
	u32 en_tc2 = 0;

	dev_dbg(chan->device->dev, "%s: buf %pad, dst %pad, buf len %zu, period_len = %zu, dir %d\n",
		__func__, &buf_addr, &to_k3_chan(chan)->dev_addr,
		buf_len, period_len, (int)dir);

	avail = buf_len;
	if (avail > modulo)
		num += DIV_ROUND_UP(avail, modulo) - 1;

	ds = k3_dma_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 1;
	addr = buf_addr;
	avail = buf_len;
	total = avail;
	num = 0;

	if (period_len < modulo)
		modulo = period_len;

	do {
		len = min_t(size_t, avail, modulo);

		if (dir == DMA_MEM_TO_DEV) {
			src = addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = addr;
		}
		since += len;
		if (since >= period_len) {
			/* descriptor asks for TC2 interrupt on completion */
			en_tc2 = CX_CFG_NODEIRQ;
			since -= period_len;
		} else
			en_tc2 = 0;

		k3_dma_fill_desc(ds, dst, src, len, num++, c->ccfg | en_tc2);

		addr += len;
		avail -= len;
	} while (avail);

	/* "Cyclic" == end of link points back to start of link */
	ds->desc_hw[num - 1].lli |= ds->desc_hw_lli;

	ds->size = total;

	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

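/*
 * Apply the slave configuration: record the device-side address and encode
 * bus width, burst length and the request line into the channel control
 * word (c->ccfg) used when LLIs are filled in.
 */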
static int k3_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	u32 maxburst = 0, val = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (cfg == NULL)
		return -EINVAL;
	c->dir = cfg->direction;
	if (c->dir == DMA_DEV_TO_MEM) {
		c->ccfg = CX_CFG_DSTINCR;
		c->dev_addr = cfg->src_addr;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
	} else if (c->dir == DMA_MEM_TO_DEV) {
		c->ccfg = CX_CFG_SRCINCR;
		c->dev_addr = cfg->dst_addr;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
	}
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		val = __ffs(width);
		break;
	default:
		val = 3;
		break;
	}
	c->ccfg |= (val << 12) | (val << 16);

	if ((maxburst == 0) || (maxburst > 16))
		val = 15;
	else
		val = maxburst - 1;
	c->ccfg |= (val << 20) | (val << 24);
	c->ccfg |= CX_CFG_MEM2PER | CX_CFG_EN;

	/* specific request line */
	c->ccfg |= c->vc.chan.chan_id << 4;

	return 0;
}

static void k3_dma_free_desc(struct virt_dma_desc *vd)
{
	struct k3_dma_desc_sw *ds =
		container_of(vd, struct k3_dma_desc_sw, vd);
	struct k3_dma_dev *d = to_k3_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

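/*
 * Stop a channel: take it off the pending list, halt and clear the physical
 * channel if one is attached, and free every descriptor still queued on the
 * virtual channel.
 */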
static int k3_dma_terminate_all(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		k3_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		if (p->ds_run) {
			k3_dma_free_desc(&p->ds_run->vd);
			p->ds_run = NULL;
		}
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static int k3_dma_transfer_pause(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;

	dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc);
	if (c->status == DMA_IN_PROGRESS) {
		c->status = DMA_PAUSED;
		if (p) {
			k3_dma_pause_dma(p, false);
		} else {
			spin_lock(&d->lock);
			list_del_init(&c->node);
			spin_unlock(&d->lock);
		}
	}

	return 0;
}

static int k3_dma_transfer_resume(struct dma_chan *chan)
{
	struct k3_dma_chan *c = to_k3_chan(chan);
	struct k3_dma_dev *d = to_k3_dma(chan->device);
	struct k3_dma_phy *p = c->phy;
	unsigned long flags;

	dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc);
	spin_lock_irqsave(&c->vc.lock, flags);
	if (c->status == DMA_PAUSED) {
		c->status = DMA_IN_PROGRESS;
		if (p) {
			k3_dma_pause_dma(p, true);
		} else if (!list_empty(&c->vc.desc_issued)) {
			spin_lock(&d->lock);
			list_add_tail(&c->node, &d->chan_pending);
			spin_unlock(&d->lock);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return 0;
}

static const struct of_device_id k3_pdma_dt_ids[] = {
	{ .compatible = "hisilicon,k3-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, k3_pdma_dt_ids);

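/*
 * of_dma translation: the single cell of the DT dma specifier selects the
 * request line, which maps one-to-one onto a virtual channel.
 */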
static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	struct k3_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];

	if (request > d->dma_requests)
		return NULL;

	return dma_get_slave_channel(&(d->chans[request].vc.chan));
}

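/*
 * Probe: map the registers, read "dma-channels" and "dma-requests" from the
 * device tree, create the LLI dma_pool, set up the physical and virtual
 * channel arrays, then register the dmaengine device and the OF translation
 * hook.
 */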
static int k3_dma_probe(struct platform_device *op)
{
	struct k3_dma_dev *d;
	const struct of_device_id *of_id;
	struct resource *iores;
	int i, ret, irq = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_id = of_match_device(k3_pdma_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32((&op->dev)->of_node,
				"dma-channels", &d->dma_channels);
		of_property_read_u32((&op->dev)->of_node,
				"dma-requests", &d->dma_requests);
	}

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, irq,
			k3_dma_int_handler, 0, DRIVER_NAME, d);
	if (ret)
		return ret;

	d->irq = irq;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
					LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kzalloc(&op->dev,
		d->dma_channels * sizeof(struct k3_dma_phy), GFP_KERNEL);
	if (d->phy == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct k3_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = k3_dma_free_chan_resources;
	d->slave.device_tx_status = k3_dma_tx_status;
	d->slave.device_prep_dma_memcpy = k3_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = k3_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = k3_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = k3_dma_issue_pending;
	d->slave.device_config = k3_dma_config;
	d->slave.device_pause = k3_dma_transfer_pause;
	d->slave.device_resume = k3_dma_transfer_resume;
	d->slave.device_terminate_all = k3_dma_terminate_all;
	d->slave.copy_align = DMAENGINE_ALIGN_8_BYTES;

	/* init virtual channel */
	d->chans = devm_kzalloc(&op->dev,
		d->dma_requests * sizeof(struct k3_dma_chan), GFP_KERNEL);
	if (d->chans == NULL)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct k3_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = k3_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}

	k3_dma_enable_dma(d, true);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto dma_async_register_fail;

	ret = of_dma_controller_register((&op->dev)->of_node,
					k3_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	tasklet_init(&d->task, k3_dma_tasklet, (unsigned long)d);
	platform_set_drvdata(op, d);
	dev_info(&op->dev, "initialized\n");

	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
dma_async_register_fail:
	clk_disable_unprepare(d->clk);
	return ret;
}

static int k3_dma_remove(struct platform_device *op)
{
	struct k3_dma_chan *c, *cn;
	struct k3_dma_dev *d = platform_get_drvdata(op);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	devm_free_irq(&op->dev, d->irq, d);

	list_for_each_entry_safe(c, cn, &d->slave.channels, vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
	tasklet_kill(&d->task);
	clk_disable_unprepare(d->clk);
	return 0;
}

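/*
 * System sleep support: suspend refuses to run while any physical channel
 * is still busy, otherwise it masks interrupts and gates the clock; resume
 * re-enables the clock and unmasks interrupts.
 */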
#ifdef CONFIG_PM_SLEEP
static int k3_dma_suspend_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = k3_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			"chan %d is running fail to suspend\n", stat);
		return -1;
	}
	k3_dma_enable_dma(d, false);
	clk_disable_unprepare(d->clk);
	return 0;
}

static int k3_dma_resume_dev(struct device *dev)
{
	struct k3_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	k3_dma_enable_dma(d, true);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend_dev, k3_dma_resume_dev);

static struct platform_driver k3_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &k3_dma_pmops,
		.of_match_table = k3_pdma_dt_ids,
	},
	.probe		= k3_dma_probe,
	.remove		= k3_dma_remove,
};

module_platform_driver(k3_pdma_driver);

MODULE_DESCRIPTION("Hisilicon k3 DMA Driver");
MODULE_ALIAS("platform:k3dma");
MODULE_LICENSE("GPL v2");