/*
 * Copyright 2015 Linaro.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/sched.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/of_dma.h>

#include "virt-dma.h"

#define DRIVER_NAME		"zx-dma"
#define DMA_ALIGN		4
#define DMA_MAX_SIZE		(0x10000 - PAGE_SIZE)
#define LLI_BLOCK_SIZE		(4 * PAGE_SIZE)

#define REG_ZX_SRC_ADDR			0x00
#define REG_ZX_DST_ADDR			0x04
#define REG_ZX_TX_X_COUNT		0x08
#define REG_ZX_TX_ZY_COUNT		0x0c
#define REG_ZX_SRC_ZY_STEP		0x10
#define REG_ZX_DST_ZY_STEP		0x14
#define REG_ZX_LLI_ADDR			0x1c
#define REG_ZX_CTRL			0x20
#define REG_ZX_TC_IRQ			0x800
#define REG_ZX_SRC_ERR_IRQ		0x804
#define REG_ZX_DST_ERR_IRQ		0x808
#define REG_ZX_CFG_ERR_IRQ		0x80c
#define REG_ZX_TC_IRQ_RAW		0x810
#define REG_ZX_SRC_ERR_IRQ_RAW		0x814
#define REG_ZX_DST_ERR_IRQ_RAW		0x818
#define REG_ZX_CFG_ERR_IRQ_RAW		0x81c
#define REG_ZX_STATUS			0x820
#define REG_ZX_DMA_GRP_PRIO		0x824
#define REG_ZX_DMA_ARB			0x828

#define ZX_FORCE_CLOSE			BIT(31)
#define ZX_DST_BURST_WIDTH(x)		(((x) & 0x7) << 13)
#define ZX_MAX_BURST_LEN		16
#define ZX_SRC_BURST_LEN(x)		(((x) & 0xf) << 9)
#define ZX_SRC_BURST_WIDTH(x)		(((x) & 0x7) << 6)
#define ZX_IRQ_ENABLE_ALL		(3 << 4)
#define ZX_DST_FIFO_MODE		BIT(3)
#define ZX_SRC_FIFO_MODE		BIT(2)
#define ZX_SOFT_REQ			BIT(1)
#define ZX_CH_ENABLE			BIT(0)

#define ZX_DMA_BUSWIDTHS \
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum zx_dma_burst_width {
	ZX_DMA_WIDTH_8BIT = 0,
	ZX_DMA_WIDTH_16BIT = 1,
	ZX_DMA_WIDTH_32BIT = 2,
	ZX_DMA_WIDTH_64BIT = 3,
};

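/*
 * Hardware link-list item (LLI). The field layout mirrors the per-channel
 * register block (src/dst address, counts, steps, next-LLI pointer, control
 * word), and the trailing reserved[] pads each item to the size of that
 * register region, per the original packing comment below.
 */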
struct zx_desc_hw {
	u32 saddr;
	u32 daddr;
	u32 src_x;
	u32 src_zy;
	u32 src_zy_step;
	u32 dst_zy_step;
	u32 reserved1;
	u32 lli;
	u32 ctr;
	u32 reserved[7]; /* pack as hardware registers region size */
} __aligned(32);

struct zx_dma_desc_sw {
	struct virt_dma_desc vd;
	dma_addr_t desc_hw_lli;
	size_t desc_num;
	size_t size;
	struct zx_desc_hw *desc_hw;
};

struct zx_dma_phy;

struct zx_dma_chan {
	struct dma_slave_config slave_cfg;
	int id; /* Request phy chan id */
	u32 ccfg;
	u32 cyclic;
	struct virt_dma_chan vc;
	struct zx_dma_phy *phy;
	struct list_head node;
	dma_addr_t dev_addr;
	enum dma_status status;
};

struct zx_dma_phy {
	u32 idx;
	void __iomem *base;
	struct zx_dma_chan *vchan;
	struct zx_dma_desc_sw *ds_run;
	struct zx_dma_desc_sw *ds_done;
};

struct zx_dma_dev {
	struct dma_device slave;
	void __iomem *base;
	spinlock_t lock; /* lock for ch and phy */
	struct list_head chan_pending;
	struct zx_dma_phy *phy;
	struct zx_dma_chan *chans;
	struct clk *clk;
	struct dma_pool *pool;
	u32 dma_channels;
	u32 dma_requests;
	int irq;
};

#define to_zx_dma(dmadev) container_of(dmadev, struct zx_dma_dev, slave)

static struct zx_dma_chan *to_zx_chan(struct dma_chan *chan)
{
	return container_of(chan, struct zx_dma_chan, vc.chan);
}

static void zx_dma_terminate_chan(struct zx_dma_phy *phy, struct zx_dma_dev *d)
{
	u32 val = 0;

	val = readl_relaxed(phy->base + REG_ZX_CTRL);
	val &= ~ZX_CH_ENABLE;
	val |= ZX_FORCE_CLOSE;
	writel_relaxed(val, phy->base + REG_ZX_CTRL);

	val = 0x1 << phy->idx;
	writel_relaxed(val, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(val, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

static void zx_dma_set_desc(struct zx_dma_phy *phy, struct zx_desc_hw *hw)
{
	writel_relaxed(hw->saddr, phy->base + REG_ZX_SRC_ADDR);
	writel_relaxed(hw->daddr, phy->base + REG_ZX_DST_ADDR);
	writel_relaxed(hw->src_x, phy->base + REG_ZX_TX_X_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_TX_ZY_COUNT);
	writel_relaxed(0, phy->base + REG_ZX_SRC_ZY_STEP);
	writel_relaxed(0, phy->base + REG_ZX_DST_ZY_STEP);
	writel_relaxed(hw->lli, phy->base + REG_ZX_LLI_ADDR);
	writel_relaxed(hw->ctr, phy->base + REG_ZX_CTRL);
}

static u32 zx_dma_get_curr_lli(struct zx_dma_phy *phy)
{
	return readl_relaxed(phy->base + REG_ZX_LLI_ADDR);
}

static u32 zx_dma_get_chan_stat(struct zx_dma_dev *d)
{
	return readl_relaxed(d->base + REG_ZX_STATUS);
}

static void zx_dma_init_state(struct zx_dma_dev *d)
{
	/* set same priority */
	writel_relaxed(0x0, d->base + REG_ZX_DMA_ARB);
	/* clear all irq */
	writel_relaxed(0xffffffff, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(0xffffffff, d->base + REG_ZX_CFG_ERR_IRQ_RAW);
}

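/*
 * Take the next descriptor from vc->desc_issued and program it into the
 * physical channel. Fails with -EAGAIN if the vchan has no phy bound, the
 * phy is still busy, or there is nothing left to issue.
 */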
static int zx_dma_start_txd(struct zx_dma_chan *c)
{
	struct zx_dma_dev *d = to_zx_dma(c->vc.chan.device);
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);

	if (!c->phy)
		return -EAGAIN;

	if (BIT(c->phy->idx) & zx_dma_get_chan_stat(d))
		return -EAGAIN;

	if (vd) {
		struct zx_dma_desc_sw *ds =
			container_of(vd, struct zx_dma_desc_sw, vd);
		/*
		 * fetch and remove request from vc->desc_issued
		 * so vc->desc_issued only contains desc pending
		 */
		list_del(&ds->vd.node);
		c->phy->ds_run = ds;
		c->phy->ds_done = NULL;
		/* start dma */
		zx_dma_set_desc(c->phy, ds->desc_hw);
		return 0;
	}
	c->phy->ds_done = NULL;
	c->phy->ds_run = NULL;
	return -EAGAIN;
}

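/*
 * Channel scheduler: free physical channels whose descriptor chain has
 * completed, bind pending virtual channels (d->chan_pending) to their
 * physical channel when it is idle, then start a transfer on every phy
 * allocated in this pass.
 */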
static void zx_dma_task(struct zx_dma_dev *d)
{
	struct zx_dma_phy *p;
	struct zx_dma_chan *c, *cn;
	unsigned pch, pch_alloc = 0;
	unsigned long flags;

	/* check new dma request of running channel in vc->desc_issued */
	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		spin_lock_irqsave(&c->vc.lock, flags);
		p = c->phy;
		if (p && p->ds_done && zx_dma_start_txd(c)) {
			/* No current txd associated with this channel */
			dev_dbg(d->slave.dev, "pchan %u: free\n", p->idx);
			/* Mark this channel free */
			c->phy = NULL;
			p->vchan = NULL;
		}
		spin_unlock_irqrestore(&c->vc.lock, flags);
	}

	/* check new channel request in d->chan_pending */
	spin_lock_irqsave(&d->lock, flags);
	while (!list_empty(&d->chan_pending)) {
		c = list_first_entry(&d->chan_pending,
				     struct zx_dma_chan, node);
		p = &d->phy[c->id];
		if (!p->vchan) {
			/* remove from d->chan_pending */
			list_del_init(&c->node);
			pch_alloc |= 1 << c->id;
			/* Mark this channel allocated */
			p->vchan = c;
			c->phy = p;
		} else {
			dev_dbg(d->slave.dev, "pchan %u: busy!\n", c->id);
			/*
			 * The head entry's phy is still busy; stop scanning
			 * rather than spinning here with d->lock held. The
			 * entry stays on chan_pending and is retried on the
			 * next scheduling pass.
			 */
			break;
		}
	}
	spin_unlock_irqrestore(&d->lock, flags);

	for (pch = 0; pch < d->dma_channels; pch++) {
		if (pch_alloc & (1 << pch)) {
			p = &d->phy[pch];
			c = p->vchan;
			if (c) {
				spin_lock_irqsave(&c->vc.lock, flags);
				zx_dma_start_txd(c);
				spin_unlock_irqrestore(&c->vc.lock, flags);
			}
		}
	}
}

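/*
 * Single shared interrupt for all channels. Each set terminal-count bit
 * either signals a cyclic period (vchan_cyclic_callback) or completes the
 * running descriptor; error status is only reported. All handled raw bits
 * are acknowledged before the scheduler is re-run.
 */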
static irqreturn_t zx_dma_int_handler(int irq, void *dev_id)
{
	struct zx_dma_dev *d = (struct zx_dma_dev *)dev_id;
	struct zx_dma_phy *p;
	struct zx_dma_chan *c;
	u32 tc = readl_relaxed(d->base + REG_ZX_TC_IRQ);
	u32 serr = readl_relaxed(d->base + REG_ZX_SRC_ERR_IRQ);
	u32 derr = readl_relaxed(d->base + REG_ZX_DST_ERR_IRQ);
	u32 cfg = readl_relaxed(d->base + REG_ZX_CFG_ERR_IRQ);
	u32 i, irq_chan = 0, task = 0;

	while (tc) {
		i = __ffs(tc);
		tc &= ~BIT(i);
		p = &d->phy[i];
		c = p->vchan;
		if (c) {
			unsigned long flags;

			spin_lock_irqsave(&c->vc.lock, flags);
			if (c->cyclic) {
				vchan_cyclic_callback(&p->ds_run->vd);
			} else {
				vchan_cookie_complete(&p->ds_run->vd);
				p->ds_done = p->ds_run;
				task = 1;
			}
			spin_unlock_irqrestore(&c->vc.lock, flags);
			irq_chan |= BIT(i);
		}
	}

	if (serr || derr || cfg)
		dev_warn(d->slave.dev, "DMA ERR src 0x%x, dst 0x%x, cfg 0x%x\n",
			 serr, derr, cfg);

	writel_relaxed(irq_chan, d->base + REG_ZX_TC_IRQ_RAW);
	writel_relaxed(serr, d->base + REG_ZX_SRC_ERR_IRQ_RAW);
	writel_relaxed(derr, d->base + REG_ZX_DST_ERR_IRQ_RAW);
	writel_relaxed(cfg, d->base + REG_ZX_CFG_ERR_IRQ_RAW);

	if (task)
		zx_dma_task(d);
	return IRQ_HANDLED;
}

static void zx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	list_del_init(&c->node);
	spin_unlock_irqrestore(&d->lock, flags);

	vchan_free_chan_resources(&c->vc);
	c->ccfg = 0;
}

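/*
 * Residue reporting: a cookie still sitting on the issued list counts in
 * full; for the descriptor currently running, walk the LLI chain from the
 * controller's current position and sum the bytes not yet transferred.
 */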
static enum dma_status zx_dma_tx_status(struct dma_chan *chan,
					dma_cookie_t cookie,
					struct dma_tx_state *state)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_phy *p;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	size_t bytes = 0;

	ret = dma_cookie_status(&c->vc.chan, cookie, state);
	if (ret == DMA_COMPLETE || !state)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	p = c->phy;
	ret = c->status;

	/*
	 * If the cookie is on our issue queue, then the residue is
	 * its total size.
	 */
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		bytes = container_of(vd, struct zx_dma_desc_sw, vd)->size;
	} else if ((!p) || (!p->ds_run)) {
		bytes = 0;
	} else {
		struct zx_dma_desc_sw *ds = p->ds_run;
		u32 clli = 0, index = 0;

		bytes = 0;
		clli = zx_dma_get_curr_lli(p);
		index = (clli - ds->desc_hw_lli) / sizeof(struct zx_desc_hw);
		for (; index < ds->desc_num; index++) {
			bytes += ds->desc_hw[index].src_x;
			/* end of lli */
			if (!ds->desc_hw[index].lli)
				break;
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	dma_set_residue(state, bytes);
	return ret;
}

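/*
 * Move submitted descriptors onto vc->desc_issued and, if this vchan has
 * no physical channel yet, queue it on d->chan_pending; the scheduler is
 * then kicked outside the vc lock.
 */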
static void zx_dma_issue_pending(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	unsigned long flags;
	int issue = 0;

	spin_lock_irqsave(&c->vc.lock, flags);
	/* add request to vc->desc_issued */
	if (vchan_issue_pending(&c->vc)) {
		spin_lock(&d->lock);
		if (!c->phy && list_empty(&c->node)) {
			/* if new channel, add chan_pending */
			list_add_tail(&c->node, &d->chan_pending);
			issue = 1;
			dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc);
		}
		spin_unlock(&d->lock);
	} else {
		dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc);
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);

	if (issue)
		zx_dma_task(d);
}

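/*
 * Fill LLI slot @num and pre-link it to slot @num + 1; the prep routines
 * later clear the last slot's lli pointer (or, for cyclic transfers, point
 * it back at the head) to close the chain.
 */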
static void zx_dma_fill_desc(struct zx_dma_desc_sw *ds, dma_addr_t dst,
			     dma_addr_t src, size_t len, u32 num, u32 ccfg)
{
	if ((num + 1) < ds->desc_num)
		ds->desc_hw[num].lli = ds->desc_hw_lli + (num + 1) *
			sizeof(struct zx_desc_hw);
	ds->desc_hw[num].saddr = src;
	ds->desc_hw[num].daddr = dst;
	ds->desc_hw[num].src_x = len;
	ds->desc_hw[num].ctr = ccfg;
}

static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num,
						     struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	int lli_limit = LLI_BLOCK_SIZE / sizeof(struct zx_desc_hw);

	if (num > lli_limit) {
		dev_dbg(chan->device->dev, "vch %p: sg num %d exceed max %d\n",
			&c->vc, num, lli_limit);
		return NULL;
	}

	ds = kzalloc(sizeof(*ds), GFP_ATOMIC);
	if (!ds)
		return NULL;

	ds->desc_hw = dma_pool_zalloc(d->pool, GFP_NOWAIT, &ds->desc_hw_lli);
	if (!ds->desc_hw) {
		dev_dbg(chan->device->dev, "vch %p: dma alloc fail\n", &c->vc);
		kfree(ds);
		return NULL;
	}
	ds->desc_num = num;
	return ds;
}

static enum zx_dma_burst_width zx_dma_burst_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return ffs(width) - 1;
	default:
		return ZX_DMA_WIDTH_32BIT;
	}
}

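/*
 * Build the channel control word from the slave config. Both burst widths
 * are programmed from the slave-side width: as the MEM_TO_DEV comment
 * below notes, the dst length is derived from the src side, and a trailing
 * partial burst requires identical src/dst data widths.
 */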
static int zx_pre_config(struct zx_dma_chan *c, enum dma_transfer_direction dir)
{
	struct dma_slave_config *cfg = &c->slave_cfg;
	enum zx_dma_burst_width src_width;
	enum zx_dma_burst_width dst_width;
	u32 maxburst = 0;

	switch (dir) {
	case DMA_MEM_TO_MEM:
		c->ccfg = ZX_CH_ENABLE | ZX_SOFT_REQ
			| ZX_SRC_BURST_LEN(ZX_MAX_BURST_LEN - 1)
			| ZX_SRC_BURST_WIDTH(ZX_DMA_WIDTH_32BIT)
			| ZX_DST_BURST_WIDTH(ZX_DMA_WIDTH_32BIT);
		break;
	case DMA_MEM_TO_DEV:
		c->dev_addr = cfg->dst_addr;
		/*
		 * The dst len is calculated from the src width, len and
		 * dst width; make sure it does not exceed DMA_MAX_SIZE.
		 * A trailing single transaction that does not fill a full
		 * burst also requires an identical src/dst data width.
		 */
		dst_width = zx_dma_burst_width(cfg->dst_addr_width);
		maxburst = cfg->dst_maxburst;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_DST_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(dst_width)
			| ZX_DST_BURST_WIDTH(dst_width);
		break;
	case DMA_DEV_TO_MEM:
		c->dev_addr = cfg->src_addr;
		src_width = zx_dma_burst_width(cfg->src_addr_width);
		maxburst = cfg->src_maxburst;
		maxburst = maxburst < ZX_MAX_BURST_LEN ?
				maxburst : ZX_MAX_BURST_LEN;
		c->ccfg = ZX_SRC_FIFO_MODE | ZX_CH_ENABLE
			| ZX_SRC_BURST_LEN(maxburst - 1)
			| ZX_SRC_BURST_WIDTH(src_width)
			| ZX_DST_BURST_WIDTH(src_width);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static struct dma_async_tx_descriptor *zx_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
	size_t len, unsigned long flags)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t copy = 0;
	int num = 0;

	if (!len)
		return NULL;

	if (zx_pre_config(c, DMA_MEM_TO_MEM))
		return NULL;

	num = DIV_ROUND_UP(len, DMA_MAX_SIZE);

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	ds->size = len;
	num = 0;

	do {
		copy = min_t(size_t, len, DMA_MAX_SIZE);
		zx_dma_fill_desc(ds, dst, src, copy, num++, c->ccfg);

		src += copy;
		dst += copy;
		len -= copy;
	} while (len);

	c->cyclic = 0;
	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static struct dma_async_tx_descriptor *zx_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sglen,
	enum dma_transfer_direction dir, unsigned long flags, void *context)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	size_t len, avail, total = 0;
	struct scatterlist *sg;
	dma_addr_t addr, src = 0, dst = 0;
	int num = sglen, i;

	if (!sgl)
		return NULL;

	if (zx_pre_config(c, dir))
		return NULL;

	for_each_sg(sgl, sg, sglen, i) {
		avail = sg_dma_len(sg);
		if (avail > DMA_MAX_SIZE)
			num += DIV_ROUND_UP(avail, DMA_MAX_SIZE) - 1;
	}

	ds = zx_alloc_desc_resource(num, chan);
	if (!ds)
		return NULL;

	c->cyclic = 0;
	num = 0;
	for_each_sg(sgl, sg, sglen, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		total += avail;

		do {
			len = min_t(size_t, avail, DMA_MAX_SIZE);

			if (dir == DMA_MEM_TO_DEV) {
				src = addr;
				dst = c->dev_addr;
			} else if (dir == DMA_DEV_TO_MEM) {
				src = c->dev_addr;
				dst = addr;
			}

			zx_dma_fill_desc(ds, dst, src, len, num++, c->ccfg);

			addr += len;
			avail -= len;
		} while (avail);
	}

	ds->desc_hw[num - 1].lli = 0;	/* end of link */
	ds->desc_hw[num - 1].ctr |= ZX_IRQ_ENABLE_ALL;
	ds->size = total;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

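/*
 * Cyclic transfers reuse the same LLI machinery but close the chain into a
 * ring (the last item points back at the first) and enable the interrupt
 * on every period so each wrap raises vchan_cyclic_callback().
 */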
static struct dma_async_tx_descriptor *zx_dma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction dir,
		unsigned long flags)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_desc_sw *ds;
	dma_addr_t src = 0, dst = 0;
	int num_periods = buf_len / period_len;
	int buf = 0, num = 0;

	if (period_len > DMA_MAX_SIZE) {
		dev_err(chan->device->dev, "maximum period size exceeded\n");
		return NULL;
	}

	if (zx_pre_config(c, dir))
		return NULL;

	ds = zx_alloc_desc_resource(num_periods, chan);
	if (!ds)
		return NULL;
	c->cyclic = 1;

	while (buf < buf_len) {
		if (dir == DMA_MEM_TO_DEV) {
			src = dma_addr;
			dst = c->dev_addr;
		} else if (dir == DMA_DEV_TO_MEM) {
			src = c->dev_addr;
			dst = dma_addr;
		}
		zx_dma_fill_desc(ds, dst, src, period_len, num++,
				 c->ccfg | ZX_IRQ_ENABLE_ALL);
		dma_addr += period_len;
		buf += period_len;
	}

	ds->desc_hw[num - 1].lli = ds->desc_hw_lli;
	ds->size = buf_len;
	return vchan_tx_prep(&c->vc, &ds->vd, flags);
}

static int zx_dma_config(struct dma_chan *chan,
			 struct dma_slave_config *cfg)
{
	struct zx_dma_chan *c = to_zx_chan(chan);

	if (!cfg)
		return -EINVAL;

	memcpy(&c->slave_cfg, cfg, sizeof(*cfg));

	return 0;
}

static int zx_dma_terminate_all(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	struct zx_dma_dev *d = to_zx_dma(chan->device);
	struct zx_dma_phy *p = c->phy;
	unsigned long flags;
	LIST_HEAD(head);

	dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc);

	/* Prevent this channel being scheduled */
	spin_lock(&d->lock);
	list_del_init(&c->node);
	spin_unlock(&d->lock);

	/* Clear the tx descriptor lists */
	spin_lock_irqsave(&c->vc.lock, flags);
	vchan_get_all_descriptors(&c->vc, &head);
	if (p) {
		/* vchan is assigned to a pchan - stop the channel */
		zx_dma_terminate_chan(p, d);
		c->phy = NULL;
		p->vchan = NULL;
		p->ds_run = NULL;
		p->ds_done = NULL;
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

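/*
 * Pause/resume only gate the channel-enable bit; the descriptor chain and
 * the vchan's phy binding are left intact. Note that both assume c->phy
 * is currently assigned.
 */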
static int zx_dma_transfer_pause(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	u32 val = 0;

	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
	val &= ~ZX_CH_ENABLE;
	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

	return 0;
}

static int zx_dma_transfer_resume(struct dma_chan *chan)
{
	struct zx_dma_chan *c = to_zx_chan(chan);
	u32 val = 0;

	val = readl_relaxed(c->phy->base + REG_ZX_CTRL);
	val |= ZX_CH_ENABLE;
	writel_relaxed(val, c->phy->base + REG_ZX_CTRL);

	return 0;
}

static void zx_dma_free_desc(struct virt_dma_desc *vd)
{
	struct zx_dma_desc_sw *ds =
		container_of(vd, struct zx_dma_desc_sw, vd);
	struct zx_dma_dev *d = to_zx_dma(vd->tx.chan->device);

	dma_pool_free(d->pool, ds->desc_hw, ds->desc_hw_lli);
	kfree(ds);
}

static const struct of_device_id zx6702_dma_dt_ids[] = {
	{ .compatible = "zte,zx296702-dma", },
	{}
};
MODULE_DEVICE_TABLE(of, zx6702_dma_dt_ids);

static struct dma_chan *zx_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
					       struct of_dma *ofdma)
{
	struct zx_dma_dev *d = ofdma->of_dma_data;
	unsigned int request = dma_spec->args[0];
	struct dma_chan *chan;
	struct zx_dma_chan *c;

	if (request >= d->dma_requests)
		return NULL;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan) {
		dev_err(d->slave.dev, "get channel fail in %s.\n", __func__);
		return NULL;
	}
	c = to_zx_chan(chan);
	c->id = request;
	dev_info(d->slave.dev, "zx_dma: pchan %u: alloc vchan %p\n",
		 c->id, &c->vc);
	return chan;
}

static int zx_dma_probe(struct platform_device *op)
{
	struct zx_dma_dev *d;
	struct resource *iores;
	int i, ret = 0;

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	if (!iores)
		return -EINVAL;

	d = devm_kzalloc(&op->dev, sizeof(*d), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(d->base))
		return PTR_ERR(d->base);

	of_property_read_u32((&op->dev)->of_node,
			     "dma-channels", &d->dma_channels);
	of_property_read_u32((&op->dev)->of_node,
			     "dma-requests", &d->dma_requests);
	if (!d->dma_requests || !d->dma_channels)
		return -EINVAL;

	d->clk = devm_clk_get(&op->dev, NULL);
	if (IS_ERR(d->clk)) {
		dev_err(&op->dev, "no dma clk\n");
		return PTR_ERR(d->clk);
	}

	d->irq = platform_get_irq(op, 0);
	ret = devm_request_irq(&op->dev, d->irq, zx_dma_int_handler,
			       0, DRIVER_NAME, d);
	if (ret)
		return ret;

	/* A DMA memory pool for LLIs, align on 32-byte boundary */
	d->pool = dmam_pool_create(DRIVER_NAME, &op->dev,
				   LLI_BLOCK_SIZE, 32, 0);
	if (!d->pool)
		return -ENOMEM;

	/* init phy channel */
	d->phy = devm_kzalloc(&op->dev,
		d->dma_channels * sizeof(struct zx_dma_phy), GFP_KERNEL);
	if (!d->phy)
		return -ENOMEM;

	for (i = 0; i < d->dma_channels; i++) {
		struct zx_dma_phy *p = &d->phy[i];

		p->idx = i;
		p->base = d->base + i * 0x40;
	}

	INIT_LIST_HEAD(&d->slave.channels);
	dma_cap_set(DMA_SLAVE, d->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, d->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, d->slave.cap_mask);
	d->slave.dev = &op->dev;
	d->slave.device_free_chan_resources = zx_dma_free_chan_resources;
	d->slave.device_tx_status = zx_dma_tx_status;
	d->slave.device_prep_dma_memcpy = zx_dma_prep_memcpy;
	d->slave.device_prep_slave_sg = zx_dma_prep_slave_sg;
	d->slave.device_prep_dma_cyclic = zx_dma_prep_dma_cyclic;
	d->slave.device_issue_pending = zx_dma_issue_pending;
	d->slave.device_config = zx_dma_config;
	d->slave.device_terminate_all = zx_dma_terminate_all;
	d->slave.device_pause = zx_dma_transfer_pause;
	d->slave.device_resume = zx_dma_transfer_resume;
	d->slave.copy_align = DMA_ALIGN;
	d->slave.src_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.dst_addr_widths = ZX_DMA_BUSWIDTHS;
	d->slave.directions = BIT(DMA_MEM_TO_MEM) | BIT(DMA_MEM_TO_DEV)
			| BIT(DMA_DEV_TO_MEM);
	d->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;

	/* init virtual channel */
	d->chans = devm_kzalloc(&op->dev,
		d->dma_requests * sizeof(struct zx_dma_chan), GFP_KERNEL);
	if (!d->chans)
		return -ENOMEM;

	for (i = 0; i < d->dma_requests; i++) {
		struct zx_dma_chan *c = &d->chans[i];

		c->status = DMA_IN_PROGRESS;
		INIT_LIST_HEAD(&c->node);
		c->vc.desc_free = zx_dma_free_desc;
		vchan_init(&c->vc, &d->slave);
	}

	/* Enable clock before accessing registers */
	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(&op->dev, "clk_prepare_enable failed: %d\n", ret);
		goto zx_dma_out;
	}

	zx_dma_init_state(d);

	spin_lock_init(&d->lock);
	INIT_LIST_HEAD(&d->chan_pending);
	platform_set_drvdata(op, d);

	ret = dma_async_device_register(&d->slave);
	if (ret)
		goto clk_dis;

	ret = of_dma_controller_register((&op->dev)->of_node,
					 zx_of_dma_simple_xlate, d);
	if (ret)
		goto of_dma_register_fail;

	dev_info(&op->dev, "initialized\n");
	return 0;

of_dma_register_fail:
	dma_async_device_unregister(&d->slave);
clk_dis:
	clk_disable_unprepare(d->clk);
zx_dma_out:
	return ret;
}

static int zx_dma_remove(struct platform_device *op)
{
	struct zx_dma_chan *c, *cn;
	struct zx_dma_dev *d = platform_get_drvdata(op);

	/* explicitly free the irq */
	devm_free_irq(&op->dev, d->irq, d);

	dma_async_device_unregister(&d->slave);
	of_dma_controller_free((&op->dev)->of_node);

	list_for_each_entry_safe(c, cn, &d->slave.channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
	}
	clk_disable_unprepare(d->clk);
	dmam_pool_destroy(d->pool);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int zx_dma_suspend_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	u32 stat = 0;

	stat = zx_dma_get_chan_stat(d);
	if (stat) {
		dev_warn(d->slave.dev,
			 "chan %d is running, fail to suspend\n", stat);
		return -1;
	}
	clk_disable_unprepare(d->clk);
	return 0;
}

static int zx_dma_resume_dev(struct device *dev)
{
	struct zx_dma_dev *d = dev_get_drvdata(dev);
	int ret = 0;

	ret = clk_prepare_enable(d->clk);
	if (ret < 0) {
		dev_err(d->slave.dev, "clk_prepare_enable failed: %d\n", ret);
		return ret;
	}
	zx_dma_init_state(d);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(zx_dma_pmops, zx_dma_suspend_dev, zx_dma_resume_dev);

static struct platform_driver zx_pdma_driver = {
	.driver		= {
		.name	= DRIVER_NAME,
		.pm	= &zx_dma_pmops,
		.of_match_table = zx6702_dma_dt_ids,
	},
	.probe		= zx_dma_probe,
	.remove		= zx_dma_remove,
};

module_platform_driver(zx_pdma_driver);

MODULE_DESCRIPTION("ZTE ZX296702 DMA Driver");
MODULE_AUTHOR("Jun Nie <jun.nie@linaro.org>");
MODULE_LICENSE("GPL v2");