/*
 * OMAP DMAengine support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/omap-dma.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>

#include "virt-dma.h"

#define OMAP_SDMA_REQUESTS	127
#define OMAP_SDMA_CHANNELS	32

struct omap_dmadev {
	struct dma_device ddev;
	spinlock_t lock;
	void __iomem *base;
	const struct omap_dma_reg *reg_map;
	struct omap_system_dma_plat_info *plat;
	bool legacy;
	bool ll123_supported;
	struct dma_pool *desc_pool;
	unsigned dma_requests;
	spinlock_t irq_lock;
	uint32_t irq_enable_mask;
	struct omap_chan **lch_map;
};

struct omap_chan {
	struct virt_dma_chan vc;
	void __iomem *channel_base;
	const struct omap_dma_reg *reg_map;
	uint32_t ccr;

	struct dma_slave_config cfg;
	unsigned dma_sig;
	bool cyclic;
	bool paused;
	bool running;

	int dma_ch;
	struct omap_desc *desc;
	unsigned sgidx;
};

#define DESC_NXT_SV_REFRESH	(0x1 << 24)
#define DESC_NXT_SV_REUSE	(0x2 << 24)
#define DESC_NXT_DV_REFRESH	(0x1 << 26)
#define DESC_NXT_DV_REUSE	(0x2 << 26)
#define DESC_NTYPE_TYPE2	(0x2 << 29)

/* Type 2 descriptor with Source or Destination address update */
struct omap_type2_desc {
	uint32_t next_desc;
	uint32_t en;
	uint32_t addr; /* src or dst */
	uint16_t fn;
	uint16_t cicr;
	int16_t cdei;
	int16_t csei;
	int32_t cdfi;
	int32_t csfi;
} __packed;

struct omap_sg {
	dma_addr_t addr;
	uint32_t en;		/* number of elements (24-bit) */
	uint32_t fn;		/* number of frames (16-bit) */
	int32_t fi;		/* for double indexing */
	int16_t ei;		/* for double indexing */

	/* Linked list */
	struct omap_type2_desc *t2_desc;
	dma_addr_t t2_desc_paddr;
};

struct omap_desc {
	struct virt_dma_desc vd;
	bool using_ll;
	enum dma_transfer_direction dir;
	dma_addr_t dev_addr;

	int32_t fi;		/* for OMAP_DMA_SYNC_PACKET / double indexing */
	int16_t ei;		/* for double indexing */
	uint8_t es;		/* CSDP_DATA_TYPE_xxx */
	uint32_t ccr;		/* CCR value */
	uint16_t clnk_ctrl;	/* CLNK_CTRL value */
	uint16_t cicr;		/* CICR value */
	uint32_t csdp;		/* CSDP value */

	unsigned sglen;
	struct omap_sg sg[0];
};

enum {
	CAPS_0_SUPPORT_LL123	= BIT(20),	/* Linked List type1/2/3 */
	CAPS_0_SUPPORT_LL4	= BIT(21),	/* Linked List type4 */

	CCR_FS			= BIT(5),
	CCR_READ_PRIORITY	= BIT(6),
	CCR_ENABLE		= BIT(7),
	CCR_AUTO_INIT		= BIT(8),	/* OMAP1 only */
	CCR_REPEAT		= BIT(9),	/* OMAP1 only */
	CCR_OMAP31_DISABLE	= BIT(10),	/* OMAP1 only */
	CCR_SUSPEND_SENSITIVE	= BIT(8),	/* OMAP2+ only */
	CCR_RD_ACTIVE		= BIT(9),	/* OMAP2+ only */
	CCR_WR_ACTIVE		= BIT(10),	/* OMAP2+ only */
	CCR_SRC_AMODE_CONSTANT	= 0 << 12,
	CCR_SRC_AMODE_POSTINC	= 1 << 12,
	CCR_SRC_AMODE_SGLIDX	= 2 << 12,
	CCR_SRC_AMODE_DBLIDX	= 3 << 12,
	CCR_DST_AMODE_CONSTANT	= 0 << 14,
	CCR_DST_AMODE_POSTINC	= 1 << 14,
	CCR_DST_AMODE_SGLIDX	= 2 << 14,
	CCR_DST_AMODE_DBLIDX	= 3 << 14,
	CCR_CONSTANT_FILL	= BIT(16),
	CCR_TRANSPARENT_COPY	= BIT(17),
	CCR_BS			= BIT(18),
	CCR_SUPERVISOR		= BIT(22),
	CCR_PREFETCH		= BIT(23),
	CCR_TRIGGER_SRC		= BIT(24),
	CCR_BUFFERING_DISABLE	= BIT(25),
	CCR_WRITE_PRIORITY	= BIT(26),
	CCR_SYNC_ELEMENT	= 0,
	CCR_SYNC_FRAME		= CCR_FS,
	CCR_SYNC_BLOCK		= CCR_BS,
	CCR_SYNC_PACKET		= CCR_BS | CCR_FS,

	CSDP_DATA_TYPE_8	= 0,
	CSDP_DATA_TYPE_16	= 1,
	CSDP_DATA_TYPE_32	= 2,
	CSDP_SRC_PORT_EMIFF	= 0 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_EMIFS	= 1 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T1	= 2 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_TIPB	= 3 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_OCP_T2	= 4 << 2,	/* OMAP1 only */
	CSDP_SRC_PORT_MPUI	= 5 << 2,	/* OMAP1 only */
	CSDP_SRC_PACKED		= BIT(6),
	CSDP_SRC_BURST_1	= 0 << 7,
	CSDP_SRC_BURST_16	= 1 << 7,
	CSDP_SRC_BURST_32	= 2 << 7,
	CSDP_SRC_BURST_64	= 3 << 7,
	CSDP_DST_PORT_EMIFF	= 0 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_EMIFS	= 1 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T1	= 2 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_TIPB	= 3 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_OCP_T2	= 4 << 9,	/* OMAP1 only */
	CSDP_DST_PORT_MPUI	= 5 << 9,	/* OMAP1 only */
	CSDP_DST_PACKED		= BIT(13),
	CSDP_DST_BURST_1	= 0 << 14,
	CSDP_DST_BURST_16	= 1 << 14,
	CSDP_DST_BURST_32	= 2 << 14,
	CSDP_DST_BURST_64	= 3 << 14,
	CSDP_WRITE_NON_POSTED	= 0 << 16,
	CSDP_WRITE_POSTED	= 1 << 16,
	CSDP_WRITE_LAST_NON_POSTED = 2 << 16,

	CICR_TOUT_IE		= BIT(0),	/* OMAP1 only */
	CICR_DROP_IE		= BIT(1),
	CICR_HALF_IE		= BIT(2),
	CICR_FRAME_IE		= BIT(3),
	CICR_LAST_IE		= BIT(4),
	CICR_BLOCK_IE		= BIT(5),
	CICR_PKT_IE		= BIT(7),	/* OMAP2+ only */
	CICR_TRANS_ERR_IE	= BIT(8),	/* OMAP2+ only */
	CICR_SUPERVISOR_ERR_IE	= BIT(10),	/* OMAP2+ only */
	CICR_MISALIGNED_ERR_IE	= BIT(11),	/* OMAP2+ only */
	CICR_DRAIN_IE		= BIT(12),	/* OMAP2+ only */
	CICR_SUPER_BLOCK_IE	= BIT(14),	/* OMAP2+ only */

	CLNK_CTRL_ENABLE_LNK	= BIT(15),

	CDP_DST_VALID_INC	= 0 << 0,
	CDP_DST_VALID_RELOAD	= 1 << 0,
	CDP_DST_VALID_REUSE	= 2 << 0,
	CDP_SRC_VALID_INC	= 0 << 2,
	CDP_SRC_VALID_RELOAD	= 1 << 2,
	CDP_SRC_VALID_REUSE	= 2 << 2,
	CDP_NTYPE_TYPE1		= 1 << 4,
	CDP_NTYPE_TYPE2		= 2 << 4,
	CDP_NTYPE_TYPE3		= 3 << 4,
	CDP_TMODE_NORMAL	= 0 << 8,
	CDP_TMODE_LLIST		= 1 << 8,
	CDP_FAST		= BIT(10),
};

static const unsigned es_bytes[] = {
	[CSDP_DATA_TYPE_8] = 1,
	[CSDP_DATA_TYPE_16] = 2,
	[CSDP_DATA_TYPE_32] = 4,
};

static struct of_dma_filter_info omap_dma_info = {
	.filter_fn = omap_dma_filter_fn,
};

static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d)
{
	return container_of(d, struct omap_dmadev, ddev);
}

static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct omap_chan, vc.chan);
}

static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct omap_desc, vd.tx);
}

static void omap_dma_desc_free(struct virt_dma_desc *vd)
{
	struct omap_desc *d = to_omap_dma_desc(&vd->tx);

	if (d->using_ll) {
		struct omap_dmadev *od = to_omap_dma_dev(vd->tx.chan->device);
		int i;

		for (i = 0; i < d->sglen; i++) {
			if (d->sg[i].t2_desc)
				dma_pool_free(od->desc_pool, d->sg[i].t2_desc,
					      d->sg[i].t2_desc_paddr);
		}
	}

	kfree(d);
}

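/*
 * Fill in the type 2 linked-list descriptor for the sg entry at @idx and
 * chain it to the previous entry; the last descriptor is terminated with
 * the 0xfffffffc end-of-chain marker. Depending on the direction, the
 * memory side address is refreshed from the descriptor while the device
 * side address is reused, and only the last descriptor keeps the block
 * interrupt enabled.
 */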
static void omap_dma_fill_type2_desc(struct omap_desc *d, int idx,
				     enum dma_transfer_direction dir, bool last)
{
	struct omap_sg *sg = &d->sg[idx];
	struct omap_type2_desc *t2_desc = sg->t2_desc;

	if (idx)
		d->sg[idx - 1].t2_desc->next_desc = sg->t2_desc_paddr;
	if (last)
		t2_desc->next_desc = 0xfffffffc;

	t2_desc->en = sg->en;
	t2_desc->addr = sg->addr;
	t2_desc->fn = sg->fn & 0xffff;
	t2_desc->cicr = d->cicr;
	if (!last)
		t2_desc->cicr &= ~CICR_BLOCK_IE;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		t2_desc->cdei = sg->ei;
		t2_desc->csei = d->ei;
		t2_desc->cdfi = sg->fi;
		t2_desc->csfi = d->fi;

		t2_desc->en |= DESC_NXT_DV_REFRESH;
		t2_desc->en |= DESC_NXT_SV_REUSE;
		break;
	case DMA_MEM_TO_DEV:
		t2_desc->cdei = d->ei;
		t2_desc->csei = sg->ei;
		t2_desc->cdfi = d->fi;
		t2_desc->csfi = sg->fi;

		t2_desc->en |= DESC_NXT_SV_REFRESH;
		t2_desc->en |= DESC_NXT_DV_REUSE;
		break;
	default:
		return;
	}

	t2_desc->en |= DESC_NTYPE_TYPE2;
}

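/*
 * Low-level register accessors. The register layout differs between OMAP
 * generations, so each register is described by an omap_dma_reg entry
 * giving its offset and access width (16-bit, paired 16-bit or 32-bit).
 */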
static void omap_dma_write(uint32_t val, unsigned type, void __iomem *addr)
{
	switch (type) {
	case OMAP_DMA_REG_16BIT:
		writew_relaxed(val, addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		writew_relaxed(val, addr);
		writew_relaxed(val >> 16, addr + 2);
		break;
	case OMAP_DMA_REG_32BIT:
		writel_relaxed(val, addr);
		break;
	default:
		WARN_ON(1);
	}
}

static unsigned omap_dma_read(unsigned type, void __iomem *addr)
{
	unsigned val;

	switch (type) {
	case OMAP_DMA_REG_16BIT:
		val = readw_relaxed(addr);
		break;
	case OMAP_DMA_REG_2X16BIT:
		val = readw_relaxed(addr);
		val |= readw_relaxed(addr + 2) << 16;
		break;
	case OMAP_DMA_REG_32BIT:
		val = readl_relaxed(addr);
		break;
	default:
		WARN_ON(1);
		val = 0;
	}

	return val;
}

static void omap_dma_glbl_write(struct omap_dmadev *od, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	omap_dma_write(val, r->type, od->base + r->offset);
}

static unsigned omap_dma_glbl_read(struct omap_dmadev *od, unsigned reg)
{
	const struct omap_dma_reg *r = od->reg_map + reg;

	WARN_ON(r->stride);

	return omap_dma_read(r->type, od->base + r->offset);
}

static void omap_dma_chan_write(struct omap_chan *c, unsigned reg, unsigned val)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	omap_dma_write(val, r->type, c->channel_base + r->offset);
}

static unsigned omap_dma_chan_read(struct omap_chan *c, unsigned reg)
{
	const struct omap_dma_reg *r = c->reg_map + reg;

	return omap_dma_read(r->type, c->channel_base + r->offset);
}

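/*
 * The channel status register (CSR) is cleared by reading it on OMAP1 and
 * by writing the set bits back on OMAP2+, hence the two variants below.
 */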
static void omap_dma_clear_csr(struct omap_chan *c)
{
	if (dma_omap1())
		omap_dma_chan_read(c, CSR);
	else
		omap_dma_chan_write(c, CSR, ~0);
}

static unsigned omap_dma_get_csr(struct omap_chan *c)
{
	unsigned val = omap_dma_chan_read(c, CSR);

	if (!dma_omap1())
		omap_dma_chan_write(c, CSR, val);

	return val;
}

static void omap_dma_assign(struct omap_dmadev *od, struct omap_chan *c,
			    unsigned lch)
{
	c->channel_base = od->base + od->plat->channel_stride * lch;

	od->lch_map[lch] = c;
}

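/*
 * Program and enable a channel for the given descriptor. For linked-list
 * (type 2) transfers the descriptor chain is handed over via CDP/CNDP so
 * the controller walks the chain itself; the channel-level block interrupt
 * is masked here because each descriptor in the chain carries its own CICR,
 * with only the final one raising the block interrupt.
 */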
static void omap_dma_start(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint16_t cicr = d->cicr;

	if (__dma_omap15xx(od->plat->dma_attr))
		omap_dma_chan_write(c, CPC, 0);
	else
		omap_dma_chan_write(c, CDAC, 0);

	omap_dma_clear_csr(c);

	if (d->using_ll) {
		uint32_t cdp = CDP_TMODE_LLIST | CDP_NTYPE_TYPE2 | CDP_FAST;

		if (d->dir == DMA_DEV_TO_MEM)
			cdp |= (CDP_DST_VALID_RELOAD | CDP_SRC_VALID_REUSE);
		else
			cdp |= (CDP_DST_VALID_REUSE | CDP_SRC_VALID_RELOAD);
		omap_dma_chan_write(c, CDP, cdp);

		omap_dma_chan_write(c, CNDP, d->sg[0].t2_desc_paddr);
		omap_dma_chan_write(c, CCDN, 0);
		omap_dma_chan_write(c, CCFN, 0xffff);
		omap_dma_chan_write(c, CCEN, 0xffffff);

		cicr &= ~CICR_BLOCK_IE;
	} else if (od->ll123_supported) {
		omap_dma_chan_write(c, CDP, 0);
	}

	/* Enable interrupts */
	omap_dma_chan_write(c, CICR, cicr);

	/* Enable channel */
	omap_dma_chan_write(c, CCR, d->ccr | CCR_ENABLE);

	c->running = true;
}

static void omap_dma_drain_chan(struct omap_chan *c)
{
	int i;
	u32 val;

	/* Wait for sDMA FIFO to drain */
	for (i = 0; ; i++) {
		val = omap_dma_chan_read(c, CCR);
		if (!(val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE)))
			break;

		if (i > 100)
			break;

		udelay(5);
	}

	if (val & (CCR_RD_ACTIVE | CCR_WR_ACTIVE))
		dev_err(c->vc.chan.device->dev,
			"DMA drain did not complete on lch %d\n",
			c->dma_ch);
}

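/*
 * Disable a channel. Source-triggered transfers on controllers affected by
 * erratum i541 are stopped with the module temporarily forced to no-idle;
 * in either path, unless buffering is disabled for the channel, the FIFO
 * is drained after CCR_ENABLE has been cleared.
 */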
static int omap_dma_stop(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	/* disable irq */
	omap_dma_chan_write(c, CICR, 0);

	omap_dma_clear_csr(c);

	val = omap_dma_chan_read(c, CCR);
	if (od->plat->errata & DMA_ERRATA_i541 && val & CCR_TRIGGER_SRC) {
		uint32_t sysconfig;

		sysconfig = omap_dma_glbl_read(od, OCP_SYSCONFIG);
		val = sysconfig & ~DMA_SYSCONFIG_MIDLEMODE_MASK;
		val |= DMA_SYSCONFIG_MIDLEMODE(DMA_IDLEMODE_NO_IDLE);
		omap_dma_glbl_write(od, OCP_SYSCONFIG, val);

		val = omap_dma_chan_read(c, CCR);
		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);

		omap_dma_glbl_write(od, OCP_SYSCONFIG, sysconfig);
	} else {
		if (!(val & CCR_ENABLE))
			return -EINVAL;

		val &= ~CCR_ENABLE;
		omap_dma_chan_write(c, CCR, val);

		if (!(c->ccr & CCR_BUFFERING_DISABLE))
			omap_dma_drain_chan(c);
	}

	mb();

	if (!__dma_omap15xx(od->plat->dma_attr) && c->cyclic) {
		val = omap_dma_chan_read(c, CLNK_CTRL);

		if (dma_omap1())
			val |= 1 << 14; /* set the STOP_LNK bit */
		else
			val &= ~CLNK_CTRL_ENABLE_LNK;

		omap_dma_chan_write(c, CLNK_CTRL, val);
	}
	c->running = false;
	return 0;
}

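/*
 * Program the memory side address and the element/frame counts for the
 * current sg entry, then (re)start the channel. The device side parameters
 * are set up once per descriptor in omap_dma_start_desc().
 */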
static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d)
{
	struct omap_sg *sg = d->sg + c->sgidx;
	unsigned cxsa, cxei, cxfi;

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	} else {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	}

	omap_dma_chan_write(c, cxsa, sg->addr);
	omap_dma_chan_write(c, cxei, sg->ei);
	omap_dma_chan_write(c, cxfi, sg->fi);
	omap_dma_chan_write(c, CEN, sg->en);
	omap_dma_chan_write(c, CFN, sg->fn);

	omap_dma_start(c, d);
	c->sgidx++;
}

static void omap_dma_start_desc(struct omap_chan *c)
{
	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
	struct omap_desc *d;
	unsigned cxsa, cxei, cxfi;

	if (!vd) {
		c->desc = NULL;
		return;
	}

	list_del(&vd->node);

	c->desc = d = to_omap_dma_desc(&vd->tx);
	c->sgidx = 0;

	/*
	 * This provides the necessary barrier to ensure data held in
	 * DMA coherent memory is visible to the DMA engine prior to
	 * the transfer starting.
	 */
	mb();

	omap_dma_chan_write(c, CCR, d->ccr);
	if (dma_omap1())
		omap_dma_chan_write(c, CCR2, d->ccr >> 16);

	if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM) {
		cxsa = CSSA;
		cxei = CSEI;
		cxfi = CSFI;
	} else {
		cxsa = CDSA;
		cxei = CDEI;
		cxfi = CDFI;
	}

	omap_dma_chan_write(c, cxsa, d->dev_addr);
	omap_dma_chan_write(c, cxei, d->ei);
	omap_dma_chan_write(c, cxfi, d->fi);
	omap_dma_chan_write(c, CSDP, d->csdp);
	omap_dma_chan_write(c, CLNK_CTRL, d->clnk_ctrl);

	omap_dma_start_sg(c, d);
}

static void omap_dma_callback(int ch, u16 status, void *data)
{
	struct omap_chan *c = data;
	struct omap_desc *d;
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	d = c->desc;
	if (d) {
		if (c->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else if (d->using_ll || c->sgidx == d->sglen) {
			omap_dma_start_desc(c);
			vchan_cookie_complete(&d->vd);
		} else {
			omap_dma_start_sg(c, d);
		}
	}
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

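/*
 * Handler for the dedicated L1 interrupt line on OMAP2+: acknowledge and
 * dispatch every pending channel that we enabled to omap_dma_callback().
 */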
static irqreturn_t omap_dma_irq(int irq, void *devid)
{
	struct omap_dmadev *od = devid;
	unsigned status, channel;

	spin_lock(&od->irq_lock);

	status = omap_dma_glbl_read(od, IRQSTATUS_L1);
	status &= od->irq_enable_mask;
	if (status == 0) {
		spin_unlock(&od->irq_lock);
		return IRQ_NONE;
	}

	while ((channel = ffs(status)) != 0) {
		unsigned mask, csr;
		struct omap_chan *c;

		channel -= 1;
		mask = BIT(channel);
		status &= ~mask;

		c = od->lch_map[channel];
		if (c == NULL) {
			/* This should never happen */
			dev_err(od->ddev.dev, "invalid channel %u\n", channel);
			continue;
		}

		csr = omap_dma_get_csr(c);
		omap_dma_glbl_write(od, IRQSTATUS_L1, mask);

		omap_dma_callback(channel, csr, c);
	}

	spin_unlock(&od->irq_lock);

	return IRQ_HANDLED;
}

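/*
 * Claim a hardware channel through the legacy omap_request_dma() API. When
 * this driver owns the L1 interrupt line (!od->legacy) the channel's
 * interrupt is unmasked on L1 and masked on L0, and a default CCR value
 * encoding the request line is derived for later transfers.
 */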
static int omap_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct device *dev = od->ddev.dev;
	int ret;

	if (od->legacy) {
		ret = omap_request_dma(c->dma_sig, "DMA engine",
				       omap_dma_callback, c, &c->dma_ch);
	} else {
		ret = omap_request_dma(c->dma_sig, "DMA engine", NULL, NULL,
				       &c->dma_ch);
	}

	dev_dbg(dev, "allocating channel %u for %u\n", c->dma_ch, c->dma_sig);

	if (ret >= 0) {
		omap_dma_assign(od, c, c->dma_ch);

		if (!od->legacy) {
			unsigned val;

			spin_lock_irq(&od->irq_lock);
			val = BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQSTATUS_L1, val);
			od->irq_enable_mask |= val;
			omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);

			val = omap_dma_glbl_read(od, IRQENABLE_L0);
			val &= ~BIT(c->dma_ch);
			omap_dma_glbl_write(od, IRQENABLE_L0, val);
			spin_unlock_irq(&od->irq_lock);
		}
	}

	if (dma_omap1()) {
		if (__dma_omap16xx(od->plat->dma_attr)) {
			c->ccr = CCR_OMAP31_DISABLE;
			/* Duplicate what plat-omap/dma.c does */
			c->ccr |= c->dma_ch + 1;
		} else {
			c->ccr = c->dma_sig & 0x1f;
		}
	} else {
		c->ccr = c->dma_sig & 0x1f;
		c->ccr |= (c->dma_sig & ~0x1f) << 14;
	}
	if (od->plat->errata & DMA_ERRATA_IFRAME_BUFFERING)
		c->ccr |= CCR_BUFFERING_DISABLE;

	return ret;
}

static void omap_dma_free_chan_resources(struct dma_chan *chan)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (!od->legacy) {
		spin_lock_irq(&od->irq_lock);
		od->irq_enable_mask &= ~BIT(c->dma_ch);
		omap_dma_glbl_write(od, IRQENABLE_L1, od->irq_enable_mask);
		spin_unlock_irq(&od->irq_lock);
	}

	c->channel_base = NULL;
	od->lch_map[c->dma_ch] = NULL;
	vchan_free_chan_resources(&c->vc);
	omap_free_dma(c->dma_ch);

	dev_dbg(od->ddev.dev, "freeing channel %u used for %u\n", c->dma_ch,
		c->dma_sig);
	c->dma_sig = 0;
}

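/*
 * Transfer size accounting for residue reporting: an sg entry covers
 * EN * FN elements of es_bytes[es] bytes each. omap_dma_desc_size_pos()
 * counts only the bytes from @addr (the current hardware position) to the
 * end of the descriptor.
 */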
static size_t omap_dma_sg_size(struct omap_sg *sg)
{
	return sg->en * sg->fn;
}

static size_t omap_dma_desc_size(struct omap_desc *d)
{
	unsigned i;
	size_t size;

	for (size = i = 0; i < d->sglen; i++)
		size += omap_dma_sg_size(&d->sg[i]);

	return size * es_bytes[d->es];
}

static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr)
{
	unsigned i;
	size_t size, es_size = es_bytes[d->es];

	for (size = i = 0; i < d->sglen; i++) {
		size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size;

		if (size)
			size += this_size;
		else if (addr >= d->sg[i].addr &&
			 addr < d->sg[i].addr + this_size)
			size += d->sg[i].addr + this_size - addr;
	}
	return size;
}

/*
 * OMAP 3.2/3.3 erratum: sometimes 0 is returned if CSAC/CDAC is
 * read before the DMA controller finished disabling the channel.
 */
static uint32_t omap_dma_chan_read_3_3(struct omap_chan *c, unsigned reg)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	uint32_t val;

	val = omap_dma_chan_read(c, reg);
	if (val == 0 && od->plat->errata & DMA_ERRATA_3_3)
		val = omap_dma_chan_read(c, reg);

	return val;
}

static dma_addr_t omap_dma_get_src_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr, cdac;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CSAC);
		cdac = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel has
		 * not been started (no data has been transferred so far).
		 * Return the programmed source start address in this case.
		 */
		if (cdac == 0)
			addr = omap_dma_chan_read(c, CSSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CSSA) & 0xffff0000;

	return addr;
}

static dma_addr_t omap_dma_get_dst_pos(struct omap_chan *c)
{
	struct omap_dmadev *od = to_omap_dma_dev(c->vc.chan.device);
	dma_addr_t addr;

	if (__dma_omap15xx(od->plat->dma_attr)) {
		addr = omap_dma_chan_read(c, CPC);
	} else {
		addr = omap_dma_chan_read_3_3(c, CDAC);

		/*
		 * CDAC == 0 indicates that the DMA transfer on the channel
		 * has not been started (no data has been transferred so
		 * far). Return the programmed destination start address in
		 * this case.
		 */
		if (addr == 0)
			addr = omap_dma_chan_read(c, CDSA);
	}

	if (dma_omap1())
		addr |= omap_dma_chan_read(c, CDSA) & 0xffff0000;

	return addr;
}

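/*
 * Report transfer status. A channel that stopped on its own (CCR_ENABLE
 * cleared by the hardware) is treated as complete even before the
 * completion interrupt has been handled; otherwise the residue is derived
 * from the current source or destination address of the running transfer.
 */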
static enum dma_status omap_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (!c->paused && c->running) {
		uint32_t ccr = omap_dma_chan_read(c, CCR);
		/*
		 * The channel is no longer active, set the return value
		 * accordingly
		 */
		if (!(ccr & CCR_ENABLE))
			ret = DMA_COMPLETE;
	}

	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	spin_lock_irqsave(&c->vc.lock, flags);
	vd = vchan_find_desc(&c->vc, cookie);
	if (vd) {
		txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx));
	} else if (c->desc && c->desc->vd.tx.cookie == cookie) {
		struct omap_desc *d = c->desc;
		dma_addr_t pos;

		if (d->dir == DMA_MEM_TO_DEV)
			pos = omap_dma_get_src_pos(c);
		else if (d->dir == DMA_DEV_TO_MEM || d->dir == DMA_MEM_TO_MEM)
			pos = omap_dma_get_dst_pos(c);
		else
			pos = 0;

		txstate->residue = omap_dma_desc_size_pos(d, pos);
	} else {
		txstate->residue = 0;
	}
	if (ret == DMA_IN_PROGRESS && c->paused)
		ret = DMA_PAUSED;
	spin_unlock_irqrestore(&c->vc.lock, flags);

	return ret;
}

static void omap_dma_issue_pending(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&c->vc.lock, flags);
	if (vchan_issue_pending(&c->vc) && !c->desc)
		omap_dma_start_desc(c);
	spin_unlock_irqrestore(&c->vc.lock, flags);
}

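/*
 * Prepare a slave scatter-gather transfer. Each sg entry is programmed as
 * frames of 'maxburst' elements (EN), with FN frames covering the entry.
 * A configured port window makes the device side use double indexing so
 * every frame rewinds to the start of the window. On controllers with
 * linked-list support, lists of two or more entries are turned into a
 * type 2 descriptor chain, falling back to interrupt-driven chaining if a
 * descriptor allocation fails.
 */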
static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct scatterlist *sgent;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned i, es, en, frame_bytes;
	bool ll_failed = false;
	u32 burst;
	u32 port_window, port_window_bytes;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
		port_window = c->cfg.src_port_window_size;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
		port_window = c->cfg.dst_port_window_size;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->es = es;

	/* When the port_window is used, one frame must cover the window */
	if (port_window) {
		burst = port_window;
		port_window_bytes = port_window * es_bytes[es];

		d->ei = 1;
		/*
		 * One frame covers the port_window and by configuring
		 * the frame index to be -1 * (port_window_bytes - 1)
		 * we instruct the sDMA that after a frame is processed
		 * it should move back to the start of the window.
		 */
		d->fi = -(port_window_bytes - 1);
	}

	d->ccr = c->ccr | CCR_SYNC_FRAME;
	if (dir == DMA_DEV_TO_MEM) {
		d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;

		d->ccr |= CCR_DST_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_SRC_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_SRC_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_SRC_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_SRC_BURST_16;

		} else {
			d->ccr |= CCR_SRC_AMODE_CONSTANT;
		}
	} else {
		d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;

		d->ccr |= CCR_SRC_AMODE_POSTINC;
		if (port_window) {
			d->ccr |= CCR_DST_AMODE_DBLIDX;

			if (port_window_bytes >= 64)
				d->csdp |= CSDP_DST_BURST_64;
			else if (port_window_bytes >= 32)
				d->csdp |= CSDP_DST_BURST_32;
			else if (port_window_bytes >= 16)
				d->csdp |= CSDP_DST_BURST_16;
		} else {
			d->ccr |= CCR_DST_AMODE_CONSTANT;
		}
	}

	d->cicr = CICR_DROP_IE | CICR_BLOCK_IE;
	d->csdp |= es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_TIPB;
		else
			d->csdp |= CSDP_DST_PORT_TIPB | CSDP_SRC_PORT_EMIFF;
	} else {
		if (dir == DMA_DEV_TO_MEM)
			d->ccr |= CCR_TRIGGER_SRC;

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		if (port_window)
			d->csdp |= CSDP_WRITE_LAST_NON_POSTED;
	}
	if (od->plat->errata & DMA_ERRATA_PARALLEL_CHANNELS)
		d->clnk_ctrl = c->dma_ch;

	/*
	 * Build our scatterlist entries: each contains the address,
	 * the number of elements (EN) in each frame, and the number of
	 * frames (FN). Number of bytes for this entry = ES * EN * FN.
	 *
	 * Burst size translates to number of elements with frame sync.
	 * Note: DMA engine defines burst to be the number of dev-width
	 * transfers.
	 */
	en = burst;
	frame_bytes = es_bytes[es] * en;

	if (sglen >= 2)
		d->using_ll = od->ll123_supported;

	for_each_sg(sgl, sgent, sglen, i) {
		struct omap_sg *osg = &d->sg[i];

		osg->addr = sg_dma_address(sgent);
		osg->en = en;
		osg->fn = sg_dma_len(sgent) / frame_bytes;

		if (d->using_ll) {
			osg->t2_desc = dma_pool_alloc(od->desc_pool, GFP_ATOMIC,
						      &osg->t2_desc_paddr);
			if (!osg->t2_desc) {
				dev_err(chan->device->dev,
					"t2_desc[%d] allocation failed\n", i);
				ll_failed = true;
				d->using_ll = false;
				continue;
			}

			omap_dma_fill_type2_desc(d, i, dir, (i == sglen - 1));
		}
	}

	d->sglen = sglen;

	/* Release the dma_pool entries if one allocation failed */
	if (ll_failed) {
		for (i = 0; i < d->sglen; i++) {
			struct omap_sg *osg = &d->sg[i];

			if (osg->t2_desc) {
				dma_pool_free(od->desc_pool, osg->t2_desc,
					      osg->t2_desc_paddr);
				osg->t2_desc = NULL;
			}
		}
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

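/*
 * Prepare a cyclic transfer: a single sg entry where EN is one period and
 * FN the number of periods in the buffer. The channel links back to itself
 * via CLNK_CTRL (or uses CCR_AUTO_INIT/CCR_REPEAT on OMAP15xx) so it
 * restarts automatically, and with DMA_PREP_INTERRUPT a frame interrupt
 * fires per period.
 */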
static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction dir, unsigned long flags)
{
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	struct omap_chan *c = to_omap_dma_chan(chan);
	enum dma_slave_buswidth dev_width;
	struct omap_desc *d;
	dma_addr_t dev_addr;
	unsigned es;
	u32 burst;

	if (dir == DMA_DEV_TO_MEM) {
		dev_addr = c->cfg.src_addr;
		dev_width = c->cfg.src_addr_width;
		burst = c->cfg.src_maxburst;
	} else if (dir == DMA_MEM_TO_DEV) {
		dev_addr = c->cfg.dst_addr;
		dev_width = c->cfg.dst_addr_width;
		burst = c->cfg.dst_maxburst;
	} else {
		dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
		return NULL;
	}

	/* Bus width translates to the element size (ES) */
	switch (dev_width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		es = CSDP_DATA_TYPE_8;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		es = CSDP_DATA_TYPE_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		es = CSDP_DATA_TYPE_32;
		break;
	default: /* not reached */
		return NULL;
	}

	/* Now allocate and setup the descriptor. */
	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	d->dir = dir;
	d->dev_addr = dev_addr;
	d->fi = burst;
	d->es = es;
	d->sg[0].addr = buf_addr;
	d->sg[0].en = period_len / es_bytes[es];
	d->sg[0].fn = buf_len / period_len;
	d->sglen = 1;

	d->ccr = c->ccr;
	if (dir == DMA_DEV_TO_MEM)
		d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_CONSTANT;
	else
		d->ccr |= CCR_DST_AMODE_CONSTANT | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE;
	if (flags & DMA_PREP_INTERRUPT)
		d->cicr |= CICR_FRAME_IE;

	d->csdp = es;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;

		if (dir == DMA_DEV_TO_MEM)
			d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_MPUI;
		else
			d->csdp |= CSDP_DST_PORT_MPUI | CSDP_SRC_PORT_EMIFF;
	} else {
		if (burst)
			d->ccr |= CCR_SYNC_PACKET;
		else
			d->ccr |= CCR_SYNC_ELEMENT;

		if (dir == DMA_DEV_TO_MEM) {
			d->ccr |= CCR_TRIGGER_SRC;
			d->csdp |= CSDP_DST_PACKED;
		} else {
			d->csdp |= CSDP_SRC_PACKED;
		}

		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;

		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	if (__dma_omap15xx(od->plat->dma_attr))
		d->ccr |= CCR_AUTO_INIT | CCR_REPEAT;
	else
		d->clnk_ctrl = c->dma_ch | CLNK_CTRL_ENABLE_LNK;

	c->cyclic = true;

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

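/*
 * Prepare a memory-to-memory copy. The element size is derived from the
 * common alignment of source, destination and length: __ffs() of the OR'd
 * values yields the largest power-of-two unit dividing all three, capped
 * at 32 bits.
 */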
static struct dma_async_tx_descriptor *omap_dma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	uint8_t data_type;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((src | dest | len));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = src;
	d->fi = 0;
	d->es = data_type;
	d->sg[0].en = len / BIT(data_type);
	d->sg[0].fn = 1;
	d->sg[0].addr = dest;
	d->sglen = 1;
	d->ccr = c->ccr;
	d->ccr |= CCR_DST_AMODE_POSTINC | CCR_SRC_AMODE_POSTINC;

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, tx_flags);
}

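/*
 * Prepare an interleaved mem-to-mem transfer of xt->numf frames with a
 * single chunk per frame. A non-zero inter-chunk gap is handled with
 * double-indexed addressing (element index 1, frame index taken from the
 * gap); plain increments use post-increment mode, and constant addressing
 * is rejected.
 */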
static struct dma_async_tx_descriptor *omap_dma_prep_dma_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_desc *d;
	struct omap_sg *sg;
	uint8_t data_type;
	size_t src_icg, dst_icg;

	/* Slave mode is not supported */
	if (is_slave_direction(xt->dir))
		return NULL;

	if (xt->frame_size != 1 || xt->numf == 0)
		return NULL;

	d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC);
	if (!d)
		return NULL;

	data_type = __ffs((xt->src_start | xt->dst_start | xt->sgl[0].size));
	if (data_type > CSDP_DATA_TYPE_32)
		data_type = CSDP_DATA_TYPE_32;

	sg = &d->sg[0];
	d->dir = DMA_MEM_TO_MEM;
	d->dev_addr = xt->src_start;
	d->es = data_type;
	sg->en = xt->sgl[0].size / BIT(data_type);
	sg->fn = xt->numf;
	sg->addr = xt->dst_start;
	d->sglen = 1;
	d->ccr = c->ccr;

	src_icg = dmaengine_get_src_icg(xt, &xt->sgl[0]);
	dst_icg = dmaengine_get_dst_icg(xt, &xt->sgl[0]);
	if (src_icg) {
		d->ccr |= CCR_SRC_AMODE_DBLIDX;
		d->ei = 1;
		d->fi = src_icg;
	} else if (xt->src_inc) {
		d->ccr |= CCR_SRC_AMODE_POSTINC;
		d->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: SRC constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	if (dst_icg) {
		d->ccr |= CCR_DST_AMODE_DBLIDX;
		sg->ei = 1;
		sg->fi = dst_icg;
	} else if (xt->dst_inc) {
		d->ccr |= CCR_DST_AMODE_POSTINC;
		sg->fi = 0;
	} else {
		dev_err(chan->device->dev,
			"%s: DST constant addressing is not supported\n",
			__func__);
		kfree(d);
		return NULL;
	}

	d->cicr = CICR_DROP_IE | CICR_FRAME_IE;

	d->csdp = data_type;

	if (dma_omap1()) {
		d->cicr |= CICR_TOUT_IE;
		d->csdp |= CSDP_DST_PORT_EMIFF | CSDP_SRC_PORT_EMIFF;
	} else {
		d->csdp |= CSDP_DST_PACKED | CSDP_SRC_PACKED;
		d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE;
		d->csdp |= CSDP_DST_BURST_64 | CSDP_SRC_BURST_64;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
}

static int omap_dma_slave_config(struct dma_chan *chan, struct dma_slave_config *cfg)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&c->cfg, cfg, sizeof(c->cfg));

	return 0;
}

static int omap_dma_terminate_all(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&c->vc.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after omap_dma_stop() returns (even if it does, it will see
	 * c->desc is NULL and exit.)
	 */
	if (c->desc) {
		omap_dma_desc_free(&c->desc->vd);
		c->desc = NULL;
		/* Avoid stopping the dma twice */
		if (!c->paused)
			omap_dma_stop(c);
	}

	c->cyclic = false;
	c->paused = false;

	vchan_get_all_descriptors(&c->vc, &head);
	spin_unlock_irqrestore(&c->vc.lock, flags);
	vchan_dma_desc_free_list(&c->vc, &head);

	return 0;
}

static void omap_dma_synchronize(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);

	vchan_synchronize(&c->vc);
}

static int omap_dma_pause(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;
	bool can_pause = false;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (!c->desc)
		goto out;

	if (c->cyclic)
		can_pause = true;

	/*
	 * We do not allow DMA_MEM_TO_DEV transfers to be paused.
	 * From the AM572x TRM, 16.1.4.18 Disabling a Channel During Transfer:
	 * "When a channel is disabled during a transfer, the channel undergoes
	 * an abort, unless it is hardware-source-synchronized …".
	 * A source-synchronised channel is one where the fetching of data is
	 * under control of the device. In other words, a device-to-memory
	 * transfer. So, a destination-synchronised channel (which would be a
	 * memory-to-device transfer) undergoes an abort if the CCR_ENABLE
	 * bit is cleared.
	 * From 16.1.4.20.4.6.2 Abort: "If an abort trigger occurs, the channel
	 * aborts immediately after completion of current read/write
	 * transactions and then the FIFO is cleaned up." The term "cleaned up"
	 * is not defined. TI recommends to check that RD_ACTIVE and WR_ACTIVE
	 * are both clear _before_ disabling the channel, otherwise data loss
	 * will occur.
	 * The problem is that if the channel is active, then device activity
	 * can result in DMA activity starting between reading those as both
	 * clear and the write to DMA_CCR to clear the enable bit hitting the
	 * hardware. If the DMA hardware can't drain the data in its FIFO to the
	 * destination, then data loss "might" occur (say if we write to a UART
	 * and the UART is not accepting any further data).
	 */
	else if (c->desc->dir == DMA_DEV_TO_MEM)
		can_pause = true;

	if (can_pause && !c->paused) {
		ret = omap_dma_stop(c);
		if (!ret)
			c->paused = true;
	}
out:
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}

static int omap_dma_resume(struct dma_chan *chan)
{
	struct omap_chan *c = to_omap_dma_chan(chan);
	struct omap_dmadev *od = to_omap_dma_dev(chan->device);
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&od->irq_lock, flags);

	if (c->paused && c->desc) {
		mb();

		/* Restore channel link register */
		omap_dma_chan_write(c, CLNK_CTRL, c->desc->clnk_ctrl);

		omap_dma_start(c, c->desc);
		c->paused = false;
		ret = 0;
	}
	spin_unlock_irqrestore(&od->irq_lock, flags);

	return ret;
}

static int omap_dma_chan_init(struct omap_dmadev *od)
{
	struct omap_chan *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	c->reg_map = od->reg_map;
	c->vc.desc_free = omap_dma_desc_free;
	vchan_init(&c->vc, &od->ddev);

	return 0;
}

static void omap_dma_free(struct omap_dmadev *od)
{
	while (!list_empty(&od->ddev.channels)) {
		struct omap_chan *c = list_first_entry(&od->ddev.channels,
			struct omap_chan, vc.chan.device_node);

		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
		kfree(c);
	}
}

#define OMAP_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

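/*
 * Probe: map the controller, fetch the platform glue via
 * omap_get_plat_info() (deferring until the arch code has registered it),
 * create one virtual channel per DMA request line, and take over the L1
 * interrupt when it is wired up. Linked-list support is detected from
 * CAPS_0 and backed by a dma_pool of type 2 descriptors.
 */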
static int omap_dma_probe(struct platform_device *pdev)
{
	struct omap_dmadev *od;
	struct resource *res;
	int rc, i, irq;
	u32 lch_count;

	od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
	if (!od)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	od->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(od->base))
		return PTR_ERR(od->base);

	od->plat = omap_get_plat_info();
	if (!od->plat)
		return -EPROBE_DEFER;

	od->reg_map = od->plat->reg_map;

	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, od->ddev.cap_mask);
	od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources;
	od->ddev.device_free_chan_resources = omap_dma_free_chan_resources;
	od->ddev.device_tx_status = omap_dma_tx_status;
	od->ddev.device_issue_pending = omap_dma_issue_pending;
	od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg;
	od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic;
	od->ddev.device_prep_dma_memcpy = omap_dma_prep_dma_memcpy;
	od->ddev.device_prep_interleaved_dma = omap_dma_prep_dma_interleaved;
	od->ddev.device_config = omap_dma_slave_config;
	od->ddev.device_pause = omap_dma_pause;
	od->ddev.device_resume = omap_dma_resume;
	od->ddev.device_terminate_all = omap_dma_terminate_all;
	od->ddev.device_synchronize = omap_dma_synchronize;
	od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
	od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	od->ddev.dev = &pdev->dev;
	INIT_LIST_HEAD(&od->ddev.channels);
	spin_lock_init(&od->lock);
	spin_lock_init(&od->irq_lock);

	/* Number of DMA requests */
	od->dma_requests = OMAP_SDMA_REQUESTS;
	if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
						      "dma-requests",
						      &od->dma_requests)) {
		dev_info(&pdev->dev,
			 "Missing dma-requests property, using %u.\n",
			 OMAP_SDMA_REQUESTS);
	}

	/* Number of available logical channels */
	if (!pdev->dev.of_node) {
		lch_count = od->plat->dma_attr->lch_count;
		if (unlikely(!lch_count))
			lch_count = OMAP_SDMA_CHANNELS;
	} else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
					&lch_count)) {
		dev_info(&pdev->dev,
			 "Missing dma-channels property, using %u.\n",
			 OMAP_SDMA_CHANNELS);
		lch_count = OMAP_SDMA_CHANNELS;
	}

	od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
				   GFP_KERNEL);
	if (!od->lch_map)
		return -ENOMEM;

	for (i = 0; i < od->dma_requests; i++) {
		rc = omap_dma_chan_init(od);
		if (rc) {
			omap_dma_free(od);
			return rc;
		}
	}

	irq = platform_get_irq(pdev, 1);
	if (irq <= 0) {
		dev_info(&pdev->dev, "failed to get L1 IRQ: %d\n", irq);
		od->legacy = true;
	} else {
		/* Disable all interrupts */
		od->irq_enable_mask = 0;
		omap_dma_glbl_write(od, IRQENABLE_L1, 0);

		rc = devm_request_irq(&pdev->dev, irq, omap_dma_irq,
				      IRQF_SHARED, "omap-dma-engine", od);
		if (rc)
			return rc;
	}

	if (omap_dma_glbl_read(od, CAPS_0) & CAPS_0_SUPPORT_LL123)
		od->ll123_supported = true;

	od->ddev.filter.map = od->plat->slave_map;
	od->ddev.filter.mapcnt = od->plat->slavecnt;
	od->ddev.filter.fn = omap_dma_filter_fn;

	if (od->ll123_supported) {
		od->desc_pool = dma_pool_create(dev_name(&pdev->dev),
						&pdev->dev,
						sizeof(struct omap_type2_desc),
						4, 0);
		if (!od->desc_pool) {
			dev_err(&pdev->dev,
				"unable to allocate descriptor pool\n");
			od->ll123_supported = false;
		}
	}

	rc = dma_async_device_register(&od->ddev);
	if (rc) {
		pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n",
			rc);
		omap_dma_free(od);
		return rc;
	}

	platform_set_drvdata(pdev, od);

	if (pdev->dev.of_node) {
		omap_dma_info.dma_cap = od->ddev.cap_mask;

		/* Device-tree DMA controller registration */
		rc = of_dma_controller_register(pdev->dev.of_node,
				of_dma_simple_xlate, &omap_dma_info);
		if (rc) {
			pr_warn("OMAP-DMA: failed to register DMA controller\n");
			dma_async_device_unregister(&od->ddev);
			omap_dma_free(od);
		}
	}

	dev_info(&pdev->dev, "OMAP DMA engine driver%s\n",
		 od->ll123_supported ? " (LinkedList1/2/3 supported)" : "");

	return rc;
}

static int omap_dma_remove(struct platform_device *pdev)
{
	struct omap_dmadev *od = platform_get_drvdata(pdev);
	int irq;

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);

	irq = platform_get_irq(pdev, 1);
	devm_free_irq(&pdev->dev, irq, od);

	dma_async_device_unregister(&od->ddev);

	if (!od->legacy) {
		/* Disable all interrupts */
		omap_dma_glbl_write(od, IRQENABLE_L0, 0);
	}

	if (od->ll123_supported)
		dma_pool_destroy(od->desc_pool);

	omap_dma_free(od);

	return 0;
}

static const struct of_device_id omap_dma_match[] = {
	{ .compatible = "ti,omap2420-sdma", },
	{ .compatible = "ti,omap2430-sdma", },
	{ .compatible = "ti,omap3430-sdma", },
	{ .compatible = "ti,omap3630-sdma", },
	{ .compatible = "ti,omap4430-sdma", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_dma_match);

static struct platform_driver omap_dma_driver = {
	.probe	= omap_dma_probe,
	.remove	= omap_dma_remove,
	.driver = {
		.name = "omap-dma-engine",
		.of_match_table = of_match_ptr(omap_dma_match),
	},
};

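/*
 * Legacy filter for dma_request_channel() users: match any channel that
 * belongs to this driver and record the requested sDMA request line.
 */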
bool omap_dma_filter_fn(struct dma_chan *chan, void *param)
{
	if (chan->device->dev->driver == &omap_dma_driver.driver) {
		struct omap_dmadev *od = to_omap_dma_dev(chan->device);
		struct omap_chan *c = to_omap_dma_chan(chan);
		unsigned req = *(unsigned *)param;

		if (req <= od->dma_requests) {
			c->dma_sig = req;
			return true;
		}
	}
	return false;
}
EXPORT_SYMBOL_GPL(omap_dma_filter_fn);

static int omap_dma_init(void)
{
	return platform_driver_register(&omap_dma_driver);
}
subsys_initcall(omap_dma_init);

static void __exit omap_dma_exit(void)
{
	platform_driver_unregister(&omap_dma_driver);
}
module_exit(omap_dma_exit);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");