/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"
#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_INT_EN_CLR		0x14C
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150
#define SIRFSOC_DMA_CH_LOOP_CTRL_CLR	0x15C

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* the xlen and dma_width registers count in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN		4
struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
	bool				is_marco;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The lock is already held by the callers of this function,
	 * so don't take it again here.
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active,
				&schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			/*
			 * For a cyclic channel the descriptor always stays
			 * on the active list.
			 */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);

			if (!sdesc || !sdesc->cyclic) {
				/* no active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
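
/*
 * Illustrative sketch (not part of the original driver): a client reaches
 * the function above through the generic dmaengine API. The controller
 * only accepts 4-byte bus widths, and a src_maxburst of 4 selects the
 * alternate channel mode. "my_chan" is a hypothetical channel obtained
 * via dma_request_channel():
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	dmaengine_slave_config(my_chan, &cfg);
 */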

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	if (!sdma->is_marco) {
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
			~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	} else {
		writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_INT_EN_CLR);
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);
	}

	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_pause_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			& ~((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL_CLR);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_resume_chan(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (!sdma->is_marco)
		writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
			| ((1 << cid) | 1 << (cid + 16)),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
	else
		writel_relaxed((1 << cid) | 1 << (cid + 16),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_PAUSE:
		return sirfsoc_dma_pause_chan(schan);
	case DMA_RESUME:
		return sirfsoc_dma_resume_chan(schan);
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;

	spin_lock_irqsave(&schan->lock, flags);
	ret = dma_cookie_status(chan, cookie, txstate);
	spin_unlock_irqrestore(&schan->lock, flags);

	return ret;
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2, and
	 * ylen (the number of frames - 1) must be at least 0.
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
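
/*
 * Illustrative sketch (all values hypothetical): a 2D transfer that moves
 * 64 payload bytes per frame out of a 128-byte line pitch, for 16 frames,
 * would be described to the function above roughly as:
 *
 *	xt->src_start = buf_phys;		(DMA address of the buffer)
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->numf = 16;				(ylen is programmed as 15)
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = 64;			(xlen: 64 / 4 = 16 words)
 *	xt->sgl[0].icg = 64;			(width: (64 + 64) / 4 = 32 words)
 *
 * All sizes must be multiples of SIRFSOC_DMA_WORD_LEN, since the hardware
 * counts in 4-byte words.
 */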

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction, unsigned long flags, void *context)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with exactly 2 periods.
	 * If the X-length is set to 0, the controller runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
	 * then wraps back to the beginning of that area. In loop mode the
	 * DMA data region is divided into two parts, BUFA and BUFB, and
	 * the controller raises an interrupt twice per loop: once when
	 * the DMA address reaches the end of BUFA and once at the end of
	 * BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
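
/*
 * Illustrative sketch: because of the two-period restriction above, a
 * caller preparing an 8 KB ring split into two 4 KB periods (sizes
 * hypothetical) would do something like:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			8192, 4096, DMA_DEV_TO_MEM, 0, NULL);
 *
 * and then, after setting desc->callback and submitting, get a callback
 * from the tasklet each time the hardware crosses a half-buffer
 * (BUFA/BUFB) boundary.
 */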

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
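
/*
 * Illustrative sketch: a peripheral driver would normally bind to one of
 * the 16 channels with this filter through dma_request_channel(). The
 * channel id here (12) is hypothetical:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id,
 *			(void *)12);
 */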

static int sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_device_is_compatible(dn, "sirf,marco-dmac"))
		sdma->is_marco = true;

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		return -ENODEV;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		return -EINVAL;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto irq_dispose;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = request_irq(sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME, sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto irq_dispose;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		dma_cookie_init(&schan->chan);

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	free_irq(sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
	return ret;
}

static int sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	free_irq(sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{ .compatible = "sirf,marco-dmac", },
	{},
};
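
/*
 * Illustrative device tree node (addresses and numbers hypothetical)
 * that would bind against the match table above; note the "cell-index"
 * property required by the probe routine:
 *
 *	dmac0: dma-controller@b00b0000 {
 *		compatible = "sirf,prima2-dmac";
 *		reg = <0xb00b0000 0x10000>;
 *		interrupts = <12>;
 *		cell-index = <0>;
 *	};
 */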

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= sirfsoc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");