/*
 * DMA controller driver for CSR SiRFprimaII
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <linux/sirfsoc_dma.h>

#include "dmaengine.h"

#define SIRFSOC_DMA_DESCRIPTORS		16
#define SIRFSOC_DMA_CHANNELS		16

#define SIRFSOC_DMA_CH_ADDR		0x00
#define SIRFSOC_DMA_CH_XLEN		0x04
#define SIRFSOC_DMA_CH_YLEN		0x08
#define SIRFSOC_DMA_CH_CTRL		0x0C

#define SIRFSOC_DMA_WIDTH_0		0x100
#define SIRFSOC_DMA_CH_VALID		0x140
#define SIRFSOC_DMA_CH_INT		0x144
#define SIRFSOC_DMA_INT_EN		0x148
#define SIRFSOC_DMA_CH_LOOP_CTRL	0x150

#define SIRFSOC_DMA_MODE_CTRL_BIT	4
#define SIRFSOC_DMA_DIR_CTRL_BIT	5

/* xlen and dma_width registers are in units of 4-byte words */
#define SIRFSOC_DMA_WORD_LEN	4

struct sirfsoc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct list_head		node;

	/* SiRFprimaII 2D-DMA parameters */

	int		xlen;		/* DMA xlen */
	int		ylen;		/* DMA ylen */
	int		width;		/* DMA width */
	int		dir;
	bool		cyclic;		/* is loop DMA? */
	u32		addr;		/* DMA buffer address */
};
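
/*
 * Editor's note (an inference from the prep_interleaved callback below,
 * not from the datasheet): the engine moves xlen words per frame,
 * advances the address by "width" words between frames, and transfers
 * ylen + 1 frames in total.  For example, 8 frames of 64 bytes separated
 * by 16-byte gaps would be described as:
 *
 *	xlen  = 64 / SIRFSOC_DMA_WORD_LEN;		16 words of payload
 *	width = (64 + 16) / SIRFSOC_DMA_WORD_LEN;	20-word frame pitch
 *	ylen  = 8 - 1;					ylen + 1 = 8 frames
 */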

struct sirfsoc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	unsigned long			happened_cyclic;
	unsigned long			completed_cyclic;

	/* Lock for this structure */
	spinlock_t			lock;

	int				mode;
};

struct sirfsoc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct sirfsoc_dma_chan		channels[SIRFSOC_DMA_CHANNELS];
	void __iomem			*base;
	int				irq;
};

#define DRV_NAME	"sirfsoc_dma"

/* Convert struct dma_chan to struct sirfsoc_dma_chan */
static inline
struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct sirfsoc_dma_chan, chan);
}

/* Convert struct dma_chan to struct sirfsoc_dma */
static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
	return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
}

/* Execute all queued DMA descriptors */
static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	struct sirfsoc_dma_desc *sdesc = NULL;

	/*
	 * The lock is already held by the functions calling this one,
	 * so we don't take it again here.
	 */

	sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
		node);
	/* Move the first queued descriptor to the active list */
	list_move_tail(&sdesc->node, &schan->active);

	/* Start the DMA transfer */
	writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
		cid * 4);
	writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
		(sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
		sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
	writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_XLEN);
	writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
		SIRFSOC_DMA_CH_YLEN);
	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
		(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);

	/*
	 * writel has an implicit memory write barrier to make sure data is
	 * flushed into memory before starting DMA
	 */
	writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);

	if (sdesc->cyclic) {
		writel((1 << cid) | 1 << (cid + 16) |
			readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
			sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
		schan->happened_cyclic = schan->completed_cyclic = 0;
	}
}

/* Interrupt handler */
static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
{
	struct sirfsoc_dma *sdma = data;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc = NULL;
	u32 is;
	int ch;

	is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
	while ((ch = fls(is) - 1) >= 0) {
		is &= ~(1 << ch);
		writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
		schan = &sdma->channels[ch];

		spin_lock(&schan->lock);

		sdesc = list_first_entry(&schan->active,
			struct sirfsoc_dma_desc, node);
		if (!sdesc->cyclic) {
			/* Execute queued descriptors */
			list_splice_tail_init(&schan->active, &schan->completed);
			if (!list_empty(&schan->queued))
				sirfsoc_dma_execute(schan);
		} else
			schan->happened_cyclic++;

		spin_unlock(&schan->lock);
	}

	/* Schedule tasklet */
	tasklet_schedule(&sdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
{
	dma_cookie_t last_cookie = 0;
	struct sirfsoc_dma_chan *schan;
	struct sirfsoc_dma_desc *sdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	unsigned long happened_cyclic;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < sdma->dma.chancnt; i++) {
		schan = &sdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&schan->lock, flags);
		if (!list_empty(&schan->completed)) {
			list_splice_tail_init(&schan->completed, &list);
			spin_unlock_irqrestore(&schan->lock, flags);

			/* Execute callbacks and run dependencies */
			list_for_each_entry(sdesc, &list, node) {
				desc = &sdesc->desc;

				if (desc->callback)
					desc->callback(desc->callback_param);

				last_cookie = desc->cookie;
				dma_run_dependencies(desc);
			}

			/* Free descriptors */
			spin_lock_irqsave(&schan->lock, flags);
			list_splice_tail_init(&list, &schan->free);
			schan->chan.completed_cookie = last_cookie;
			spin_unlock_irqrestore(&schan->lock, flags);
		} else {
			if (list_empty(&schan->active)) {
				/* no descriptor at all on this channel */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* for a cyclic channel, desc stays in the active list */
			sdesc = list_first_entry(&schan->active,
				struct sirfsoc_dma_desc, node);
			if (!sdesc->cyclic) {
				/* without active cyclic DMA */
				spin_unlock_irqrestore(&schan->lock, flags);
				continue;
			}

			/* cyclic DMA */
			happened_cyclic = schan->happened_cyclic;
			spin_unlock_irqrestore(&schan->lock, flags);

			desc = &sdesc->desc;
			while (happened_cyclic != schan->completed_cyclic) {
				if (desc->callback)
					desc->callback(desc->callback_param);
				schan->completed_cyclic++;
			}
		}
	}
}

/* DMA Tasklet */
static void sirfsoc_dma_tasklet(unsigned long data)
{
	struct sirfsoc_dma *sdma = (void *)data;

	sirfsoc_dma_process_completed(sdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);

	spin_lock_irqsave(&schan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&sdesc->node, &schan->queued);

	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&schan->lock, flags);

	return cookie;
}

static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
	struct dma_slave_config *config)
{
	unsigned long flags;

	if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
		(config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
		return -EINVAL;

	spin_lock_irqsave(&schan->lock, flags);
	schan->mode = (config->src_maxburst == 4 ? 1 : 0);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}
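
/*
 * Editor's sketch of the client-side configuration this accepts
 * (hypothetical values): only a 4-byte bus width is valid on either
 * side, and src_maxburst == 4 appears to select the alternative
 * hardware mode via SIRFSOC_DMA_MODE_CTRL_BIT.
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 */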

static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
	int cid = schan->chan.chan_id;
	unsigned long flags;

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
		~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
	writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);

	writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
		& ~((1 << cid) | 1 << (cid + 16)),
		sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);

	spin_lock_irqsave(&schan->lock, flags);
	list_splice_tail_init(&schan->active, &schan->free);
	list_splice_tail_init(&schan->queued, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return 0;
}

static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
	unsigned long arg)
{
	struct dma_slave_config *config;
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return sirfsoc_dma_terminate_all(schan);
	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return sirfsoc_dma_slave_config(schan, config);

	default:
		break;
	}

	return -ENOSYS;
}

/* Alloc channel resources */
static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc descriptors for this channel */
	for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
		sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
		if (!sdesc) {
			dev_notice(sdma->dma.dev, "Memory allocation error. "
				"Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&sdesc->desc, chan);
		sdesc->desc.flags = DMA_CTRL_ACK;
		sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;

		list_add_tail(&sdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0)
		return -ENOMEM;

	spin_lock_irqsave(&schan->lock, flags);

	list_splice_tail_init(&descs, &schan->free);
	spin_unlock_irqrestore(&schan->lock, flags);

	return i;
}

/* Free channel resources */
static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc, *tmp;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&schan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&schan->prepared));
	BUG_ON(!list_empty(&schan->queued));
	BUG_ON(!list_empty(&schan->active));
	BUG_ON(!list_empty(&schan->completed));

	/* Move data */
	list_splice_tail_init(&schan->free, &descs);

	spin_unlock_irqrestore(&schan->lock, flags);

	/* Free descriptors */
	list_for_each_entry_safe(sdesc, tmp, &descs, node)
		kfree(sdesc);
}

/* Send pending descriptor to hardware */
static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&schan->lock, flags);

	if (list_empty(&schan->active) && !list_empty(&schan->queued))
		sirfsoc_dma_execute(schan);

	spin_unlock_irqrestore(&schan->lock, flags);
}

/* Check request completion status */
static enum dma_status
sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	struct dma_tx_state *txstate)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	unsigned long flags;
	dma_cookie_t last_used;
	dma_cookie_t last_complete;

	spin_lock_irqsave(&schan->lock, flags);
	last_used = schan->chan.cookie;
	last_complete = schan->chan.completed_cookie;
	spin_unlock_irqrestore(&schan->lock, flags);

	dma_set_tx_state(txstate, last_complete, last_used, 0);
	return dma_async_is_complete(cookie, last_complete, last_used);
}

static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
	struct dma_chan *chan, struct dma_interleaved_template *xt,
	unsigned long flags)
{
	struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;
	int ret;

	if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
		ret = -EINVAL;
		goto err_dir;
	}

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc) {
		/* try to free completed descriptors */
		sirfsoc_dma_process_completed(sdma);
		ret = 0;
		goto no_desc;
	}

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);

	/*
	 * The number of chunks in a frame can only be 1 for prima2, and
	 * ylen (the number of frames minus 1) must be at least 0
	 */
	if ((xt->frame_size == 1) && (xt->numf > 0)) {
		sdesc->cyclic = 0;
		sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
		sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
				SIRFSOC_DMA_WORD_LEN;
		sdesc->ylen = xt->numf - 1;
		if (xt->dir == DMA_MEM_TO_DEV) {
			sdesc->addr = xt->src_start;
			sdesc->dir = 1;
		} else {
			sdesc->addr = xt->dst_start;
			sdesc->dir = 0;
		}

		list_add_tail(&sdesc->node, &schan->prepared);
	} else {
		pr_err("sirfsoc DMA Invalid xfer\n");
		ret = -EINVAL;
		goto err_xfer;
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
err_xfer:
	spin_unlock_irqrestore(&schan->lock, iflags);
no_desc:
err_dir:
	return ERR_PTR(ret);
}
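
/*
 * Editor's sketch of how a client might fill the interleaved template
 * for this controller (hypothetical buffer and sizes; assumes xt points
 * at storage with room for one struct data_chunk): 8 frames of 64 bytes
 * separated by 16-byte gaps, memory to device.
 *
 *	xt->src_start	 = buf_dma_addr;
 *	xt->dir		 = DMA_MEM_TO_DEV;
 *	xt->numf	 = 8;		number of frames
 *	xt->frame_size	 = 1;		prima2: one chunk per frame
 *	xt->sgl[0].size	 = 64;		payload bytes per frame
 *	xt->sgl[0].icg	 = 16;		gap to the next frame, in bytes
 */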

static struct dma_async_tx_descriptor *
sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
	size_t buf_len, size_t period_len,
	enum dma_transfer_direction direction)
{
	struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
	struct sirfsoc_dma_desc *sdesc = NULL;
	unsigned long iflags;

	/*
	 * We only support cyclic transfers with two periods.
	 * If the X-length is set to 0, the controller runs in loop mode:
	 * the DMA address keeps increasing until it reaches the end of a
	 * loop area whose size is defined by DMA_WIDTH x (Y_LENGTH + 1),
	 * then wraps back to the beginning of that area.
	 * In loop mode the DMA data region is divided into two parts, BUFA
	 * and BUFB, and the controller generates an interrupt twice per
	 * loop: once when the DMA address reaches the end of BUFA and once
	 * at the end of BUFB.
	 */
	if (buf_len != 2 * period_len)
		return ERR_PTR(-EINVAL);

	/* Get free descriptor */
	spin_lock_irqsave(&schan->lock, iflags);
	if (!list_empty(&schan->free)) {
		sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
			node);
		list_del(&sdesc->node);
	}
	spin_unlock_irqrestore(&schan->lock, iflags);

	if (!sdesc)
		return NULL;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&schan->lock, iflags);
	sdesc->addr = addr;
	sdesc->cyclic = 1;
	sdesc->xlen = 0;
	sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
	sdesc->width = 1;
	list_add_tail(&sdesc->node, &schan->prepared);
	spin_unlock_irqrestore(&schan->lock, iflags);

	return &sdesc->desc;
}
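
/*
 * Editor's sketch of a client-side cyclic setup (hypothetical sizes):
 * because only two periods are supported, period_len must be exactly
 * half of buf_len, e.g. an 8 KiB ring split into two 4 KiB halves:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, dma_buf,
 *			8192, 4096, DMA_MEM_TO_DEV);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */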

/*
 * The DMA controller consists of 16 independent DMA channels.
 * Each channel is allocated to a different function.
 */
bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
{
	unsigned int ch_nr = (unsigned int) chan_id;

	if (ch_nr == chan->chan_id +
		chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
		return true;

	return false;
}
EXPORT_SYMBOL(sirfsoc_dma_filter_id);
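
/*
 * Editor's sketch of how a client might claim a specific channel with
 * this filter (hypothetical: channel 12 on controller 0, so the encoded
 * id is dev_id * SIRFSOC_DMA_CHANNELS + chan_id = 12):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
 */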

static int __devinit sirfsoc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct sirfsoc_dma *sdma;
	struct sirfsoc_dma_chan *schan;
	struct resource res;
	ulong regs_start, regs_size;
	u32 id;
	int ret, i;

	sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
	if (!sdma) {
		dev_err(dev, "Memory exhausted!\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(dn, "cell-index", &id)) {
		dev_err(dev, "Fail to get DMAC index\n");
		ret = -ENODEV;
		goto free_mem;
	}

	sdma->irq = irq_of_parse_and_map(dn, 0);
	if (sdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		ret = -EINVAL;
		goto free_mem;
	}

	ret = of_address_to_resource(dn, 0, &res);
	if (ret) {
		dev_err(dev, "Error parsing memory region!\n");
		goto free_mem;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	sdma->base = devm_ioremap(dev, regs_start, regs_size);
	if (!sdma->base) {
		dev_err(dev, "Error mapping memory region!\n");
		ret = -ENOMEM;
		goto irq_dispose;
	}

	ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
		sdma);
	if (ret) {
		dev_err(dev, "Error requesting IRQ!\n");
		ret = -EINVAL;
		goto unmap_mem;
	}

	dma = &sdma->dma;
	dma->dev = dev;
	dma->chancnt = SIRFSOC_DMA_CHANNELS;

	dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
	dma->device_issue_pending = sirfsoc_dma_issue_pending;
	dma->device_control = sirfsoc_dma_control;
	dma->device_tx_status = sirfsoc_dma_tx_status;
	dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
	dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);

	for (i = 0; i < dma->chancnt; i++) {
		schan = &sdma->channels[i];

		schan->chan.device = dma;
		schan->chan.cookie = 1;
		schan->chan.completed_cookie = schan->chan.cookie;

		INIT_LIST_HEAD(&schan->free);
		INIT_LIST_HEAD(&schan->prepared);
		INIT_LIST_HEAD(&schan->queued);
		INIT_LIST_HEAD(&schan->active);
		INIT_LIST_HEAD(&schan->completed);

		spin_lock_init(&schan->lock);
		list_add_tail(&schan->chan.device_node, &dma->channels);
	}

	tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);

	/* Register DMA engine */
	dev_set_drvdata(dev, sdma);
	ret = dma_async_device_register(dma);
	if (ret)
		goto free_irq;

	dev_info(dev, "initialized SIRFSOC DMAC driver\n");

	return 0;

free_irq:
	devm_free_irq(dev, sdma->irq, sdma);
irq_dispose:
	irq_dispose_mapping(sdma->irq);
unmap_mem:
	iounmap(sdma->base);
free_mem:
	devm_kfree(dev, sdma);
	return ret;
}

static int __devexit sirfsoc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct sirfsoc_dma *sdma = dev_get_drvdata(dev);

	dma_async_device_unregister(&sdma->dma);
	devm_free_irq(dev, sdma->irq, sdma);
	irq_dispose_mapping(sdma->irq);
	iounmap(sdma->base);
	devm_kfree(dev, sdma);
	return 0;
}

static struct of_device_id sirfsoc_dma_match[] = {
	{ .compatible = "sirf,prima2-dmac", },
	{},
};
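
/*
 * Editor's note: a matching node in the board device tree might look
 * roughly like this (register address, size and interrupt number are
 * illustrative, not taken from a real prima2 dtsi):
 *
 *	dmac0: dma-controller@b00b0000 {
 *		compatible = "sirf,prima2-dmac";
 *		reg = <0xb00b0000 0x10000>;
 *		interrupts = <12>;
 *		cell-index = <0>;
 *	};
 */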

static struct platform_driver sirfsoc_dma_driver = {
	.probe		= sirfsoc_dma_probe,
	.remove		= __devexit_p(sirfsoc_dma_remove),
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table	= sirfsoc_dma_match,
	},
};

module_platform_driver(sirfsoc_dma_driver);

MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
	"Barry Song <baohua.song@csr.com>");
MODULE_DESCRIPTION("SIRFSOC DMA control driver");
MODULE_LICENSE("GPL v2");