/*
 * Freescale MPC85xx, MPC83xx DMA Engine support
 *
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * Description:
 *   DMA engine driver for the Freescale MPC8540 DMA controller, which also
 *   fits the MPC8560, MPC8555, MPC8548, MPC8641, and so on.
 *   Support for the MPC8349 DMA controller is also included.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/of_platform.h>

#include "fsldma.h"

static void dma_init(struct fsl_dma_chan *fsl_chan)
{
	/* Reset the channel */
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, 0, 32);

	switch (fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		/* Set the channel to below modes:
		 * EIE - Error interrupt enable
		 * EOSIE - End of segments interrupt enable (basic mode)
		 * EOLNIE - End of links interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EIE
				| FSL_DMA_MR_EOLNIE | FSL_DMA_MR_EOSIE, 32);
		break;
	case FSL_DMA_IP_83XX:
		/* Set the channel to below modes:
		 * EOTIE - End-of-transfer interrupt enable
		 */
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, FSL_DMA_MR_EOTIE,
				32);
		break;
	}
}

static void set_sr(struct fsl_dma_chan *fsl_chan, u32 val)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->sr, val, 32);
}

static u32 get_sr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->sr, 32);
}

static void set_desc_cnt(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, u32 count)
{
	hw->count = CPU_TO_DMA(fsl_chan, count, 32);
}

static void set_desc_src(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
	hw->src_addr = CPU_TO_DMA(fsl_chan, snoop_bits | src, 64);
}

static void set_desc_dest(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t dest)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
	hw->dst_addr = CPU_TO_DMA(fsl_chan, snoop_bits | dest, 64);
}

static void set_desc_next(struct fsl_dma_chan *fsl_chan,
				struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
	u64 snoop_bits;

	snoop_bits = ((fsl_chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
		? FSL_DMA_SNEN : 0;
	hw->next_ln_addr = CPU_TO_DMA(fsl_chan, snoop_bits | next, 64);
}

static void set_cdar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->cdar, addr | FSL_DMA_SNEN, 64);
}

static dma_addr_t get_cdar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->cdar, 64) & ~FSL_DMA_SNEN;
}

static void set_ndar(struct fsl_dma_chan *fsl_chan, dma_addr_t addr)
{
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->ndar, addr, 64);
}

static dma_addr_t get_ndar(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->ndar, 64);
}

static u32 get_bcr(struct fsl_dma_chan *fsl_chan)
{
	return DMA_IN(fsl_chan, &fsl_chan->reg_base->bcr, 32);
}

static int dma_is_idle(struct fsl_dma_chan *fsl_chan)
{
	u32 sr = get_sr(fsl_chan);
	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
}

static void dma_start(struct fsl_dma_chan *fsl_chan)
{
	u32 mr_set = 0;

	if (fsl_chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->bcr, 0, 32);
		mr_set |= FSL_DMA_MR_EMP_EN;
	} else
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
				& ~FSL_DMA_MR_EMP_EN, 32);

	if (fsl_chan->feature & FSL_DMA_CHAN_START_EXT)
		mr_set |= FSL_DMA_MR_EMS_EN;
	else
		mr_set |= FSL_DMA_MR_CS;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32)
			| mr_set, 32);
}

static void dma_halt(struct fsl_dma_chan *fsl_chan)
{
	int i = 0;

	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | FSL_DMA_MR_CA,
		32);
	DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
		DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) & ~(FSL_DMA_MR_CS
		| FSL_DMA_MR_EMS_EN | FSL_DMA_MR_CA), 32);

	while (!dma_is_idle(fsl_chan) && (i++ < 100))
		udelay(10);
	if (i >= 100 && !dma_is_idle(fsl_chan))
		dev_err(fsl_chan->dev, "DMA halt timeout!\n");
}

static void set_ld_eol(struct fsl_dma_chan *fsl_chan,
			struct fsl_desc_sw *desc)
{
	desc->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
		DMA_TO_CPU(fsl_chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL,
		64);
}

static void append_ld_queue(struct fsl_dma_chan *fsl_chan,
					struct fsl_desc_sw *new_desc)
{
	struct fsl_desc_sw *queue_tail = to_fsl_desc(fsl_chan->ld_queue.prev);

	if (list_empty(&fsl_chan->ld_queue))
		return;

	/* Link the new descriptor's physical address into the last link
	 * descriptor of the queue (the previous node's next link descriptor)
	 * and enable the End-of-segment interrupt on it.
	 *
	 * For FSL_DMA_IP_83XX, the snoop enable bit needs to be set as well.
	 */
	queue_tail->hw.next_ln_addr = CPU_TO_DMA(fsl_chan,
			new_desc->async_tx.phys | FSL_DMA_EOSIE |
			(((fsl_chan->feature & FSL_DMA_IP_MASK)
				== FSL_DMA_IP_83XX) ? FSL_DMA_SNEN : 0), 64);
}

/**
 * fsl_chan_set_src_loop_size - Set source address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the source address hold (loop) transfer size. While the DMA transfers
 * data from the source address (SA), it keeps looping over a window of this
 * size: with a loop size of 4, the DMA reads data from SA, SA + 1, SA + 2,
 * SA + 3, then wraps back to SA, SA + 1 ... and so on.
 */
static void fsl_chan_set_src_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_SAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_SAHE | (__ilog2(size) << 14),
			32);
		break;
	}
}

/**
 * fsl_chan_set_dest_loop_size - Set destination address hold transfer size
 * @fsl_chan : Freescale DMA channel
 * @size     : Address loop size, 0 to disable the loop
 *
 * Set the destination address hold (loop) transfer size. While the DMA
 * transfers data to the destination address (TA), it keeps looping over a
 * window of this size: with a loop size of 4, the DMA writes data to TA,
 * TA + 1, TA + 2, TA + 3, then wraps back to TA, TA + 1 ... and so on.
 */
static void fsl_chan_set_dest_loop_size(struct fsl_dma_chan *fsl_chan, int size)
{
	switch (size) {
	case 0:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) &
			(~FSL_DMA_MR_DAHE), 32);
		break;
	case 1:
	case 2:
	case 4:
	case 8:
		DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr,
			DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) |
			FSL_DMA_MR_DAHE | (__ilog2(size) << 16),
			32);
		break;
	}
}
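
/*
 * Illustrative sketch, not part of the driver: code that knows its dma_chan
 * belongs to this driver (and has access to the internal fsldma.h types)
 * could program a 4-byte address hold loop on both ends of a transfer via
 * the hooks installed in fsl_dma_chan_probe().  The "chan" pointer is an
 * assumption of the example.
 *
 *	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 *
 *	if (fsl_chan->set_src_loop_size)
 *		fsl_chan->set_src_loop_size(fsl_chan, 4);
 *	if (fsl_chan->set_dest_loop_size)
 *		fsl_chan->set_dest_loop_size(fsl_chan, 4);
 */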
265 | ||
266 | /** | |
267 | * fsl_chan_toggle_ext_pause - Toggle channel external pause status | |
268 | * @fsl_chan : Freescale DMA channel | |
269 | * @size : Pause control size, 0 for disable external pause control. | |
270 | * The maximum is 1024. | |
271 | * | |
272 | * The Freescale DMA channel can be controlled by the external | |
273 | * signal DREQ#. The pause control size is how many bytes are allowed | |
274 | * to transfer before pausing the channel, after which a new assertion | |
275 | * of DREQ# resumes channel operation. | |
276 | */ | |
277 | static void fsl_chan_toggle_ext_pause(struct fsl_dma_chan *fsl_chan, int size) | |
278 | { | |
279 | if (size > 1024) | |
280 | return; | |
281 | ||
282 | if (size) { | |
283 | DMA_OUT(fsl_chan, &fsl_chan->reg_base->mr, | |
284 | DMA_IN(fsl_chan, &fsl_chan->reg_base->mr, 32) | |
285 | | ((__ilog2(size) << 24) & 0x0f000000), | |
286 | 32); | |
287 | fsl_chan->feature |= FSL_DMA_CHAN_PAUSE_EXT; | |
288 | } else | |
289 | fsl_chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT; | |
290 | } | |
291 | ||
292 | /** | |
293 | * fsl_chan_toggle_ext_start - Toggle channel external start status | |
294 | * @fsl_chan : Freescale DMA channel | |
295 | * @enable : 0 is disabled, 1 is enabled. | |
296 | * | |
297 | * If enable the external start, the channel can be started by an | |
298 | * external DMA start pin. So the dma_start() does not start the | |
299 | * transfer immediately. The DMA channel will wait for the | |
300 | * control pin asserted. | |
301 | */ | |
302 | static void fsl_chan_toggle_ext_start(struct fsl_dma_chan *fsl_chan, int enable) | |
303 | { | |
304 | if (enable) | |
305 | fsl_chan->feature |= FSL_DMA_CHAN_START_EXT; | |
306 | else | |
307 | fsl_chan->feature &= ~FSL_DMA_CHAN_START_EXT; | |
308 | } | |
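
/*
 * Illustrative sketch, assumptions only: arming an 85xx-family channel so
 * that the transfer is started and paced by the external DREQ# pin instead
 * of software.  "chan" is assumed to be a dma_chan served by this driver,
 * and the 64-byte pause window is an arbitrary example value.
 *
 *	struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan);
 *
 *	if (fsl_chan->toggle_ext_start)
 *		fsl_chan->toggle_ext_start(fsl_chan, 1);
 *	if (fsl_chan->toggle_ext_pause)
 *		fsl_chan->toggle_ext_pause(fsl_chan, 64);
 */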
309 | ||
310 | static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx) | |
311 | { | |
312 | struct fsl_desc_sw *desc = tx_to_fsl_desc(tx); | |
313 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(tx->chan); | |
314 | unsigned long flags; | |
315 | dma_cookie_t cookie; | |
316 | ||
317 | /* cookie increment and adding to ld_queue must be atomic */ | |
318 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
319 | ||
320 | cookie = fsl_chan->common.cookie; | |
321 | cookie++; | |
322 | if (cookie < 0) | |
323 | cookie = 1; | |
324 | desc->async_tx.cookie = cookie; | |
325 | fsl_chan->common.cookie = desc->async_tx.cookie; | |
326 | ||
327 | append_ld_queue(fsl_chan, desc); | |
328 | list_splice_init(&desc->async_tx.tx_list, fsl_chan->ld_queue.prev); | |
329 | ||
330 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
331 | ||
332 | return cookie; | |
333 | } | |
334 | ||
335 | /** | |
336 | * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool. | |
337 | * @fsl_chan : Freescale DMA channel | |
338 | * | |
339 | * Return - The descriptor allocated. NULL for failed. | |
340 | */ | |
341 | static struct fsl_desc_sw *fsl_dma_alloc_descriptor( | |
342 | struct fsl_dma_chan *fsl_chan) | |
343 | { | |
344 | dma_addr_t pdesc; | |
345 | struct fsl_desc_sw *desc_sw; | |
346 | ||
347 | desc_sw = dma_pool_alloc(fsl_chan->desc_pool, GFP_ATOMIC, &pdesc); | |
348 | if (desc_sw) { | |
349 | memset(desc_sw, 0, sizeof(struct fsl_desc_sw)); | |
350 | dma_async_tx_descriptor_init(&desc_sw->async_tx, | |
351 | &fsl_chan->common); | |
352 | desc_sw->async_tx.tx_submit = fsl_dma_tx_submit; | |
353 | INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); | |
354 | desc_sw->async_tx.phys = pdesc; | |
355 | } | |
356 | ||
357 | return desc_sw; | |
358 | } | |
359 | ||
360 | ||
361 | /** | |
362 | * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel. | |
363 | * @fsl_chan : Freescale DMA channel | |
364 | * | |
365 | * This function will create a dma pool for descriptor allocation. | |
366 | * | |
367 | * Return - The number of descriptors allocated. | |
368 | */ | |
aa1e6f1a | 369 | static int fsl_dma_alloc_chan_resources(struct dma_chan *chan) |
173acc7c ZW |
370 | { |
371 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | |
77cd62e8 TT |
372 | |
373 | /* Has this channel already been allocated? */ | |
374 | if (fsl_chan->desc_pool) | |
375 | return 1; | |
173acc7c ZW |
376 | |
377 | /* We need the descriptor to be aligned to 32bytes | |
378 | * for meeting FSL DMA specification requirement. | |
379 | */ | |
380 | fsl_chan->desc_pool = dma_pool_create("fsl_dma_engine_desc_pool", | |
381 | fsl_chan->dev, sizeof(struct fsl_desc_sw), | |
382 | 32, 0); | |
383 | if (!fsl_chan->desc_pool) { | |
384 | dev_err(fsl_chan->dev, "No memory for channel %d " | |
385 | "descriptor dma pool.\n", fsl_chan->id); | |
386 | return 0; | |
387 | } | |
388 | ||
389 | return 1; | |
390 | } | |
391 | ||
392 | /** | |
393 | * fsl_dma_free_chan_resources - Free all resources of the channel. | |
394 | * @fsl_chan : Freescale DMA channel | |
395 | */ | |
396 | static void fsl_dma_free_chan_resources(struct dma_chan *chan) | |
397 | { | |
398 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | |
399 | struct fsl_desc_sw *desc, *_desc; | |
400 | unsigned long flags; | |
401 | ||
402 | dev_dbg(fsl_chan->dev, "Free all channel resources.\n"); | |
403 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
404 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | |
405 | #ifdef FSL_DMA_LD_DEBUG | |
406 | dev_dbg(fsl_chan->dev, | |
407 | "LD %p will be released.\n", desc); | |
408 | #endif | |
409 | list_del(&desc->node); | |
410 | /* free link descriptor */ | |
411 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | |
412 | } | |
413 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
414 | dma_pool_destroy(fsl_chan->desc_pool); | |
77cd62e8 TT |
415 | |
416 | fsl_chan->desc_pool = NULL; | |
173acc7c ZW |
417 | } |
418 | ||
2187c269 | 419 | static struct dma_async_tx_descriptor * |
636bdeaa | 420 | fsl_dma_prep_interrupt(struct dma_chan *chan, unsigned long flags) |
2187c269 ZW |
421 | { |
422 | struct fsl_dma_chan *fsl_chan; | |
423 | struct fsl_desc_sw *new; | |
424 | ||
425 | if (!chan) | |
426 | return NULL; | |
427 | ||
428 | fsl_chan = to_fsl_chan(chan); | |
429 | ||
430 | new = fsl_dma_alloc_descriptor(fsl_chan); | |
431 | if (!new) { | |
432 | dev_err(fsl_chan->dev, "No free memory for link descriptor\n"); | |
433 | return NULL; | |
434 | } | |
435 | ||
436 | new->async_tx.cookie = -EBUSY; | |
636bdeaa | 437 | new->async_tx.flags = flags; |
2187c269 | 438 | |
f79abb62 ZW |
439 | /* Insert the link descriptor to the LD ring */ |
440 | list_add_tail(&new->node, &new->async_tx.tx_list); | |
441 | ||
2187c269 ZW |
442 | /* Set End-of-link to the last link descriptor of new list*/ |
443 | set_ld_eol(fsl_chan, new); | |
444 | ||
445 | return &new->async_tx; | |
446 | } | |
447 | ||

static struct dma_async_tx_descriptor *fsl_dma_prep_memcpy(
	struct dma_chan *chan, dma_addr_t dma_dest, dma_addr_t dma_src,
	size_t len, unsigned long flags)
{
	struct fsl_dma_chan *fsl_chan;
	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy;
	LIST_HEAD(link_chain);

	if (!chan)
		return NULL;

	if (!len)
		return NULL;

	fsl_chan = to_fsl_chan(chan);

	do {
		/* Allocate the link descriptor from DMA pool */
		new = fsl_dma_alloc_descriptor(fsl_chan);
		if (!new) {
			dev_err(fsl_chan->dev,
					"No free memory for link descriptor\n");
			return NULL;
		}
#ifdef FSL_DMA_LD_DEBUG
		dev_dbg(fsl_chan->dev, "new link desc alloc %p\n", new);
#endif

		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);

		set_desc_cnt(fsl_chan, &new->hw, copy);
		set_desc_src(fsl_chan, &new->hw, dma_src);
		set_desc_dest(fsl_chan, &new->hw, dma_dest);

		if (!first)
			first = new;
		else
			set_desc_next(fsl_chan, &prev->hw, new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;
		dma_src += copy;
		dma_dest += copy;

		/* Insert the link descriptor to the LD ring */
		list_add_tail(&new->node, &first->async_tx.tx_list);
	} while (len);

	new->async_tx.flags = flags; /* client is in control of this ack */
	new->async_tx.cookie = -EBUSY;

	/* Set End-of-link to the last link descriptor of new list */
	set_ld_eol(fsl_chan, new);

	return first ? &first->async_tx : NULL;
}


/**
 * fsl_dma_update_completed_cookie - Update the completed cookie.
 * @fsl_chan : Freescale DMA channel
 */
static void fsl_dma_update_completed_cookie(struct fsl_dma_chan *fsl_chan)
{
	struct fsl_desc_sw *cur_desc, *desc;
	dma_addr_t ld_phy;

	ld_phy = get_cdar(fsl_chan) & FSL_DMA_NLDA_MASK;

	if (ld_phy) {
		cur_desc = NULL;
		list_for_each_entry(desc, &fsl_chan->ld_queue, node)
			if (desc->async_tx.phys == ld_phy) {
				cur_desc = desc;
				break;
			}

		if (cur_desc && cur_desc->async_tx.cookie) {
			if (dma_is_idle(fsl_chan))
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie;
			else
				fsl_chan->completed_cookie =
					cur_desc->async_tx.cookie - 1;
		}
	}
}
539 | ||
540 | /** | |
541 | * fsl_chan_ld_cleanup - Clean up link descriptors | |
542 | * @fsl_chan : Freescale DMA channel | |
543 | * | |
544 | * This function clean up the ld_queue of DMA channel. | |
545 | * If 'in_intr' is set, the function will move the link descriptor to | |
546 | * the recycle list. Otherwise, free it directly. | |
547 | */ | |
548 | static void fsl_chan_ld_cleanup(struct fsl_dma_chan *fsl_chan) | |
549 | { | |
550 | struct fsl_desc_sw *desc, *_desc; | |
551 | unsigned long flags; | |
552 | ||
553 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
554 | ||
173acc7c ZW |
555 | dev_dbg(fsl_chan->dev, "chan completed_cookie = %d\n", |
556 | fsl_chan->completed_cookie); | |
557 | list_for_each_entry_safe(desc, _desc, &fsl_chan->ld_queue, node) { | |
558 | dma_async_tx_callback callback; | |
559 | void *callback_param; | |
560 | ||
561 | if (dma_async_is_complete(desc->async_tx.cookie, | |
562 | fsl_chan->completed_cookie, fsl_chan->common.cookie) | |
563 | == DMA_IN_PROGRESS) | |
564 | break; | |
565 | ||
566 | callback = desc->async_tx.callback; | |
567 | callback_param = desc->async_tx.callback_param; | |
568 | ||
569 | /* Remove from ld_queue list */ | |
570 | list_del(&desc->node); | |
571 | ||
572 | dev_dbg(fsl_chan->dev, "link descriptor %p will be recycle.\n", | |
573 | desc); | |
574 | dma_pool_free(fsl_chan->desc_pool, desc, desc->async_tx.phys); | |
575 | ||
576 | /* Run the link descriptor callback function */ | |
577 | if (callback) { | |
578 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
579 | dev_dbg(fsl_chan->dev, "link descriptor %p callback\n", | |
580 | desc); | |
581 | callback(callback_param); | |
582 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
583 | } | |
584 | } | |
585 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
586 | } | |
587 | ||
588 | /** | |
589 | * fsl_chan_xfer_ld_queue - Transfer link descriptors in channel ld_queue. | |
590 | * @fsl_chan : Freescale DMA channel | |
591 | */ | |
592 | static void fsl_chan_xfer_ld_queue(struct fsl_dma_chan *fsl_chan) | |
593 | { | |
594 | struct list_head *ld_node; | |
595 | dma_addr_t next_dest_addr; | |
596 | unsigned long flags; | |
597 | ||
598 | if (!dma_is_idle(fsl_chan)) | |
599 | return; | |
600 | ||
601 | dma_halt(fsl_chan); | |
602 | ||
603 | /* If there are some link descriptors | |
604 | * not transfered in queue. We need to start it. | |
605 | */ | |
606 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
607 | ||
608 | /* Find the first un-transfer desciptor */ | |
609 | for (ld_node = fsl_chan->ld_queue.next; | |
610 | (ld_node != &fsl_chan->ld_queue) | |
611 | && (dma_async_is_complete( | |
612 | to_fsl_desc(ld_node)->async_tx.cookie, | |
613 | fsl_chan->completed_cookie, | |
614 | fsl_chan->common.cookie) == DMA_SUCCESS); | |
615 | ld_node = ld_node->next); | |
616 | ||
617 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
618 | ||
619 | if (ld_node != &fsl_chan->ld_queue) { | |
620 | /* Get the ld start address from ld_queue */ | |
621 | next_dest_addr = to_fsl_desc(ld_node)->async_tx.phys; | |
56822843 ZW |
622 | dev_dbg(fsl_chan->dev, "xfer LDs staring from %p\n", |
623 | (void *)next_dest_addr); | |
173acc7c ZW |
624 | set_cdar(fsl_chan, next_dest_addr); |
625 | dma_start(fsl_chan); | |
626 | } else { | |
627 | set_cdar(fsl_chan, 0); | |
628 | set_ndar(fsl_chan, 0); | |
629 | } | |
630 | } | |
631 | ||
632 | /** | |
633 | * fsl_dma_memcpy_issue_pending - Issue the DMA start command | |
634 | * @fsl_chan : Freescale DMA channel | |
635 | */ | |
636 | static void fsl_dma_memcpy_issue_pending(struct dma_chan *chan) | |
637 | { | |
638 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | |
639 | ||
640 | #ifdef FSL_DMA_LD_DEBUG | |
641 | struct fsl_desc_sw *ld; | |
642 | unsigned long flags; | |
643 | ||
644 | spin_lock_irqsave(&fsl_chan->desc_lock, flags); | |
645 | if (list_empty(&fsl_chan->ld_queue)) { | |
646 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
647 | return; | |
648 | } | |
649 | ||
650 | dev_dbg(fsl_chan->dev, "--memcpy issue--\n"); | |
651 | list_for_each_entry(ld, &fsl_chan->ld_queue, node) { | |
652 | int i; | |
653 | dev_dbg(fsl_chan->dev, "Ch %d, LD %08x\n", | |
654 | fsl_chan->id, ld->async_tx.phys); | |
655 | for (i = 0; i < 8; i++) | |
656 | dev_dbg(fsl_chan->dev, "LD offset %d: %08x\n", | |
657 | i, *(((u32 *)&ld->hw) + i)); | |
658 | } | |
659 | dev_dbg(fsl_chan->dev, "----------------\n"); | |
660 | spin_unlock_irqrestore(&fsl_chan->desc_lock, flags); | |
661 | #endif | |
662 | ||
663 | fsl_chan_xfer_ld_queue(fsl_chan); | |
664 | } | |
665 | ||
173acc7c ZW |
666 | /** |
667 | * fsl_dma_is_complete - Determine the DMA status | |
668 | * @fsl_chan : Freescale DMA channel | |
669 | */ | |
670 | static enum dma_status fsl_dma_is_complete(struct dma_chan *chan, | |
671 | dma_cookie_t cookie, | |
672 | dma_cookie_t *done, | |
673 | dma_cookie_t *used) | |
674 | { | |
675 | struct fsl_dma_chan *fsl_chan = to_fsl_chan(chan); | |
676 | dma_cookie_t last_used; | |
677 | dma_cookie_t last_complete; | |
678 | ||
679 | fsl_chan_ld_cleanup(fsl_chan); | |
680 | ||
681 | last_used = chan->cookie; | |
682 | last_complete = fsl_chan->completed_cookie; | |
683 | ||
684 | if (done) | |
685 | *done = last_complete; | |
686 | ||
687 | if (used) | |
688 | *used = last_used; | |
689 | ||
690 | return dma_async_is_complete(cookie, last_complete, last_used); | |
691 | } | |
692 | ||

static irqreturn_t fsl_dma_chan_do_interrupt(int irq, void *data)
{
	struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data;
	u32 stat;
	int update_cookie = 0;
	int xfer_ld_q = 0;

	stat = get_sr(fsl_chan);
	dev_dbg(fsl_chan->dev, "event: channel %d, stat = 0x%x\n",
						fsl_chan->id, stat);
	set_sr(fsl_chan, stat);		/* Clear the event register */

	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
	if (!stat)
		return IRQ_NONE;

	if (stat & FSL_DMA_SR_TE)
		dev_err(fsl_chan->dev, "Transfer Error!\n");

	/* Programming Error
	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
	 * trigger a PE interrupt.
	 */
	if (stat & FSL_DMA_SR_PE) {
		dev_dbg(fsl_chan->dev, "event: Programming Error INT\n");
		if (get_bcr(fsl_chan) == 0) {
			/* BCR register is 0, this is a DMA_INTERRUPT async_tx.
			 * Now, update the completed cookie, and continue the
			 * next uncompleted transfer.
			 */
			update_cookie = 1;
			xfer_ld_q = 1;
		}
		stat &= ~FSL_DMA_SR_PE;
	}

	/* If the link descriptor segment transfer finishes,
	 * we will recycle the used descriptor.
	 */
	if (stat & FSL_DMA_SR_EOSI) {
		dev_dbg(fsl_chan->dev, "event: End-of-segments INT\n");
		dev_dbg(fsl_chan->dev, "event: clndar %p, nlndar %p\n",
			(void *)get_cdar(fsl_chan), (void *)get_ndar(fsl_chan));
		stat &= ~FSL_DMA_SR_EOSI;
		update_cookie = 1;
	}

	/* For MPC8349, the EOCDI event needs to update the cookie
	 * and start the next transfer if one exists.
	 */
	if (stat & FSL_DMA_SR_EOCDI) {
		dev_dbg(fsl_chan->dev, "event: End-of-Chain link INT\n");
		stat &= ~FSL_DMA_SR_EOCDI;
		update_cookie = 1;
		xfer_ld_q = 1;
	}

	/* If the current transfer is the end-of-transfer,
	 * we should clear the Channel Start bit to
	 * prepare for the next transfer.
	 */
	if (stat & FSL_DMA_SR_EOLNI) {
		dev_dbg(fsl_chan->dev, "event: End-of-link INT\n");
		stat &= ~FSL_DMA_SR_EOLNI;
		xfer_ld_q = 1;
	}

	if (update_cookie)
		fsl_dma_update_completed_cookie(fsl_chan);
	if (xfer_ld_q)
		fsl_chan_xfer_ld_queue(fsl_chan);
	if (stat)
		dev_dbg(fsl_chan->dev, "event: unhandled sr 0x%02x\n",
					stat);

	dev_dbg(fsl_chan->dev, "event: Exit\n");
	tasklet_schedule(&fsl_chan->tasklet);
	return IRQ_HANDLED;
}
772 | ||
773 | static irqreturn_t fsl_dma_do_interrupt(int irq, void *data) | |
774 | { | |
775 | struct fsl_dma_device *fdev = (struct fsl_dma_device *)data; | |
776 | u32 gsr; | |
777 | int ch_nr; | |
778 | ||
779 | gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->reg_base) | |
780 | : in_le32(fdev->reg_base); | |
781 | ch_nr = (32 - ffs(gsr)) / 8; | |
782 | ||
783 | return fdev->chan[ch_nr] ? fsl_dma_chan_do_interrupt(irq, | |
784 | fdev->chan[ch_nr]) : IRQ_NONE; | |
785 | } | |
786 | ||
787 | static void dma_do_tasklet(unsigned long data) | |
788 | { | |
789 | struct fsl_dma_chan *fsl_chan = (struct fsl_dma_chan *)data; | |
790 | fsl_chan_ld_cleanup(fsl_chan); | |
791 | } | |
792 | ||
static int __devinit fsl_dma_chan_probe(struct fsl_dma_device *fdev,
	struct device_node *node, u32 feature, const char *compatible)
{
	struct fsl_dma_chan *new_fsl_chan;
	int err;

	/* alloc channel */
	new_fsl_chan = kzalloc(sizeof(struct fsl_dma_chan), GFP_KERNEL);
	if (!new_fsl_chan) {
		dev_err(fdev->dev, "No free memory for allocating "
				"dma channels!\n");
		return -ENOMEM;
	}

	/* get dma channel register base */
	err = of_address_to_resource(node, 0, &new_fsl_chan->reg);
	if (err) {
		dev_err(fdev->dev, "Can't get %s property 'reg'\n",
				node->full_name);
		goto err_no_reg;
	}

	new_fsl_chan->feature = feature;

	if (!fdev->feature)
		fdev->feature = new_fsl_chan->feature;

	/* If the DMA device's feature is different from the feature
	 * of its channels, report the bug.
	 */
	WARN_ON(fdev->feature != new_fsl_chan->feature);

	new_fsl_chan->dev = &new_fsl_chan->common.dev->device;
	new_fsl_chan->reg_base = ioremap(new_fsl_chan->reg.start,
			new_fsl_chan->reg.end - new_fsl_chan->reg.start + 1);

	new_fsl_chan->id = ((new_fsl_chan->reg.start - 0x100) & 0xfff) >> 7;
	if (new_fsl_chan->id > FSL_DMA_MAX_CHANS_PER_DEVICE) {
		dev_err(fdev->dev, "There is no %d channel!\n",
				new_fsl_chan->id);
		err = -EINVAL;
		goto err_no_chan;
	}
	fdev->chan[new_fsl_chan->id] = new_fsl_chan;
	tasklet_init(&new_fsl_chan->tasklet, dma_do_tasklet,
			(unsigned long)new_fsl_chan);

	/* Init the channel */
	dma_init(new_fsl_chan);

	/* Clear cdar registers */
	set_cdar(new_fsl_chan, 0);

	switch (new_fsl_chan->feature & FSL_DMA_IP_MASK) {
	case FSL_DMA_IP_85XX:
		new_fsl_chan->toggle_ext_start = fsl_chan_toggle_ext_start;
		new_fsl_chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
		/* fall through: 85xx channels also get the loop-size hooks */
	case FSL_DMA_IP_83XX:
		new_fsl_chan->set_src_loop_size = fsl_chan_set_src_loop_size;
		new_fsl_chan->set_dest_loop_size = fsl_chan_set_dest_loop_size;
	}

	spin_lock_init(&new_fsl_chan->desc_lock);
	INIT_LIST_HEAD(&new_fsl_chan->ld_queue);

	new_fsl_chan->common.device = &fdev->common;

	/* Add the channel to DMA device channel list */
	list_add_tail(&new_fsl_chan->common.device_node,
			&fdev->common.channels);
	fdev->common.chancnt++;

	new_fsl_chan->irq = irq_of_parse_and_map(node, 0);
	if (new_fsl_chan->irq != NO_IRQ) {
		err = request_irq(new_fsl_chan->irq,
					&fsl_dma_chan_do_interrupt, IRQF_SHARED,
					"fsldma-channel", new_fsl_chan);
		if (err) {
			dev_err(fdev->dev, "DMA channel %s request_irq error "
				"with return %d\n", node->full_name, err);
			goto err_no_irq;
		}
	}

	dev_info(fdev->dev, "#%d (%s), irq %d\n", new_fsl_chan->id,
				compatible, new_fsl_chan->irq);

	return 0;

err_no_irq:
	list_del(&new_fsl_chan->common.device_node);
err_no_chan:
	iounmap(new_fsl_chan->reg_base);
err_no_reg:
	kfree(new_fsl_chan);
	return err;
}
890 | ||
77cd62e8 | 891 | static void fsl_dma_chan_remove(struct fsl_dma_chan *fchan) |
173acc7c | 892 | { |
77cd62e8 TT |
893 | free_irq(fchan->irq, fchan); |
894 | list_del(&fchan->common.device_node); | |
895 | iounmap(fchan->reg_base); | |
896 | kfree(fchan); | |
173acc7c ZW |
897 | } |
898 | ||
899 | static int __devinit of_fsl_dma_probe(struct of_device *dev, | |
900 | const struct of_device_id *match) | |
901 | { | |
902 | int err; | |
173acc7c | 903 | struct fsl_dma_device *fdev; |
77cd62e8 | 904 | struct device_node *child; |
173acc7c ZW |
905 | |
906 | fdev = kzalloc(sizeof(struct fsl_dma_device), GFP_KERNEL); | |
907 | if (!fdev) { | |
908 | dev_err(&dev->dev, "No enough memory for 'priv'\n"); | |
51ee87f2 | 909 | return -ENOMEM; |
173acc7c ZW |
910 | } |
911 | fdev->dev = &dev->dev; | |
912 | INIT_LIST_HEAD(&fdev->common.channels); | |
913 | ||
914 | /* get DMA controller register base */ | |
915 | err = of_address_to_resource(dev->node, 0, &fdev->reg); | |
916 | if (err) { | |
917 | dev_err(&dev->dev, "Can't get %s property 'reg'\n", | |
918 | dev->node->full_name); | |
51ee87f2 | 919 | goto err_no_reg; |
173acc7c ZW |
920 | } |
921 | ||
922 | dev_info(&dev->dev, "Probe the Freescale DMA driver for %s " | |
56822843 ZW |
923 | "controller at %p...\n", |
924 | match->compatible, (void *)fdev->reg.start); | |
173acc7c ZW |
925 | fdev->reg_base = ioremap(fdev->reg.start, fdev->reg.end |
926 | - fdev->reg.start + 1); | |
927 | ||
928 | dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask); | |
929 | dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask); | |
930 | fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources; | |
931 | fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources; | |
2187c269 | 932 | fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt; |
173acc7c ZW |
933 | fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy; |
934 | fdev->common.device_is_tx_complete = fsl_dma_is_complete; | |
935 | fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending; | |
173acc7c ZW |
936 | fdev->common.dev = &dev->dev; |
937 | ||
77cd62e8 TT |
938 | fdev->irq = irq_of_parse_and_map(dev->node, 0); |
939 | if (fdev->irq != NO_IRQ) { | |
940 | err = request_irq(fdev->irq, &fsl_dma_do_interrupt, IRQF_SHARED, | |
173acc7c ZW |
941 | "fsldma-device", fdev); |
942 | if (err) { | |
943 | dev_err(&dev->dev, "DMA device request_irq error " | |
944 | "with return %d\n", err); | |
945 | goto err; | |
946 | } | |
947 | } | |
948 | ||
949 | dev_set_drvdata(&(dev->dev), fdev); | |
77cd62e8 TT |
950 | |
951 | /* We cannot use of_platform_bus_probe() because there is no | |
952 | * of_platform_bus_remove. Instead, we manually instantiate every DMA | |
953 | * channel object. | |
954 | */ | |
955 | for_each_child_of_node(dev->node, child) { | |
956 | if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) | |
957 | fsl_dma_chan_probe(fdev, child, | |
958 | FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN, | |
959 | "fsl,eloplus-dma-channel"); | |
960 | if (of_device_is_compatible(child, "fsl,elo-dma-channel")) | |
961 | fsl_dma_chan_probe(fdev, child, | |
962 | FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN, | |
963 | "fsl,elo-dma-channel"); | |
964 | } | |
173acc7c ZW |
965 | |
966 | dma_async_device_register(&fdev->common); | |
967 | return 0; | |
968 | ||
969 | err: | |
970 | iounmap(fdev->reg_base); | |
51ee87f2 | 971 | err_no_reg: |
173acc7c ZW |
972 | kfree(fdev); |
973 | return err; | |
974 | } | |
975 | ||
77cd62e8 TT |
976 | static int of_fsl_dma_remove(struct of_device *of_dev) |
977 | { | |
978 | struct fsl_dma_device *fdev; | |
979 | unsigned int i; | |
980 | ||
981 | fdev = dev_get_drvdata(&of_dev->dev); | |
982 | ||
983 | dma_async_device_unregister(&fdev->common); | |
984 | ||
985 | for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) | |
986 | if (fdev->chan[i]) | |
987 | fsl_dma_chan_remove(fdev->chan[i]); | |
988 | ||
989 | if (fdev->irq != NO_IRQ) | |
990 | free_irq(fdev->irq, fdev); | |
991 | ||
992 | iounmap(fdev->reg_base); | |
993 | ||
994 | kfree(fdev); | |
995 | dev_set_drvdata(&of_dev->dev, NULL); | |
996 | ||
997 | return 0; | |
998 | } | |
999 | ||
173acc7c | 1000 | static struct of_device_id of_fsl_dma_ids[] = { |
049c9d45 KG |
1001 | { .compatible = "fsl,eloplus-dma", }, |
1002 | { .compatible = "fsl,elo-dma", }, | |
173acc7c ZW |
1003 | {} |
1004 | }; | |
1005 | ||
1006 | static struct of_platform_driver of_fsl_dma_driver = { | |
77cd62e8 | 1007 | .name = "fsl-elo-dma", |
173acc7c ZW |
1008 | .match_table = of_fsl_dma_ids, |
1009 | .probe = of_fsl_dma_probe, | |
77cd62e8 | 1010 | .remove = of_fsl_dma_remove, |
173acc7c ZW |
1011 | }; |
1012 | ||
1013 | static __init int of_fsl_dma_init(void) | |
1014 | { | |
77cd62e8 TT |
1015 | int ret; |
1016 | ||
1017 | pr_info("Freescale Elo / Elo Plus DMA driver\n"); | |
1018 | ||
1019 | ret = of_register_platform_driver(&of_fsl_dma_driver); | |
1020 | if (ret) | |
1021 | pr_err("fsldma: failed to register platform driver\n"); | |
1022 | ||
1023 | return ret; | |
1024 | } | |
1025 | ||
1026 | static void __exit of_fsl_dma_exit(void) | |
1027 | { | |
1028 | of_unregister_platform_driver(&of_fsl_dma_driver); | |
173acc7c ZW |
1029 | } |
1030 | ||
173acc7c | 1031 | subsys_initcall(of_fsl_dma_init); |
77cd62e8 TT |
1032 | module_exit(of_fsl_dma_exit); |
1033 | ||
1034 | MODULE_DESCRIPTION("Freescale Elo / Elo Plus DMA driver"); | |
1035 | MODULE_LICENSE("GPL"); |