/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
 * (tested using dmatest module) and data transfers between memory and
 * peripheral I/O memory by means of slave scatter/gather with these
 * limitations:
 *  - chunked transfers (described by s/g lists with more than one item) are
 *    refused as long as proper support for scatter/gather is missing
 *  - transfers on MPC8308 always start from software as this SoC does not have
 *    external request lines for peripheral flow control
 *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
 *    MPC512x), and 32 bytes are supported, and, consequently, source
 *    addresses and destination addresses must be aligned accordingly;
 *    furthermore, for MPC512x SoCs, the transfer size must be aligned on
 *    (chunk size * maxburst)
 */
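
/*
 * Usage sketch (illustration only, not part of the driver): the FIFO
 * address, bus width and burst size below are made-up values. A client
 * owning a channel on this controller would typically set up a peripheral
 * transfer within the above limitations like this:
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *
 *	chan = dma_request_slave_channel(dev, "rx");
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */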

#include <linux/module.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>

#include <linux/random.h>

#include "dmaengine.h"

/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05

/* MPC5121 DMA engine registers */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
				/* DMA channels(0~63) priority */
};

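/*
 * Each transfer control descriptor (TCD) describes a transfer as two nested
 * loops: the inner "minor" loop moves nbytes bytes, and the outer "major"
 * loop repeats it citer times (biter holds the initial iteration count).
 * For example, nbytes = 16 with biter = citer = 4 moves 64 bytes in total.
 */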
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};

struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;
	int				error;
	struct list_head		node;
	int				will_access_peripheral;
};

struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;
	struct mpc_dma_tcd		*tcd;
	dma_addr_t			tcd_paddr;

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	u8				swidth;
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;
	u8				dwidth;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;
	int				irq;
	int				irq2;
	uint				error_status;
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};

#define DRV_NAME	"mpc512x_dma"

/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct mpc_dma_chan, chan);
}

/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}

/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}

/* Handle interrupt on one half of DMA controller (32 channels) */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}

/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}

/* process completed descriptors */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			if (desc->callback)
				desc->callback(desc->callback_param);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}

/* DMA Tasklet */
static void mpc_dma_tasklet(unsigned long data)
{
	struct mpc_dma *mdma = (void *)data;
	unsigned long flags;
	uint es;

	spin_lock_irqsave(&mdma->error_status_lock, flags);
	es = mdma->error_status;
	mdma->error_status = 0;
	spin_unlock_irqrestore(&mdma->error_status_lock, flags);

	/* Print nice error report */
	if (es) {
		dev_err(mdma->dma.dev,
			"Hardware reported following error(s) on channel %u:\n",
			MPC_DMA_DMAES_ERRCHN(es));

		if (es & MPC_DMA_DMAES_GPE)
			dev_err(mdma->dma.dev, "- Group Priority Error\n");
		if (es & MPC_DMA_DMAES_CPE)
			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
		if (es & MPC_DMA_DMAES_SAE)
			dev_err(mdma->dma.dev, "- Source Address Error\n");
		if (es & MPC_DMA_DMAES_SOE)
			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_DAE)
			dev_err(mdma->dma.dev, "- Destination Address Error\n");
		if (es & MPC_DMA_DMAES_DOE)
			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
		if (es & MPC_DMA_DMAES_NCE)
			dev_err(mdma->dma.dev, "- NBytes/Citer Configuration Error\n");
		if (es & MPC_DMA_DMAES_SGE)
			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
		if (es & MPC_DMA_DMAES_SBE)
			dev_err(mdma->dma.dev, "- Source Bus Error\n");
		if (es & MPC_DMA_DMAES_DBE)
			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
	}

	mpc_dma_process_completed(mdma);
}

/* Submit descriptor to hardware */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}

/* Alloc channel resources */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}

/* Free channel resources */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
			tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}

/* Send all pending descriptors to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready, so this function does nothing.
	 */
}

/* Check request completion status */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		  struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

/* Prepare descriptor for memory to memory copy */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}

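/*
 * Convert a bus width in bytes to the TCD ssize/dsize encoding, which is
 * log2 of the width: e.g. a 4 byte width yields 2 (MPC_DMA_TSIZE_4) and a
 * 32 byte width yields 5 (MPC_DMA_TSIZE_32).
 */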
inline u8 buswidth_to_dmatsize(u8 buswidth)
{
	u8 res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}

static struct dma_async_tx_descriptor *
mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	dma_addr_t per_paddr;
	u32 tcd_nunits;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;
	struct scatterlist *sg;
	size_t len;
	int iter, i;

	/* Currently there is no proper support for scatter/gather */
	if (sg_len != 1)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		spin_lock_irqsave(&mchan->lock, iflags);

		mdesc = list_first_entry_or_null(&mchan->free,
						 struct mpc_dma_desc, node);
		if (!mdesc) {
			spin_unlock_irqrestore(&mchan->lock, iflags);
			/* Try to free completed descriptors */
			mpc_dma_process_completed(mdma);
			return NULL;
		}

		list_del(&mdesc->node);

		if (direction == DMA_DEV_TO_MEM) {
			per_paddr = mchan->src_per_paddr;
			tcd_nunits = mchan->src_tcd_nunits;
		} else {
			per_paddr = mchan->dst_per_paddr;
			tcd_nunits = mchan->dst_tcd_nunits;
		}

		spin_unlock_irqrestore(&mchan->lock, iflags);

		if (per_paddr == 0 || tcd_nunits == 0)
			goto err_prep;

		mdesc->error = 0;
		mdesc->will_access_peripheral = 1;

		/* Prepare Transfer Control Descriptor for this transaction */
		tcd = mdesc->tcd;

		memset(tcd, 0, sizeof(struct mpc_dma_tcd));

		if (direction == DMA_DEV_TO_MEM) {
			tcd->saddr = per_paddr;
			tcd->daddr = sg_dma_address(sg);

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
				goto err_prep;

			tcd->soff = 0;
			tcd->doff = mchan->dwidth;
		} else {
			tcd->saddr = sg_dma_address(sg);
			tcd->daddr = per_paddr;

			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
				goto err_prep;

			tcd->soff = mchan->swidth;
			tcd->doff = 0;
		}

		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);

		if (mdma->is_mpc8308) {
			tcd->nbytes = sg_dma_len(sg);
			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
				goto err_prep;

			/* No major loops for MPC8308 */
			tcd->biter = 1;
			tcd->citer = 1;
		} else {
			len = sg_dma_len(sg);
			tcd->nbytes = tcd_nunits * tcd->ssize;
			if (!IS_ALIGNED(len, tcd->nbytes))
				goto err_prep;

			iter = len / tcd->nbytes;
			if (iter >= 1 << 15) {
				/* len is too big */
				goto err_prep;
			}
			/* citer_linkch contains the high bits of iter */
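			/* (e.g. iter = 0x2ab gives biter = 0x0ab, biter_linkch = 0x1) */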
			tcd->biter = iter & 0x1ff;
			tcd->biter_linkch = iter >> 9;
			tcd->citer = tcd->biter;
			tcd->citer_linkch = tcd->biter_linkch;
		}

		tcd->e_sg = 0;
		tcd->d_req = 1;

		/* Place descriptor in prepared list */
		spin_lock_irqsave(&mchan->lock, iflags);
		list_add_tail(&mdesc->node, &mchan->prepared);
		spin_unlock_irqrestore(&mchan->lock, iflags);
	}

	return &mdesc->desc;

err_prep:
	/* Put the descriptor back */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return NULL;
}

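/*
 * Check whether a requested bus width is supported by the hardware: 1, 2, 4,
 * 16 and 32 bytes on MPC512x, and 1, 2, 4 and 32 bytes on MPC8308 (which has
 * no 16 byte transfer size).
 */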
inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
{
	switch (buswidth) {
	case 16:
		if (is_mpc8308)
			return false;
		/* fall through */
	case 1:
	case 2:
	case 4:
	case 32:
		break;
	default:
		return false;
	}

	return true;
}

static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *    supported
	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *    are supported, and, consequently, source addresses and
	 *    destination addresses must be aligned accordingly; furthermore,
	 *    for MPC512x SoCs, the transfer size must be aligned on (chunk
	 *    size * maxburst)
	 *  - during the transfer, the RAM address is incremented by the size
	 *    of transfer chunk
	 *  - the peripheral port's address is constant during the transfer.
	 */
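
	/*
	 * For example, with a 4 byte transfer chunk and a maxburst of 8 on
	 * MPC512x, the overall transfer length must be a multiple of
	 * 32 bytes (chunk size * maxburst).
	 */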

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
		return -EINVAL;
	}

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}

static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (mdma->irq == NO_IRQ) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (mdma->irq2 == NO_IRQ) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
						MPC_DMA_DMACR_ERGA |
						MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}

static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);
	tasklet_kill(&mdma->tasklet);

	return 0;
}

static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc_dma_match);

static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= mpc_dma_match,
	},
};

module_platform_driver(mpc_dma_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");