/*
 * TI EDMA DMA engine driver
 *
 * Copyright 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/edma.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/pm_runtime.h>

#include <linux/platform_data/edma.h>

#include "dmaengine.h"
#include "virt-dma.h"

/* Offsets matching "struct edmacc_param" */
#define PARM_OPT		0x00
#define PARM_SRC		0x04
#define PARM_A_B_CNT		0x08
#define PARM_DST		0x0c
#define PARM_SRC_DST_BIDX	0x10
#define PARM_LINK_BCNTRLD	0x14
#define PARM_SRC_DST_CIDX	0x18
#define PARM_CCNT		0x1c

#define PARM_SIZE		0x20

/* Offsets for EDMA CC global channel registers and their shadows */
#define SH_ER			0x00	/* 64 bits */
#define SH_ECR			0x08	/* 64 bits */
#define SH_ESR			0x10	/* 64 bits */
#define SH_CER			0x18	/* 64 bits */
#define SH_EER			0x20	/* 64 bits */
#define SH_EECR			0x28	/* 64 bits */
#define SH_EESR			0x30	/* 64 bits */
#define SH_SER			0x38	/* 64 bits */
#define SH_SECR			0x40	/* 64 bits */
#define SH_IER			0x50	/* 64 bits */
#define SH_IECR			0x58	/* 64 bits */
#define SH_IESR			0x60	/* 64 bits */
#define SH_IPR			0x68	/* 64 bits */
#define SH_ICR			0x70	/* 64 bits */
#define SH_IEVAL		0x78
#define SH_QER			0x80
#define SH_QEER			0x84
#define SH_QEECR		0x88
#define SH_QEESR		0x8c
#define SH_QSER			0x90
#define SH_QSECR		0x94
#define SH_SIZE			0x200

/* Offsets for EDMA CC global registers */
#define EDMA_REV		0x0000
#define EDMA_CCCFG		0x0004
#define EDMA_QCHMAP		0x0200	/* 8 registers */
#define EDMA_DMAQNUM		0x0240	/* 8 registers (4 on OMAP-L1xx) */
#define EDMA_QDMAQNUM		0x0260
#define EDMA_QUETCMAP		0x0280
#define EDMA_QUEPRI		0x0284
#define EDMA_EMR		0x0300	/* 64 bits */
#define EDMA_EMCR		0x0308	/* 64 bits */
#define EDMA_QEMR		0x0310
#define EDMA_QEMCR		0x0314
#define EDMA_CCERR		0x0318
#define EDMA_CCERRCLR		0x031c
#define EDMA_EEVAL		0x0320
#define EDMA_DRAE		0x0340	/* 4 x 64 bits */
#define EDMA_QRAE		0x0380	/* 4 registers */
#define EDMA_QUEEVTENTRY	0x0400	/* 2 x 16 registers */
#define EDMA_QSTAT		0x0600	/* 2 registers */
#define EDMA_QWMTHRA		0x0620
#define EDMA_QWMTHRB		0x0624
#define EDMA_CCSTAT		0x0640

#define EDMA_M			0x1000	/* global channel registers */
#define EDMA_ECR		0x1008
#define EDMA_ECRH		0x100C
#define EDMA_SHADOW0		0x2000	/* 4 shadow regions */
#define EDMA_PARM		0x4000	/* PaRAM entries */

#define PARM_OFFSET(param_no)	(EDMA_PARM + ((param_no) << 5))
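/* Each PaRAM set occupies PARM_SIZE (0x20) bytes, hence the shift by 5 above */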

#define EDMA_DCHMAP		0x0100	/* 64 registers */

/* CCCFG register */
#define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST		BIT(24)

/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set up the default to 20.
 */
#define MAX_NR_SG		20
#define EDMA_MAX_SLOTS		MAX_NR_SG
#define EDMA_DESCRIPTORS	16

#define EDMA_CHANNEL_ANY		-1	/* for edma_alloc_channel() */
#define EDMA_SLOT_ANY			-1	/* for edma_alloc_slot() */
#define EDMA_CONT_PARAMS_ANY		 1001
#define EDMA_CONT_PARAMS_FIXED_EXACT	 1002
#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003

/* PaRAM slots are laid out like this */
struct edmacc_param {
	u32 opt;
	u32 src;
	u32 a_b_cnt;
	u32 dst;
	u32 src_dst_bidx;
	u32 link_bcntrld;
	u32 src_dst_cidx;
	u32 ccnt;
} __packed;

/* fields in edmacc_param.opt */
#define SAM		BIT(0)
#define DAM		BIT(1)
#define SYNCDIM		BIT(2)
#define STATIC		BIT(3)
#define EDMA_FWID	(0x07 << 8)
#define TCCMODE		BIT(11)
#define EDMA_TCC(t)	((t) << 12)
#define TCINTEN		BIT(20)
#define ITCINTEN	BIT(21)
#define TCCHEN		BIT(22)
#define ITCCHEN		BIT(23)

struct edma_pset {
	u32				len;
	dma_addr_t			addr;
	struct edmacc_param		param;
};

struct edma_desc {
	struct virt_dma_desc		vdesc;
	struct list_head		node;
	enum dma_transfer_direction	direction;
	int				cyclic;
	int				absync;
	int				pset_nr;
	struct edma_chan		*echan;
	int				processed;

	/*
	 * The following 4 elements are used for residue accounting.
	 *
	 * - processed_stat: the number of SG elements we have traversed
	 * so far to cover accounting. This is updated directly to processed
	 * during edma_callback and is always <= processed, because processed
	 * refers to the number of pending transfers (programmed to the EDMA
	 * controller), whereas processed_stat tracks the number of transfers
	 * accounted for so far.
	 *
	 * - residue: The amount of bytes we have left to transfer for this desc
	 *
	 * - residue_stat: The residue in bytes of data we have covered
	 * so far for accounting. This is updated directly to residue
	 * during callbacks to keep it current.
	 *
	 * - sg_len: Tracks the length of the current intermediate transfer,
	 * this is required to update the residue during intermediate transfer
	 * completion callback.
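	 *
	 * Example: a 30-element SG list is executed in two batches of
	 * MAX_NR_SG (20) and 10 slots. When the first batch completes,
	 * processed is 20, sg_len holds the bytes programmed for that
	 * batch, and the completion handler subtracts sg_len from residue.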
	 */
	int				processed_stat;
	u32				sg_len;
	u32				residue;
	u32				residue_stat;

	struct edma_pset		pset[0];
};

struct edma_cc;

struct edma_chan {
	struct virt_dma_chan		vchan;
	struct list_head		node;
	struct edma_desc		*edesc;
	struct edma_cc			*ecc;
	int				ch_num;
	bool				alloced;
	int				slot[EDMA_MAX_SLOTS];
	int				missed;
	struct dma_slave_config		cfg;
};

struct edma_cc {
	struct device			*dev;
	struct edma_soc_info		*info;
	void __iomem			*base;
	int				id;

	/* eDMA3 resource information */
	unsigned			num_channels;
	unsigned			num_qchannels;
	unsigned			num_region;
	unsigned			num_slots;
	unsigned			num_tc;
	bool				chmap_exist;
	enum dma_event_q		default_queue;

	bool				unused_chan_list_done;
	/* The slot_inuse bit for each PaRAM slot is clear unless the
	 * channel is in use ... by ARM or DSP, for QDMA, or whatever.
	 */
	unsigned long			*slot_inuse;

	/* The channel_unused bit for each channel is clear unless
	 * it is not being used on this platform. It uses a bit
	 * of SOC-specific initialization code.
	 */
	unsigned long			*channel_unused;

	struct dma_device		dma_slave;
	struct edma_chan		*slave_chans;
	int				dummy_slot;
};

/* dummy param set used to (re)initialize parameter RAM slots */
static const struct edmacc_param dummy_paramset = {
	.link_bcntrld = 0xffff,
	.ccnt = 1,
};

static const struct of_device_id edma_of_ids[] = {
	{ .compatible = "ti,edma3", },
	{}
};

static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
{
	return (unsigned int)__raw_readl(ecc->base + offset);
}

static inline void edma_write(struct edma_cc *ecc, int offset, int val)
{
	__raw_writel(val, ecc->base + offset);
}

static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
			       unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	val |= or;
	edma_write(ecc, offset, val);
}

static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
{
	unsigned val = edma_read(ecc, offset);

	val &= and;
	edma_write(ecc, offset, val);
}

static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
{
	unsigned val = edma_read(ecc, offset);

	val |= or;
	edma_write(ecc, offset, val);
}

static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
					   int i)
{
	return edma_read(ecc, offset + (i << 2));
}

static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
				    unsigned val)
{
	edma_write(ecc, offset + (i << 2), val);
}

static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
				     unsigned and, unsigned or)
{
	edma_modify(ecc, offset + (i << 2), and, or);
}

static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
				 unsigned or)
{
	edma_or(ecc, offset + (i << 2), or);
}

static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
				  unsigned or)
{
	edma_or(ecc, offset + ((i * 2 + j) << 2), or);
}

static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
				     int j, unsigned val)
{
	edma_write(ecc, offset + ((i * 2 + j) << 2), val);
}

static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset);
}

static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
						   int offset, int i)
{
	return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
}

static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
				      unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset, val);
}

static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
					    int i, unsigned val)
{
	edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
}

static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
					   int param_no)
{
	return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
}

static inline void edma_param_write(struct edma_cc *ecc, int offset,
				    int param_no, unsigned val)
{
	edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
}

static inline void edma_param_modify(struct edma_cc *ecc, int offset,
				     int param_no, unsigned and, unsigned or)
{
	edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
}

static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
				  unsigned and)
{
	edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
}

static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
				 unsigned or)
{
	edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
}

static inline void set_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}

static inline void clear_bits(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		clear_bit(offset + (len - 1), p);
}

static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
					  int priority)
{
	int bit = queue_no * 4;

	edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
}

static void edma_set_chmap(struct edma_chan *echan, int slot)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (ecc->chmap_exist) {
		slot = EDMA_CHAN_SLOT(slot);
		edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
	}
}

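/*
 * Walk one platform device and mark every eDMA channel it references
 * (via "dmas"/"dma-names" in DT, or IORESOURCE_DMA resources otherwise)
 * as used by clearing its bit in channel_unused. Only channels belonging
 * to this eDMA instance are considered.
 */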
static int prepare_unused_channel_list(struct device *dev, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct edma_cc *ecc = data;
	int dma_req_min = EDMA_CTLR_CHAN(ecc->id, 0);
	int dma_req_max = dma_req_min + ecc->num_channels;
	int i, count;
	struct of_phandle_args dma_spec;

	if (dev->of_node) {
		struct platform_device *dma_pdev;

		count = of_property_count_strings(dev->of_node, "dma-names");
		if (count < 0)
			return 0;
		for (i = 0; i < count; i++) {
			if (of_parse_phandle_with_args(dev->of_node, "dmas",
						       "#dma-cells", i,
						       &dma_spec))
				continue;

			if (!of_match_node(edma_of_ids, dma_spec.np)) {
				of_node_put(dma_spec.np);
				continue;
			}

			dma_pdev = of_find_device_by_node(dma_spec.np);
			if (&dma_pdev->dev != ecc->dev)
				continue;

			clear_bit(EDMA_CHAN_SLOT(dma_spec.args[0]),
				  ecc->channel_unused);
			of_node_put(dma_spec.np);
		}
		return 0;
	}

	/* For non-OF case */
	for (i = 0; i < pdev->num_resources; i++) {
		struct resource *res = &pdev->resource[i];
		int dma_req;

		if (!(res->flags & IORESOURCE_DMA))
			continue;

		dma_req = (int)res->start;
		if (dma_req >= dma_req_min && dma_req < dma_req_max)
			clear_bit(EDMA_CHAN_SLOT(pdev->resource[i].start),
				  ecc->channel_unused);
	}

	return 0;
}

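/*
 * Enable or disable the completion interrupt for a channel in shadow
 * region 0. When enabling, any stale pending bit is cleared first (ICR)
 * before the interrupt is unmasked (IESR); disabling masks it via IECR.
 */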
static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (enable) {
		edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
					 BIT(channel & 0x1f));
		edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
					 BIT(channel & 0x1f));
	} else {
		edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
					 BIT(channel & 0x1f));
	}
}

/*
 * paRAM slot management functions
 */
static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
			    const struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
}

static void edma_read_slot(struct edma_cc *ecc, unsigned slot,
			   struct edmacc_param *param)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;
	memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
}

/**
 * edma_alloc_slot - allocate DMA parameter RAM
 * @ecc: pointer to edma_cc struct
 * @slot: specific slot to allocate; negative for "any unused slot"
 *
 * This allocates a parameter RAM slot, initializing it to hold a
 * dummy transfer. Slots allocated using this routine have not been
 * mapped to a hardware DMA channel, and will normally be used by
 * linking to them from a slot associated with a DMA channel.
 *
 * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
 * slots may be allocated on behalf of DSP firmware.
 *
 * Returns the number of the slot, else negative errno.
 */
static int edma_alloc_slot(struct edma_cc *ecc, int slot)
{
	if (slot > 0) {
		slot = EDMA_CHAN_SLOT(slot);
		/* Requesting entry paRAM slot for a HW triggered channel. */
		if (ecc->chmap_exist && slot < ecc->num_channels)
			slot = EDMA_SLOT_ANY;
	}

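	/*
	 * For EDMA_SLOT_ANY: when there is no DCHMAP the first num_channels
	 * slots double as the entry slots of the HW channels, so the search
	 * below starts past them; with DCHMAP present any slot can be mapped
	 * and the search starts at 0.
	 */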
	if (slot < 0) {
		if (ecc->chmap_exist)
			slot = 0;
		else
			slot = ecc->num_channels;
		for (;;) {
			slot = find_next_zero_bit(ecc->slot_inuse,
						  ecc->num_slots,
						  slot);
			if (slot == ecc->num_slots)
				return -ENOMEM;
			if (!test_and_set_bit(slot, ecc->slot_inuse))
				break;
		}
	} else if (slot >= ecc->num_slots) {
		return -EINVAL;
	} else if (test_and_set_bit(slot, ecc->slot_inuse)) {
		return -EBUSY;
	}

	edma_write_slot(ecc, slot, &dummy_paramset);

	return EDMA_CTLR_CHAN(ecc->id, slot);
}

static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
{
	slot = EDMA_CHAN_SLOT(slot);
	if (slot >= ecc->num_slots)
		return;

	edma_write_slot(ecc, slot, &dummy_paramset);
	clear_bit(slot, ecc->slot_inuse);
}

/**
 * edma_link - link one parameter RAM slot to another
 * @ecc: pointer to edma_cc struct
 * @from: parameter RAM slot originating the link
 * @to: parameter RAM slot which is the link target
 *
 * The originating slot should not be part of any active DMA transfer.
 */
static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
{
	if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
		dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");

	from = EDMA_CHAN_SLOT(from);
	to = EDMA_CHAN_SLOT(to);
	if (from >= ecc->num_slots || to >= ecc->num_slots)
		return;

	edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
			  PARM_OFFSET(to));
}

/**
 * edma_get_position - returns the current transfer point
 * @ecc: pointer to edma_cc struct
 * @slot: parameter RAM slot being examined
 * @dst: true selects the dest position, false the source
 *
 * Returns the position of the current active slot
 */
static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
				    bool dst)
{
	u32 offs;

	slot = EDMA_CHAN_SLOT(slot);
	offs = PARM_OFFSET(slot);
	offs += dst ? PARM_DST : PARM_SRC;

	return edma_read(ecc, offs);
}

/*
 * Channels with event associations will be triggered by their hardware
 * events, and channels without such associations will be triggered by
 * software. (At this writing there is no interface for using software
 * triggers except with channels that don't support hardware triggers.)
 */
static void edma_start(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	if (test_bit(channel, ecc->channel_unused)) {
		/* EDMA channels without event association */
		dev_dbg(ecc->dev, "ESR%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ESR, j));
		edma_shadow0_write_array(ecc, SH_ESR, j, mask);
	} else {
		/* EDMA channel with event association */
		dev_dbg(ecc->dev, "ER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_ER, j));
		/* Clear any pending event or error */
		edma_write_array(ecc, EDMA_ECR, j, mask);
		edma_write_array(ecc, EDMA_EMCR, j, mask);
		/* Clear any SER */
		edma_shadow0_write_array(ecc, SH_SECR, j, mask);
		edma_shadow0_write_array(ecc, SH_EESR, j, mask);
		dev_dbg(ecc->dev, "EER%d %08x\n", j,
			edma_shadow0_read_array(ecc, SH_EER, j));
	}
}

static void edma_stop(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_EECR, j, mask);
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write_array(ecc, EDMA_EMCR, j, mask);

	/* clear possibly pending completion interrupt */
	edma_shadow0_write_array(ecc, SH_ICR, j, mask);

	dev_dbg(ecc->dev, "EER%d %08x\n", j,
		edma_shadow0_read_array(ecc, SH_EER, j));

	/* REVISIT: consider guarding against inappropriate event
	 * chaining by overwriting with dummy_paramset.
	 */
}

/*
 * Temporarily disable EDMA hardware events on the specified channel,
 * preventing them from triggering new transfers
 */
static void edma_pause(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
}

/* Re-enable EDMA hardware events on the specified channel. */
static void edma_resume(struct edma_chan *echan)
{
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
}

static void edma_trigger_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	unsigned int mask = BIT(channel & 0x1f);

	edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);

	dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
		edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
}

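/*
 * Clear pending events and error status (EMR, SER and CC error bits) for
 * the channel so it can be safely restarted.
 */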
static void edma_clean_channel(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int j = (channel >> 5);
	unsigned int mask = BIT(channel & 0x1f);

	dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
	edma_shadow0_write_array(ecc, SH_ECR, j, mask);
	/* Clear the corresponding EMR bits */
	edma_write_array(ecc, EDMA_EMCR, j, mask);
	/* Clear any SER */
	edma_shadow0_write_array(ecc, SH_SECR, j, mask);
	edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
}

/* Move channel to a specific event queue */
static void edma_assign_channel_eventq(struct edma_chan *echan,
				       enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);
	int bit = (channel & 0x7) * 4;

	/* default to low priority queue */
	if (eventq_no == EVENTQ_DEFAULT)
		eventq_no = ecc->default_queue;
	if (eventq_no >= ecc->num_tc)
		return;

	eventq_no &= 7;
	edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
			  eventq_no << bit);
}

static int edma_alloc_channel(struct edma_chan *echan,
			      enum dma_event_q eventq_no)
{
	struct edma_cc *ecc = echan->ecc;
	int channel = EDMA_CHAN_SLOT(echan->ch_num);

	if (!ecc->unused_chan_list_done) {
		/*
		 * Scan all the platform devices to find out the EDMA channels
		 * used and clear them in the unused list, making the rest
		 * available for ARM usage.
		 */
		int ret = bus_for_each_dev(&platform_bus_type, NULL, ecc,
					   prepare_unused_channel_list);
		if (ret < 0)
			return ret;

		ecc->unused_chan_list_done = true;
	}

	/* ensure access through shadow region 0 */
	edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));

	/* ensure no events are pending */
	edma_stop(echan);

	edma_setup_interrupt(echan, true);

	edma_assign_channel_eventq(echan, eventq_no);

	return 0;
}

static void edma_free_channel(struct edma_chan *echan)
{
	/* ensure no events are pending */
	edma_stop(echan);
	/* REVISIT should probably take out of shadow region 0 */
	edma_setup_interrupt(echan, false);
}

static inline struct edma_cc *to_edma_cc(struct dma_device *d)
{
	return container_of(d, struct edma_cc, dma_slave);
}

static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
{
	return container_of(c, struct edma_chan, vchan.chan);
}

static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct edma_desc, vdesc.tx);
}

static void edma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct edma_desc, vdesc));
}

/* Dispatch a queued descriptor to the controller (caller holds lock) */
static void edma_execute(struct edma_chan *echan)
{
	struct edma_cc *ecc = echan->ecc;
	struct virt_dma_desc *vdesc;
	struct edma_desc *edesc;
	struct device *dev = echan->vchan.chan.device->dev;
	int i, j, left, nslots;

	if (!echan->edesc) {
		/* Setup is needed for the first transfer */
		vdesc = vchan_next_desc(&echan->vchan);
		if (!vdesc)
			return;
		list_del(&vdesc->node);
		echan->edesc = to_edma_desc(&vdesc->tx);
	}

	edesc = echan->edesc;

	/* Find out how many left */
	left = edesc->pset_nr - edesc->processed;
	nslots = min(MAX_NR_SG, left);
	edesc->sg_len = 0;

	/* Write descriptor PaRAM set(s) */
	for (i = 0; i < nslots; i++) {
		j = i + edesc->processed;
		edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
		edesc->sg_len += edesc->pset[j].len;
		dev_vdbg(dev,
			 "\n pset[%d]:\n"
			 " chnum\t%d\n"
			 " slot\t%d\n"
			 " opt\t%08x\n"
			 " src\t%08x\n"
			 " dst\t%08x\n"
			 " abcnt\t%08x\n"
			 " ccnt\t%08x\n"
			 " bidx\t%08x\n"
			 " cidx\t%08x\n"
			 " lkrld\t%08x\n",
			 j, echan->ch_num, echan->slot[i],
			 edesc->pset[j].param.opt,
			 edesc->pset[j].param.src,
			 edesc->pset[j].param.dst,
			 edesc->pset[j].param.a_b_cnt,
			 edesc->pset[j].param.ccnt,
			 edesc->pset[j].param.src_dst_bidx,
			 edesc->pset[j].param.src_dst_cidx,
			 edesc->pset[j].param.link_bcntrld);
		/* Link to the previous slot if not the last set */
		if (i != (nslots - 1))
			edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
	}

	edesc->processed += nslots;

	/*
	 * If this is the last set in a set of SG-list transactions then
	 * set up a link to the dummy slot; this results in all future
	 * events being absorbed and that's OK because we're done.
	 */
	if (edesc->processed == edesc->pset_nr) {
		if (edesc->cyclic)
			edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
		else
			edma_link(ecc, echan->slot[nslots - 1],
				  echan->ecc->dummy_slot);
	}

	if (echan->missed) {
		/*
		 * This happens due to setup times between intermediate
		 * transfers in long SG lists which have to be broken up into
		 * transfers of MAX_NR_SG
		 */
		dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
		edma_clean_channel(echan);
		edma_stop(echan);
		edma_start(echan);
		edma_trigger_channel(echan);
		echan->missed = 0;
	} else if (edesc->processed <= MAX_NR_SG) {
		dev_dbg(dev, "first transfer starting on channel %d\n",
			echan->ch_num);
		edma_start(echan);
	} else {
		dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
			echan->ch_num, edesc->processed);
		edma_resume(echan);
	}
}

static int edma_terminate_all(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&echan->vchan.lock, flags);

	/*
	 * Stop DMA activity: we assume the callback will not be called
	 * after edma_dma() returns (even if it does, it will see
	 * echan->edesc is NULL and exit.)
	 */
	if (echan->edesc) {
		edma_stop(echan);
		/* Move the cyclic channel back to default queue */
		if (echan->edesc->cyclic)
			edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
		/*
		 * free the running request descriptor
		 * since it is not in any of the vdesc lists
		 */
		edma_desc_free(&echan->edesc->vdesc);
		echan->edesc = NULL;
	}

	vchan_get_all_descriptors(&echan->vchan, &head);
	spin_unlock_irqrestore(&echan->vchan.lock, flags);
	vchan_dma_desc_free_list(&echan->vchan, &head);

	return 0;
}

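/*
 * Cache the dmaengine slave configuration (addresses, bus width, maxburst)
 * for later use by the prep callbacks; 8-byte bus widths are rejected.
 */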
static int edma_slave_config(struct dma_chan *chan,
	struct dma_slave_config *cfg)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	memcpy(&echan->cfg, cfg, sizeof(echan->cfg));

	return 0;
}

static int edma_dma_pause(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	if (!echan->edesc)
		return -EINVAL;

	edma_pause(echan);
	return 0;
}

static int edma_dma_resume(struct dma_chan *chan)
{
	struct edma_chan *echan = to_edma_chan(chan);

	edma_resume(echan);
	return 0;
}

/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and setup.
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: In units of acnt, how much to send
 * @acnt: Device FIFO/bus width in bytes (the A count)
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
			    dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
			    unsigned int acnt, unsigned int dma_length,
			    enum dma_transfer_direction direction)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edmacc_param *param = &epset->param;
	int bcnt, ccnt, cidx;
	int src_bidx, dst_bidx, src_cidx, dst_cidx;
	int absync;

	/* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
	if (!burst)
		burst = 1;
	/*
	 * If the maxburst is equal to the fifo width, use
	 * A-synced transfers. This allows for large contiguous
	 * buffer transfers using only one PaRAM set.
	 */
	if (burst == 1) {
		/*
		 * For the A-sync case, bcnt and ccnt are the remainder
		 * and quotient respectively of the division of
		 * (dma_length / acnt) by (SZ_64K - 1). This is so
		 * that in case bcnt overflows, we have ccnt to use.
		 * Note: In A-sync transfer only, bcntrld is used, but it
		 * only applies for sg_dma_len(sg) >= SZ_64K.
		 * In this case, the best way adopted is: bcnt for the
		 * first frame will be the remainder below. Then for
		 * every successive frame, bcnt will be SZ_64K-1. This
		 * is assured as bcntrld = 0xffff at the end of this function.
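		 *
		 * Example: acnt = 4 and dma_length = 1 MiB gives 262144
		 * elements; ccnt = 262144 / 65535 = 4, and the remainder of 4
		 * becomes the first frame's bcnt, so ccnt is bumped to 5 and
		 * bcntrld reloads 65535 for the remaining frames.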
		 */
		absync = false;
		ccnt = dma_length / acnt / (SZ_64K - 1);
		bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
		/*
		 * If bcnt is non-zero, we have a remainder and hence an
		 * extra frame to transfer, so increment ccnt.
		 */
		if (bcnt)
			ccnt++;
		else
			bcnt = SZ_64K - 1;
		cidx = acnt;
	} else {
		/*
		 * If maxburst is greater than the fifo address_width,
		 * use AB-synced transfers where A count is the fifo
		 * address_width and B count is the maxburst. In this
		 * case, we are limited to transfers of C count frames
		 * of (address_width * maxburst) where C count is limited
		 * to SZ_64K-1. This places an upper bound on the length
		 * of an SG segment that can be handled.
		 */
		absync = true;
		bcnt = burst;
		ccnt = dma_length / (acnt * bcnt);
		if (ccnt > (SZ_64K - 1)) {
			dev_err(dev, "Exceeded max SG segment size\n");
			return -EINVAL;
		}
		cidx = acnt * bcnt;
	}

1028 | epset->len = dma_length; |
1029 | ||
fd009035 JF |
1030 | if (direction == DMA_MEM_TO_DEV) { |
1031 | src_bidx = acnt; | |
1032 | src_cidx = cidx; | |
1033 | dst_bidx = 0; | |
1034 | dst_cidx = 0; | |
c2da2340 | 1035 | epset->addr = src_addr; |
fd009035 JF |
1036 | } else if (direction == DMA_DEV_TO_MEM) { |
1037 | src_bidx = 0; | |
1038 | src_cidx = 0; | |
1039 | dst_bidx = acnt; | |
1040 | dst_cidx = cidx; | |
c2da2340 | 1041 | epset->addr = dst_addr; |
8cc3e30b JF |
1042 | } else if (direction == DMA_MEM_TO_MEM) { |
1043 | src_bidx = acnt; | |
1044 | src_cidx = cidx; | |
1045 | dst_bidx = acnt; | |
1046 | dst_cidx = cidx; | |
fd009035 JF |
1047 | } else { |
1048 | dev_err(dev, "%s: direction not implemented yet\n", __func__); | |
1049 | return -EINVAL; | |
1050 | } | |
1051 | ||
b5088ad9 | 1052 | param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num)); |
fd009035 JF |
1053 | /* Configure A or AB synchronized transfers */ |
1054 | if (absync) | |
b5088ad9 | 1055 | param->opt |= SYNCDIM; |
fd009035 | 1056 | |
b5088ad9 TG |
1057 | param->src = src_addr; |
1058 | param->dst = dst_addr; | |
fd009035 | 1059 | |
b5088ad9 TG |
1060 | param->src_dst_bidx = (dst_bidx << 16) | src_bidx; |
1061 | param->src_dst_cidx = (dst_cidx << 16) | src_cidx; | |
fd009035 | 1062 | |
b5088ad9 TG |
1063 | param->a_b_cnt = bcnt << 16 | acnt; |
1064 | param->ccnt = ccnt; | |
fd009035 JF |
1065 | /* |
1066 | * Only time when (bcntrld) auto reload is required is for | |
1067 | * A-sync case, and in this case, a requirement of reload value | |
1068 | * of SZ_64K-1 only is assured. 'link' is initially set to NULL | |
1069 | * and then later will be populated by edma_execute. | |
1070 | */ | |
b5088ad9 | 1071 | param->link_bcntrld = 0xffffffff; |
fd009035 JF |
1072 | return absync; |
1073 | } | |
1074 | ||
static struct dma_async_tx_descriptor *edma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl,
	unsigned int sg_len, enum dma_transfer_direction direction,
	unsigned long tx_flags, void *context)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr = 0, dst_addr = 0;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	struct scatterlist *sg;
	int i, nslots, ret;

	if (unlikely(!echan || !sgl || !sg_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->pset_nr = sg_len;
	edesc->residue = 0;
	edesc->direction = direction;
	edesc->echan = echan;

	/* Allocate a PaRAM slot, if needed */
	nslots = min_t(unsigned, MAX_NR_SG, sg_len);

	for (i = 0; i < nslots; i++) {
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
	}

	/* Configure PaRAM sets for each SG */
	for_each_sg(sgl, sg, sg_len, i) {
		/* Get address for each SG */
		if (direction == DMA_DEV_TO_MEM)
			dst_addr = sg_dma_address(sg);
		else
			src_addr = sg_dma_address(sg);

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width,
				       sg_dma_len(sg), direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->absync = ret;
		edesc->residue += sg_dma_len(sg);

		/* If this is the last in a current SG set of transactions,
		   enable interrupts so that next set is processed */
		if (!((i+1) % MAX_NR_SG))
			edesc->pset[i].param.opt |= TCINTEN;

		/* If this is the last set, enable completion interrupt flag */
		if (i == sg_len - 1)
			edesc->pset[i].param.opt |= TCINTEN;
	}
	edesc->residue_stat = edesc->residue;

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
	struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
	size_t len, unsigned long tx_flags)
{
	int ret, nslots;
	struct edma_desc *edesc;
	struct device *dev = chan->device->dev;
	struct edma_chan *echan = to_edma_chan(chan);
	unsigned int width, pset_len;

	if (unlikely(!echan || !len))
		return NULL;

	if (len < SZ_64K) {
		/*
		 * Transfer size less than 64K can be handled with one paRAM
		 * slot and with one burst.
		 * ACNT = length
		 */
		width = len;
		pset_len = len;
		nslots = 1;
	} else {
		/*
		 * Transfer size bigger than 64K will be handled with maximum of
		 * two paRAM slots.
		 * slot1: (full_length / 32767) times 32767 bytes bursts.
		 *	  ACNT = 32767, length1: (full_length / 32767) * 32767
		 * slot2: the remaining amount of data after slot1.
		 *	  ACNT = full_length - length1, length2 = ACNT
		 *
		 * When the full_length is a multiple of 32767 one slot can be
		 * used to complete the transfer.
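		 *
		 * Example: len = 100000 gives width = 32767 and
		 * pset_len = rounddown(100000, 32767) = 98301 (three bursts)
		 * for slot1, leaving 1699 bytes for slot2.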
		 */
		width = SZ_32K - 1;
		pset_len = rounddown(len, width);
		/* One slot is enough for lengths multiple of (SZ_32K - 1) */
		if (unlikely(pset_len == len))
			nslots = 1;
		else
			nslots = 2;
	}

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_dbg(dev, "Failed to allocate a descriptor\n");
		return NULL;
	}

	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = len;
	edesc->direction = DMA_MEM_TO_MEM;
	edesc->echan = echan;

	ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
			       width, pset_len, DMA_MEM_TO_MEM);
	if (ret < 0) {
		kfree(edesc);
		return NULL;
	}

	edesc->absync = ret;

	edesc->pset[0].param.opt |= ITCCHEN;
	if (nslots == 1) {
		/* Enable transfer complete interrupt */
		edesc->pset[0].param.opt |= TCINTEN;
	} else {
		/* Enable transfer complete chaining for the first slot */
		edesc->pset[0].param.opt |= TCCHEN;

		if (echan->slot[1] < 0) {
			echan->slot[1] = edma_alloc_slot(echan->ecc,
							 EDMA_SLOT_ANY);
			if (echan->slot[1] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}
		dest += pset_len;
		src += pset_len;
		pset_len = width = len % (SZ_32K - 1);

		ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
				       width, pset_len, DMA_MEM_TO_MEM);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		edesc->pset[1].param.opt |= ITCCHEN;
		edesc->pset[1].param.opt |= TCINTEN;
	}

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
	struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long tx_flags)
{
	struct edma_chan *echan = to_edma_chan(chan);
	struct device *dev = chan->device->dev;
	struct edma_desc *edesc;
	dma_addr_t src_addr, dst_addr;
	enum dma_slave_buswidth dev_width;
	u32 burst;
	int i, ret, nslots;

	if (unlikely(!echan || !buf_len || !period_len))
		return NULL;

	if (direction == DMA_DEV_TO_MEM) {
		src_addr = echan->cfg.src_addr;
		dst_addr = buf_addr;
		dev_width = echan->cfg.src_addr_width;
		burst = echan->cfg.src_maxburst;
	} else if (direction == DMA_MEM_TO_DEV) {
		src_addr = buf_addr;
		dst_addr = echan->cfg.dst_addr;
		dev_width = echan->cfg.dst_addr_width;
		burst = echan->cfg.dst_maxburst;
	} else {
		dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
		return NULL;
	}

	if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
		return NULL;
	}

	if (unlikely(buf_len % period_len)) {
		dev_err(dev, "Buffer length should be a multiple of the period length\n");
		return NULL;
	}

	nslots = (buf_len / period_len) + 1;
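	/*
	 * The extra slot (+1 above) repeats the first period's PaRAM set and
	 * is linked back by edma_execute so the ring wraps without CPU
	 * intervention.
	 */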

	/*
	 * Cyclic DMA users such as audio cannot tolerate delays introduced
	 * by cases where the number of periods is more than the maximum
	 * number of SGs the EDMA driver can handle at a time. For DMA types
	 * such as Slave SGs, such delays are tolerable and synchronized,
	 * but the synchronization is difficult to achieve with Cyclic and
	 * cannot be guaranteed, so we error out early.
	 */
	if (nslots > MAX_NR_SG)
		return NULL;

	edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
			GFP_ATOMIC);
	if (!edesc) {
		dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__);
		return NULL;
	}

	edesc->cyclic = 1;
	edesc->pset_nr = nslots;
	edesc->residue = edesc->residue_stat = buf_len;
	edesc->direction = direction;
	edesc->echan = echan;

	dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
		__func__, echan->ch_num, nslots, period_len, buf_len);

	for (i = 0; i < nslots; i++) {
		/* Allocate a PaRAM slot, if needed */
		if (echan->slot[i] < 0) {
			echan->slot[i] =
				edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
			if (echan->slot[i] < 0) {
				kfree(edesc);
				dev_err(dev, "%s: Failed to allocate slot\n",
					__func__);
				return NULL;
			}
		}

		if (i == nslots - 1) {
			memcpy(&edesc->pset[i], &edesc->pset[0],
			       sizeof(edesc->pset[0]));
			break;
		}

		ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
				       dst_addr, burst, dev_width, period_len,
				       direction);
		if (ret < 0) {
			kfree(edesc);
			return NULL;
		}

		if (direction == DMA_DEV_TO_MEM)
			dst_addr += period_len;
		else
			src_addr += period_len;

		dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
		dev_vdbg(dev,
			"\n pset[%d]:\n"
			" chnum\t%d\n"
			" slot\t%d\n"
			" opt\t%08x\n"
			" src\t%08x\n"
			" dst\t%08x\n"
			" abcnt\t%08x\n"
			" ccnt\t%08x\n"
			" bidx\t%08x\n"
			" cidx\t%08x\n"
			" lkrld\t%08x\n",
			i, echan->ch_num, echan->slot[i],
			edesc->pset[i].param.opt,
			edesc->pset[i].param.src,
			edesc->pset[i].param.dst,
			edesc->pset[i].param.a_b_cnt,
			edesc->pset[i].param.ccnt,
			edesc->pset[i].param.src_dst_bidx,
			edesc->pset[i].param.src_dst_cidx,
			edesc->pset[i].param.link_bcntrld);

		edesc->absync = ret;

		/*
		 * Enable period interrupt only if it is requested
		 */
		if (tx_flags & DMA_PREP_INTERRUPT)
			edesc->pset[i].param.opt |= TCINTEN;
	}

	/* Place the cyclic channel to highest priority queue */
	edma_assign_channel_eventq(echan, EVENTQ_0);

	return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
}

static void edma_completion_handler(struct edma_chan *echan)
{
	struct device *dev = echan->vchan.chan.device->dev;
	struct edma_desc *edesc = echan->edesc;

	if (!edesc)
		return;

	spin_lock(&echan->vchan.lock);
	if (edesc->cyclic) {
		vchan_cyclic_callback(&edesc->vdesc);
		spin_unlock(&echan->vchan.lock);
		return;
	} else if (edesc->processed == edesc->pset_nr) {
		edesc->residue = 0;
		edma_stop(echan);
		vchan_cookie_complete(&edesc->vdesc);
		echan->edesc = NULL;

		dev_dbg(dev, "Transfer completed on channel %d\n",
			echan->ch_num);
	} else {
		dev_dbg(dev, "Sub transfer completed on channel %d\n",
			echan->ch_num);

		edma_pause(echan);

		/* Update statistics for tx_status */
		edesc->residue -= edesc->sg_len;
		edesc->residue_stat = edesc->residue;
		edesc->processed_stat = edesc->processed;
	}
	edma_execute(echan);

	spin_unlock(&echan->vchan.lock);
}

/* eDMA interrupt handler */
static irqreturn_t dma_irq_handler(int irq, void *data)
{
	struct edma_cc *ecc = data;
	int ctlr;
	u32 sh_ier;
	u32 sh_ipr;
	u32 bank;

	ctlr = ecc->id;
	if (ctlr < 0)
		return IRQ_NONE;

	dev_vdbg(ecc->dev, "dma_irq_handler\n");

	sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
	if (!sh_ipr) {
		sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
		if (!sh_ipr)
			return IRQ_NONE;
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
		bank = 1;
	} else {
		sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
		bank = 0;
	}

	do {
		u32 slot;
		u32 channel;

		slot = __ffs(sh_ipr);
		sh_ipr &= ~(BIT(slot));

		if (sh_ier & BIT(slot)) {
			channel = (bank << 5) | slot;
			/* Clear the corresponding IPR bits */
			edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
			edma_completion_handler(&ecc->slave_chans[channel]);
		}
	} while (sh_ipr);

	edma_shadow0_write(ecc, SH_IEVAL, 1);
	return IRQ_HANDLED;
}

1494 | static void edma_error_handler(struct edma_chan *echan) | |
1495 | { | |
1496 | struct edma_cc *ecc = echan->ecc; | |
1497 | struct device *dev = echan->vchan.chan.device->dev; | |
1498 | struct edmacc_param p; | |
1499 | ||
1500 | if (!echan->edesc) | |
1501 | return; | |
1502 | ||
1503 | spin_lock(&echan->vchan.lock); | |
c5f47990 | 1504 | |
79ad2e38 PU |
1505 | edma_read_slot(ecc, echan->slot[0], &p); |
1506 | /* | |
1507 | * Issue the transfer later, based on the missed flag; this is |
1508 | * guaranteed to be handled because either: |
1509 | * (1) we finished transmitting an intermediate slot and |
1510 | * edma_execute is coming up, or |
1511 | * (2) we finished the current transfer and issue_pending will |
1512 | * call edma_execute. |
1513 | * | |
1514 | * Important note: issuing can be dangerous here and | |
1515 | * lead to some nasty recursion when we are in a NULL | |
1516 | * slot. So we avoid doing so and set the missed flag. | |
1517 | */ | |
1518 | if (p.a_b_cnt == 0 && p.ccnt == 0) { | |
1519 | dev_dbg(dev, "Error on null slot, setting miss\n"); | |
1520 | echan->missed = 1; | |
1521 | } else { | |
c5f47990 | 1522 | /* |
79ad2e38 PU |
1523 | * The slot is already programmed but the event got |
1524 | * missed, so it's safe to issue it here. |
c5f47990 | 1525 | */ |
79ad2e38 | 1526 | dev_dbg(dev, "Missed event, TRIGGERING\n"); |
34cf3011 PU |
1527 | edma_clean_channel(echan); |
1528 | edma_stop(echan); | |
1529 | edma_start(echan); | |
1530 | edma_trigger_channel(echan); | |
79ad2e38 PU |
1531 | } |
1532 | spin_unlock(&echan->vchan.lock); | |
1533 | } | |
1534 | ||
7c3b8b3d PU |
1535 | static inline bool edma_error_pending(struct edma_cc *ecc) |
1536 | { | |
1537 | if (edma_read_array(ecc, EDMA_EMR, 0) || | |
1538 | edma_read_array(ecc, EDMA_EMR, 1) || | |
1539 | edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) | |
1540 | return true; | |
1541 | ||
1542 | return false; | |
1543 | } | |
1544 | ||
79ad2e38 PU |
1545 | /* eDMA error interrupt handler */ |
1546 | static irqreturn_t dma_ccerr_handler(int irq, void *data) | |
1547 | { | |
1548 | struct edma_cc *ecc = data; | |
e4402a12 | 1549 | int i, j; |
79ad2e38 PU |
1550 | int ctlr; |
1551 | unsigned int cnt = 0; | |
e4402a12 | 1552 | unsigned int val; |
79ad2e38 PU |
1553 | |
1554 | ctlr = ecc->id; | |
1555 | if (ctlr < 0) | |
1556 | return IRQ_NONE; | |
1557 | ||
1558 | dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); | |
1559 | ||
7c3b8b3d | 1560 | if (!edma_error_pending(ecc)) |
79ad2e38 PU |
1561 | return IRQ_NONE; |
1562 | ||
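/*
 * Keep clearing and dispatching error events until none remain, but give
 * up after a bounded number of passes so the handler cannot spin forever
 * if errors keep being raised.
 */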
1563 | while (1) { | |
e4402a12 PU |
1564 | /* Event missed register(s) */ |
1565 | for (j = 0; j < 2; j++) { | |
1566 | unsigned long emr; | |
1567 | ||
1568 | val = edma_read_array(ecc, EDMA_EMR, j); | |
1569 | if (!val) | |
1570 | continue; | |
1571 | ||
1572 | dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); | |
1573 | emr = val; | |
1574 | for (i = find_next_bit(&emr, 32, 0); i < 32; | |
1575 | i = find_next_bit(&emr, 32, i + 1)) { | |
79ad2e38 PU |
1576 | int k = (j << 5) + i; |
1577 | ||
e4402a12 PU |
1578 | /* Clear the corresponding EMR bits */ |
1579 | edma_write_array(ecc, EDMA_EMCR, j, BIT(i)); | |
1580 | /* Clear any SER */ | |
1581 | edma_shadow0_write_array(ecc, SH_SECR, j, | |
79ad2e38 | 1582 | BIT(i)); |
e4402a12 | 1583 | edma_error_handler(&ecc->slave_chans[k]); |
79ad2e38 | 1584 | } |
c5f47990 | 1585 | } |
e4402a12 PU |
1586 | |
1587 | val = edma_read(ecc, EDMA_QEMR); | |
1588 | if (val) { | |
1589 | dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); | |
1590 | /* Not reported, just clear the interrupt reason. */ | |
1591 | edma_write(ecc, EDMA_QEMCR, val); | |
1592 | edma_shadow0_write(ecc, SH_QSECR, val); | |
1593 | } | |
1594 | ||
1595 | val = edma_read(ecc, EDMA_CCERR); | |
1596 | if (val) { | |
1597 | dev_warn(ecc->dev, "CCERR 0x%08x\n", val); | |
1598 | /* Not reported, just clear the interrupt reason. */ | |
1599 | edma_write(ecc, EDMA_CCERRCLR, val); | |
1600 | } | |
1601 | ||
7c3b8b3d | 1602 | if (!edma_error_pending(ecc)) |
79ad2e38 PU |
1603 | break; |
1604 | cnt++; | |
1605 | if (cnt > 10) | |
1606 | break; | |
c2dde5f8 | 1607 | } |
79ad2e38 PU |
1608 | edma_write(ecc, EDMA_EEVAL, 1); |
1609 | return IRQ_HANDLED; | |
c2dde5f8 MP |
1610 | } |
1611 | ||
1612 | /* Alloc channel resources */ | |
1613 | static int edma_alloc_chan_resources(struct dma_chan *chan) | |
1614 | { | |
1615 | struct edma_chan *echan = to_edma_chan(chan); | |
1616 | struct device *dev = chan->device->dev; | |
1617 | int ret; | |
c2dde5f8 | 1618 | |
34cf3011 PU |
1619 | ret = edma_alloc_channel(echan, EVENTQ_DEFAULT); |
1620 | if (ret) | |
1621 | return ret; | |
c2dde5f8 | 1622 | |
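/* slot[0] serves as the channel's entry PaRAM slot */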
e4e886c6 PU |
1623 | echan->slot[0] = edma_alloc_slot(echan->ecc, echan->ch_num); |
1624 | if (echan->slot[0] < 0) { | |
1625 | dev_err(dev, "Entry slot allocation failed for channel %u\n", | |
1626 | EDMA_CHAN_SLOT(echan->ch_num)); | |
34cf3011 | 1627 | goto err_slot; |
e4e886c6 PU |
1628 | } |
1629 | ||
1630 | /* Set up channel -> slot mapping for the entry slot */ | |
34cf3011 PU |
1631 | edma_set_chmap(echan, echan->slot[0]); |
1632 | echan->alloced = true; | |
c2dde5f8 | 1633 | |
9aac9096 | 1634 | dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, |
0e772c67 | 1635 | EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); |
c2dde5f8 MP |
1636 | |
1637 | return 0; | |
1638 | ||
34cf3011 PU |
1639 | err_slot: |
1640 | edma_free_channel(echan); | |
c2dde5f8 MP |
1641 | return ret; |
1642 | } | |
1643 | ||
1644 | /* Free channel resources */ | |
1645 | static void edma_free_chan_resources(struct dma_chan *chan) | |
1646 | { | |
1647 | struct edma_chan *echan = to_edma_chan(chan); | |
c2dde5f8 MP |
1648 | int i; |
1649 | ||
1650 | /* Terminate transfers */ | |
34cf3011 | 1651 | edma_stop(echan); |
c2dde5f8 MP |
1652 | |
1653 | vchan_free_chan_resources(&echan->vchan); | |
1654 | ||
1655 | /* Free EDMA PaRAM slots */ | |
e4e886c6 | 1656 | for (i = 0; i < EDMA_MAX_SLOTS; i++) { |
c2dde5f8 | 1657 | if (echan->slot[i] >= 0) { |
2b6b3b74 | 1658 | edma_free_slot(echan->ecc, echan->slot[i]); |
c2dde5f8 MP |
1659 | echan->slot[i] = -1; |
1660 | } | |
1661 | } | |
1662 | ||
e4e886c6 | 1663 | /* Set entry slot to the dummy slot */ |
34cf3011 | 1664 | edma_set_chmap(echan, echan->ecc->dummy_slot); |
e4e886c6 | 1665 | |
c2dde5f8 MP |
1666 | /* Free EDMA channel */ |
1667 | if (echan->alloced) { | |
34cf3011 | 1668 | edma_free_channel(echan); |
c2dde5f8 MP |
1669 | echan->alloced = false; |
1670 | } | |
1671 | ||
907f74a0 | 1672 | dev_dbg(chan->device->dev, "freeing channel for %u\n", echan->ch_num); |
c2dde5f8 MP |
1673 | } |
1674 | ||
1675 | /* Send pending descriptor to hardware */ | |
1676 | static void edma_issue_pending(struct dma_chan *chan) | |
1677 | { | |
1678 | struct edma_chan *echan = to_edma_chan(chan); | |
1679 | unsigned long flags; | |
1680 | ||
1681 | spin_lock_irqsave(&echan->vchan.lock, flags); | |
1682 | if (vchan_issue_pending(&echan->vchan) && !echan->edesc) | |
1683 | edma_execute(echan); | |
1684 | spin_unlock_irqrestore(&echan->vchan.lock, flags); | |
1685 | } | |
1686 | ||
740b41f7 TG |
1687 | static u32 edma_residue(struct edma_desc *edesc) |
1688 | { | |
1689 | bool dst = edesc->direction == DMA_DEV_TO_MEM; | |
1690 | struct edma_pset *pset = edesc->pset; | |
1691 | dma_addr_t done, pos; | |
1692 | int i; | |
1693 | ||
1694 | /* | |
1695 | * We always read the dst/src position from the first PaRAM |
1696 | * set (pset). That's the one which is active now. |
1697 | */ | |
2b6b3b74 | 1698 | pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); |
740b41f7 TG |
1699 | |
1700 | /* | |
1701 | * Cyclic is simple. Just subtract pset[0].addr from pos. | |
1702 | * | |
1703 | * We never update edesc->residue in the cyclic case, so we | |
1704 | * can report the remaining room to the end of the circular |
1705 | * buffer. | |
1706 | */ | |
1707 | if (edesc->cyclic) { | |
1708 | done = pos - pset->addr; | |
1709 | edesc->residue_stat = edesc->residue - done; | |
1710 | return edesc->residue_stat; | |
1711 | } | |
1712 | ||
1713 | /* | |
1714 | * For SG operation we catch up with the last processed | |
1715 | * status. | |
1716 | */ | |
1717 | pset += edesc->processed_stat; | |
1718 | ||
1719 | for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) { | |
1720 | /* | |
1721 | * If we are inside this pset address range, we know | |
1722 | * this is the active one. Get the current delta and | |
1723 | * stop walking the psets. | |
1724 | */ | |
1725 | if (pos >= pset->addr && pos < pset->addr + pset->len) | |
1726 | return edesc->residue_stat - (pos - pset->addr); | |
1727 | ||
1728 | /* Otherwise mark it done and update residue_stat. */ | |
1729 | edesc->processed_stat++; | |
1730 | edesc->residue_stat -= pset->len; | |
1731 | } | |
1732 | return edesc->residue_stat; | |
1733 | } | |
1734 | ||
c2dde5f8 MP |
1735 | /* Check request completion status */ |
1736 | static enum dma_status edma_tx_status(struct dma_chan *chan, | |
1737 | dma_cookie_t cookie, | |
1738 | struct dma_tx_state *txstate) | |
1739 | { | |
1740 | struct edma_chan *echan = to_edma_chan(chan); | |
1741 | struct virt_dma_desc *vdesc; | |
1742 | enum dma_status ret; | |
1743 | unsigned long flags; | |
1744 | ||
1745 | ret = dma_cookie_status(chan, cookie, txstate); | |
9d386ec5 | 1746 | if (ret == DMA_COMPLETE || !txstate) |
c2dde5f8 MP |
1747 | return ret; |
1748 | ||
1749 | spin_lock_irqsave(&echan->vchan.lock, flags); | |
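/*
 * Report residue from whichever descriptor owns the cookie: the currently
 * active one if it matches, otherwise a descriptor still sitting on the
 * issued list.
 */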
de135939 | 1750 | if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie) |
740b41f7 | 1751 | txstate->residue = edma_residue(echan->edesc); |
de135939 TG |
1752 | else if ((vdesc = vchan_find_desc(&echan->vchan, cookie))) |
1753 | txstate->residue = to_edma_desc(&vdesc->tx)->residue; | |
c2dde5f8 MP |
1754 | spin_unlock_irqrestore(&echan->vchan.lock, flags); |
1755 | ||
1756 | return ret; | |
1757 | } | |
1758 | ||
2b6b3b74 | 1759 | static void __init edma_chan_init(struct edma_cc *ecc, struct dma_device *dma, |
c2dde5f8 MP |
1760 | struct edma_chan *echans) |
1761 | { | |
1762 | int i, j; | |
1763 | ||
cb782059 | 1764 | for (i = 0; i < ecc->num_channels; i++) { |
c2dde5f8 | 1765 | struct edma_chan *echan = &echans[i]; |
2b6b3b74 | 1766 | echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); |
c2dde5f8 MP |
1767 | echan->ecc = ecc; |
1768 | echan->vchan.desc_free = edma_desc_free; | |
1769 | ||
1770 | vchan_init(&echan->vchan, dma); | |
1771 | ||
1772 | INIT_LIST_HEAD(&echan->node); | |
1773 | for (j = 0; j < EDMA_MAX_SLOTS; j++) | |
1774 | echan->slot[j] = -1; | |
1775 | } | |
1776 | } | |
1777 | ||
2c88ee6b PU |
1778 | #define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ |
1779 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | |
e4a899d9 | 1780 | BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ |
2c88ee6b PU |
1781 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) |
1782 | ||
c2dde5f8 MP |
1783 | static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, |
1784 | struct device *dev) | |
1785 | { | |
1786 | dma->device_prep_slave_sg = edma_prep_slave_sg; | |
50a9c707 | 1787 | dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; |
8cc3e30b | 1788 | dma->device_prep_dma_memcpy = edma_prep_dma_memcpy; |
c2dde5f8 MP |
1789 | dma->device_alloc_chan_resources = edma_alloc_chan_resources; |
1790 | dma->device_free_chan_resources = edma_free_chan_resources; | |
1791 | dma->device_issue_pending = edma_issue_pending; | |
1792 | dma->device_tx_status = edma_tx_status; | |
aa7c09b6 MR |
1793 | dma->device_config = edma_slave_config; |
1794 | dma->device_pause = edma_dma_pause; | |
1795 | dma->device_resume = edma_dma_resume; | |
1796 | dma->device_terminate_all = edma_terminate_all; | |
9f59cd05 MR |
1797 | |
1798 | dma->src_addr_widths = EDMA_DMA_BUSWIDTHS; | |
1799 | dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS; | |
1800 | dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | |
1801 | dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
1802 | ||
c2dde5f8 MP |
1803 | dma->dev = dev; |
1804 | ||
1805 | INIT_LIST_HEAD(&dma->channels); | |
1806 | } | |
1807 | ||
2b6b3b74 PU |
1808 | static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, |
1809 | struct edma_cc *ecc) | |
1810 | { | |
1811 | int i; | |
1812 | u32 value, cccfg; | |
1813 | s8 (*queue_priority_map)[2]; | |
1814 | ||
1815 | /* Decode the eDMA3 configuration from CCCFG register */ | |
1816 | cccfg = edma_read(ecc, EDMA_CCCFG); | |
1817 | ||
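/* Most resource counts are encoded in CCCFG as powers of two */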
1818 | value = GET_NUM_REGN(cccfg); | |
1819 | ecc->num_region = BIT(value); | |
1820 | ||
1821 | value = GET_NUM_DMACH(cccfg); | |
1822 | ecc->num_channels = BIT(value + 1); | |
1823 | ||
633e42b8 PU |
1824 | value = GET_NUM_QDMACH(cccfg); |
1825 | ecc->num_qchannels = value * 2; | |
1826 | ||
2b6b3b74 PU |
1827 | value = GET_NUM_PAENTRY(cccfg); |
1828 | ecc->num_slots = BIT(value + 4); | |
1829 | ||
1830 | value = GET_NUM_EVQUE(cccfg); | |
1831 | ecc->num_tc = value + 1; | |
1832 | ||
4ab54f69 PU |
1833 | ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; |
1834 | ||
2b6b3b74 PU |
1835 | dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg); |
1836 | dev_dbg(dev, "num_region: %u\n", ecc->num_region); | |
1837 | dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); | |
633e42b8 | 1838 | dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); |
2b6b3b74 PU |
1839 | dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); |
1840 | dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); | |
4ab54f69 | 1841 | dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); |
2b6b3b74 PU |
1842 | |
1843 | /* Nothing needs to be done if a queue priority mapping is provided */ |
1844 | if (pdata->queue_priority_mapping) | |
1845 | return 0; | |
1846 | ||
1847 | /* | |
1848 | * Configure TC/queue priority as follows: | |
1849 | * Q0 - priority 0 | |
1850 | * Q1 - priority 1 | |
1851 | * Q2 - priority 2 | |
1852 | * ... | |
1853 | * The meaning of priority numbers: 0 highest priority, 7 lowest | |
1854 | * priority. So Q0 is the highest priority queue and the last queue has | |
1855 | * the lowest priority. | |
1856 | */ | |
547c6e27 | 1857 | queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), |
2b6b3b74 PU |
1858 | GFP_KERNEL); |
1859 | if (!queue_priority_map) | |
1860 | return -ENOMEM; | |
1861 | ||
1862 | for (i = 0; i < ecc->num_tc; i++) { | |
1863 | queue_priority_map[i][0] = i; | |
1864 | queue_priority_map[i][1] = i; | |
1865 | } | |
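/* Terminate the map; consumers stop at the first -1 entry */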
1866 | queue_priority_map[i][0] = -1; | |
1867 | queue_priority_map[i][1] = -1; | |
1868 | ||
1869 | pdata->queue_priority_mapping = queue_priority_map; | |
1870 | /* Default queue has the lowest priority */ | |
1871 | pdata->default_queue = i - 1; | |
1872 | ||
1873 | return 0; | |
1874 | } | |
1875 | ||
1876 | #if IS_ENABLED(CONFIG_OF) | |
1877 | static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata, | |
1878 | size_t sz) | |
1879 | { | |
1880 | const char pname[] = "ti,edma-xbar-event-map"; | |
1881 | struct resource res; | |
1882 | void __iomem *xbar; | |
1883 | s16 (*xbar_chans)[2]; | |
1884 | size_t nelm = sz / sizeof(s16); | |
1885 | u32 shift, offset, mux; | |
1886 | int ret, i; | |
1887 | ||
547c6e27 | 1888 | xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL); |
2b6b3b74 PU |
1889 | if (!xbar_chans) |
1890 | return -ENOMEM; | |
1891 | ||
1892 | ret = of_address_to_resource(dev->of_node, 1, &res); | |
1893 | if (ret) | |
1894 | return -ENOMEM; | |
1895 | ||
1896 | xbar = devm_ioremap(dev, res.start, resource_size(&res)); | |
1897 | if (!xbar) | |
1898 | return -ENOMEM; | |
1899 | ||
1900 | ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, | |
1901 | nelm); | |
1902 | if (ret) | |
1903 | return -EIO; | |
1904 | ||
1905 | /* Invalidate last entry for the other user of this mess */ | |
1906 | nelm >>= 1; | |
1907 | xbar_chans[nelm][0] = -1; | |
1908 | xbar_chans[nelm][1] = -1; | |
1909 | ||
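/*
 * Program the crossbar: in each pair, [0] is the eDMA event number to
 * route and [1] encodes the mux register offset and the byte lane within
 * that register.
 */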
1910 | for (i = 0; i < nelm; i++) { | |
1911 | shift = (xbar_chans[i][1] & 0x03) << 3; | |
1912 | offset = xbar_chans[i][1] & 0xfffffffc; | |
1913 | mux = readl(xbar + offset); | |
1914 | mux &= ~(0xff << shift); | |
1915 | mux |= xbar_chans[i][0] << shift; | |
1916 | writel(mux, (xbar + offset)); | |
1917 | } | |
1918 | ||
1919 | pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; | |
1920 | return 0; | |
1921 | } | |
1922 | ||
1923 | static int edma_of_parse_dt(struct device *dev, struct edma_soc_info *pdata) | |
1924 | { | |
1925 | int ret = 0; | |
1926 | struct property *prop; | |
1927 | size_t sz; | |
1928 | struct edma_rsv_info *rsv_info; | |
1929 | ||
1930 | rsv_info = devm_kzalloc(dev, sizeof(struct edma_rsv_info), GFP_KERNEL); | |
1931 | if (!rsv_info) | |
1932 | return -ENOMEM; | |
1933 | pdata->rsv = rsv_info; | |
1934 | ||
1935 | prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", &sz); | |
1936 | if (prop) | |
1937 | ret = edma_xbar_event_map(dev, pdata, sz); | |
1938 | ||
1939 | return ret; | |
1940 | } | |
1941 | ||
1942 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev) | |
1943 | { | |
1944 | struct edma_soc_info *info; | |
1945 | int ret; | |
1946 | ||
1947 | info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); | |
1948 | if (!info) | |
1949 | return ERR_PTR(-ENOMEM); | |
1950 | ||
1951 | ret = edma_of_parse_dt(dev, info); | |
1952 | if (ret) | |
1953 | return ERR_PTR(ret); | |
1954 | ||
1955 | return info; | |
1956 | } | |
1957 | #else | |
1958 | static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev) | |
1959 | { | |
1960 | return ERR_PTR(-EINVAL); | |
1961 | } | |
1962 | #endif | |
1963 | ||
463a1f8b | 1964 | static int edma_probe(struct platform_device *pdev) |
c2dde5f8 | 1965 | { |
2b6b3b74 PU |
1966 | struct edma_soc_info *info = pdev->dev.platform_data; |
1967 | s8 (*queue_priority_mapping)[2]; | |
1968 | int i, off, ln; | |
1969 | const s16 (*rsv_chans)[2]; | |
1970 | const s16 (*rsv_slots)[2]; | |
1971 | const s16 (*xbar_chans)[2]; | |
1972 | int irq; | |
1973 | char *irq_name; | |
1974 | struct resource *mem; | |
1975 | struct device_node *node = pdev->dev.of_node; | |
1976 | struct device *dev = &pdev->dev; | |
1977 | struct edma_cc *ecc; | |
c2dde5f8 MP |
1978 | int ret; |
1979 | ||
2b6b3b74 PU |
1980 | if (node) { |
1981 | info = edma_setup_info_from_dt(dev); | |
1982 | if (IS_ERR(info)) { | |
1983 | dev_err(dev, "failed to get DT data\n"); | |
1984 | return PTR_ERR(info); | |
1985 | } | |
1986 | } | |
1987 | ||
1988 | if (!info) | |
1989 | return -ENODEV; | |
1990 | ||
1991 | pm_runtime_enable(dev); | |
1992 | ret = pm_runtime_get_sync(dev); | |
1993 | if (ret < 0) { | |
1994 | dev_err(dev, "pm_runtime_get_sync() failed\n"); | |
1995 | return ret; | |
1996 | } | |
1997 | ||
907f74a0 | 1998 | ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); |
94cb0e79 RK |
1999 | if (ret) |
2000 | return ret; | |
2001 | ||
907f74a0 | 2002 | ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); |
c2dde5f8 | 2003 | if (!ecc) { |
907f74a0 | 2004 | dev_err(dev, "Can't allocate controller\n"); |
c2dde5f8 MP |
2005 | return -ENOMEM; |
2006 | } | |
2007 | ||
2b6b3b74 PU |
2008 | ecc->dev = dev; |
2009 | ecc->id = pdev->id; | |
2010 | /* When booting with DT the pdev->id is -1 */ | |
2011 | if (ecc->id < 0) | |
2012 | ecc->id = 0; | |
2013 | ||
2014 | mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc"); | |
2015 | if (!mem) { | |
2016 | dev_dbg(dev, "mem resource not found, using index 0\n"); | |
2017 | mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
2018 | if (!mem) { | |
2019 | dev_err(dev, "no mem resource?\n"); | |
2020 | return -ENODEV; | |
2021 | } | |
2022 | } | |
2023 | ecc->base = devm_ioremap_resource(dev, mem); | |
2024 | if (IS_ERR(ecc->base)) | |
2025 | return PTR_ERR(ecc->base); | |
2026 | ||
2027 | platform_set_drvdata(pdev, ecc); | |
2028 | ||
2029 | /* Get eDMA3 configuration from IP */ | |
2030 | ret = edma_setup_from_hw(dev, info, ecc); | |
2031 | if (ret) | |
2032 | return ret; | |
2033 | ||
cb782059 PU |
2034 | /* Allocate memory based on the information we got from the IP */ |
2035 | ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, | |
2036 | sizeof(*ecc->slave_chans), GFP_KERNEL); | |
2037 | if (!ecc->slave_chans) | |
2038 | return -ENOMEM; | |
2039 | ||
7a73b135 PU |
2040 | ecc->channel_unused = devm_kcalloc(dev, |
2041 | BITS_TO_LONGS(ecc->num_channels), | |
2042 | sizeof(unsigned long), GFP_KERNEL); | |
2043 | if (!ecc->channel_unused) | |
cb782059 PU |
2044 | return -ENOMEM; |
2045 | ||
7a73b135 | 2046 | ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), |
cb782059 | 2047 | sizeof(unsigned long), GFP_KERNEL); |
7a73b135 | 2048 | if (!ecc->slot_inuse) |
cb782059 PU |
2049 | return -ENOMEM; |
2050 | ||
2b6b3b74 PU |
2051 | ecc->default_queue = info->default_queue; |
2052 | ||
2053 | for (i = 0; i < ecc->num_slots; i++) | |
2054 | edma_write_slot(ecc, i, &dummy_paramset); | |
2055 | ||
2056 | /* Mark all channels as unused */ | |
7a73b135 | 2057 | memset(ecc->channel_unused, 0xff, BITS_TO_LONGS(ecc->num_channels) * sizeof(unsigned long)); |
2b6b3b74 PU |
2058 | |
2059 | if (info->rsv) { | |
2060 | /* Clear the reserved channels in unused list */ | |
2061 | rsv_chans = info->rsv->rsv_chans; | |
2062 | if (rsv_chans) { | |
2063 | for (i = 0; rsv_chans[i][0] != -1; i++) { | |
2064 | off = rsv_chans[i][0]; | |
2065 | ln = rsv_chans[i][1]; | |
7a73b135 | 2066 | clear_bits(off, ln, ecc->channel_unused); |
2b6b3b74 PU |
2067 | } |
2068 | } | |
2069 | ||
2070 | /* Set the reserved slots in inuse list */ | |
2071 | rsv_slots = info->rsv->rsv_slots; | |
2072 | if (rsv_slots) { | |
2073 | for (i = 0; rsv_slots[i][0] != -1; i++) { | |
2074 | off = rsv_slots[i][0]; | |
2075 | ln = rsv_slots[i][1]; | |
7a73b135 | 2076 | set_bits(off, ln, ecc->slot_inuse); |
2b6b3b74 PU |
2077 | } |
2078 | } | |
2079 | } | |
2080 | ||
2081 | /* Clear the xbar mapped channels in unused list */ | |
2082 | xbar_chans = info->xbar_chans; | |
2083 | if (xbar_chans) { | |
2084 | for (i = 0; xbar_chans[i][1] != -1; i++) { | |
2085 | off = xbar_chans[i][1]; | |
7a73b135 | 2086 | clear_bits(off, 1, ecc->channel_unused); |
2b6b3b74 PU |
2087 | } |
2088 | } | |
2089 | ||
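/*
 * The completion and error interrupts come either from named platform
 * resources or, when booting with DT, from the node's interrupts
 * property.
 */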
2090 | irq = platform_get_irq_byname(pdev, "edma3_ccint"); | |
2091 | if (irq < 0 && node) | |
2092 | irq = irq_of_parse_and_map(node, 0); | |
2093 | ||
2094 | if (irq >= 0) { | |
2095 | irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", | |
2096 | dev_name(dev)); | |
2097 | ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, | |
2098 | ecc); | |
2099 | if (ret) { | |
2100 | dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); | |
2101 | return ret; | |
2102 | } | |
2103 | } | |
2104 | ||
2105 | irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); | |
2106 | if (irq < 0 && node) | |
2107 | irq = irq_of_parse_and_map(node, 2); | |
2108 | ||
2109 | if (irq >= 0) { | |
2110 | irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", | |
2111 | dev_name(dev)); | |
2112 | ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, | |
2113 | ecc); | |
2114 | if (ret) { | |
2115 | dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); | |
2116 | return ret; | |
2117 | } | |
2118 | } | |
2119 | ||
e4e886c6 PU |
2120 | ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); |
2121 | if (ecc->dummy_slot < 0) { | |
2122 | dev_err(dev, "Can't allocate PaRAM dummy slot\n"); | |
2123 | return ecc->dummy_slot; | |
2124 | } | |
2125 | ||
2b6b3b74 PU |
2126 | queue_priority_mapping = info->queue_priority_mapping; |
2127 | ||
2128 | /* Event queue priority mapping */ | |
2129 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | |
2130 | edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], | |
2131 | queue_priority_mapping[i][1]); | |
ca304fa9 | 2132 | |
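/*
 * Start with all shadow region access enables cleared; DRAE bits are
 * enabled later for the channels that actually get allocated.
 */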
2b6b3b74 PU |
2133 | for (i = 0; i < ecc->num_region; i++) { |
2134 | edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0); | |
2135 | edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0); | |
2136 | edma_write_array(ecc, EDMA_QRAE, i, 0x0); | |
2137 | } | |
2138 | ecc->info = info; | |
2139 | ||
c2dde5f8 MP |
2140 | dma_cap_zero(ecc->dma_slave.cap_mask); |
2141 | dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); | |
232b223d | 2142 | dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask); |
8cc3e30b | 2143 | dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask); |
c2dde5f8 | 2144 | |
907f74a0 | 2145 | edma_dma_init(ecc, &ecc->dma_slave, dev); |
c2dde5f8 MP |
2146 | |
2147 | edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); | |
2148 | ||
34cf3011 PU |
2149 | for (i = 0; i < ecc->num_channels; i++) { |
2150 | /* Assign all channels to the default queue */ | |
f9425deb PU |
2151 | edma_assign_channel_eventq(&ecc->slave_chans[i], |
2152 | info->default_queue); | |
34cf3011 PU |
2153 | /* Set entry slot to the dummy slot */ |
2154 | edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); | |
2155 | } | |
2156 | ||
c2dde5f8 MP |
2157 | ret = dma_async_device_register(&ecc->dma_slave); |
2158 | if (ret) | |
2159 | goto err_reg1; | |
2160 | ||
2b6b3b74 PU |
2161 | if (node) |
2162 | of_dma_controller_register(node, of_dma_xlate_by_chan_id, | |
b2c843a1 | 2163 | &ecc->dma_slave); |
dc9b6055 | 2164 | |
907f74a0 | 2165 | dev_info(dev, "TI EDMA DMA engine driver\n"); |
c2dde5f8 MP |
2166 | |
2167 | return 0; | |
2168 | ||
2169 | err_reg1: | |
2b6b3b74 | 2170 | edma_free_slot(ecc, ecc->dummy_slot); |
c2dde5f8 MP |
2171 | return ret; |
2172 | } | |
2173 | ||
4bf27b8b | 2174 | static int edma_remove(struct platform_device *pdev) |
c2dde5f8 MP |
2175 | { |
2176 | struct device *dev = &pdev->dev; | |
2177 | struct edma_cc *ecc = dev_get_drvdata(dev); | |
2178 | ||
907f74a0 PU |
2179 | if (dev->of_node) |
2180 | of_dma_controller_free(dev->of_node); | |
c2dde5f8 | 2181 | dma_async_device_unregister(&ecc->dma_slave); |
2b6b3b74 | 2182 | edma_free_slot(ecc, ecc->dummy_slot); |
c2dde5f8 MP |
2183 | |
2184 | return 0; | |
2185 | } | |
2186 | ||
2b6b3b74 PU |
2187 | #ifdef CONFIG_PM_SLEEP |
2188 | static int edma_pm_resume(struct device *dev) | |
2189 | { | |
2190 | struct edma_cc *ecc = dev_get_drvdata(dev); | |
e4e886c6 | 2191 | struct edma_chan *echan = ecc->slave_chans; |
2b6b3b74 PU |
2192 | int i; |
2193 | s8 (*queue_priority_mapping)[2]; | |
2194 | ||
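/*
 * The CC is assumed to have lost register context over sleep: restore the
 * queue priorities and, for every channel that was allocated, re-enable
 * shadow region 0 access, the completion interrupt and the channel ->
 * entry-slot mapping.
 */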
2195 | queue_priority_mapping = ecc->info->queue_priority_mapping; | |
2196 | ||
2197 | /* Event queue priority mapping */ | |
2198 | for (i = 0; queue_priority_mapping[i][0] != -1; i++) | |
2199 | edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], | |
2200 | queue_priority_mapping[i][1]); | |
2201 | ||
2b6b3b74 | 2202 | for (i = 0; i < ecc->num_channels; i++) { |
e4e886c6 | 2203 | if (echan[i].alloced) { |
2b6b3b74 PU |
2204 | /* ensure access through shadow region 0 */ |
2205 | edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, | |
2206 | BIT(i & 0x1f)); | |
2207 | ||
34cf3011 | 2208 | edma_setup_interrupt(&echan[i], true); |
e4e886c6 PU |
2209 | |
2210 | /* Set up channel -> slot mapping for the entry slot */ | |
34cf3011 | 2211 | edma_set_chmap(&echan[i], echan[i].slot[0]); |
2b6b3b74 PU |
2212 | } |
2213 | } | |
2214 | ||
2215 | return 0; | |
2216 | } | |
2217 | #endif | |
2218 | ||
2219 | static const struct dev_pm_ops edma_pm_ops = { | |
2220 | SET_LATE_SYSTEM_SLEEP_PM_OPS(NULL, edma_pm_resume) | |
2221 | }; | |
2222 | ||
c2dde5f8 MP |
2223 | static struct platform_driver edma_driver = { |
2224 | .probe = edma_probe, | |
a7d6e3ec | 2225 | .remove = edma_remove, |
c2dde5f8 | 2226 | .driver = { |
2b6b3b74 PU |
2227 | .name = "edma", |
2228 | .pm = &edma_pm_ops, | |
2229 | .of_match_table = edma_of_ids, | |
c2dde5f8 MP |
2230 | }, |
2231 | }; | |
2232 | ||
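/*
 * Filter for legacy (non-DT) channel requests: matches when the requested
 * channel number equals the eDMA channel behind @chan.
 *
 * A minimal usage sketch (the channel number 12 below is purely
 * hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch_num = 12;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch_num);
 */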
2233 | bool edma_filter_fn(struct dma_chan *chan, void *param) | |
2234 | { | |
2235 | if (chan->device->dev->driver == &edma_driver.driver) { | |
2236 | struct edma_chan *echan = to_edma_chan(chan); | |
2237 | unsigned ch_req = *(unsigned *)param; | |
2238 | return ch_req == echan->ch_num; | |
2239 | } | |
2240 | return false; | |
2241 | } | |
2242 | EXPORT_SYMBOL(edma_filter_fn); | |
2243 | ||
c2dde5f8 MP |
2244 | static int edma_init(void) |
2245 | { | |
5305e4d6 | 2246 | return platform_driver_register(&edma_driver); |
c2dde5f8 MP |
2247 | } |
2248 | subsys_initcall(edma_init); | |
2249 | ||
2250 | static void __exit edma_exit(void) | |
2251 | { | |
c2dde5f8 MP |
2252 | platform_driver_unregister(&edma_driver); |
2253 | } | |
2254 | module_exit(edma_exit); | |
2255 | ||
d71505b6 | 2256 | MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>"); |
c2dde5f8 MP |
2257 | MODULE_DESCRIPTION("TI EDMA DMA engine driver"); |
2258 | MODULE_LICENSE("GPL v2"); |