/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 *
 * License terms: GNU General Public License (GPL), version 2
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR	0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR	0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR	0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR	0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI	BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI	BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI	BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI	BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI	BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI	(STM32_DMA_TCI \
			 | STM32_DMA_TEI \
			 | STM32_DMA_DMEI \
			 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)	(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)	((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)	((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)	((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK	GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)	((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)	((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)	((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK	GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)	((n & 0x3) << 6)
#define STM32_DMA_SCR_CT	BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM	BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS	BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC	BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC	BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC	BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL	BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE	BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE	BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE	BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN	BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK	(STM32_DMA_SCR_PINC \
				 | STM32_DMA_SCR_MINC \
				 | STM32_DMA_SCR_PINCOS \
				 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK	(STM32_DMA_SCR_TCIE \
				 | STM32_DMA_SCR_TEIE \
				 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)	(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)	(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)	(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)	(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)	(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK	GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)	(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE	BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS	BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK	(STM32_DMA_SFCR_FEIE \
				 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM	0x00
#define STM32_DMA_MEM_TO_DEV	0x01
#define STM32_DMA_MEM_TO_MEM	0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL	0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL		0x03

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
 * gather at boundary. Thus it's safer to round down this value on FIFO
 * size (16 Bytes)
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
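/*
 * Worked example (added for clarity): ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
 * evaluates to 0xfff0, so a single transfer is capped at 0xfff0 data items.
 */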
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	struct reset_control *rst;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static struct stm32_dma_desc *stm32_dma_alloc_desc(u32 num_sgs)
{
	return kzalloc(sizeof(struct stm32_dma_desc) +
		       sizeof(struct stm32_dma_sg_req) * num_sgs, GFP_NOWAIT);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	return max_width;
}
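/*
 * Worked example (added for clarity, derived from the loop above): with the
 * FIFO threshold set to FULL the search starts at 4 bytes, so buf_len = 8
 * keeps a 4-byte width while buf_len = 6 falls back to 2 bytes (6 % 4 != 0).
 */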

static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If number of beats fit in several whole bursts
			 * this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
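/*
 * Worked example (added for clarity): with a FULL threshold (threshold = 3)
 * and a 4-byte width, the FIFO drains (16 / 4) * (3 + 1) / 4 = 4 beats, so a
 * burst of 4 beats is allowed (4 % 4 == 0) while bursts of 8 or 16 are not.
 */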

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/*
	 * Buffer or period length has to be aligned on FIFO depth.
	 * Otherwise bytes may be stuck within FIFO at buffer or period
	 * length.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}

static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}
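/*
 * Worked example (added for clarity): for a 64-byte buffer, a FULL FIFO
 * threshold and a 4-byte width, the initial 16-beat burst is halved down to
 * 4 beats, the largest value accepted by
 * stm32_dma_fifo_threshold_is_allowed() that still fits the buffer.
 */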

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}
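/*
 * Illustrative client-side usage (editor's sketch, not part of this driver;
 * the peripheral register address below is a placeholder): a slave driver
 * that owns a channel typically caches its settings through the generic
 * dmaengine helper, which lands in stm32_dma_slave_config() above:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = periph_base + RX_DATA_REG_OFFSET,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst = 1,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *
 * The configuration is only applied when a transfer is prepared.
 */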

static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */

	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}
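/*
 * Worked example (added for clarity): each ISR/IFCR register packs four
 * channels at bit offsets 0, 6, 16 and 22. For channel 5 the HISR is used
 * (id & 4) and the shift is ((5 & 2) << 3) | ((5 & 1) * 6) = 6; channel 6
 * gives 16 and channel 7 gives 22.
 */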

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	u32 dma_scr, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);

		do {
			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
			dma_scr &= STM32_DMA_SCR_EN;
			if (!dma_scr)
				break;

			if (time_after_eq(jiffies, timeout)) {
				dev_err(chan2dev(chan), "%s: timeout!\n",
					__func__);
				return -EBUSY;
			}
			cond_resched();
		} while (1);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->busy) {
		stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				list_del(&chan->desc->vdesc.node);
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}
	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, threshold;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	threshold = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = stm32_dma_alloc_desc(sg_len);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * More requests can be queued as long as the DMA has not been
	 * started; the driver will loop over all of them. Once the DMA is
	 * started, new requests can be queued only after terminating it.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = stm32_dma_alloc_desc(num_periods);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = stm32_dma_alloc_desc(num_sgs);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}
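/*
 * Worked example (added for clarity): SxNDTR counts data items of the
 * peripheral width (PSIZE), so with a 32-bit peripheral size (PSIZE code 2)
 * and NDTR = 10 the remaining amount is 10 << 2 = 40 bytes.
 */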

/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function, called while IRQs are disabled, checks that the hardware has
 * not switched to the next transfer in double buffer mode. The test is done by
 * comparing the next_sg memory address with the hardware related register
 * (based on the CT bit value).
 *
 * Returns true if the expected current transfer is still running or if double
 * buffer mode is not activated.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));

	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
}

static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Calculating the residue means computing the descriptor
	 * information:
	 * - the sg_req currently transferred
	 * - the hardware remaining position in this sg (NDTR bits field).
	 *
	 * A race condition may occur if DMA is running in cyclic or double
	 * buffer mode, since the DMA registers are automatically reloaded at
	 * end of period transfer. The hardware may have switched to the next
	 * transfer (CT bit updated) just before the position (SxNDTR reg) is
	 * read.
	 * In this case the SxNDTR reg could (or not) correspond to the new
	 * transfer position, and not the expected one.
	 * The strategy implemented in the stm32 driver is to:
	 * - read the SxNDTR register
	 * - crosscheck that the hardware is still in the current transfer.
	 * In case of a switch, we can assume that the DMA is at the beginning
	 * of the next transfer, so we approximate the residue accordingly, by
	 * pointing to the beginning of the next transfer.
	 *
	 * This race condition doesn't apply to non-cyclic mode, as double
	 * buffer is not used. In such a situation the registers are updated
	 * by the software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if (!stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes
	 * from NDTR,
	 * else for all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining
	 * periods/sg to be transferred
	 */
	if (!chan->desc->cyclic || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}
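/*
 * Worked example (added for clarity): the final rounding aligns the residue
 * up to a full memory burst. With mem_burst = 4 beats of mem_width = 4 bytes
 * (burst_size = 16), a raw residue of 70 bytes is reported as 80 bytes.
 */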

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = pm_runtime_get_sync(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);

	vchan_free_chan_resources(to_virt_chan(c));
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
}

static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}
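/*
 * Illustrative devicetree usage (editor's example; the exact values are
 * board-specific and documented in the st,stm32-dma binding):
 *
 *	dmas = <&dma1 5 0x4 0x10400 0x3>;
 *
 * i.e. channel id 5, request line 4, a stream configuration mask applied to
 * SxCR (increment modes, priority), and a features mask whose two LSBs
 * select the FIFO threshold.
 */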

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(dmadev->clk);
	}

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(dmadev->rst)) {
		reset_control_assert(dmadev->rst);
		udelay(2);
		reset_control_deassert(dmadev->rst);
	}

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto clk_free;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0) {
			if (ret != -EPROBE_DEFER)
				dev_err(&pdev->dev,
					"No irq resource for chan %d\n", i);
			goto err_unregister;
		}
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
clk_free:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}

#ifdef CONFIG_PM
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
};

static int __init stm32_dma_init(void)
{
	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
}
subsys_initcall(stm32_dma_init);