// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"

#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)

/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		(((n) & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		(((n) & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		(((n) & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		(((n) & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		(((n) & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		(((n) & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	(((n) & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		(((n) & 0x3) << 6)
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					 | STM32_DMA_SCR_MINC \
					 | STM32_DMA_SCR_PINCOS \
					 | STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					 | STM32_DMA_SCR_TEIE \
					 | STM32_DMA_SCR_DMEIE)

/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		((n) & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					 | STM32_DMA_SFCR_DMDIS)

/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL	0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL	0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL	0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL		0x03

#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * A valid transfer starts at offset 0 and ends at 0xFFFE, which can leave
 * the scatter-gather boundary unaligned. It is therefore safer to round
 * this value down to the FIFO size (16 bytes).
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)

enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};

/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

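/*
 * Shadow copy of the per-stream registers. The driver precomputes these
 * values for each scatter-gather element and writes them back to the
 * hardware when (re)starting a transfer.
 */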
struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	struct reset_control *rst;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};

static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}

static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}

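/*
 * Pick the widest memory bus width usable for a buffer of the given length:
 * start from the widest candidate the FIFO threshold allows and halve it
 * until the buffer length is a whole multiple of the width.
 */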
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	return max_width;
}

static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If the number of beats fits into a whole number
			 * of bursts, this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}

static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/*
	 * The buffer or period length has to be aligned on the FIFO depth;
	 * otherwise bytes may be left stuck in the FIFO at the buffer or
	 * period boundary.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}

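/*
 * Pick the largest burst (in beats) not exceeding max_burst that both fits
 * the buffer length and satisfies the FIFO threshold constraint; a return
 * value of 0 means falling back to single transfers.
 */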
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}

static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}

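/*
 * Direct mode is used when both sides are limited to single transfers;
 * otherwise the FIFO is enabled (direct mode disabled) and FIFO error
 * reporting is armed.
 */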
static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}

static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}

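/*
 * Streams 0..3 are reported in LISR and cleared via LIFCR; streams 4..7
 * use HISR/HIFCR. Each stream owns a 6-bit flag field within its register.
 */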
static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */

	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}

static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the
	 * selected DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}

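/*
 * Clear the EN bit and poll until the hardware confirms that the stream is
 * disabled; the bit remains set until any ongoing transfer has completed.
 */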
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	u32 dma_scr, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, STM32_DMA_SCR(id), dma_scr);

		do {
			dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
			dma_scr &= STM32_DMA_SCR_EN;
			if (!dma_scr)
				break;

			if (time_after_eq(jiffies, timeout)) {
				dev_err(chan2dev(chan), "%s: timeout!\n",
					__func__);
				return -EBUSY;
			}
			cond_resched();
		} while (1);
	}

	return 0;
}

static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}

static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->busy) {
		stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}

static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}

static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR: 0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR: 0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR: 0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR: 0x%08x\n", sfcr);
}

static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

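/*
 * Program the stream registers from the next sg element and set the EN bit.
 * Called with the channel's vchan lock held.
 */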
static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}

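/*
 * In double-buffer mode, pre-program the memory address register that the
 * hardware is not currently using (selected by the CT bit) with the next
 * sg element, so the switch at end of transfer is seamless.
 */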
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}

static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				list_del(&chan->desc->vdesc.node);
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}

static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}
	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}

static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}

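/*
 * Translate the dma_slave_config and FIFO threshold into SCR/SFCR settings
 * for one transfer direction: bus widths, burst sizes, the FIFO threshold
 * and the peripheral address are derived here for both the device and
 * memory sides.
 */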
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, threshold;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	threshold = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  threshold,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, threshold);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  threshold,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(threshold);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}

static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}

static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}

static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * More requests may be queued as long as the DMA has not been
	 * started; the driver then loops over all of them. Once the DMA is
	 * started, new requests can only be queued after terminating it.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

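/*
 * Memory-to-memory copies are split into chunks of at most
 * STM32_DMA_ALIGNED_MAX_DATA_ITEMS bytes, each described by its own sg
 * element with both the source and destination pointers incrementing.
 */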
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}

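/*
 * SxNDTR counts the remaining data items in units of the peripheral bus
 * width; shift by the PSIZE encoding (0/1/2 for byte/half-word/word) to
 * convert items to bytes.
 */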
static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}

/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function is called with IRQs disabled. It checks whether the hardware
 * has switched to the next transfer in double buffer mode, by comparing the
 * next_sg memory address with the hardware related register (based on the
 * CT bit value).
 *
 * Returns true if the expected current transfer is still running, or if
 * double buffer mode is not activated.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));

	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
}

static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Calculating the residue means computing the following descriptor
	 * information:
	 * - the sg_req currently transferred
	 * - the hardware's remaining position in this sg (NDTR bit field).
	 *
	 * A race condition may occur if the DMA is running in cyclic or
	 * double buffer mode, since the DMA registers are automatically
	 * reloaded at the end of a period transfer. The hardware may have
	 * switched to the next transfer (CT bit updated) just before the
	 * position (SxNDTR reg) is read.
	 * In this case the SxNDTR reg may (or may not) correspond to the new
	 * transfer position rather than the expected one.
	 * The strategy implemented in this driver is to:
	 * - read the SxNDTR register
	 * - crosscheck that the hardware is still in the current transfer.
	 * In case of a switch, we can assume that the DMA is at the
	 * beginning of the next transfer, so we approximate the residue
	 * accordingly by pointing at the beginning of the next transfer.
	 *
	 * This race condition doesn't apply to non-cyclic mode, as double
	 * buffering is not used there and registers are updated by software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if (!stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes
	 * from NDTR;
	 * for all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining
	 * periods/sg to be transferred.
	 */
	if (!chan->desc->cyclic || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}

static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}

static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = pm_runtime_get_sync(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}

static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);

	vchan_free_chan_resources(to_virt_chan(c));
}

static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
}

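/*
 * The binding uses four cells per channel: channel id, request line, a
 * mask of SCR configuration bits and a feature mask (currently carrying
 * the FIFO threshold).
 */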
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}

static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);

static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(dmadev->clk);
	}

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	dmadev->rst = devm_reset_control_get(&pdev->dev, NULL);
	if (!IS_ERR(dmadev->rst)) {
		reset_control_assert(dmadev->rst);
		udelay(2);
		reset_control_deassert(dmadev->rst);
	}

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto clk_free;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			goto err_unregister;
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
clk_free:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}

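/* Runtime PM simply gates the controller clock while the DMA is unused. */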
#ifdef CONFIG_PM
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
};

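/*
 * Registered at subsys_initcall level so the controller is available
 * before client drivers probe at device_initcall time.
 */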
static int __init stm32_dma_init(void)
{
	return platform_driver_probe(&stm32_dma_driver, stm32_dma_probe);
}
subsys_initcall(stm32_dma_init);