Commit | Line | Data |
---|---|---|
8d318a50 LW |
1 | /* |
2 | * drivers/dma/ste_dma40.c | |
3 | * | |
4 | * Copyright (C) ST-Ericsson 2007-2010 | |
5 | * License terms: GNU General Public License (GPL) version 2 | |
6 | * Author: Per Friden <per.friden@stericsson.com> | |
7 | * Author: Jonas Aaberg <jonas.aberg@stericsson.com> | |
8 | * | |
9 | */ | |
10 | ||
11 | #include <linux/kernel.h> | |
12 | #include <linux/slab.h> | |
13 | #include <linux/dmaengine.h> | |
14 | #include <linux/platform_device.h> | |
15 | #include <linux/clk.h> | |
16 | #include <linux/delay.h> | |
17 | ||
18 | #include <plat/ste_dma40.h> | |
19 | ||
20 | #include "ste_dma40_ll.h" | |
21 | ||
22 | #define D40_NAME "dma40" | |
23 | ||
24 | #define D40_PHY_CHAN -1 | |
25 | ||
26 | /* For masking out/in 2 bit channel positions */ | |
27 | #define D40_CHAN_POS(chan) (2 * (chan / 2)) | |
28 | #define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan)) | |
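/*
 * Worked example: channels in a pair share a 2-bit field, so
 * D40_CHAN_POS(4) == D40_CHAN_POS(5) == 4 and
 * D40_CHAN_POS_MASK(5) == (0x3 << 4) == 0x30.
 */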
29 | ||
30 | /* Maximum iterations taken before giving up suspending a channel */ | |
31 | #define D40_SUSPEND_MAX_IT 500 | |
32 | ||
508849ad LW |
33 | /* Hardware requirement on LCLA alignment */ |
34 | #define LCLA_ALIGNMENT 0x40000 | |
35 | /* Attempts before giving up on getting pages that are aligned */ | |
36 | #define MAX_LCLA_ALLOC_ATTEMPTS 256 | |
37 | ||
38 | /* Bit markings for allocation map */ | |
8d318a50 LW |
39 | #define D40_ALLOC_FREE (1 << 31) |
40 | #define D40_ALLOC_PHY (1 << 30) | |
41 | #define D40_ALLOC_LOG_FREE 0 | |
42 | ||
8d318a50 LW |
43 | /* Hardware designer of the block */ |
44 | #define D40_PERIPHID2_DESIGNER 0x8 | |
45 | ||
46 | /** | |
47 | * enum d40_command - The different commands and/or statuses. | |
48 | * | |
49 | * @D40_DMA_STOP: DMA channel command STOP or status STOPPED. | |
50 | * @D40_DMA_RUN: The DMA channel is RUNNING or the command RUN. | |
51 | * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible. | |
52 | * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED. | |
53 | */ | |
54 | enum d40_command { | |
55 | D40_DMA_STOP = 0, | |
56 | D40_DMA_RUN = 1, | |
57 | D40_DMA_SUSPEND_REQ = 2, | |
58 | D40_DMA_SUSPENDED = 3 | |
59 | }; | |
60 | ||
61 | /** | |
62 | * struct d40_lli_pool - Structure for keeping LLIs in memory | |
63 | * | |
64 | * @base: Pointer to a memory area used when the pre_alloc_lli's are not large | |
65 | * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if | |
66 | * pre_alloc_lli is used. | |
67 | * @size: The size in bytes of the memory at base or the size of pre_alloc_lli. | |
68 | * @pre_alloc_lli: Pre allocated area for the most common case of transfers, | |
69 | * one buffer to one buffer. | |
70 | */ | |
71 | struct d40_lli_pool { | |
72 | void *base; | |
508849ad | 73 | int size; |
8d318a50 | 74 | /* Space for dst and src, plus an extra for padding */ |
508849ad | 75 | u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)]; |
8d318a50 LW |
76 | }; |
77 | ||
78 | /** | |
79 | * struct d40_desc - A descriptor is one DMA job. | |
80 | * | |
81 | * @lli_phy: LLI settings for physical channel. Both src and dst | |
82 | * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if | |
83 | * lli_len equals one. | |
84 | * @lli_log: Same as above but for logical channels. | |
85 | * @lli_pool: The pool with two entries pre-allocated. | |
941b77a3 PF |
86 | * @lli_len: Number of llis of current descriptor. |
87 | * @lli_count: Number of transferred llis. | |
88 | * @lli_tx_len: Max number of LLIs per transfer; there can be | |
89 | * many transfers for one descriptor. | |
8d318a50 LW |
90 | * @txd: DMA engine struct. Used, among other things, for communication |
91 | * during a transfer. | |
92 | * @node: List entry. | |
93 | * @dir: The transfer direction of this job. | |
94 | * @is_in_client_list: true if the client owns this descriptor. | |
95 | * | |
96 | * This descriptor is used for both logical and physical transfers. | |
97 | */ | |
98 | ||
99 | struct d40_desc { | |
100 | /* LLI physical */ | |
101 | struct d40_phy_lli_bidir lli_phy; | |
102 | /* LLI logical */ | |
103 | struct d40_log_lli_bidir lli_log; | |
104 | ||
105 | struct d40_lli_pool lli_pool; | |
941b77a3 PF |
106 | int lli_len; |
107 | int lli_count; | |
108 | u32 lli_tx_len; | |
8d318a50 LW |
109 | |
110 | struct dma_async_tx_descriptor txd; | |
111 | struct list_head node; | |
112 | ||
113 | enum dma_data_direction dir; | |
114 | bool is_in_client_list; | |
115 | }; | |
116 | ||
117 | /** | |
118 | * struct d40_lcla_pool - LCLA pool settings and data. | |
119 | * | |
508849ad LW |
120 | * @base: The virtual address of LCLA. 18 bit aligned. |
121 | * @base_unaligned: The original kmalloc pointer, if kmalloc is used. | |
122 | * This pointer is only there for clean-up on error. | |
123 | * @pages: The number of pages needed for all physical channels. | |
124 | * Only used later for clean-up on error | |
8d318a50 | 125 | * @lock: Lock to protect the content in this struct. |
508849ad | 126 | * @alloc_map: Bitmap mapping between physical channel and LCLA entries. |
8d318a50 LW |
127 | * @num_blocks: The number of entries in alloc_map. Equals the |
128 | * number of physical channels. | |
129 | */ | |
130 | struct d40_lcla_pool { | |
131 | void *base; | |
508849ad LW |
132 | void *base_unaligned; |
133 | int pages; | |
8d318a50 LW |
134 | spinlock_t lock; |
135 | u32 *alloc_map; | |
136 | int num_blocks; | |
137 | }; | |
138 | ||
139 | /** | |
140 | * struct d40_phy_res - struct for handling eventlines mapped to physical | |
141 | * channels. | |
142 | * | |
143 | * @lock: A lock protecting this entity. | |
144 | * @num: The physical channel number of this entity. | |
145 | * @allocated_src: Bit map showing which src event lines are mapped to | |
146 | * this physical channel. Can also be free or physically allocated. | |
147 | * @allocated_dst: Same as for src but for dst. | |
148 | * allocated_dst and allocated_src use the D40_ALLOC* defines as well as | |
149 | * the event line number. allocated_src and allocated_dst cannot both be | |
150 | * allocated to a physical channel, since the interrupt handler would then | |
151 | * have no way of figuring out which one the interrupt belongs to. | |
152 | */ | |
153 | struct d40_phy_res { | |
154 | spinlock_t lock; | |
155 | int num; | |
156 | u32 allocated_src; | |
157 | u32 allocated_dst; | |
158 | }; | |
159 | ||
160 | struct d40_base; | |
161 | ||
162 | /** | |
163 | * struct d40_chan - Struct that describes a channel. | |
164 | * | |
165 | * @lock: A spinlock to protect this struct. | |
166 | * @log_num: The logical number, if any, of this channel. | |
167 | * @completed: Starts with 1, after first interrupt it is set to dma engine's | |
168 | * current cookie. | |
169 | * @pending_tx: The number of pending transfers. Used between interrupt handler | |
170 | * and tasklet. | |
171 | * @busy: Set to true when transfer is ongoing on this channel. | |
2a614340 JA |
172 | * @phy_chan: Pointer to physical channel which this instance runs on. If this |
173 | * pointer is NULL, then the channel is not allocated. | |
8d318a50 LW |
174 | * @chan: DMA engine handle. |
175 | * @tasklet: Tasklet that gets scheduled from interrupt context to complete a | |
176 | * transfer and call client callback. | |
177 | * @client: Client owned descriptor list. | |
178 | * @active: Active descriptor. | |
179 | * @queue: Queued jobs. | |
8d318a50 LW |
180 | * @dma_cfg: The client configuration of this dma channel. |
181 | * @base: Pointer to the device instance struct. | |
182 | * @src_def_cfg: Default cfg register setting for src. | |
183 | * @dst_def_cfg: Default cfg register setting for dst. | |
184 | * @log_def: Default logical channel settings. | |
185 | * @lcla: Space for one dst src pair for logical channel transfers. | |
186 | * @lcpa: Pointer to dst and src lcpa settings. | |
187 | * | |
188 | * This struct can either "be" a logical or a physical channel. | |
189 | */ | |
190 | struct d40_chan { | |
191 | spinlock_t lock; | |
192 | int log_num; | |
193 | /* ID of the most recent completed transfer */ | |
194 | int completed; | |
195 | int pending_tx; | |
196 | bool busy; | |
197 | struct d40_phy_res *phy_chan; | |
198 | struct dma_chan chan; | |
199 | struct tasklet_struct tasklet; | |
200 | struct list_head client; | |
201 | struct list_head active; | |
202 | struct list_head queue; | |
8d318a50 LW |
203 | struct stedma40_chan_cfg dma_cfg; |
204 | struct d40_base *base; | |
205 | /* Default register configurations */ | |
206 | u32 src_def_cfg; | |
207 | u32 dst_def_cfg; | |
208 | struct d40_def_lcsp log_def; | |
209 | struct d40_lcla_elem lcla; | |
210 | struct d40_log_lli_full *lcpa; | |
211 | }; | |
212 | ||
213 | /** | |
214 | * struct d40_base - The big global struct, one for each probe'd instance. | |
215 | * | |
216 | * @interrupt_lock: Lock used to make sure one interrupt is handled at a time. | |
217 | * @execmd_lock: Lock for execute command usage since several channels share | |
218 | * the same physical register. | |
219 | * @dev: The device structure. | |
220 | * @virtbase: The virtual base address of the DMA's registers. | |
221 | * @clk: Pointer to the DMA clock structure. | |
222 | * @phy_start: Physical memory start of the DMA registers. | |
223 | * @phy_size: Size of the DMA register map. | |
224 | * @irq: The IRQ number. | |
225 | * @num_phy_chans: The number of physical channels. Read from HW. This | |
226 | * is the number of available channels for this driver, not counting "Secure | |
227 | * mode" allocated physical channels. | |
228 | * @num_log_chans: The number of logical channels. Calculated from | |
229 | * num_phy_chans. | |
230 | * @dma_both: dma_device channels that can do both memcpy and slave transfers. | |
231 | * @dma_slave: dma_device channels that can only do slave transfers. | |
232 | * @dma_memcpy: dma_device channels that can only do memcpy transfers. | |
233 | * @phy_chans: Room for all possible physical channels in system. | |
234 | * @log_chans: Room for all possible logical channels in system. | |
235 | * @lookup_log_chans: Used to map interrupt number to logical channel. Points | |
236 | * to log_chans entries. | |
237 | * @lookup_phy_chans: Used to map interrupt number to physical channel. Points | |
238 | * to phy_chans entries. | |
239 | * @plat_data: Pointer to provided platform_data which is the driver | |
240 | * configuration. | |
241 | * @phy_res: Vector containing all physical channels. | |
242 | * @lcla_pool: lcla pool settings and data. | |
243 | * @lcpa_base: The virtual mapped address of LCPA. | |
244 | * @phy_lcpa: The physical address of the LCPA. | |
245 | * @lcpa_size: The size of the LCPA area. | |
c675b1b4 | 246 | * @desc_slab: cache for descriptors. |
8d318a50 LW |
247 | */ |
248 | struct d40_base { | |
249 | spinlock_t interrupt_lock; | |
250 | spinlock_t execmd_lock; | |
251 | struct device *dev; | |
252 | void __iomem *virtbase; | |
253 | struct clk *clk; | |
254 | phys_addr_t phy_start; | |
255 | resource_size_t phy_size; | |
256 | int irq; | |
257 | int num_phy_chans; | |
258 | int num_log_chans; | |
259 | struct dma_device dma_both; | |
260 | struct dma_device dma_slave; | |
261 | struct dma_device dma_memcpy; | |
262 | struct d40_chan *phy_chans; | |
263 | struct d40_chan *log_chans; | |
264 | struct d40_chan **lookup_log_chans; | |
265 | struct d40_chan **lookup_phy_chans; | |
266 | struct stedma40_platform_data *plat_data; | |
267 | /* Physical half channels */ | |
268 | struct d40_phy_res *phy_res; | |
269 | struct d40_lcla_pool lcla_pool; | |
270 | void *lcpa_base; | |
271 | dma_addr_t phy_lcpa; | |
272 | resource_size_t lcpa_size; | |
c675b1b4 | 273 | struct kmem_cache *desc_slab; |
8d318a50 LW |
274 | }; |
275 | ||
276 | /** | |
277 | * struct d40_interrupt_lookup - lookup table for interrupt handler | |
278 | * | |
279 | * @src: Interrupt mask register. | |
280 | * @clr: Interrupt clear register. | |
281 | * @is_error: true if this is an error interrupt. | |
282 | * @offset: start delta into lookup_log_chans in d40_base. If equal to | |
283 | * D40_PHY_CHAN, the lookup_phy_chans shall be used instead. | |
284 | */ | |
285 | struct d40_interrupt_lookup { | |
286 | u32 src; | |
287 | u32 clr; | |
288 | bool is_error; | |
289 | int offset; | |
290 | }; | |
291 | ||
292 | /** | |
293 | * struct d40_reg_val - simple lookup struct | |
294 | * | |
295 | * @reg: The register. | |
296 | * @val: The value that belongs to the register in reg. | |
297 | */ | |
298 | struct d40_reg_val { | |
299 | unsigned int reg; | |
300 | unsigned int val; | |
301 | }; | |
302 | ||
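/*
 * Allocate LLI storage for one descriptor. A single-link job uses the
 * descriptor's own pre_alloc_lli area; larger jobs get a kmalloc'd
 * (GFP_NOWAIT) area sized for both the src and dst lists, with extra
 * room so the pointers can be aligned to the LLI size.
 */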
303 | static int d40_pool_lli_alloc(struct d40_desc *d40d, | |
304 | int lli_len, bool is_log) | |
305 | { | |
306 | u32 align; | |
307 | void *base; | |
308 | ||
309 | if (is_log) | |
310 | align = sizeof(struct d40_log_lli); | |
311 | else | |
312 | align = sizeof(struct d40_phy_lli); | |
313 | ||
314 | if (lli_len == 1) { | |
315 | base = d40d->lli_pool.pre_alloc_lli; | |
316 | d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli); | |
317 | d40d->lli_pool.base = NULL; | |
318 | } else { | |
319 | d40d->lli_pool.size = ALIGN(lli_len * 2 * align, align); | |
320 | ||
321 | base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT); | |
322 | d40d->lli_pool.base = base; | |
323 | ||
324 | if (d40d->lli_pool.base == NULL) | |
325 | return -ENOMEM; | |
326 | } | |
327 | ||
328 | if (is_log) { | |
329 | d40d->lli_log.src = PTR_ALIGN((struct d40_log_lli *) base, | |
330 | align); | |
331 | d40d->lli_log.dst = PTR_ALIGN(d40d->lli_log.src + lli_len, | |
332 | align); | |
333 | } else { | |
334 | d40d->lli_phy.src = PTR_ALIGN((struct d40_phy_lli *)base, | |
335 | align); | |
336 | d40d->lli_phy.dst = PTR_ALIGN(d40d->lli_phy.src + lli_len, | |
337 | align); | |
338 | ||
339 | d40d->lli_phy.src_addr = virt_to_phys(d40d->lli_phy.src); | |
340 | d40d->lli_phy.dst_addr = virt_to_phys(d40d->lli_phy.dst); | |
341 | } | |
342 | ||
343 | return 0; | |
344 | } | |
345 | ||
346 | static void d40_pool_lli_free(struct d40_desc *d40d) | |
347 | { | |
348 | kfree(d40d->lli_pool.base); | |
349 | d40d->lli_pool.base = NULL; | |
350 | d40d->lli_pool.size = 0; | |
351 | d40d->lli_log.src = NULL; | |
352 | d40d->lli_log.dst = NULL; | |
353 | d40d->lli_phy.src = NULL; | |
354 | d40d->lli_phy.dst = NULL; | |
355 | d40d->lli_phy.src_addr = 0; | |
356 | d40d->lli_phy.dst_addr = 0; | |
357 | } | |
358 | ||
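/*
 * Assign the next cookie on the channel, wrapping back to 1 on overflow,
 * and store it in the descriptor so completion can be matched against it.
 */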
359 | static dma_cookie_t d40_assign_cookie(struct d40_chan *d40c, | |
360 | struct d40_desc *desc) | |
361 | { | |
362 | dma_cookie_t cookie = d40c->chan.cookie; | |
363 | ||
364 | if (++cookie < 0) | |
365 | cookie = 1; | |
366 | ||
367 | d40c->chan.cookie = cookie; | |
368 | desc->txd.cookie = cookie; | |
369 | ||
370 | return cookie; | |
371 | } | |
372 | ||
8d318a50 LW |
373 | static void d40_desc_remove(struct d40_desc *d40d) |
374 | { | |
375 | list_del(&d40d->node); | |
376 | } | |
377 | ||
378 | static struct d40_desc *d40_desc_get(struct d40_chan *d40c) | |
379 | { | |
8d318a50 LW |
380 | struct d40_desc *d; |
381 | struct d40_desc *_d; | |
382 | ||
383 | if (!list_empty(&d40c->client)) { | |
384 | list_for_each_entry_safe(d, _d, &d40c->client, node) | |
385 | if (async_tx_test_ack(&d->txd)) { | |
386 | d40_pool_lli_free(d); | |
387 | d40_desc_remove(d); | |
c675b1b4 | 388 | break; |
8d318a50 | 389 | } |
8d318a50 | 390 | } else { |
c675b1b4 JA |
391 | d = kmem_cache_alloc(d40c->base->desc_slab, GFP_NOWAIT); |
392 | if (d != NULL) { | |
393 | memset(d, 0, sizeof(struct d40_desc)); | |
394 | INIT_LIST_HEAD(&d->node); | |
395 | } | |
8d318a50 | 396 | } |
c675b1b4 | 397 | return d; |
8d318a50 LW |
398 | } |
399 | ||
400 | static void d40_desc_free(struct d40_chan *d40c, struct d40_desc *d40d) | |
401 | { | |
c675b1b4 | 402 | kmem_cache_free(d40c->base->desc_slab, d40d); |
8d318a50 LW |
403 | } |
404 | ||
405 | static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc) | |
406 | { | |
407 | list_add_tail(&desc->node, &d40c->active); | |
408 | } | |
409 | ||
410 | static struct d40_desc *d40_first_active_get(struct d40_chan *d40c) | |
411 | { | |
412 | struct d40_desc *d; | |
413 | ||
414 | if (list_empty(&d40c->active)) | |
415 | return NULL; | |
416 | ||
417 | d = list_first_entry(&d40c->active, | |
418 | struct d40_desc, | |
419 | node); | |
420 | return d; | |
421 | } | |
422 | ||
423 | static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc) | |
424 | { | |
425 | list_add_tail(&desc->node, &d40c->queue); | |
426 | } | |
427 | ||
428 | static struct d40_desc *d40_first_queued(struct d40_chan *d40c) | |
429 | { | |
430 | struct d40_desc *d; | |
431 | ||
432 | if (list_empty(&d40c->queue)) | |
433 | return NULL; | |
434 | ||
435 | d = list_first_entry(&d40c->queue, | |
436 | struct d40_desc, | |
437 | node); | |
438 | return d; | |
439 | } | |
440 | ||
441 | /* Support functions for logical channels */ | |
442 | ||
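/*
 * d40_lcla_id_get() claims two free entries (one src, one dst) in the
 * physical channel's LCLA allocation bitmap and records their ids and
 * addresses in d40c->lcla. It returns -EINVAL if two free entries
 * cannot be found.
 */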
508849ad | 443 | static int d40_lcla_id_get(struct d40_chan *d40c) |
8d318a50 LW |
444 | { |
445 | int src_id = 0; | |
446 | int dst_id = 0; | |
447 | struct d40_log_lli *lcla_lidx_base = | |
508849ad | 448 | d40c->base->lcla_pool.base + d40c->phy_chan->num * 1024; |
8d318a50 LW |
449 | int i; |
450 | int lli_per_log = d40c->base->plat_data->llis_per_log; | |
2292b880 | 451 | unsigned long flags; |
8d318a50 LW |
452 | |
453 | if (d40c->lcla.src_id >= 0 && d40c->lcla.dst_id >= 0) | |
454 | return 0; | |
455 | ||
508849ad | 456 | if (d40c->base->lcla_pool.num_blocks > 32) |
8d318a50 LW |
457 | return -EINVAL; |
458 | ||
508849ad | 459 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
8d318a50 | 460 | |
508849ad LW |
461 | for (i = 0; i < d40c->base->lcla_pool.num_blocks; i++) { |
462 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & | |
463 | (0x1 << i))) { | |
464 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | |
465 | (0x1 << i); | |
8d318a50 LW |
466 | break; |
467 | } | |
468 | } | |
469 | src_id = i; | |
508849ad | 470 | if (src_id >= d40c->base->lcla_pool.num_blocks) |
8d318a50 LW |
471 | goto err; |
472 | ||
508849ad LW |
473 | for (; i < d40c->base->lcla_pool.num_blocks; i++) { |
474 | if (!(d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] & | |
475 | (0x1 << i))) { | |
476 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] |= | |
477 | (0x1 << i); | |
8d318a50 LW |
478 | break; |
479 | } | |
480 | } | |
481 | ||
482 | dst_id = i; | |
483 | if (dst_id == src_id) | |
484 | goto err; | |
485 | ||
486 | d40c->lcla.src_id = src_id; | |
487 | d40c->lcla.dst_id = dst_id; | |
488 | d40c->lcla.dst = lcla_lidx_base + dst_id * lli_per_log + 1; | |
489 | d40c->lcla.src = lcla_lidx_base + src_id * lli_per_log + 1; | |
490 | ||
508849ad | 491 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); |
8d318a50 LW |
492 | return 0; |
493 | err: | |
508849ad | 494 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); |
8d318a50 LW |
495 | return -EINVAL; |
496 | } | |
497 | ||
8d318a50 LW |
498 | |
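/*
 * Issue a command (STOP, RUN, SUSPEND_REQ) to a physical channel by
 * writing its 2-bit field of the ACTIVE/ACTIVO register. For a suspend
 * request the channel status is polled, with a small delay between
 * reads, until it reports STOP or SUSPENDED or D40_SUSPEND_MAX_IT
 * iterations have passed.
 */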
499 | static int d40_channel_execute_command(struct d40_chan *d40c, | |
500 | enum d40_command command) | |
501 | { | |
502 | int status, i; | |
503 | void __iomem *active_reg; | |
504 | int ret = 0; | |
505 | unsigned long flags; | |
1d392a7b | 506 | u32 wmask; |
8d318a50 LW |
507 | |
508 | spin_lock_irqsave(&d40c->base->execmd_lock, flags); | |
509 | ||
510 | if (d40c->phy_chan->num % 2 == 0) | |
511 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | |
512 | else | |
513 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | |
514 | ||
515 | if (command == D40_DMA_SUSPEND_REQ) { | |
516 | status = (readl(active_reg) & | |
517 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | |
518 | D40_CHAN_POS(d40c->phy_chan->num); | |
519 | ||
520 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | |
521 | goto done; | |
522 | } | |
523 | ||
1d392a7b JA |
524 | wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num)); |
525 | writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)), | |
526 | active_reg); | |
8d318a50 LW |
527 | |
528 | if (command == D40_DMA_SUSPEND_REQ) { | |
529 | ||
530 | for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) { | |
531 | status = (readl(active_reg) & | |
532 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | |
533 | D40_CHAN_POS(d40c->phy_chan->num); | |
534 | ||
535 | cpu_relax(); | |
536 | /* | |
537 | * Reduce the number of bus accesses while | |
538 | * waiting for the DMA to suspend. | |
539 | */ | |
540 | udelay(3); | |
541 | ||
542 | if (status == D40_DMA_STOP || | |
543 | status == D40_DMA_SUSPENDED) | |
544 | break; | |
545 | } | |
546 | ||
547 | if (i == D40_SUSPEND_MAX_IT) { | |
548 | dev_err(&d40c->chan.dev->device, | |
549 | "[%s]: unable to suspend the chl %d (log: %d) status %x\n", | |
550 | __func__, d40c->phy_chan->num, d40c->log_num, | |
551 | status); | |
552 | dump_stack(); | |
553 | ret = -EBUSY; | |
554 | } | |
555 | ||
556 | } | |
557 | done: | |
558 | spin_unlock_irqrestore(&d40c->base->execmd_lock, flags); | |
559 | return ret; | |
560 | } | |
561 | ||
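/*
 * Free every active and queued descriptor on the channel, release its
 * two LCLA entries and clear the busy/pending_tx state. The hardware
 * itself is not touched here.
 */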
562 | static void d40_term_all(struct d40_chan *d40c) | |
563 | { | |
564 | struct d40_desc *d40d; | |
508849ad | 565 | unsigned long flags; |
8d318a50 LW |
566 | |
567 | /* Release active descriptors */ | |
568 | while ((d40d = d40_first_active_get(d40c))) { | |
569 | d40_desc_remove(d40d); | |
570 | ||
571 | /* Return desc to free-list */ | |
572 | d40_desc_free(d40c, d40d); | |
573 | } | |
574 | ||
575 | /* Release queued descriptors waiting for transfer */ | |
576 | while ((d40d = d40_first_queued(d40c))) { | |
577 | d40_desc_remove(d40d); | |
578 | ||
579 | /* Return desc to free-list */ | |
580 | d40_desc_free(d40c, d40d); | |
581 | } | |
582 | ||
508849ad LW |
583 | spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags); |
584 | ||
585 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= | |
586 | (~(0x1 << d40c->lcla.dst_id)); | |
587 | d40c->base->lcla_pool.alloc_map[d40c->phy_chan->num] &= | |
588 | (~(0x1 << d40c->lcla.src_id)); | |
589 | ||
590 | d40c->lcla.src_id = -1; | |
591 | d40c->lcla.dst_id = -1; | |
592 | ||
593 | spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags); | |
8d318a50 LW |
594 | |
595 | d40c->pending_tx = 0; | |
596 | d40c->busy = false; | |
597 | } | |
598 | ||
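/*
 * Enable or disable the event line(s) of a logical channel by writing
 * the (de)activate value into the SSLNK and/or SDLNK registers,
 * depending on the transfer direction.
 */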
599 | static void d40_config_set_event(struct d40_chan *d40c, bool do_enable) | |
600 | { | |
601 | u32 val; | |
602 | unsigned long flags; | |
603 | ||
0c32269d | 604 | /* Note that disabling requires the physical channel to be stopped */ |
8d318a50 LW |
605 | if (do_enable) |
606 | val = D40_ACTIVATE_EVENTLINE; | |
607 | else | |
608 | val = D40_DEACTIVATE_EVENTLINE; | |
609 | ||
610 | spin_lock_irqsave(&d40c->phy_chan->lock, flags); | |
611 | ||
612 | /* Enable event line connected to device (or memcpy) */ | |
613 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | |
614 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) { | |
615 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | |
616 | ||
617 | writel((val << D40_EVENTLINE_POS(event)) | | |
618 | ~D40_EVENTLINE_MASK(event), | |
619 | d40c->base->virtbase + D40_DREG_PCBASE + | |
620 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
621 | D40_CHAN_REG_SSLNK); | |
622 | } | |
623 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) { | |
624 | u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | |
625 | ||
626 | writel((val << D40_EVENTLINE_POS(event)) | | |
627 | ~D40_EVENTLINE_MASK(event), | |
628 | d40c->base->virtbase + D40_DREG_PCBASE + | |
629 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
630 | D40_CHAN_REG_SDLNK); | |
631 | } | |
632 | ||
633 | spin_unlock_irqrestore(&d40c->phy_chan->lock, flags); | |
634 | } | |
635 | ||
a5ebca47 | 636 | static u32 d40_chan_has_events(struct d40_chan *d40c) |
8d318a50 LW |
637 | { |
638 | u32 val = 0; | |
639 | ||
640 | /* If SSLNK or SDLNK is zero all events are disabled */ | |
641 | if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) || | |
642 | (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH)) | |
643 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | |
644 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
645 | D40_CHAN_REG_SSLNK); | |
646 | ||
647 | if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM) | |
648 | val = readl(d40c->base->virtbase + D40_DREG_PCBASE + | |
649 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
650 | D40_CHAN_REG_SDLNK); | |
a5ebca47 | 651 | return val; |
8d318a50 LW |
652 | } |
653 | ||
654 | static void d40_config_enable_lidx(struct d40_chan *d40c) | |
655 | { | |
656 | /* Set LIDX for lcla */ | |
657 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | |
658 | D40_SREG_ELEM_LOG_LIDX_MASK, | |
659 | d40c->base->virtbase + D40_DREG_PCBASE + | |
660 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SDELT); | |
661 | ||
662 | writel((d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS) & | |
663 | D40_SREG_ELEM_LOG_LIDX_MASK, | |
664 | d40c->base->virtbase + D40_DREG_PCBASE + | |
665 | d40c->phy_chan->num * D40_DREG_PCDELTA + D40_CHAN_REG_SSELT); | |
666 | } | |
667 | ||
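/*
 * Suspend the physical channel and program its mode (logical or
 * physical) and mode options; for logical channels also write the
 * default src/dst CFG registers and the LIDX fields.
 */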
668 | static int d40_config_write(struct d40_chan *d40c) | |
669 | { | |
670 | u32 addr_base; | |
671 | u32 var; | |
672 | int res; | |
673 | ||
674 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | |
675 | if (res) | |
676 | return res; | |
677 | ||
678 | /* Odd addresses are even addresses + 4 */ | |
679 | addr_base = (d40c->phy_chan->num % 2) * 4; | |
680 | /* Setup channel mode to logical or physical */ | |
681 | var = ((u32)(d40c->log_num != D40_PHY_CHAN) + 1) << | |
682 | D40_CHAN_POS(d40c->phy_chan->num); | |
683 | writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base); | |
684 | ||
685 | /* Setup operational mode option register */ | |
686 | var = ((d40c->dma_cfg.channel_type >> STEDMA40_INFO_CH_MODE_OPT_POS) & | |
687 | 0x3) << D40_CHAN_POS(d40c->phy_chan->num); | |
688 | ||
689 | writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base); | |
690 | ||
691 | if (d40c->log_num != D40_PHY_CHAN) { | |
692 | /* Set default config for CFG reg */ | |
693 | writel(d40c->src_def_cfg, | |
694 | d40c->base->virtbase + D40_DREG_PCBASE + | |
695 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
696 | D40_CHAN_REG_SSCFG); | |
697 | writel(d40c->dst_def_cfg, | |
698 | d40c->base->virtbase + D40_DREG_PCBASE + | |
699 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
700 | D40_CHAN_REG_SDCFG); | |
701 | ||
702 | d40_config_enable_lidx(d40c); | |
703 | } | |
704 | return res; | |
705 | } | |
706 | ||
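/*
 * Load (part of) a descriptor into the hardware. Physical transfers are
 * written straight to the channel registers; logical transfers are
 * linked via LCPA/LCLA. lli_count advances by lli_tx_len, so a long job
 * may be fed to the hardware in several chunks.
 */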
707 | static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d) | |
708 | { | |
8d318a50 LW |
709 | if (d40d->lli_phy.dst && d40d->lli_phy.src) { |
710 | d40_phy_lli_write(d40c->base->virtbase, | |
711 | d40c->phy_chan->num, | |
712 | d40d->lli_phy.dst, | |
713 | d40d->lli_phy.src); | |
8d318a50 | 714 | } else if (d40d->lli_log.dst && d40d->lli_log.src) { |
8d318a50 LW |
715 | struct d40_log_lli *src = d40d->lli_log.src; |
716 | struct d40_log_lli *dst = d40d->lli_log.dst; | |
508849ad | 717 | int s; |
8d318a50 | 718 | |
941b77a3 PF |
719 | src += d40d->lli_count; |
720 | dst += d40d->lli_count; | |
508849ad LW |
721 | s = d40_log_lli_write(d40c->lcpa, |
722 | d40c->lcla.src, d40c->lcla.dst, | |
723 | dst, src, | |
724 | d40c->base->plat_data->llis_per_log); | |
725 | ||
726 | /* If s equals zero, the job is not linked */ | |
727 | if (s > 0) { | |
728 | (void) dma_map_single(d40c->base->dev, d40c->lcla.src, | |
729 | s * sizeof(struct d40_log_lli), | |
730 | DMA_TO_DEVICE); | |
731 | (void) dma_map_single(d40c->base->dev, d40c->lcla.dst, | |
732 | s * sizeof(struct d40_log_lli), | |
733 | DMA_TO_DEVICE); | |
734 | } | |
8d318a50 | 735 | } |
941b77a3 | 736 | d40d->lli_count += d40d->lli_tx_len; |
8d318a50 LW |
737 | } |
738 | ||
739 | static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx) | |
740 | { | |
741 | struct d40_chan *d40c = container_of(tx->chan, | |
742 | struct d40_chan, | |
743 | chan); | |
744 | struct d40_desc *d40d = container_of(tx, struct d40_desc, txd); | |
745 | unsigned long flags; | |
746 | ||
747 | spin_lock_irqsave(&d40c->lock, flags); | |
748 | ||
749 | tx->cookie = d40_assign_cookie(d40c, d40d); | |
750 | ||
751 | d40_desc_queue(d40c, d40d); | |
752 | ||
753 | spin_unlock_irqrestore(&d40c->lock, flags); | |
754 | ||
755 | return tx->cookie; | |
756 | } | |
757 | ||
758 | static int d40_start(struct d40_chan *d40c) | |
759 | { | |
0c32269d | 760 | if (d40c->log_num != D40_PHY_CHAN) |
8d318a50 | 761 | d40_config_set_event(d40c, true); |
8d318a50 | 762 | |
0c32269d | 763 | return d40_channel_execute_command(d40c, D40_DMA_RUN); |
8d318a50 LW |
764 | } |
765 | ||
766 | static struct d40_desc *d40_queue_start(struct d40_chan *d40c) | |
767 | { | |
768 | struct d40_desc *d40d; | |
769 | int err; | |
770 | ||
771 | /* Start queued jobs, if any */ | |
772 | d40d = d40_first_queued(d40c); | |
773 | ||
774 | if (d40d != NULL) { | |
775 | d40c->busy = true; | |
776 | ||
777 | /* Remove from queue */ | |
778 | d40_desc_remove(d40d); | |
779 | ||
780 | /* Add to active queue */ | |
781 | d40_desc_submit(d40c, d40d); | |
782 | ||
783 | /* Initiate DMA job */ | |
784 | d40_desc_load(d40c, d40d); | |
785 | ||
786 | /* Start dma job */ | |
787 | err = d40_start(d40c); | |
788 | ||
789 | if (err) | |
790 | return NULL; | |
791 | } | |
792 | ||
793 | return d40d; | |
794 | } | |
795 | ||
796 | /* called from interrupt context */ | |
797 | static void dma_tc_handle(struct d40_chan *d40c) | |
798 | { | |
799 | struct d40_desc *d40d; | |
800 | ||
801 | if (!d40c->phy_chan) | |
802 | return; | |
803 | ||
804 | /* Get first active entry from list */ | |
805 | d40d = d40_first_active_get(d40c); | |
806 | ||
807 | if (d40d == NULL) | |
808 | return; | |
809 | ||
941b77a3 | 810 | if (d40d->lli_count < d40d->lli_len) { |
8d318a50 LW |
811 | |
812 | d40_desc_load(d40c, d40d); | |
813 | /* Start dma job */ | |
814 | (void) d40_start(d40c); | |
815 | return; | |
816 | } | |
817 | ||
818 | if (d40_queue_start(d40c) == NULL) | |
819 | d40c->busy = false; | |
820 | ||
821 | d40c->pending_tx++; | |
822 | tasklet_schedule(&d40c->tasklet); | |
823 | ||
824 | } | |
825 | ||
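/*
 * Tasklet run after a terminal count interrupt: update the completed
 * cookie, invoke the client callback for the first active descriptor and
 * either free it (if already acked) or park it on the client list. It
 * reschedules itself while pending_tx is still non-zero.
 */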
826 | static void dma_tasklet(unsigned long data) | |
827 | { | |
828 | struct d40_chan *d40c = (struct d40_chan *) data; | |
829 | struct d40_desc *d40d_fin; | |
830 | unsigned long flags; | |
831 | dma_async_tx_callback callback; | |
832 | void *callback_param; | |
833 | ||
834 | spin_lock_irqsave(&d40c->lock, flags); | |
835 | ||
836 | /* Get first active entry from list */ | |
837 | d40d_fin = d40_first_active_get(d40c); | |
838 | ||
839 | if (d40d_fin == NULL) | |
840 | goto err; | |
841 | ||
842 | d40c->completed = d40d_fin->txd.cookie; | |
843 | ||
844 | /* | |
845 | * If terminating a channel, pending_tx is set to zero. | |
846 | * This prevents any finished active jobs from returning to the client. | |
847 | */ | |
848 | if (d40c->pending_tx == 0) { | |
849 | spin_unlock_irqrestore(&d40c->lock, flags); | |
850 | return; | |
851 | } | |
852 | ||
853 | /* Callback to client */ | |
854 | callback = d40d_fin->txd.callback; | |
855 | callback_param = d40d_fin->txd.callback_param; | |
856 | ||
857 | if (async_tx_test_ack(&d40d_fin->txd)) { | |
858 | d40_pool_lli_free(d40d_fin); | |
859 | d40_desc_remove(d40d_fin); | |
860 | /* Return desc to free-list */ | |
861 | d40_desc_free(d40c, d40d_fin); | |
862 | } else { | |
8d318a50 LW |
863 | if (!d40d_fin->is_in_client_list) { |
864 | d40_desc_remove(d40d_fin); | |
865 | list_add_tail(&d40d_fin->node, &d40c->client); | |
866 | d40d_fin->is_in_client_list = true; | |
867 | } | |
868 | } | |
869 | ||
870 | d40c->pending_tx--; | |
871 | ||
872 | if (d40c->pending_tx) | |
873 | tasklet_schedule(&d40c->tasklet); | |
874 | ||
875 | spin_unlock_irqrestore(&d40c->lock, flags); | |
876 | ||
877 | if (callback) | |
878 | callback(callback_param); | |
879 | ||
880 | return; | |
881 | ||
882 | err: | |
883 | /* Rescue manoeuvre if receiving double interrupts */ | |
884 | if (d40c->pending_tx > 0) | |
885 | d40c->pending_tx--; | |
886 | spin_unlock_irqrestore(&d40c->lock, flags); | |
887 | } | |
888 | ||
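/*
 * Interrupt handler: read all logical and physical interrupt status
 * registers, then walk the set bits, ack each one and dispatch to
 * dma_tc_handle() for terminal count interrupts or log error interrupts.
 */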
889 | static irqreturn_t d40_handle_interrupt(int irq, void *data) | |
890 | { | |
891 | static const struct d40_interrupt_lookup il[] = { | |
892 | {D40_DREG_LCTIS0, D40_DREG_LCICR0, false, 0}, | |
893 | {D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32}, | |
894 | {D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64}, | |
895 | {D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96}, | |
896 | {D40_DREG_LCEIS0, D40_DREG_LCICR0, true, 0}, | |
897 | {D40_DREG_LCEIS1, D40_DREG_LCICR1, true, 32}, | |
898 | {D40_DREG_LCEIS2, D40_DREG_LCICR2, true, 64}, | |
899 | {D40_DREG_LCEIS3, D40_DREG_LCICR3, true, 96}, | |
900 | {D40_DREG_PCTIS, D40_DREG_PCICR, false, D40_PHY_CHAN}, | |
901 | {D40_DREG_PCEIS, D40_DREG_PCICR, true, D40_PHY_CHAN}, | |
902 | }; | |
903 | ||
904 | int i; | |
905 | u32 regs[ARRAY_SIZE(il)]; | |
906 | u32 tmp; | |
907 | u32 idx; | |
908 | u32 row; | |
909 | long chan = -1; | |
910 | struct d40_chan *d40c; | |
911 | unsigned long flags; | |
912 | struct d40_base *base = data; | |
913 | ||
914 | spin_lock_irqsave(&base->interrupt_lock, flags); | |
915 | ||
916 | /* Read interrupt status of both logical and physical channels */ | |
917 | for (i = 0; i < ARRAY_SIZE(il); i++) | |
918 | regs[i] = readl(base->virtbase + il[i].src); | |
919 | ||
920 | for (;;) { | |
921 | ||
922 | chan = find_next_bit((unsigned long *)regs, | |
923 | BITS_PER_LONG * ARRAY_SIZE(il), chan + 1); | |
924 | ||
925 | /* No more set bits found? */ | |
926 | if (chan == BITS_PER_LONG * ARRAY_SIZE(il)) | |
927 | break; | |
928 | ||
929 | row = chan / BITS_PER_LONG; | |
930 | idx = chan & (BITS_PER_LONG - 1); | |
931 | ||
932 | /* ACK interrupt */ | |
933 | tmp = readl(base->virtbase + il[row].clr); | |
934 | tmp |= 1 << idx; | |
935 | writel(tmp, base->virtbase + il[row].clr); | |
936 | ||
937 | if (il[row].offset == D40_PHY_CHAN) | |
938 | d40c = base->lookup_phy_chans[idx]; | |
939 | else | |
940 | d40c = base->lookup_log_chans[il[row].offset + idx]; | |
941 | spin_lock(&d40c->lock); | |
942 | ||
943 | if (!il[row].is_error) | |
944 | dma_tc_handle(d40c); | |
945 | else | |
508849ad LW |
946 | dev_err(base->dev, |
947 | "[%s] IRQ chan: %ld offset %d idx %d\n", | |
8d318a50 LW |
948 | __func__, chan, il[row].offset, idx); |
949 | ||
950 | spin_unlock(&d40c->lock); | |
951 | } | |
952 | ||
953 | spin_unlock_irqrestore(&base->interrupt_lock, flags); | |
954 | ||
955 | return IRQ_HANDLED; | |
956 | } | |
957 | ||
958 | ||
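/*
 * Sanity check a client supplied configuration: the direction must match
 * the memory/peripheral device types, a logical memcpy needs an event
 * line, and peripheral-to-peripheral transfers are rejected.
 */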
959 | static int d40_validate_conf(struct d40_chan *d40c, | |
960 | struct stedma40_chan_cfg *conf) | |
961 | { | |
962 | int res = 0; | |
963 | u32 dst_event_group = D40_TYPE_TO_GROUP(conf->dst_dev_type); | |
964 | u32 src_event_group = D40_TYPE_TO_GROUP(conf->src_dev_type); | |
965 | bool is_log = (conf->channel_type & STEDMA40_CHANNEL_IN_OPER_MODE) | |
966 | == STEDMA40_CHANNEL_IN_LOG_MODE; | |
967 | ||
968 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH && | |
969 | dst_event_group == STEDMA40_DEV_DST_MEMORY) { | |
970 | dev_err(&d40c->chan.dev->device, "[%s] Invalid dst\n", | |
971 | __func__); | |
972 | res = -EINVAL; | |
973 | } | |
974 | ||
975 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM && | |
976 | src_event_group == STEDMA40_DEV_SRC_MEMORY) { | |
977 | dev_err(&d40c->chan.dev->device, "[%s] Invalid src\n", | |
978 | __func__); | |
979 | res = -EINVAL; | |
980 | } | |
981 | ||
982 | if (src_event_group == STEDMA40_DEV_SRC_MEMORY && | |
983 | dst_event_group == STEDMA40_DEV_DST_MEMORY && is_log) { | |
984 | dev_err(&d40c->chan.dev->device, | |
985 | "[%s] No event line\n", __func__); | |
986 | res = -EINVAL; | |
987 | } | |
988 | ||
989 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH && | |
990 | (src_event_group != dst_event_group)) { | |
991 | dev_err(&d40c->chan.dev->device, | |
992 | "[%s] Invalid event group\n", __func__); | |
993 | res = -EINVAL; | |
994 | } | |
995 | ||
996 | if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) { | |
997 | /* | |
998 | * The DMAC HW supports it. It will be added to this driver | |
999 | * in case any dma client requires it. | |
1000 | */ | |
1001 | dev_err(&d40c->chan.dev->device, | |
1002 | "[%s] periph to periph not supported\n", | |
1003 | __func__); | |
1004 | res = -EINVAL; | |
1005 | } | |
1006 | ||
1007 | return res; | |
1008 | } | |
1009 | ||
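/*
 * Try to reserve the physical channel (or a single event line on it, for
 * logical channels) under the per-channel lock. Returns true if the
 * reservation in allocated_src/allocated_dst succeeded.
 */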
1010 | static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src, | |
4aed79b2 | 1011 | int log_event_line, bool is_log) |
8d318a50 LW |
1012 | { |
1013 | unsigned long flags; | |
1014 | spin_lock_irqsave(&phy->lock, flags); | |
4aed79b2 | 1015 | if (!is_log) { |
8d318a50 LW |
1016 | /* Physical interrupts are masked per physical full channel */ |
1017 | if (phy->allocated_src == D40_ALLOC_FREE && | |
1018 | phy->allocated_dst == D40_ALLOC_FREE) { | |
1019 | phy->allocated_dst = D40_ALLOC_PHY; | |
1020 | phy->allocated_src = D40_ALLOC_PHY; | |
1021 | goto found; | |
1022 | } else | |
1023 | goto not_found; | |
1024 | } | |
1025 | ||
1026 | /* Logical channel */ | |
1027 | if (is_src) { | |
1028 | if (phy->allocated_src == D40_ALLOC_PHY) | |
1029 | goto not_found; | |
1030 | ||
1031 | if (phy->allocated_src == D40_ALLOC_FREE) | |
1032 | phy->allocated_src = D40_ALLOC_LOG_FREE; | |
1033 | ||
1034 | if (!(phy->allocated_src & (1 << log_event_line))) { | |
1035 | phy->allocated_src |= 1 << log_event_line; | |
1036 | goto found; | |
1037 | } else | |
1038 | goto not_found; | |
1039 | } else { | |
1040 | if (phy->allocated_dst == D40_ALLOC_PHY) | |
1041 | goto not_found; | |
1042 | ||
1043 | if (phy->allocated_dst == D40_ALLOC_FREE) | |
1044 | phy->allocated_dst = D40_ALLOC_LOG_FREE; | |
1045 | ||
1046 | if (!(phy->allocated_dst & (1 << log_event_line))) { | |
1047 | phy->allocated_dst |= 1 << log_event_line; | |
1048 | goto found; | |
1049 | } else | |
1050 | goto not_found; | |
1051 | } | |
1052 | ||
1053 | not_found: | |
1054 | spin_unlock_irqrestore(&phy->lock, flags); | |
1055 | return false; | |
1056 | found: | |
1057 | spin_unlock_irqrestore(&phy->lock, flags); | |
1058 | return true; | |
1059 | } | |
1060 | ||
1061 | static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src, | |
1062 | int log_event_line) | |
1063 | { | |
1064 | unsigned long flags; | |
1065 | bool is_free = false; | |
1066 | ||
1067 | spin_lock_irqsave(&phy->lock, flags); | |
1068 | if (!log_event_line) { | |
1069 | /* Physical interrupts are masked per physical full channel */ | |
1070 | phy->allocated_dst = D40_ALLOC_FREE; | |
1071 | phy->allocated_src = D40_ALLOC_FREE; | |
1072 | is_free = true; | |
1073 | goto out; | |
1074 | } | |
1075 | ||
1076 | /* Logical channel */ | |
1077 | if (is_src) { | |
1078 | phy->allocated_src &= ~(1 << log_event_line); | |
1079 | if (phy->allocated_src == D40_ALLOC_LOG_FREE) | |
1080 | phy->allocated_src = D40_ALLOC_FREE; | |
1081 | } else { | |
1082 | phy->allocated_dst &= ~(1 << log_event_line); | |
1083 | if (phy->allocated_dst == D40_ALLOC_LOG_FREE) | |
1084 | phy->allocated_dst = D40_ALLOC_FREE; | |
1085 | } | |
1086 | ||
1087 | is_free = ((phy->allocated_src | phy->allocated_dst) == | |
1088 | D40_ALLOC_FREE); | |
1089 | ||
1090 | out: | |
1091 | spin_unlock_irqrestore(&phy->lock, flags); | |
1092 | ||
1093 | return is_free; | |
1094 | } | |
1095 | ||
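/*
 * Pick a physical channel for this d40_chan. A physical-mode memcpy
 * takes the first completely free channel; other physical transfers are
 * confined to the event group's pair of channels in each bank of eight,
 * and logical channels are spread over those via the event line masks.
 */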
1096 | static int d40_allocate_channel(struct d40_chan *d40c) | |
1097 | { | |
1098 | int dev_type; | |
1099 | int event_group; | |
1100 | int event_line; | |
1101 | struct d40_phy_res *phys; | |
1102 | int i; | |
1103 | int j; | |
1104 | int log_num; | |
1105 | bool is_src; | |
508849ad LW |
1106 | bool is_log = (d40c->dma_cfg.channel_type & |
1107 | STEDMA40_CHANNEL_IN_OPER_MODE) | |
8d318a50 LW |
1108 | == STEDMA40_CHANNEL_IN_LOG_MODE; |
1109 | ||
1110 | ||
1111 | phys = d40c->base->phy_res; | |
1112 | ||
1113 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | |
1114 | dev_type = d40c->dma_cfg.src_dev_type; | |
1115 | log_num = 2 * dev_type; | |
1116 | is_src = true; | |
1117 | } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | |
1118 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | |
1119 | /* dst event lines are used for logical memcpy */ | |
1120 | dev_type = d40c->dma_cfg.dst_dev_type; | |
1121 | log_num = 2 * dev_type + 1; | |
1122 | is_src = false; | |
1123 | } else | |
1124 | return -EINVAL; | |
1125 | ||
1126 | event_group = D40_TYPE_TO_GROUP(dev_type); | |
1127 | event_line = D40_TYPE_TO_EVENT(dev_type); | |
1128 | ||
1129 | if (!is_log) { | |
1130 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | |
1131 | /* Find physical half channel */ | |
1132 | for (i = 0; i < d40c->base->num_phy_chans; i++) { | |
1133 | ||
4aed79b2 MM |
1134 | if (d40_alloc_mask_set(&phys[i], is_src, |
1135 | 0, is_log)) | |
8d318a50 LW |
1136 | goto found_phy; |
1137 | } | |
1138 | } else | |
1139 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | |
1140 | int phy_num = j + event_group * 2; | |
1141 | for (i = phy_num; i < phy_num + 2; i++) { | |
508849ad LW |
1142 | if (d40_alloc_mask_set(&phys[i], |
1143 | is_src, | |
1144 | 0, | |
1145 | is_log)) | |
8d318a50 LW |
1146 | goto found_phy; |
1147 | } | |
1148 | } | |
1149 | return -EINVAL; | |
1150 | found_phy: | |
1151 | d40c->phy_chan = &phys[i]; | |
1152 | d40c->log_num = D40_PHY_CHAN; | |
1153 | goto out; | |
1154 | } | |
1155 | if (dev_type == -1) | |
1156 | return -EINVAL; | |
1157 | ||
1158 | /* Find logical channel */ | |
1159 | for (j = 0; j < d40c->base->num_phy_chans; j += 8) { | |
1160 | int phy_num = j + event_group * 2; | |
1161 | /* | |
1162 | * Spread logical channels across all available physical channels | |
1163 | * rather than packing every logical channel onto the first available | |
1164 | * phy channel. | |
1165 | */ | |
1166 | if (is_src) { | |
1167 | for (i = phy_num; i < phy_num + 2; i++) { | |
1168 | if (d40_alloc_mask_set(&phys[i], is_src, | |
4aed79b2 | 1169 | event_line, is_log)) |
8d318a50 LW |
1170 | goto found_log; |
1171 | } | |
1172 | } else { | |
1173 | for (i = phy_num + 1; i >= phy_num; i--) { | |
1174 | if (d40_alloc_mask_set(&phys[i], is_src, | |
4aed79b2 | 1175 | event_line, is_log)) |
8d318a50 LW |
1176 | goto found_log; |
1177 | } | |
1178 | } | |
1179 | } | |
1180 | return -EINVAL; | |
1181 | ||
1182 | found_log: | |
1183 | d40c->phy_chan = &phys[i]; | |
1184 | d40c->log_num = log_num; | |
1185 | out: | |
1186 | ||
1187 | if (is_log) | |
1188 | d40c->base->lookup_log_chans[d40c->log_num] = d40c; | |
1189 | else | |
1190 | d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c; | |
1191 | ||
1192 | return 0; | |
1193 | ||
1194 | } | |
1195 | ||
8d318a50 LW |
1196 | static int d40_config_memcpy(struct d40_chan *d40c) |
1197 | { | |
1198 | dma_cap_mask_t cap = d40c->chan.device->cap_mask; | |
1199 | ||
1200 | if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) { | |
1201 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_log; | |
1202 | d40c->dma_cfg.src_dev_type = STEDMA40_DEV_SRC_MEMORY; | |
1203 | d40c->dma_cfg.dst_dev_type = d40c->base->plat_data-> | |
1204 | memcpy[d40c->chan.chan_id]; | |
1205 | ||
1206 | } else if (dma_has_cap(DMA_MEMCPY, cap) && | |
1207 | dma_has_cap(DMA_SLAVE, cap)) { | |
1208 | d40c->dma_cfg = *d40c->base->plat_data->memcpy_conf_phy; | |
1209 | } else { | |
1210 | dev_err(&d40c->chan.dev->device, "[%s] No memcpy\n", | |
1211 | __func__); | |
1212 | return -EINVAL; | |
1213 | } | |
1214 | ||
1215 | return 0; | |
1216 | } | |
1217 | ||
1218 | ||
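/*
 * Tear the channel down: terminate all jobs, give back client owned
 * descriptors, suspend the physical resource, detach the event line for
 * logical channels, and release or stop the physical channel when no
 * other logical channel still uses it.
 */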
1219 | static int d40_free_dma(struct d40_chan *d40c) | |
1220 | { | |
1221 | ||
1222 | int res = 0; | |
1223 | u32 event, dir; | |
1224 | struct d40_phy_res *phy = d40c->phy_chan; | |
1225 | bool is_src; | |
a8be8627 PF |
1226 | struct d40_desc *d; |
1227 | struct d40_desc *_d; | |
1228 | ||
8d318a50 LW |
1229 | |
1230 | /* Terminate all queued and active transfers */ | |
1231 | d40_term_all(d40c); | |
1232 | ||
a8be8627 PF |
1233 | /* Release client owned descriptors */ |
1234 | if (!list_empty(&d40c->client)) | |
1235 | list_for_each_entry_safe(d, _d, &d40c->client, node) { | |
1236 | d40_pool_lli_free(d); | |
1237 | d40_desc_remove(d); | |
1238 | /* Return desc to free-list */ | |
1239 | d40_desc_free(d40c, d); | |
1240 | } | |
1241 | ||
8d318a50 LW |
1242 | if (phy == NULL) { |
1243 | dev_err(&d40c->chan.dev->device, "[%s] phy == null\n", | |
1244 | __func__); | |
1245 | return -EINVAL; | |
1246 | } | |
1247 | ||
1248 | if (phy->allocated_src == D40_ALLOC_FREE && | |
1249 | phy->allocated_dst == D40_ALLOC_FREE) { | |
1250 | dev_err(&d40c->chan.dev->device, "[%s] channel already free\n", | |
1251 | __func__); | |
1252 | return -EINVAL; | |
1253 | } | |
1254 | ||
8d318a50 LW |
1255 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); |
1256 | if (res) { | |
ff0b12ba | 1257 | dev_err(&d40c->chan.dev->device, "[%s] suspend failed\n", |
8d318a50 LW |
1258 | __func__); |
1259 | return res; | |
1260 | } | |
1261 | ||
1262 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || | |
1263 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) { | |
1264 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | |
1265 | dir = D40_CHAN_REG_SDLNK; | |
1266 | is_src = false; | |
1267 | } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) { | |
1268 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | |
1269 | dir = D40_CHAN_REG_SSLNK; | |
1270 | is_src = true; | |
1271 | } else { | |
1272 | dev_err(&d40c->chan.dev->device, | |
1273 | "[%s] Unknown direction\n", __func__); | |
1274 | return -EINVAL; | |
1275 | } | |
1276 | ||
1277 | if (d40c->log_num != D40_PHY_CHAN) { | |
1278 | /* | |
1279 | * Release the logical channel, deactivating the event line while | |
1280 | * the physical resource is suspended. | |
1281 | */ | |
1282 | writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event)) & | |
1283 | D40_EVENTLINE_MASK(event), | |
1284 | d40c->base->virtbase + D40_DREG_PCBASE + | |
1285 | phy->num * D40_DREG_PCDELTA + dir); | |
1286 | ||
1287 | d40c->base->lookup_log_chans[d40c->log_num] = NULL; | |
1288 | ||
1289 | /* | |
1290 | * Check if there are more logical allocations | |
1291 | * on this phy channel. | |
1292 | */ | |
1293 | if (!d40_alloc_mask_free(phy, is_src, event)) { | |
1294 | /* Resume the other logical channels if any */ | |
1295 | if (d40_chan_has_events(d40c)) { | |
1296 | res = d40_channel_execute_command(d40c, | |
1297 | D40_DMA_RUN); | |
1298 | if (res) { | |
1299 | dev_err(&d40c->chan.dev->device, | |
1300 | "[%s] Executing RUN command\n", | |
1301 | __func__); | |
1302 | return res; | |
1303 | } | |
1304 | } | |
1305 | return 0; | |
1306 | } | |
1307 | } else | |
1308 | d40_alloc_mask_free(phy, is_src, 0); | |
1309 | ||
1310 | /* Release physical channel */ | |
1311 | res = d40_channel_execute_command(d40c, D40_DMA_STOP); | |
1312 | if (res) { | |
1313 | dev_err(&d40c->chan.dev->device, | |
1314 | "[%s] Failed to stop channel\n", __func__); | |
1315 | return res; | |
1316 | } | |
1317 | d40c->phy_chan = NULL; | |
1318 | /* Invalidate channel type */ | |
1319 | d40c->dma_cfg.channel_type = 0; | |
1320 | d40c->base->lookup_phy_chans[phy->num] = NULL; | |
1321 | ||
1322 | return 0; | |
8d318a50 LW |
1323 | } |
1324 | ||
1325 | static int d40_pause(struct dma_chan *chan) | |
1326 | { | |
1327 | struct d40_chan *d40c = | |
1328 | container_of(chan, struct d40_chan, chan); | |
1329 | int res; | |
8d318a50 LW |
1330 | unsigned long flags; |
1331 | ||
1332 | spin_lock_irqsave(&d40c->lock, flags); | |
1333 | ||
1334 | res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ); | |
1335 | if (res == 0) { | |
1336 | if (d40c->log_num != D40_PHY_CHAN) { | |
1337 | d40_config_set_event(d40c, false); | |
1338 | /* Resume the other logical channels if any */ | |
1339 | if (d40_chan_has_events(d40c)) | |
1340 | res = d40_channel_execute_command(d40c, | |
1341 | D40_DMA_RUN); | |
1342 | } | |
1343 | } | |
1344 | ||
1345 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1346 | return res; | |
1347 | } | |
1348 | ||
a5ebca47 JA |
1349 | static bool d40_is_paused(struct d40_chan *d40c) |
1350 | { | |
1351 | bool is_paused = false; | |
1352 | unsigned long flags; | |
1353 | void __iomem *active_reg; | |
1354 | u32 status; | |
1355 | u32 event; | |
a5ebca47 JA |
1356 | |
1357 | spin_lock_irqsave(&d40c->lock, flags); | |
1358 | ||
1359 | if (d40c->log_num == D40_PHY_CHAN) { | |
1360 | if (d40c->phy_chan->num % 2 == 0) | |
1361 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVE; | |
1362 | else | |
1363 | active_reg = d40c->base->virtbase + D40_DREG_ACTIVO; | |
1364 | ||
1365 | status = (readl(active_reg) & | |
1366 | D40_CHAN_POS_MASK(d40c->phy_chan->num)) >> | |
1367 | D40_CHAN_POS(d40c->phy_chan->num); | |
1368 | if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP) | |
1369 | is_paused = true; | |
1370 | ||
1371 | goto _exit; | |
1372 | } | |
1373 | ||
a5ebca47 JA |
1374 | if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH || |
1375 | d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) | |
1376 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dst_dev_type); | |
1377 | else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | |
1378 | event = D40_TYPE_TO_EVENT(d40c->dma_cfg.src_dev_type); | |
1379 | else { | |
1380 | dev_err(&d40c->chan.dev->device, | |
1381 | "[%s] Unknown direction\n", __func__); | |
1382 | goto _exit; | |
1383 | } | |
1384 | status = d40_chan_has_events(d40c); | |
1385 | status = (status & D40_EVENTLINE_MASK(event)) >> | |
1386 | D40_EVENTLINE_POS(event); | |
1387 | ||
1388 | if (status != D40_DMA_RUN) | |
1389 | is_paused = true; | |
a5ebca47 JA |
1390 | _exit: |
1391 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1392 | return is_paused; | |
1393 | ||
1394 | } | |
1395 | ||
1396 | ||
8d318a50 LW |
1397 | static bool d40_tx_is_linked(struct d40_chan *d40c) |
1398 | { | |
1399 | bool is_link; | |
1400 | ||
1401 | if (d40c->log_num != D40_PHY_CHAN) | |
1402 | is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK; | |
1403 | else | |
1404 | is_link = readl(d40c->base->virtbase + D40_DREG_PCBASE + | |
1405 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
1406 | D40_CHAN_REG_SDLNK) & | |
1407 | D40_SREG_LNK_PHYS_LNK_MASK; | |
1408 | return is_link; | |
1409 | } | |
1410 | ||
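/*
 * Bytes left on the channel: the element counter (ECNT) remaining in the
 * hardware times the configured destination data width (1 << data_width
 * bytes per element).
 */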
1411 | static u32 d40_residue(struct d40_chan *d40c) | |
1412 | { | |
1413 | u32 num_elt; | |
1414 | ||
1415 | if (d40c->log_num != D40_PHY_CHAN) | |
508849ad | 1416 | num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK) |
8d318a50 LW |
1417 | >> D40_MEM_LCSP2_ECNT_POS; |
1418 | else | |
1419 | num_elt = (readl(d40c->base->virtbase + D40_DREG_PCBASE + | |
1420 | d40c->phy_chan->num * D40_DREG_PCDELTA + | |
1421 | D40_CHAN_REG_SDELT) & | |
508849ad LW |
1422 | D40_SREG_ELEM_PHY_ECNT_MASK) >> |
1423 | D40_SREG_ELEM_PHY_ECNT_POS; | |
8d318a50 LW |
1424 | return num_elt * (1 << d40c->dma_cfg.dst_info.data_width); |
1425 | } | |
1426 | ||
1427 | static int d40_resume(struct dma_chan *chan) | |
1428 | { | |
1429 | struct d40_chan *d40c = | |
1430 | container_of(chan, struct d40_chan, chan); | |
1431 | int res = 0; | |
1432 | unsigned long flags; | |
1433 | ||
1434 | spin_lock_irqsave(&d40c->lock, flags); | |
1435 | ||
0c32269d JA |
1436 | /* If there are bytes left to transfer or a linked tx, resume the job */ |
1437 | if (d40_residue(d40c) || d40_tx_is_linked(d40c)) { | |
1438 | if (d40c->log_num != D40_PHY_CHAN) | |
8d318a50 | 1439 | d40_config_set_event(d40c, true); |
8d318a50 | 1440 | res = d40_channel_execute_command(d40c, D40_DMA_RUN); |
0c32269d | 1441 | } |
8d318a50 | 1442 | |
8d318a50 LW |
1443 | spin_unlock_irqrestore(&d40c->lock, flags); |
1444 | return res; | |
1445 | } | |
1446 | ||
1447 | static u32 stedma40_residue(struct dma_chan *chan) | |
1448 | { | |
1449 | struct d40_chan *d40c = | |
1450 | container_of(chan, struct d40_chan, chan); | |
1451 | u32 bytes_left; | |
1452 | unsigned long flags; | |
1453 | ||
1454 | spin_lock_irqsave(&d40c->lock, flags); | |
1455 | bytes_left = d40_residue(d40c); | |
1456 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1457 | ||
1458 | return bytes_left; | |
1459 | } | |
1460 | ||
1461 | /* Public DMA functions in addition to the DMA engine framework */ | |
1462 | ||
1463 | int stedma40_set_psize(struct dma_chan *chan, | |
1464 | int src_psize, | |
1465 | int dst_psize) | |
1466 | { | |
1467 | struct d40_chan *d40c = | |
1468 | container_of(chan, struct d40_chan, chan); | |
1469 | unsigned long flags; | |
1470 | ||
1471 | spin_lock_irqsave(&d40c->lock, flags); | |
1472 | ||
1473 | if (d40c->log_num != D40_PHY_CHAN) { | |
1474 | d40c->log_def.lcsp1 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | |
1475 | d40c->log_def.lcsp3 &= ~D40_MEM_LCSP1_SCFG_PSIZE_MASK; | |
508849ad LW |
1476 | d40c->log_def.lcsp1 |= src_psize << |
1477 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | |
1478 | d40c->log_def.lcsp3 |= dst_psize << | |
1479 | D40_MEM_LCSP1_SCFG_PSIZE_POS; | |
8d318a50 LW |
1480 | goto out; |
1481 | } | |
1482 | ||
1483 | if (src_psize == STEDMA40_PSIZE_PHY_1) | |
1484 | d40c->src_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | |
1485 | else { | |
1486 | d40c->src_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | |
1487 | d40c->src_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | |
1488 | D40_SREG_CFG_PSIZE_POS); | |
1489 | d40c->src_def_cfg |= src_psize << D40_SREG_CFG_PSIZE_POS; | |
1490 | } | |
1491 | ||
1492 | if (dst_psize == STEDMA40_PSIZE_PHY_1) | |
1493 | d40c->dst_def_cfg &= ~(1 << D40_SREG_CFG_PHY_PEN_POS); | |
1494 | else { | |
1495 | d40c->dst_def_cfg |= 1 << D40_SREG_CFG_PHY_PEN_POS; | |
1496 | d40c->dst_def_cfg &= ~(STEDMA40_PSIZE_PHY_16 << | |
1497 | D40_SREG_CFG_PSIZE_POS); | |
1498 | d40c->dst_def_cfg |= dst_psize << D40_SREG_CFG_PSIZE_POS; | |
1499 | } | |
1500 | out: | |
1501 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1502 | return 0; | |
1503 | } | |
1504 | EXPORT_SYMBOL(stedma40_set_psize); | |
1505 | ||
1506 | struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan, | |
1507 | struct scatterlist *sgl_dst, | |
1508 | struct scatterlist *sgl_src, | |
1509 | unsigned int sgl_len, | |
2a614340 | 1510 | unsigned long dma_flags) |
8d318a50 LW |
1511 | { |
1512 | int res; | |
1513 | struct d40_desc *d40d; | |
1514 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | |
1515 | chan); | |
2a614340 | 1516 | unsigned long flags; |
8d318a50 | 1517 | |
0d0f6b8b JA |
1518 | if (d40c->phy_chan == NULL) { |
1519 | dev_err(&d40c->chan.dev->device, | |
1520 | "[%s] Unallocated channel.\n", __func__); | |
1521 | return ERR_PTR(-EINVAL); | |
1522 | } | |
1523 | ||
2a614340 | 1524 | spin_lock_irqsave(&d40c->lock, flags); |
8d318a50 LW |
1525 | d40d = d40_desc_get(d40c); |
1526 | ||
1527 | if (d40d == NULL) | |
1528 | goto err; | |
1529 | ||
8d318a50 | 1530 | d40d->lli_len = sgl_len; |
941b77a3 | 1531 | d40d->lli_tx_len = d40d->lli_len; |
2a614340 | 1532 | d40d->txd.flags = dma_flags; |
8d318a50 LW |
1533 | |
1534 | if (d40c->log_num != D40_PHY_CHAN) { | |
941b77a3 PF |
1535 | if (d40d->lli_len > d40c->base->plat_data->llis_per_log) |
1536 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | |
1537 | ||
8d318a50 LW |
1538 | if (sgl_len > 1) |
1539 | /* | |
1540 | * Check if there is space available in lcla. If not, | |
1541 | * split list into 1-length and run only in lcpa | |
1542 | * space. | |
1543 | */ | |
508849ad | 1544 | if (d40_lcla_id_get(d40c) != 0) |
941b77a3 | 1545 | d40d->lli_tx_len = 1; |
8d318a50 LW |
1546 | |
1547 | if (d40_pool_lli_alloc(d40d, sgl_len, true) < 0) { | |
1548 | dev_err(&d40c->chan.dev->device, | |
1549 | "[%s] Out of memory\n", __func__); | |
1550 | goto err; | |
1551 | } | |
1552 | ||
1553 | (void) d40_log_sg_to_lli(d40c->lcla.src_id, | |
1554 | sgl_src, | |
1555 | sgl_len, | |
1556 | d40d->lli_log.src, | |
1557 | d40c->log_def.lcsp1, | |
1558 | d40c->dma_cfg.src_info.data_width, | |
2a614340 | 1559 | dma_flags & DMA_PREP_INTERRUPT, |
941b77a3 | 1560 | d40d->lli_tx_len, |
8d318a50 LW |
1561 | d40c->base->plat_data->llis_per_log); |
1562 | ||
1563 | (void) d40_log_sg_to_lli(d40c->lcla.dst_id, | |
1564 | sgl_dst, | |
1565 | sgl_len, | |
1566 | d40d->lli_log.dst, | |
1567 | d40c->log_def.lcsp3, | |
1568 | d40c->dma_cfg.dst_info.data_width, | |
2a614340 | 1569 | dma_flags & DMA_PREP_INTERRUPT, |
941b77a3 | 1570 | d40d->lli_tx_len, |
8d318a50 LW |
1571 | d40c->base->plat_data->llis_per_log); |
1572 | ||
1573 | ||
1574 | } else { | |
1575 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | |
1576 | dev_err(&d40c->chan.dev->device, | |
1577 | "[%s] Out of memory\n", __func__); | |
1578 | goto err; | |
1579 | } | |
1580 | ||
1581 | res = d40_phy_sg_to_lli(sgl_src, | |
1582 | sgl_len, | |
1583 | 0, | |
1584 | d40d->lli_phy.src, | |
1585 | d40d->lli_phy.src_addr, | |
1586 | d40c->src_def_cfg, | |
1587 | d40c->dma_cfg.src_info.data_width, | |
1588 | d40c->dma_cfg.src_info.psize, | |
1589 | true); | |
1590 | ||
1591 | if (res < 0) | |
1592 | goto err; | |
1593 | ||
1594 | res = d40_phy_sg_to_lli(sgl_dst, | |
1595 | sgl_len, | |
1596 | 0, | |
1597 | d40d->lli_phy.dst, | |
1598 | d40d->lli_phy.dst_addr, | |
1599 | d40c->dst_def_cfg, | |
1600 | d40c->dma_cfg.dst_info.data_width, | |
1601 | d40c->dma_cfg.dst_info.psize, | |
1602 | true); | |
1603 | ||
1604 | if (res < 0) | |
1605 | goto err; | |
1606 | ||
1607 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | |
1608 | d40d->lli_pool.size, DMA_TO_DEVICE); | |
1609 | } | |
1610 | ||
1611 | dma_async_tx_descriptor_init(&d40d->txd, chan); | |
1612 | ||
1613 | d40d->txd.tx_submit = d40_tx_submit; | |
1614 | ||
2a614340 | 1615 | spin_unlock_irqrestore(&d40c->lock, flags); |
8d318a50 LW |
1616 | |
1617 | return &d40d->txd; | |
1618 | err: | |
2a614340 | 1619 | spin_unlock_irqrestore(&d40c->lock, flags); |
8d318a50 LW |
1620 | return NULL; |
1621 | } | |
1622 | EXPORT_SYMBOL(stedma40_memcpy_sg); | |
1623 | ||
1624 | bool stedma40_filter(struct dma_chan *chan, void *data) | |
1625 | { | |
1626 | struct stedma40_chan_cfg *info = data; | |
1627 | struct d40_chan *d40c = | |
1628 | container_of(chan, struct d40_chan, chan); | |
1629 | int err; | |
1630 | ||
1631 | if (data) { | |
1632 | err = d40_validate_conf(d40c, info); | |
1633 | if (!err) | |
1634 | d40c->dma_cfg = *info; | |
1635 | } else | |
1636 | err = d40_config_memcpy(d40c); | |
1637 | ||
1638 | return err == 0; | |
1639 | } | |
1640 | EXPORT_SYMBOL(stedma40_filter); | |
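The filter above is meant to be handed to the generic dmaengine channel allocator. A minimal client-side sketch, assuming a platform-specific stedma40_chan_cfg (only the .dir field is shown here; real configurations would also fill in device types, data widths and burst sizes):

/*
 * Client-side sketch (not part of this driver): requesting a DMA40
 * channel through dma_request_channel() with stedma40_filter(). The
 * configuration values are illustrative assumptions.
 */
#include <linux/dmaengine.h>
#include <plat/ste_dma40.h>

static struct dma_chan *example_request_rx_chan(void)
{
	dma_cap_mask_t mask;
	static struct stedma40_chan_cfg rx_cfg = {
		.dir = STEDMA40_PERIPH_TO_MEM,	/* device to memory */
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/*
	 * dma_request_channel() calls stedma40_filter(chan, &rx_cfg) on each
	 * candidate channel; on a match the filter validates the configuration
	 * and stores it in the d40_chan (see d40_validate_conf() above).
	 */
	return dma_request_channel(mask, stedma40_filter, &rx_cfg);
}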
1641 | ||
1642 | /* DMA ENGINE functions */ | |
1643 | static int d40_alloc_chan_resources(struct dma_chan *chan) | |
1644 | { | |
1645 | int err; | |
1646 | unsigned long flags; | |
1647 | struct d40_chan *d40c = | |
1648 | container_of(chan, struct d40_chan, chan); | |
ef1872ec | 1649 | bool is_free_phy; |
8d318a50 LW |
1650 | spin_lock_irqsave(&d40c->lock, flags); |
1651 | ||
1652 | d40c->completed = chan->cookie = 1; | |
1653 | ||
1654 | /* | |
1655 | * If no dma configuration is set (channel_type == 0) | |
ef1872ec | 1656 | * use default configuration (memcpy) |
8d318a50 LW |
1657 | */ |
1658 | if (d40c->dma_cfg.channel_type == 0) { | |
1659 | err = d40_config_memcpy(d40c); | |
ff0b12ba JA |
1660 | if (err) { |
1661 | dev_err(&d40c->chan.dev->device, | |
1662 | "[%s] Failed to configure memcpy channel\n", | |
1663 | __func__); | |
1664 | goto fail; | |
1665 | } | |
8d318a50 | 1666 | } |
ef1872ec | 1667 | is_free_phy = (d40c->phy_chan == NULL); |
8d318a50 LW |
1668 | |
1669 | err = d40_allocate_channel(d40c); | |
1670 | if (err) { | |
1671 | dev_err(&d40c->chan.dev->device, | |
1672 | "[%s] Failed to allocate channel\n", __func__); | |
ff0b12ba | 1673 | goto fail; |
8d318a50 LW |
1674 | } |
1675 | ||
ef1872ec LW |
1676 | /* Fill in basic CFG register values */ |
1677 | d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg, | |
1678 | &d40c->dst_def_cfg, d40c->log_num != D40_PHY_CHAN); | |
1679 | ||
1680 | if (d40c->log_num != D40_PHY_CHAN) { | |
1681 | d40_log_cfg(&d40c->dma_cfg, | |
1682 | &d40c->log_def.lcsp1, &d40c->log_def.lcsp3); | |
1683 | ||
1684 | if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) | |
1685 | d40c->lcpa = d40c->base->lcpa_base + | |
1686 | d40c->dma_cfg.src_dev_type * D40_LCPA_CHAN_SIZE; | |
1687 | else | |
1688 | d40c->lcpa = d40c->base->lcpa_base + | |
1689 | d40c->dma_cfg.dst_dev_type * | |
1690 | D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA; | |
1691 | } | |
1692 | ||
1693 | /* | |
1694 | * Only write channel configuration to the DMA if the physical | |
1695 | * resource is free. In case of multiple logical channels | |
1696 | * on the same physical resource, only the first write is necessary. | |
1697 | */ | |
1698 | if (is_free_phy) { | |
1699 | err = d40_config_write(d40c); | |
1700 | if (err) { | |
1701 | dev_err(&d40c->chan.dev->device, | |
1702 | "[%s] Failed to configure channel\n", | |
1703 | __func__); | |
1704 | } | |
8d318a50 | 1705 | } |
ff0b12ba | 1706 | fail: |
8d318a50 | 1707 | spin_unlock_irqrestore(&d40c->lock, flags); |
ff0b12ba | 1708 | return err; |
8d318a50 LW |
1709 | } |
1710 | ||
1711 | static void d40_free_chan_resources(struct dma_chan *chan) | |
1712 | { | |
1713 | struct d40_chan *d40c = | |
1714 | container_of(chan, struct d40_chan, chan); | |
1715 | int err; | |
1716 | unsigned long flags; | |
1717 | ||
0d0f6b8b JA |
1718 | if (d40c->phy_chan == NULL) { |
1719 | dev_err(&d40c->chan.dev->device, | |
1720 | "[%s] Cannot free unallocated channel\n", __func__); | |
1721 | return; | |
1722 | } | |
1723 | ||
1724 | ||
8d318a50 LW |
1725 | spin_lock_irqsave(&d40c->lock, flags); |
1726 | ||
1727 | err = d40_free_dma(d40c); | |
1728 | ||
1729 | if (err) | |
1730 | dev_err(&d40c->chan.dev->device, | |
1731 | "[%s] Failed to free channel\n", __func__); | |
1732 | spin_unlock_irqrestore(&d40c->lock, flags); | |
1733 | } | |
1734 | ||
1735 | static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan, | |
1736 | dma_addr_t dst, | |
1737 | dma_addr_t src, | |
1738 | size_t size, | |
2a614340 | 1739 | unsigned long dma_flags) |
8d318a50 LW |
1740 | { |
1741 | struct d40_desc *d40d; | |
1742 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | |
1743 | chan); | |
2a614340 | 1744 | unsigned long flags; |
8d318a50 LW |
1745 | int err = 0; |
1746 | ||
0d0f6b8b JA |
1747 | if (d40c->phy_chan == NULL) { |
1748 | dev_err(&d40c->chan.dev->device, | |
1749 | "[%s] Channel is not allocated.\n", __func__); | |
1750 | return ERR_PTR(-EINVAL); | |
1751 | } | |
1752 | ||
2a614340 | 1753 | spin_lock_irqsave(&d40c->lock, flags); |
8d318a50 LW |
1754 | d40d = d40_desc_get(d40c); |
1755 | ||
1756 | if (d40d == NULL) { | |
1757 | dev_err(&d40c->chan.dev->device, | |
1758 | "[%s] Descriptor is NULL\n", __func__); | |
1759 | goto err; | |
1760 | } | |
1761 | ||
2a614340 | 1762 | d40d->txd.flags = dma_flags; |
8d318a50 LW |
1763 | |
1764 | dma_async_tx_descriptor_init(&d40d->txd, chan); | |
1765 | ||
1766 | d40d->txd.tx_submit = d40_tx_submit; | |
1767 | ||
1768 | if (d40c->log_num != D40_PHY_CHAN) { | |
1769 | ||
1770 | if (d40_pool_lli_alloc(d40d, 1, true) < 0) { | |
1771 | dev_err(&d40c->chan.dev->device, | |
1772 | "[%s] Out of memory\n", __func__); | |
1773 | goto err; | |
1774 | } | |
1775 | d40d->lli_len = 1; | |
941b77a3 | 1776 | d40d->lli_tx_len = 1; |
8d318a50 LW |
1777 | |
1778 | d40_log_fill_lli(d40d->lli_log.src, | |
1779 | src, | |
1780 | size, | |
1781 | 0, | |
1782 | d40c->log_def.lcsp1, | |
1783 | d40c->dma_cfg.src_info.data_width, | |
2123a61e | 1784 | false, true); |
8d318a50 LW |
1785 | |
1786 | d40_log_fill_lli(d40d->lli_log.dst, | |
1787 | dst, | |
1788 | size, | |
1789 | 0, | |
1790 | d40c->log_def.lcsp3, | |
1791 | d40c->dma_cfg.dst_info.data_width, | |
1792 | true, true); | |
1793 | ||
1794 | } else { | |
1795 | ||
1796 | if (d40_pool_lli_alloc(d40d, 1, false) < 0) { | |
1797 | dev_err(&d40c->chan.dev->device, | |
1798 | "[%s] Out of memory\n", __func__); | |
1799 | goto err; | |
1800 | } | |
1801 | ||
1802 | err = d40_phy_fill_lli(d40d->lli_phy.src, | |
1803 | src, | |
1804 | size, | |
1805 | d40c->dma_cfg.src_info.psize, | |
1806 | 0, | |
1807 | d40c->src_def_cfg, | |
1808 | true, | |
1809 | d40c->dma_cfg.src_info.data_width, | |
1810 | false); | |
1811 | if (err) | |
1812 | goto err_fill_lli; | |
1813 | ||
1814 | err = d40_phy_fill_lli(d40d->lli_phy.dst, | |
1815 | dst, | |
1816 | size, | |
1817 | d40c->dma_cfg.dst_info.psize, | |
1818 | 0, | |
1819 | d40c->dst_def_cfg, | |
1820 | true, | |
1821 | d40c->dma_cfg.dst_info.data_width, | |
1822 | false); | |
1823 | ||
1824 | if (err) | |
1825 | goto err_fill_lli; | |
1826 | ||
1827 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | |
1828 | d40d->lli_pool.size, DMA_TO_DEVICE); | |
1829 | } | |
1830 | ||
2a614340 | 1831 | spin_unlock_irqrestore(&d40c->lock, flags); |
8d318a50 LW |
1832 | return &d40d->txd; |
1833 | ||
1834 | err_fill_lli: | |
1835 | dev_err(&d40c->chan.dev->device, | |
1836 | "[%s] Failed filling in PHY LLI\n", __func__); | |
1837 | d40_pool_lli_free(d40d); | |
1838 | err: | |
2a614340 | 1839 | spin_unlock_irqrestore(&d40c->lock, flags); |
8d318a50 LW |
1840 | return NULL; |
1841 | } | |
1842 | ||
1843 | static int d40_prep_slave_sg_log(struct d40_desc *d40d, | |
1844 | struct d40_chan *d40c, | |
1845 | struct scatterlist *sgl, | |
1846 | unsigned int sg_len, | |
1847 | enum dma_data_direction direction, | |
2a614340 | 1848 | unsigned long dma_flags) |
8d318a50 LW |
1849 | { |
1850 | dma_addr_t dev_addr = 0; | |
1851 | int total_size; | |
8d318a50 LW |
1852 | |
1853 | if (d40_pool_lli_alloc(d40d, sg_len, true) < 0) { | |
1854 | dev_err(&d40c->chan.dev->device, | |
1855 | "[%s] Out of memory\n", __func__); | |
1856 | return -ENOMEM; | |
1857 | } | |
1858 | ||
1859 | d40d->lli_len = sg_len; | |
941b77a3 PF |
1860 | if (d40d->lli_len <= d40c->base->plat_data->llis_per_log) |
1861 | d40d->lli_tx_len = d40d->lli_len; | |
1862 | else | |
1863 | d40d->lli_tx_len = d40c->base->plat_data->llis_per_log; | |
8d318a50 LW |
1864 | |
1865 | if (sg_len > 1) | |
1866 | /* | |
1867 | * Check if there is space available in LCLA. | |
1868 | * If not, split the list into one-element transfers | |
1869 | * and run only in LCPA space. | |
1870 | */ | |
508849ad | 1871 | if (d40_lcla_id_get(d40c) != 0) |
941b77a3 | 1872 | d40d->lli_tx_len = 1; |
8d318a50 | 1873 | |
2a614340 | 1874 | if (direction == DMA_FROM_DEVICE) |
8d318a50 | 1875 | dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; |
2a614340 | 1876 | else if (direction == DMA_TO_DEVICE) |
8d318a50 | 1877 | dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; |
2a614340 | 1878 | else |
8d318a50 | 1879 | return -EINVAL; |
2a614340 JA |
1880 | |
1881 | total_size = d40_log_sg_to_dev(&d40c->lcla, | |
1882 | sgl, sg_len, | |
1883 | &d40d->lli_log, | |
1884 | &d40c->log_def, | |
1885 | d40c->dma_cfg.src_info.data_width, | |
1886 | d40c->dma_cfg.dst_info.data_width, | |
1887 | direction, | |
1888 | dma_flags & DMA_PREP_INTERRUPT, | |
1889 | dev_addr, d40d->lli_tx_len, | |
1890 | d40c->base->plat_data->llis_per_log); | |
1891 | ||
8d318a50 LW |
1892 | if (total_size < 0) |
1893 | return -EINVAL; | |
1894 | ||
1895 | return 0; | |
1896 | } | |
1897 | ||
1898 | static int d40_prep_slave_sg_phy(struct d40_desc *d40d, | |
1899 | struct d40_chan *d40c, | |
1900 | struct scatterlist *sgl, | |
1901 | unsigned int sgl_len, | |
1902 | enum dma_data_direction direction, | |
2a614340 | 1903 | unsigned long dma_flags) |
8d318a50 LW |
1904 | { |
1905 | dma_addr_t src_dev_addr; | |
1906 | dma_addr_t dst_dev_addr; | |
1907 | int res; | |
1908 | ||
1909 | if (d40_pool_lli_alloc(d40d, sgl_len, false) < 0) { | |
1910 | dev_err(&d40c->chan.dev->device, | |
1911 | "[%s] Out of memory\n", __func__); | |
1912 | return -ENOMEM; | |
1913 | } | |
1914 | ||
1915 | d40d->lli_len = sgl_len; | |
941b77a3 | 1916 | d40d->lli_tx_len = sgl_len; |
8d318a50 LW |
1917 | |
1918 | if (direction == DMA_FROM_DEVICE) { | |
1919 | dst_dev_addr = 0; | |
1920 | src_dev_addr = d40c->base->plat_data->dev_rx[d40c->dma_cfg.src_dev_type]; | |
1921 | } else if (direction == DMA_TO_DEVICE) { | |
1922 | dst_dev_addr = d40c->base->plat_data->dev_tx[d40c->dma_cfg.dst_dev_type]; | |
1923 | src_dev_addr = 0; | |
1924 | } else | |
1925 | return -EINVAL; | |
1926 | ||
1927 | res = d40_phy_sg_to_lli(sgl, | |
1928 | sgl_len, | |
1929 | src_dev_addr, | |
1930 | d40d->lli_phy.src, | |
1931 | d40d->lli_phy.src_addr, | |
1932 | d40c->src_def_cfg, | |
1933 | d40c->dma_cfg.src_info.data_width, | |
1934 | d40c->dma_cfg.src_info.psize, | |
1935 | true); | |
1936 | if (res < 0) | |
1937 | return res; | |
1938 | ||
1939 | res = d40_phy_sg_to_lli(sgl, | |
1940 | sgl_len, | |
1941 | dst_dev_addr, | |
1942 | d40d->lli_phy.dst, | |
1943 | d40d->lli_phy.dst_addr, | |
1944 | d40c->dst_def_cfg, | |
1945 | d40c->dma_cfg.dst_info.data_width, | |
1946 | d40c->dma_cfg.dst_info.psize, | |
1947 | true); | |
1948 | if (res < 0) | |
1949 | return res; | |
1950 | ||
1951 | (void) dma_map_single(d40c->base->dev, d40d->lli_phy.src, | |
1952 | d40d->lli_pool.size, DMA_TO_DEVICE); | |
1953 | return 0; | |
1954 | } | |
1955 | ||
1956 | static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan, | |
1957 | struct scatterlist *sgl, | |
1958 | unsigned int sg_len, | |
1959 | enum dma_data_direction direction, | |
2a614340 | 1960 | unsigned long dma_flags) |
8d318a50 LW |
1961 | { |
1962 | struct d40_desc *d40d; | |
1963 | struct d40_chan *d40c = container_of(chan, struct d40_chan, | |
1964 | chan); | |
2a614340 | 1965 | unsigned long flags; |
8d318a50 LW |
1966 | int err; |
1967 | ||
0d0f6b8b JA |
1968 | if (d40c->phy_chan == NULL) { |
1969 | dev_err(&d40c->chan.dev->device, | |
1970 | "[%s] Cannot prepare unallocated channel\n", __func__); | |
1971 | return ERR_PTR(-EINVAL); | |
1972 | } | |
1973 | ||
8d318a50 LW |
1974 | if (d40c->dma_cfg.pre_transfer) |
1975 | d40c->dma_cfg.pre_transfer(chan, | |
1976 | d40c->dma_cfg.pre_transfer_data, | |
1977 | sg_dma_len(sgl)); | |
1978 | ||
2a614340 | 1979 | spin_lock_irqsave(&d40c->lock, flags); |
8d318a50 | 1980 | d40d = d40_desc_get(d40c); |
2a614340 | 1981 | spin_unlock_irqrestore(&d40c->lock, flags); |
8d318a50 LW |
1982 | |
1983 | if (d40d == NULL) | |
1984 | return NULL; | |
1985 | ||
8d318a50 LW |
1986 | if (d40c->log_num != D40_PHY_CHAN) |
1987 | err = d40_prep_slave_sg_log(d40d, d40c, sgl, sg_len, | |
2a614340 | 1988 | direction, dma_flags); |
8d318a50 LW |
1989 | else |
1990 | err = d40_prep_slave_sg_phy(d40d, d40c, sgl, sg_len, | |
2a614340 | 1991 | direction, dma_flags); |
8d318a50 LW |
1992 | if (err) { |
1993 | dev_err(&d40c->chan.dev->device, | |
1994 | "[%s] Failed to prepare %s slave sg job: %d\n", | |
1995 | __func__, | |
1996 | d40c->log_num != D40_PHY_CHAN ? "log" : "phy", err); | |
1997 | return NULL; | |
1998 | } | |
1999 | ||
2a614340 | 2000 | d40d->txd.flags = dma_flags; |
8d318a50 LW |
2001 | |
2002 | dma_async_tx_descriptor_init(&d40d->txd, chan); | |
2003 | ||
2004 | d40d->txd.tx_submit = d40_tx_submit; | |
2005 | ||
2006 | return &d40d->txd; | |
2007 | } | |
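Together with d40_tx_submit() and d40_issue_pending(), the prep function above forms the usual dmaengine three-step submit path. A hedged usage sketch, assuming "chan" was obtained via stedma40_filter() and "sgl" is an already mapped scatterlist:

/*
 * Usage sketch (assumptions noted in comments): submit a slave
 * scatter-gather job through the ops registered in d40_dmaengine_init().
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_submit_tx(struct dma_chan *chan,
			     struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Ends up in d40_prep_slave_sg() above. */
	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  DMA_TO_DEVICE,
						  DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	/* tx_submit is d40_tx_submit(); it queues the job and returns a cookie. */
	cookie = desc->tx_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* Start processing the queue; ends up in d40_issue_pending(). */
	chan->device->device_issue_pending(chan);
	return 0;
}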
2008 | ||
2009 | static enum dma_status d40_tx_status(struct dma_chan *chan, | |
2010 | dma_cookie_t cookie, | |
2011 | struct dma_tx_state *txstate) | |
2012 | { | |
2013 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2014 | dma_cookie_t last_used; | |
2015 | dma_cookie_t last_complete; | |
2016 | int ret; | |
2017 | ||
0d0f6b8b JA |
2018 | if (d40c->phy_chan == NULL) { |
2019 | dev_err(&d40c->chan.dev->device, | |
2020 | "[%s] Cannot read status of unallocated channel\n", | |
2021 | __func__); | |
2022 | return -EINVAL; | |
2023 | } | |
2024 | ||
8d318a50 LW |
2025 | last_complete = d40c->completed; |
2026 | last_used = chan->cookie; | |
2027 | ||
a5ebca47 JA |
2028 | if (d40_is_paused(d40c)) |
2029 | ret = DMA_PAUSED; | |
2030 | else | |
2031 | ret = dma_async_is_complete(cookie, last_complete, last_used); | |
8d318a50 | 2032 | |
a5ebca47 JA |
2033 | dma_set_tx_state(txstate, last_complete, last_used, |
2034 | stedma40_residue(chan)); | |
8d318a50 LW |
2035 | |
2036 | return ret; | |
2037 | } | |
2038 | ||
2039 | static void d40_issue_pending(struct dma_chan *chan) | |
2040 | { | |
2041 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2042 | unsigned long flags; | |
2043 | ||
0d0f6b8b JA |
2044 | if (d40c->phy_chan == NULL) { |
2045 | dev_err(&d40c->chan.dev->device, | |
2046 | "[%s] Channel is not allocated!\n", __func__); | |
2047 | return; | |
2048 | } | |
2049 | ||
8d318a50 LW |
2050 | spin_lock_irqsave(&d40c->lock, flags); |
2051 | ||
2052 | /* Busy means that pending jobs are already being processed */ | |
2053 | if (!d40c->busy) | |
2054 | (void) d40_queue_start(d40c); | |
2055 | ||
2056 | spin_unlock_irqrestore(&d40c->lock, flags); | |
2057 | } | |
2058 | ||
05827630 LW |
2059 | static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, |
2060 | unsigned long arg) | |
8d318a50 LW |
2061 | { |
2062 | unsigned long flags; | |
2063 | struct d40_chan *d40c = container_of(chan, struct d40_chan, chan); | |
2064 | ||
0d0f6b8b JA |
2065 | if (d40c->phy_chan == NULL) { |
2066 | dev_err(&d40c->chan.dev->device, | |
2067 | "[%s] Channel is not allocated!\n", __func__); | |
2068 | return -EINVAL; | |
2069 | } | |
2070 | ||
8d318a50 LW |
2071 | switch (cmd) { |
2072 | case DMA_TERMINATE_ALL: | |
2073 | spin_lock_irqsave(&d40c->lock, flags); | |
2074 | d40_term_all(d40c); | |
2075 | spin_unlock_irqrestore(&d40c->lock, flags); | |
2076 | return 0; | |
2077 | case DMA_PAUSE: | |
2078 | return d40_pause(chan); | |
2079 | case DMA_RESUME: | |
2080 | return d40_resume(chan); | |
2081 | } | |
2082 | ||
2083 | /* Other commands are unimplemented */ | |
2084 | return -ENXIO; | |
2085 | } | |
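The same device_control hook is also the client-visible pause/resume/terminate interface; the generic dmaengine command codes map directly onto the switch above. A brief sketch ("chan" is assumed to be an allocated stedma40 channel):

/* Sketch only, not driver code. */
#include <linux/dmaengine.h>

static void example_pause_resume_terminate(struct dma_chan *chan)
{
	/* Suspend the channel; handled by d40_pause(). */
	chan->device->device_control(chan, DMA_PAUSE, 0);

	/* Resume it; handled by d40_resume(). */
	chan->device->device_control(chan, DMA_RESUME, 0);

	/* Abort everything queued; handled by d40_term_all(). */
	chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
}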
2086 | ||
2087 | /* Initialization functions */ | |
2088 | ||
2089 | static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma, | |
2090 | struct d40_chan *chans, int offset, | |
2091 | int num_chans) | |
2092 | { | |
2093 | int i = 0; | |
2094 | struct d40_chan *d40c; | |
2095 | ||
2096 | INIT_LIST_HEAD(&dma->channels); | |
2097 | ||
2098 | for (i = offset; i < offset + num_chans; i++) { | |
2099 | d40c = &chans[i]; | |
2100 | d40c->base = base; | |
2101 | d40c->chan.device = dma; | |
2102 | ||
2103 | /* Invalidate lcla element */ | |
2104 | d40c->lcla.src_id = -1; | |
2105 | d40c->lcla.dst_id = -1; | |
2106 | ||
2107 | spin_lock_init(&d40c->lock); | |
2108 | ||
2109 | d40c->log_num = D40_PHY_CHAN; | |
2110 | ||
8d318a50 LW |
2111 | INIT_LIST_HEAD(&d40c->active); |
2112 | INIT_LIST_HEAD(&d40c->queue); | |
2113 | INIT_LIST_HEAD(&d40c->client); | |
2114 | ||
8d318a50 LW |
2115 | tasklet_init(&d40c->tasklet, dma_tasklet, |
2116 | (unsigned long) d40c); | |
2117 | ||
2118 | list_add_tail(&d40c->chan.device_node, | |
2119 | &dma->channels); | |
2120 | } | |
2121 | } | |
2122 | ||
2123 | static int __init d40_dmaengine_init(struct d40_base *base, | |
2124 | int num_reserved_chans) | |
2125 | { | |
2126 | int err; | |
2127 | ||
2128 | d40_chan_init(base, &base->dma_slave, base->log_chans, | |
2129 | 0, base->num_log_chans); | |
2130 | ||
2131 | dma_cap_zero(base->dma_slave.cap_mask); | |
2132 | dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask); | |
2133 | ||
2134 | base->dma_slave.device_alloc_chan_resources = d40_alloc_chan_resources; | |
2135 | base->dma_slave.device_free_chan_resources = d40_free_chan_resources; | |
2136 | base->dma_slave.device_prep_dma_memcpy = d40_prep_memcpy; | |
2137 | base->dma_slave.device_prep_slave_sg = d40_prep_slave_sg; | |
2138 | base->dma_slave.device_tx_status = d40_tx_status; | |
2139 | base->dma_slave.device_issue_pending = d40_issue_pending; | |
2140 | base->dma_slave.device_control = d40_control; | |
2141 | base->dma_slave.dev = base->dev; | |
2142 | ||
2143 | err = dma_async_device_register(&base->dma_slave); | |
2144 | ||
2145 | if (err) { | |
2146 | dev_err(base->dev, | |
2147 | "[%s] Failed to register slave channels\n", | |
2148 | __func__); | |
2149 | goto failure1; | |
2150 | } | |
2151 | ||
2152 | d40_chan_init(base, &base->dma_memcpy, base->log_chans, | |
2153 | base->num_log_chans, base->plat_data->memcpy_len); | |
2154 | ||
2155 | dma_cap_zero(base->dma_memcpy.cap_mask); | |
2156 | dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask); | |
2157 | ||
2158 | base->dma_memcpy.device_alloc_chan_resources = d40_alloc_chan_resources; | |
2159 | base->dma_memcpy.device_free_chan_resources = d40_free_chan_resources; | |
2160 | base->dma_memcpy.device_prep_dma_memcpy = d40_prep_memcpy; | |
2161 | base->dma_memcpy.device_prep_slave_sg = d40_prep_slave_sg; | |
2162 | base->dma_memcpy.device_tx_status = d40_tx_status; | |
2163 | base->dma_memcpy.device_issue_pending = d40_issue_pending; | |
2164 | base->dma_memcpy.device_control = d40_control; | |
2165 | base->dma_memcpy.dev = base->dev; | |
2166 | /* | |
2167 | * This controller can only access addresses at even | |
2168 | * 32-bit boundaries, i.e. aligned to 2^2 = 4 bytes. | |
2169 | */ | |
2170 | base->dma_memcpy.copy_align = 2; | |
2171 | ||
2172 | err = dma_async_device_register(&base->dma_memcpy); | |
2173 | ||
2174 | if (err) { | |
2175 | dev_err(base->dev, | |
2176 | "[%s] Failed to register memcpy only channels\n", | |
2177 | __func__); | |
2178 | goto failure2; | |
2179 | } | |
2180 | ||
2181 | d40_chan_init(base, &base->dma_both, base->phy_chans, | |
2182 | 0, num_reserved_chans); | |
2183 | ||
2184 | dma_cap_zero(base->dma_both.cap_mask); | |
2185 | dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask); | |
2186 | dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask); | |
2187 | ||
2188 | base->dma_both.device_alloc_chan_resources = d40_alloc_chan_resources; | |
2189 | base->dma_both.device_free_chan_resources = d40_free_chan_resources; | |
2190 | base->dma_both.device_prep_dma_memcpy = d40_prep_memcpy; | |
2191 | base->dma_both.device_prep_slave_sg = d40_prep_slave_sg; | |
2192 | base->dma_both.device_tx_status = d40_tx_status; | |
2193 | base->dma_both.device_issue_pending = d40_issue_pending; | |
2194 | base->dma_both.device_control = d40_control; | |
2195 | base->dma_both.dev = base->dev; | |
2196 | base->dma_both.copy_align = 2; | |
2197 | err = dma_async_device_register(&base->dma_both); | |
2198 | ||
2199 | if (err) { | |
2200 | dev_err(base->dev, | |
2201 | "[%s] Failed to register logical and physical capable channels\n", | |
2202 | __func__); | |
2203 | goto failure3; | |
2204 | } | |
2205 | return 0; | |
2206 | failure3: | |
2207 | dma_async_device_unregister(&base->dma_memcpy); | |
2208 | failure2: | |
2209 | dma_async_device_unregister(&base->dma_slave); | |
2210 | failure1: | |
2211 | return err; | |
2212 | } | |
2213 | ||
2214 | /* Initialization functions. */ | |
2215 | ||
2216 | static int __init d40_phy_res_init(struct d40_base *base) | |
2217 | { | |
2218 | int i; | |
2219 | int num_phy_chans_avail = 0; | |
2220 | u32 val[2]; | |
2221 | int odd_even_bit = -2; | |
2222 | ||
2223 | val[0] = readl(base->virtbase + D40_DREG_PRSME); | |
2224 | val[1] = readl(base->virtbase + D40_DREG_PRSMO); | |
2225 | ||
2226 | for (i = 0; i < base->num_phy_chans; i++) { | |
2227 | base->phy_res[i].num = i; | |
2228 | odd_even_bit += 2 * ((i % 2) == 0); | |
2229 | if (((val[i % 2] >> odd_even_bit) & 3) == 1) { | |
2230 | /* Mark security only channels as occupied */ | |
2231 | base->phy_res[i].allocated_src = D40_ALLOC_PHY; | |
2232 | base->phy_res[i].allocated_dst = D40_ALLOC_PHY; | |
2233 | } else { | |
2234 | base->phy_res[i].allocated_src = D40_ALLOC_FREE; | |
2235 | base->phy_res[i].allocated_dst = D40_ALLOC_FREE; | |
2236 | num_phy_chans_avail++; | |
2237 | } | |
2238 | spin_lock_init(&base->phy_res[i].lock); | |
2239 | } | |
2240 | dev_info(base->dev, "%d of %d physical DMA channels available\n", | |
2241 | num_phy_chans_avail, base->num_phy_chans); | |
2242 | ||
2243 | /* Verify settings extended vs standard */ | |
2244 | val[0] = readl(base->virtbase + D40_DREG_PRTYP); | |
2245 | ||
2246 | for (i = 0; i < base->num_phy_chans; i++) { | |
2247 | ||
2248 | if (base->phy_res[i].allocated_src == D40_ALLOC_FREE && | |
2249 | (val[0] & 0x3) != 1) | |
2250 | dev_info(base->dev, | |
2251 | "[%s] INFO: channel %d is misconfigured (%d)\n", | |
2252 | __func__, i, val[0] & 0x3); | |
2253 | ||
2254 | val[0] = val[0] >> 2; | |
2255 | } | |
2256 | ||
2257 | return num_phy_chans_avail; | |
2258 | } | |
2259 | ||
2260 | static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev) | |
2261 | { | |
2262 | static const struct d40_reg_val dma_id_regs[] = { | |
2263 | /* Peripheral Id */ | |
2264 | { .reg = D40_DREG_PERIPHID0, .val = 0x0040}, | |
2265 | { .reg = D40_DREG_PERIPHID1, .val = 0x0000}, | |
2266 | /* | |
2267 | * D40_DREG_PERIPHID2 Depends on HW revision: | |
2268 | * MOP500/HREF ED has 0x0008, | |
2269 | * ? has 0x0018, | |
2270 | * HREF V1 has 0x0028 | |
2271 | */ | |
2272 | { .reg = D40_DREG_PERIPHID3, .val = 0x0000}, | |
2273 | ||
2274 | /* PCell Id */ | |
2275 | { .reg = D40_DREG_CELLID0, .val = 0x000d}, | |
2276 | { .reg = D40_DREG_CELLID1, .val = 0x00f0}, | |
2277 | { .reg = D40_DREG_CELLID2, .val = 0x0005}, | |
2278 | { .reg = D40_DREG_CELLID3, .val = 0x00b1} | |
2279 | }; | |
2280 | struct stedma40_platform_data *plat_data; | |
2281 | struct clk *clk = NULL; | |
2282 | void __iomem *virtbase = NULL; | |
2283 | struct resource *res = NULL; | |
2284 | struct d40_base *base = NULL; | |
2285 | int num_log_chans = 0; | |
2286 | int num_phy_chans; | |
2287 | int i; | |
2288 | ||
2289 | clk = clk_get(&pdev->dev, NULL); | |
2290 | ||
2291 | if (IS_ERR(clk)) { | |
2292 | dev_err(&pdev->dev, "[%s] No matching clock found\n", | |
2293 | __func__); | |
2294 | goto failure; | |
2295 | } | |
2296 | ||
2297 | clk_enable(clk); | |
2298 | ||
2299 | /* Get IO for DMAC base address */ | |
2300 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base"); | |
2301 | if (!res) | |
2302 | goto failure; | |
2303 | ||
2304 | if (request_mem_region(res->start, resource_size(res), | |
2305 | D40_NAME " I/O base") == NULL) | |
2306 | goto failure; | |
2307 | ||
2308 | virtbase = ioremap(res->start, resource_size(res)); | |
2309 | if (!virtbase) | |
2310 | goto failure; | |
2311 | ||
2312 | /* HW version check */ | |
2313 | for (i = 0; i < ARRAY_SIZE(dma_id_regs); i++) { | |
2314 | if (dma_id_regs[i].val != | |
2315 | readl(virtbase + dma_id_regs[i].reg)) { | |
2316 | dev_err(&pdev->dev, | |
2317 | "[%s] Unknown hardware! Expected 0x%x at 0x%x but got 0x%x\n", | |
2318 | __func__, | |
2319 | dma_id_regs[i].val, | |
2320 | dma_id_regs[i].reg, | |
2321 | readl(virtbase + dma_id_regs[i].reg)); | |
2322 | goto failure; | |
2323 | } | |
2324 | } | |
2325 | ||
2326 | i = readl(virtbase + D40_DREG_PERIPHID2); | |
2327 | ||
2328 | if ((i & 0xf) != D40_PERIPHID2_DESIGNER) { | |
2329 | dev_err(&pdev->dev, | |
2330 | "[%s] Unknown designer! Got %x wanted %x\n", | |
2331 | __func__, i & 0xf, D40_PERIPHID2_DESIGNER); | |
2332 | goto failure; | |
2333 | } | |
2334 | ||
2335 | /* The number of physical channels on this HW */ | |
2336 | num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4; | |
2337 | ||
2338 | dev_info(&pdev->dev, "hardware revision: %d @ 0x%x\n", | |
2339 | (i >> 4) & 0xf, res->start); | |
2340 | ||
2341 | plat_data = pdev->dev.platform_data; | |
2342 | ||
2343 | /* Count the number of logical channels in use */ | |
2344 | for (i = 0; i < plat_data->dev_len; i++) | |
2345 | if (plat_data->dev_rx[i] != 0) | |
2346 | num_log_chans++; | |
2347 | ||
2348 | for (i = 0; i < plat_data->dev_len; i++) | |
2349 | if (plat_data->dev_tx[i] != 0) | |
2350 | num_log_chans++; | |
2351 | ||
2352 | base = kzalloc(ALIGN(sizeof(struct d40_base), 4) + | |
2353 | (num_phy_chans + num_log_chans + plat_data->memcpy_len) * | |
2354 | sizeof(struct d40_chan), GFP_KERNEL); | |
2355 | ||
2356 | if (base == NULL) { | |
2357 | dev_err(&pdev->dev, "[%s] Out of memory\n", __func__); | |
2358 | goto failure; | |
2359 | } | |
2360 | ||
2361 | base->clk = clk; | |
2362 | base->num_phy_chans = num_phy_chans; | |
2363 | base->num_log_chans = num_log_chans; | |
2364 | base->phy_start = res->start; | |
2365 | base->phy_size = resource_size(res); | |
2366 | base->virtbase = virtbase; | |
2367 | base->plat_data = plat_data; | |
2368 | base->dev = &pdev->dev; | |
2369 | base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4); | |
2370 | base->log_chans = &base->phy_chans[num_phy_chans]; | |
2371 | ||
2372 | base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res), | |
2373 | GFP_KERNEL); | |
2374 | if (!base->phy_res) | |
2375 | goto failure; | |
2376 | ||
2377 | base->lookup_phy_chans = kzalloc(num_phy_chans * | |
2378 | sizeof(struct d40_chan *), | |
2379 | GFP_KERNEL); | |
2380 | if (!base->lookup_phy_chans) | |
2381 | goto failure; | |
2382 | ||
2383 | if (num_log_chans + plat_data->memcpy_len) { | |
2384 | /* | |
2385 | * The max number of logical channels equals the number of event | |
2386 | * lines over all src devices and dst devices. | |
2387 | */ | |
2388 | base->lookup_log_chans = kzalloc(plat_data->dev_len * 2 * | |
2389 | sizeof(struct d40_chan *), | |
2390 | GFP_KERNEL); | |
2391 | if (!base->lookup_log_chans) | |
2392 | goto failure; | |
2393 | } | |
2394 | base->lcla_pool.alloc_map = kzalloc(num_phy_chans * sizeof(u32), | |
2395 | GFP_KERNEL); | |
2396 | if (!base->lcla_pool.alloc_map) | |
2397 | goto failure; | |
2398 | ||
c675b1b4 JA |
2399 | base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc), |
2400 | 0, SLAB_HWCACHE_ALIGN, | |
2401 | NULL); | |
2402 | if (base->desc_slab == NULL) | |
2403 | goto failure; | |
2404 | ||
8d318a50 LW |
2405 | return base; |
2406 | ||
2407 | failure: | |
2408 | if (clk) { | |
2409 | clk_disable(clk); | |
2410 | clk_put(clk); | |
2411 | } | |
2412 | if (virtbase) | |
2413 | iounmap(virtbase); | |
2414 | if (res) | |
2415 | release_mem_region(res->start, | |
2416 | resource_size(res)); | |
2419 | ||
2420 | if (base) { | |
2421 | kfree(base->lcla_pool.alloc_map); | |
2422 | kfree(base->lookup_log_chans); | |
2423 | kfree(base->lookup_phy_chans); | |
2424 | kfree(base->phy_res); | |
2425 | kfree(base); | |
2426 | } | |
2427 | ||
2428 | return NULL; | |
2429 | } | |
2430 | ||
2431 | static void __init d40_hw_init(struct d40_base *base) | |
2432 | { | |
2433 | ||
2434 | static const struct d40_reg_val dma_init_reg[] = { | |
2435 | /* Clock every part of the DMA block from start */ | |
2436 | { .reg = D40_DREG_GCC, .val = 0x0000ff01}, | |
2437 | ||
2438 | /* Interrupts on all logical channels */ | |
2439 | { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF}, | |
2440 | { .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF}, | |
2441 | { .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF}, | |
2442 | { .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF}, | |
2443 | { .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF}, | |
2444 | { .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF}, | |
2445 | { .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF}, | |
2446 | { .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF}, | |
2447 | { .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF}, | |
2448 | { .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF}, | |
2449 | { .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF}, | |
2450 | { .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF} | |
2451 | }; | |
2452 | int i; | |
2453 | u32 prmseo[2] = {0, 0}; | |
2454 | u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF}; | |
2455 | u32 pcmis = 0; | |
2456 | u32 pcicr = 0; | |
2457 | ||
2458 | for (i = 0; i < ARRAY_SIZE(dma_init_reg); i++) | |
2459 | writel(dma_init_reg[i].val, | |
2460 | base->virtbase + dma_init_reg[i].reg); | |
2461 | ||
2462 | /* Configure all our dma channels to default settings */ | |
2463 | for (i = 0; i < base->num_phy_chans; i++) { | |
2464 | ||
2465 | activeo[i % 2] = activeo[i % 2] << 2; | |
2466 | ||
2467 | if (base->phy_res[base->num_phy_chans - i - 1].allocated_src | |
2468 | == D40_ALLOC_PHY) { | |
2469 | activeo[i % 2] |= 3; | |
2470 | continue; | |
2471 | } | |
2472 | ||
2473 | /* Enable interrupt # */ | |
2474 | pcmis = (pcmis << 1) | 1; | |
2475 | ||
2476 | /* Clear interrupt # */ | |
2477 | pcicr = (pcicr << 1) | 1; | |
2478 | ||
2479 | /* Set channel to physical mode */ | |
2480 | prmseo[i % 2] = prmseo[i % 2] << 2; | |
2481 | prmseo[i % 2] |= 1; | |
2482 | ||
2483 | } | |
2484 | ||
2485 | writel(prmseo[1], base->virtbase + D40_DREG_PRMSE); | |
2486 | writel(prmseo[0], base->virtbase + D40_DREG_PRMSO); | |
2487 | writel(activeo[1], base->virtbase + D40_DREG_ACTIVE); | |
2488 | writel(activeo[0], base->virtbase + D40_DREG_ACTIVO); | |
2489 | ||
2490 | /* Write which interrupt to enable */ | |
2491 | writel(pcmis, base->virtbase + D40_DREG_PCMIS); | |
2492 | ||
2493 | /* Write which interrupt to clear */ | |
2494 | writel(pcicr, base->virtbase + D40_DREG_PCICR); | |
2495 | ||
2496 | } | |
2497 | ||
508849ad LW |
2498 | static int __init d40_lcla_allocate(struct d40_base *base) |
2499 | { | |
2500 | unsigned long *page_list; | |
2501 | int i, j; | |
2502 | int ret = 0; | |
2503 | ||
2504 | /* | |
2505 | * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned. | |
2506 | * To fulfil this hardware requirement without wasting 256 KiB, | |
2507 | * we allocate pages until we get an aligned one. | |
2508 | */ | |
2509 | page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS, | |
2510 | GFP_KERNEL); | |
2511 | ||
2512 | if (!page_list) { | |
2513 | ret = -ENOMEM; | |
2514 | goto failure; | |
2515 | } | |
2516 | ||
2517 | /* Calculate how many pages are required */ | |
2518 | base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE; | |
2519 | ||
2520 | for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) { | |
2521 | page_list[i] = __get_free_pages(GFP_KERNEL, | |
2522 | base->lcla_pool.pages); | |
2523 | if (!page_list[i]) { | |
2524 | ||
2525 | dev_err(base->dev, | |
2526 | "[%s] Failed to allocate %d pages.\n", | |
2527 | __func__, base->lcla_pool.pages); | |
2528 | ||
2529 | for (j = 0; j < i; j++) | |
2530 | free_pages(page_list[j], base->lcla_pool.pages); | |
2531 | goto failure; | |
2532 | } | |
2533 | ||
2534 | if ((virt_to_phys((void *)page_list[i]) & | |
2535 | (LCLA_ALIGNMENT - 1)) == 0) | |
2536 | break; | |
2537 | } | |
2538 | ||
2539 | for (j = 0; j < i; j++) | |
2540 | free_pages(page_list[j], base->lcla_pool.pages); | |
2541 | ||
2542 | if (i < MAX_LCLA_ALLOC_ATTEMPTS) { | |
2543 | base->lcla_pool.base = (void *)page_list[i]; | |
2544 | } else { | |
2545 | /* After many attempts, no success finding the correct | |
2546 | * alignment; fall back to allocating a big buffer. */ | |
2547 | dev_warn(base->dev, | |
2548 | "[%s] Failed to get %d pages @ 18 bit align.\n", | |
2549 | __func__, base->lcla_pool.pages); | |
2550 | base->lcla_pool.base_unaligned = kmalloc(SZ_1K * | |
2551 | base->num_phy_chans + | |
2552 | LCLA_ALIGNMENT, | |
2553 | GFP_KERNEL); | |
2554 | if (!base->lcla_pool.base_unaligned) { | |
2555 | ret = -ENOMEM; | |
2556 | goto failure; | |
2557 | } | |
2558 | ||
2559 | base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned, | |
2560 | LCLA_ALIGNMENT); | |
2561 | } | |
2562 | ||
2563 | writel(virt_to_phys(base->lcla_pool.base), | |
2564 | base->virtbase + D40_DREG_LCLA); | |
2565 | failure: | |
2566 | kfree(page_list); | |
2567 | return ret; | |
2568 | } | |
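For reference, LCLA_ALIGNMENT = 0x40000 means a 2^18-byte (256 KiB) alignment requirement, while the pool itself only needs SZ_1K bytes per physical channel, e.g. 8 KiB (two 4 KiB pages) for eight channels; hence the allocate-and-retry trick above instead of one huge aligned allocation. A standalone user-space analogue of the same trick is sketched below (illustrative only; the driver uses __get_free_pages(), which already hands back page-aligned blocks, and the helper name is hypothetical):

/* Keep allocating page-aligned blocks until one also satisfies the
 * stricter alignment, then free the rejected attempts so nothing is
 * permanently wasted. Illustration only, not driver code. */
#include <stdint.h>
#include <stdlib.h>

#define ALIGNMENT	0x40000UL	/* mirrors LCLA_ALIGNMENT (2^18) */
#define MAX_ATTEMPTS	256		/* mirrors MAX_LCLA_ALLOC_ATTEMPTS */

static void *alloc_aligned_by_retry(size_t size)
{
	void *tries[MAX_ATTEMPTS];
	void *hit = NULL;
	size_t sz = (size + 4095) & ~(size_t)4095; /* aligned_alloc wants a multiple */
	int i, n;

	for (n = 0; n < MAX_ATTEMPTS; n++) {
		tries[n] = aligned_alloc(4096, sz); /* page-aligned, like __get_free_pages() */
		if (!tries[n])
			break;
		if (((uintptr_t)tries[n] & (ALIGNMENT - 1)) == 0) {
			hit = tries[n];
			break;
		}
	}

	/* Free every rejected block; only the aligned one (if any) is kept. */
	for (i = 0; i < n; i++)
		free(tries[i]);

	return hit;
}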
2569 | ||
8d318a50 LW |
2570 | static int __init d40_probe(struct platform_device *pdev) |
2571 | { | |
2572 | int err; | |
2573 | int ret = -ENOENT; | |
2574 | struct d40_base *base; | |
2575 | struct resource *res = NULL; | |
2576 | int num_reserved_chans; | |
2577 | u32 val; | |
2578 | ||
2579 | base = d40_hw_detect_init(pdev); | |
2580 | ||
2581 | if (!base) | |
2582 | goto failure; | |
2583 | ||
2584 | num_reserved_chans = d40_phy_res_init(base); | |
2585 | ||
2586 | platform_set_drvdata(pdev, base); | |
2587 | ||
2588 | spin_lock_init(&base->interrupt_lock); | |
2589 | spin_lock_init(&base->execmd_lock); | |
2590 | ||
2591 | /* Get IO for logical channel parameter address */ | |
2592 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa"); | |
2593 | if (!res) { | |
2594 | ret = -ENOENT; | |
2595 | dev_err(&pdev->dev, | |
2596 | "[%s] No \"lcpa\" memory resource\n", | |
2597 | __func__); | |
2598 | goto failure; | |
2599 | } | |
2600 | base->lcpa_size = resource_size(res); | |
2601 | base->phy_lcpa = res->start; | |
2602 | ||
2603 | if (request_mem_region(res->start, resource_size(res), | |
2604 | D40_NAME " I/O lcpa") == NULL) { | |
2605 | ret = -EBUSY; | |
2606 | dev_err(&pdev->dev, | |
2607 | "[%s] Failed to request LCPA region 0x%x-0x%x\n", | |
2608 | __func__, res->start, res->end); | |
2609 | goto failure; | |
2610 | } | |
2611 | ||
2612 | /* We make use of ESRAM memory for this. */ | |
2613 | val = readl(base->virtbase + D40_DREG_LCPA); | |
2614 | if (res->start != val && val != 0) { | |
2615 | dev_warn(&pdev->dev, | |
2616 | "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n", | |
2617 | __func__, val, res->start); | |
2618 | } else | |
2619 | writel(res->start, base->virtbase + D40_DREG_LCPA); | |
2620 | ||
2621 | base->lcpa_base = ioremap(res->start, resource_size(res)); | |
2622 | if (!base->lcpa_base) { | |
2623 | ret = -ENOMEM; | |
2624 | dev_err(&pdev->dev, | |
2625 | "[%s] Failed to ioremap LCPA region\n", | |
2626 | __func__); | |
2627 | goto failure; | |
2628 | } | |
8d318a50 | 2629 | |
508849ad LW |
2630 | ret = d40_lcla_allocate(base); |
2631 | if (ret) { | |
2632 | dev_err(&pdev->dev, "[%s] Failed to allocate LCLA area\n", | |
2633 | __func__); | |
8d318a50 LW |
2634 | goto failure; |
2635 | } | |
2636 | ||
2637 | spin_lock_init(&base->lcla_pool.lock); | |
2638 | ||
2639 | base->lcla_pool.num_blocks = base->num_phy_chans; | |
2640 | ||
2641 | base->irq = platform_get_irq(pdev, 0); | |
2642 | ||
2643 | ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base); | |
2644 | ||
2645 | if (ret) { | |
2646 | dev_err(&pdev->dev, "[%s] No IRQ defined\n", __func__); | |
2647 | goto failure; | |
2648 | } | |
2649 | ||
2650 | err = d40_dmaengine_init(base, num_reserved_chans); | |
2651 | if (err) | |
2652 | goto failure; | |
2653 | ||
2654 | d40_hw_init(base); | |
2655 | ||
2656 | dev_info(base->dev, "initialized\n"); | |
2657 | return 0; | |
2658 | ||
2659 | failure: | |
2660 | if (base) { | |
c675b1b4 JA |
2661 | if (base->desc_slab) |
2662 | kmem_cache_destroy(base->desc_slab); | |
8d318a50 LW |
2663 | if (base->virtbase) |
2664 | iounmap(base->virtbase); | |
508849ad LW |
2665 | if (!base->lcla_pool.base_unaligned && base->lcla_pool.base) |
2666 | free_pages((unsigned long)base->lcla_pool.base, | |
2667 | base->lcla_pool.pages); | |
2668 | if (base->lcla_pool.base_unaligned) | |
2669 | kfree(base->lcla_pool.base_unaligned); | |
8d318a50 LW |
2670 | if (base->phy_lcpa) |
2671 | release_mem_region(base->phy_lcpa, | |
2672 | base->lcpa_size); | |
2673 | if (base->phy_start) | |
2674 | release_mem_region(base->phy_start, | |
2675 | base->phy_size); | |
2676 | if (base->clk) { | |
2677 | clk_disable(base->clk); | |
2678 | clk_put(base->clk); | |
2679 | } | |
2680 | ||
2681 | kfree(base->lcla_pool.alloc_map); | |
2682 | kfree(base->lookup_log_chans); | |
2683 | kfree(base->lookup_phy_chans); | |
2684 | kfree(base->phy_res); | |
2685 | kfree(base); | |
2686 | } | |
2687 | ||
2688 | dev_err(&pdev->dev, "[%s] probe failed\n", __func__); | |
2689 | return ret; | |
2690 | } | |
2691 | ||
2692 | static struct platform_driver d40_driver = { | |
2693 | .driver = { | |
2694 | .owner = THIS_MODULE, | |
2695 | .name = D40_NAME, | |
2696 | }, | |
2697 | }; | |
2698 | ||
2699 | int __init stedma40_init(void) | |
2700 | { | |
2701 | return platform_driver_probe(&d40_driver, d40_probe); | |
2702 | } | |
2703 | arch_initcall(stedma40_init); |