/*
 * Copyright (C) Ericsson AB 2007-2008
 * Copyright (C) ST-Ericsson SA 2008-2010
 * Author: Per Forlin <per.forlin@stericsson.com> for ST-Ericsson
 * Author: Jonas Aaberg <jonas.aberg@stericsson.com> for ST-Ericsson
 * License terms: GNU General Public License (GPL) version 2
 */

#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/amba/bus.h>
#include <linux/regulator/consumer.h>
#include <linux/platform_data/dma-ste-dma40.h>

#include "dmaengine.h"
#include "ste_dma40_ll.h"

#define D40_NAME "dma40"

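/* Stored in d40_chan::log_num to mark a physical channel, see chan_is_physical() */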
#define D40_PHY_CHAN -1

/* For masking out/in 2 bit channel positions */
#define D40_CHAN_POS(chan)  (2 * (chan / 2))
#define D40_CHAN_POS_MASK(chan) (0x3 << D40_CHAN_POS(chan))
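/*
 * Example: D40_CHAN_POS(4) == D40_CHAN_POS(5) == 4, i.e. each even/odd
 * pair of physical channels shares one 2-bit position; the even channel's
 * field lives in D40_DREG_ACTIVE and the odd one's in D40_DREG_ACTIVO
 * (see __d40_execute_command_phy()).
 */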

/* Maximum iterations taken before giving up suspending a channel */
#define D40_SUSPEND_MAX_IT 500

/* Milliseconds */
#define DMA40_AUTOSUSPEND_DELAY	100

/* Hardware requirement on LCLA alignment */
#define LCLA_ALIGNMENT 0x40000
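/* i.e. the LCLA area must start on a 256 KiB (0x40000) boundary */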

/* Max number of links per event group */
#define D40_LCLA_LINK_PER_EVENT_GRP 128
#define D40_LCLA_END D40_LCLA_LINK_PER_EVENT_GRP

/* Max number of logical channels per physical channel */
#define D40_MAX_LOG_CHAN_PER_PHY 32

/* Attempts before giving up on trying to get pages that are aligned */
#define MAX_LCLA_ALLOC_ATTEMPTS 256

/* Bit markings for allocation map */
#define D40_ALLOC_FREE		(1 << 31)
#define D40_ALLOC_PHY		(1 << 30)
#define D40_ALLOC_LOG_FREE	0

/* Reserved event lines for memcpy only. */
#define DB8500_DMA_MEMCPY_EV_0	51
#define DB8500_DMA_MEMCPY_EV_1	56
#define DB8500_DMA_MEMCPY_EV_2	57
#define DB8500_DMA_MEMCPY_EV_3	58
#define DB8500_DMA_MEMCPY_EV_4	59
#define DB8500_DMA_MEMCPY_EV_5	60

static int dma40_memcpy_channels[] = {
	DB8500_DMA_MEMCPY_EV_0,
	DB8500_DMA_MEMCPY_EV_1,
	DB8500_DMA_MEMCPY_EV_2,
	DB8500_DMA_MEMCPY_EV_3,
	DB8500_DMA_MEMCPY_EV_4,
	DB8500_DMA_MEMCPY_EV_5,
};

/* Default configuration for physical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_phy = {
	.mode = STEDMA40_MODE_PHYSICAL,
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_PHY_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_PHY_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/* Default configuration for logical memcpy */
struct stedma40_chan_cfg dma40_memcpy_conf_log = {
	.mode = STEDMA40_MODE_LOGICAL,
	.dir = STEDMA40_MEM_TO_MEM,

	.src_info.data_width = STEDMA40_BYTE_WIDTH,
	.src_info.psize = STEDMA40_PSIZE_LOG_1,
	.src_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,

	.dst_info.data_width = STEDMA40_BYTE_WIDTH,
	.dst_info.psize = STEDMA40_PSIZE_LOG_1,
	.dst_info.flow_ctrl = STEDMA40_NO_FLOW_CTRL,
};

/**
 * enum d40_command - The different commands and/or statuses.
 *
 * @D40_DMA_STOP: DMA channel command STOP or status STOPPED.
 * @D40_DMA_RUN: The DMA channel is RUNNING or the command is RUN.
 * @D40_DMA_SUSPEND_REQ: Request the DMA to SUSPEND as soon as possible.
 * @D40_DMA_SUSPENDED: The DMA channel is SUSPENDED.
 */
enum d40_command {
	D40_DMA_STOP = 0,
	D40_DMA_RUN = 1,
	D40_DMA_SUSPEND_REQ = 2,
	D40_DMA_SUSPENDED = 3
};

/*
 * enum d40_events - The different Event Enables for the event lines.
 *
 * @D40_DEACTIVATE_EVENTLINE: De-activate Event line, stopping the logical chan.
 * @D40_ACTIVATE_EVENTLINE: Activate the Event line, to start a logical chan.
 * @D40_SUSPEND_REQ_EVENTLINE: Request to suspend an event line.
 * @D40_ROUND_EVENTLINE: Status check for event line.
 */
enum d40_events {
	D40_DEACTIVATE_EVENTLINE = 0,
	D40_ACTIVATE_EVENTLINE = 1,
	D40_SUSPEND_REQ_EVENTLINE = 2,
	D40_ROUND_EVENTLINE = 3
};

/*
 * These are the registers that have to be saved and later restored
 * when the DMA hw is powered off.
 * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
 */
static u32 d40_backup_regs[] = {
	D40_DREG_LCPA,
	D40_DREG_LCLA,
	D40_DREG_PRMSE,
	D40_DREG_PRMSO,
	D40_DREG_PRMOE,
	D40_DREG_PRMOO,
};

#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)

/*
 * Since the 9540 and the 8540 share the same HW revision, use v4a for the
 * 9540 or earlier and v4b for the 8540 or later.
 * HW revision:
 * DB8500ed has revision 0
 * DB8500v1 has revision 2
 * DB8500v2 has revision 3
 * AP9540v1 has revision 4
 * DB8540v1 has revision 4
 * TODO: Check if all these registers have to be saved/restored on dma40 v4a.
 */
static u32 d40_backup_regs_v4a[] = {
	D40_DREG_PSEG1,
	D40_DREG_PSEG2,
	D40_DREG_PSEG3,
	D40_DREG_PSEG4,
	D40_DREG_PCEG1,
	D40_DREG_PCEG2,
	D40_DREG_PCEG3,
	D40_DREG_PCEG4,
	D40_DREG_RSEG1,
	D40_DREG_RSEG2,
	D40_DREG_RSEG3,
	D40_DREG_RSEG4,
	D40_DREG_RCEG1,
	D40_DREG_RCEG2,
	D40_DREG_RCEG3,
	D40_DREG_RCEG4,
};

#define BACKUP_REGS_SZ_V4A ARRAY_SIZE(d40_backup_regs_v4a)

static u32 d40_backup_regs_v4b[] = {
	D40_DREG_CPSEG1,
	D40_DREG_CPSEG2,
	D40_DREG_CPSEG3,
	D40_DREG_CPSEG4,
	D40_DREG_CPSEG5,
	D40_DREG_CPCEG1,
	D40_DREG_CPCEG2,
	D40_DREG_CPCEG3,
	D40_DREG_CPCEG4,
	D40_DREG_CPCEG5,
	D40_DREG_CRSEG1,
	D40_DREG_CRSEG2,
	D40_DREG_CRSEG3,
	D40_DREG_CRSEG4,
	D40_DREG_CRSEG5,
	D40_DREG_CRCEG1,
	D40_DREG_CRCEG2,
	D40_DREG_CRCEG3,
	D40_DREG_CRCEG4,
	D40_DREG_CRCEG5,
};

#define BACKUP_REGS_SZ_V4B ARRAY_SIZE(d40_backup_regs_v4b)

static u32 d40_backup_regs_chan[] = {
	D40_CHAN_REG_SSCFG,
	D40_CHAN_REG_SSELT,
	D40_CHAN_REG_SSPTR,
	D40_CHAN_REG_SSLNK,
	D40_CHAN_REG_SDCFG,
	D40_CHAN_REG_SDELT,
	D40_CHAN_REG_SDPTR,
	D40_CHAN_REG_SDLNK,
};

#define BACKUP_REGS_SZ_MAX ((BACKUP_REGS_SZ_V4A > BACKUP_REGS_SZ_V4B) ? \
			     BACKUP_REGS_SZ_V4A : BACKUP_REGS_SZ_V4B)

/**
 * struct d40_interrupt_lookup - lookup table for interrupt handler
 *
 * @src: Interrupt mask register.
 * @clr: Interrupt clear register.
 * @is_error: true if this is an error interrupt.
 * @offset: start delta in the lookup_log_chans in d40_base. If it equals
 * D40_PHY_CHAN, the lookup_phy_chans shall be used instead.
 */
struct d40_interrupt_lookup {
	u32 src;
	u32 clr;
	bool is_error;
	int offset;
};

static struct d40_interrupt_lookup il_v4a[] = {
	{D40_DREG_LCTIS0, D40_DREG_LCICR0, false,  0},
	{D40_DREG_LCTIS1, D40_DREG_LCICR1, false, 32},
	{D40_DREG_LCTIS2, D40_DREG_LCICR2, false, 64},
	{D40_DREG_LCTIS3, D40_DREG_LCICR3, false, 96},
	{D40_DREG_LCEIS0, D40_DREG_LCICR0, true,   0},
	{D40_DREG_LCEIS1, D40_DREG_LCICR1, true,  32},
	{D40_DREG_LCEIS2, D40_DREG_LCICR2, true,  64},
	{D40_DREG_LCEIS3, D40_DREG_LCICR3, true,  96},
	{D40_DREG_PCTIS,  D40_DREG_PCICR,  false, D40_PHY_CHAN},
	{D40_DREG_PCEIS,  D40_DREG_PCICR,  true,  D40_PHY_CHAN},
};

static struct d40_interrupt_lookup il_v4b[] = {
	{D40_DREG_CLCTIS1, D40_DREG_CLCICR1, false,  0},
	{D40_DREG_CLCTIS2, D40_DREG_CLCICR2, false, 32},
	{D40_DREG_CLCTIS3, D40_DREG_CLCICR3, false, 64},
	{D40_DREG_CLCTIS4, D40_DREG_CLCICR4, false, 96},
	{D40_DREG_CLCTIS5, D40_DREG_CLCICR5, false, 128},
	{D40_DREG_CLCEIS1, D40_DREG_CLCICR1, true,   0},
	{D40_DREG_CLCEIS2, D40_DREG_CLCICR2, true,  32},
	{D40_DREG_CLCEIS3, D40_DREG_CLCICR3, true,  64},
	{D40_DREG_CLCEIS4, D40_DREG_CLCICR4, true,  96},
	{D40_DREG_CLCEIS5, D40_DREG_CLCICR5, true,  128},
	{D40_DREG_CPCTIS,  D40_DREG_CPCICR,  false, D40_PHY_CHAN},
	{D40_DREG_CPCEIS,  D40_DREG_CPCICR,  true,  D40_PHY_CHAN},
};

/**
 * struct d40_reg_val - simple lookup struct
 *
 * @reg: The register.
 * @val: The value that belongs to the register in reg.
 */
struct d40_reg_val {
	unsigned int reg;
	unsigned int val;
};

static __initdata struct d40_reg_val dma_init_reg_v4a[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS0, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_LCTIS3, .val = 0xFFFFFFFF}
};

static __initdata struct d40_reg_val dma_init_reg_v4b[] = {
	/* Clock every part of the DMA block from start */
	{ .reg = D40_DREG_GCC,     .val = D40_DREG_GCC_ENABLE_ALL},

	/* Interrupts on all logical channels */
	{ .reg = D40_DREG_CLCMIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCMIS5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCICR5, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS1, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS2, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS3, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS4, .val = 0xFFFFFFFF},
	{ .reg = D40_DREG_CLCTIS5, .val = 0xFFFFFFFF}
};

/**
 * struct d40_lli_pool - Structure for keeping LLIs in memory
 *
 * @base: Pointer to memory area when the pre_alloc_lli's are not large
 * enough, i.e. bigger than the most common case, 1 dst and 1 src. NULL if
 * pre_alloc_lli is used.
 * @dma_addr: DMA address, if mapped.
 * @size: The size in bytes of the memory at base or the size of pre_alloc_lli.
 * @pre_alloc_lli: Pre allocated area for the most common case of transfers,
 * one buffer to one buffer.
 */
struct d40_lli_pool {
	void *base;
	int size;
	dma_addr_t dma_addr;
	/* Space for dst and src, plus an extra for padding */
	u8 pre_alloc_lli[3 * sizeof(struct d40_phy_lli)];
};

/**
 * struct d40_desc - A descriptor is one DMA job.
 *
 * @lli_phy: LLI settings for physical channel. Both src and dst
 * point into the lli_pool, to base if lli_len > 1 or to pre_alloc_lli if
 * lli_len equals one.
 * @lli_log: Same as above but for logical channels.
 * @lli_pool: The pool with two entries pre-allocated.
 * @lli_len: Number of llis of current descriptor.
 * @lli_current: Number of transferred llis.
 * @lcla_alloc: Number of LCLA entries allocated.
 * @txd: DMA engine struct. Used, among other things, for communication
 * during a transfer.
 * @node: List entry.
 * @is_in_client_list: true if the client owns this descriptor.
 * @cyclic: true if this is a cyclic job.
 *
 * This descriptor is used for both logical and physical transfers.
 */
struct d40_desc {
	/* LLI physical */
	struct d40_phy_lli_bidir lli_phy;
	/* LLI logical */
	struct d40_log_lli_bidir lli_log;

	struct d40_lli_pool lli_pool;
	int lli_len;
	int lli_current;
	int lcla_alloc;

	struct dma_async_tx_descriptor txd;
	struct list_head node;

	bool is_in_client_list;
	bool cyclic;
};

/**
 * struct d40_lcla_pool - LCLA pool settings and data.
 *
 * @base: The virtual address of LCLA. 18 bit aligned.
 * @dma_addr: DMA address, if mapped.
 * @base_unaligned: The original kmalloc pointer, if kmalloc is used.
 * This pointer is only there for clean-up on error.
 * @pages: The number of pages needed for all physical channels.
 * Only used later for clean-up on error.
 * @lock: Lock to protect the content in this struct.
 * @alloc_map: big map over which LCLA entry is owned by which job.
 */
struct d40_lcla_pool {
	void *base;
	dma_addr_t dma_addr;
	void *base_unaligned;
	int pages;
	spinlock_t lock;
	struct d40_desc **alloc_map;
};

/**
 * struct d40_phy_res - struct for handling eventlines mapped to physical
 * channels.
 *
 * @lock: A lock protecting this entity.
 * @reserved: True if used by secure world or otherwise.
 * @num: The physical channel number of this entity.
 * @allocated_src: Bit mapped to show which src event lines are mapped to
 * this physical channel. Can also be free or physically allocated.
 * @allocated_dst: Same as for src but is dst.
 * allocated_dst and allocated_src use the D40_ALLOC* defines as well as
 * event line number.
 * @use_soft_lli: To mark if the linked lists of channel are managed by SW.
 */
struct d40_phy_res {
	spinlock_t lock;
	bool reserved;
	int num;
	u32 allocated_src;
	u32 allocated_dst;
	bool use_soft_lli;
};

struct d40_base;

/**
 * struct d40_chan - Struct that describes a channel.
 *
 * @lock: A spinlock to protect this struct.
 * @log_num: The logical number, if any, of this channel.
 * @pending_tx: The number of pending transfers. Used between interrupt handler
 * and tasklet.
 * @busy: Set to true when transfer is ongoing on this channel.
 * @phy_chan: Pointer to physical channel which this instance runs on. If this
 * pointer is NULL, then the channel is not allocated.
 * @chan: DMA engine handle.
 * @tasklet: Tasklet that gets scheduled from interrupt context to complete a
 * transfer and call client callback.
 * @client: Client owned descriptor list.
 * @pending_queue: Submitted jobs, to be issued by issue_pending().
 * @active: Active descriptor.
 * @done: Completed jobs.
 * @queue: Queued jobs.
 * @prepare_queue: Prepared jobs.
 * @dma_cfg: The client configuration of this dma channel.
 * @configured: whether the dma_cfg configuration is valid.
 * @base: Pointer to the device instance struct.
 * @src_def_cfg: Default cfg register setting for src.
 * @dst_def_cfg: Default cfg register setting for dst.
 * @log_def: Default logical channel settings.
 * @lcpa: Pointer to dst and src lcpa settings.
 * @runtime_addr: runtime configured address.
 * @runtime_direction: runtime configured direction.
 *
 * This struct can either "be" a logical or a physical channel.
 */
struct d40_chan {
	spinlock_t lock;
	int log_num;
	int pending_tx;
	bool busy;
	struct d40_phy_res *phy_chan;
	struct dma_chan chan;
	struct tasklet_struct tasklet;
	struct list_head client;
	struct list_head pending_queue;
	struct list_head active;
	struct list_head done;
	struct list_head queue;
	struct list_head prepare_queue;
	struct stedma40_chan_cfg dma_cfg;
	bool configured;
	struct d40_base *base;
	/* Default register configurations */
	u32 src_def_cfg;
	u32 dst_def_cfg;
	struct d40_def_lcsp log_def;
	struct d40_log_lli_full *lcpa;
	/* Runtime reconfiguration */
	dma_addr_t runtime_addr;
	enum dma_transfer_direction runtime_direction;
};

/**
 * struct d40_gen_dmac - generic values to represent u8500/u8540 DMA
 * controller
 *
 * @backup: the pointer to the registers address array for backup
 * @backup_size: the size of the registers address array for backup
 * @realtime_en: the realtime enable register
 * @realtime_clear: the realtime clear register
 * @high_prio_en: the high priority enable register
 * @high_prio_clear: the high priority clear register
 * @interrupt_en: the interrupt enable register
 * @interrupt_clear: the interrupt clear register
 * @il: the pointer to struct d40_interrupt_lookup
 * @il_size: the size of d40_interrupt_lookup array
 * @init_reg: the pointer to the struct d40_reg_val
 * @init_reg_size: the size of d40_reg_val array
 */
struct d40_gen_dmac {
	u32 *backup;
	u32 backup_size;
	u32 realtime_en;
	u32 realtime_clear;
	u32 high_prio_en;
	u32 high_prio_clear;
	u32 interrupt_en;
	u32 interrupt_clear;
	struct d40_interrupt_lookup *il;
	u32 il_size;
	struct d40_reg_val *init_reg;
	u32 init_reg_size;
};

/**
 * struct d40_base - The big global struct, one for each probed instance.
 *
 * @interrupt_lock: Lock used to make sure one interrupt is handled at a time.
 * @execmd_lock: Lock for execute command usage since several channels share
 * the same physical register.
 * @dev: The device structure.
 * @virtbase: The virtual base address of the DMA's register.
 * @rev: silicon revision detected.
 * @clk: Pointer to the DMA clock structure.
 * @phy_start: Physical memory start of the DMA registers.
 * @phy_size: Size of the DMA register map.
 * @irq: The IRQ number.
 * @num_phy_chans: The number of physical channels. Read from HW. This
 * is the number of available channels for this driver, not counting "Secure
 * mode" allocated physical channels.
 * @num_log_chans: The number of logical channels. Calculated from
 * num_phy_chans.
 * @dma_both: dma_device channels that can do both memcpy and slave transfers.
 * @dma_slave: dma_device channels that can only do slave transfers.
 * @dma_memcpy: dma_device channels that can only do memcpy transfers.
 * @phy_chans: Room for all possible physical channels in system.
 * @log_chans: Room for all possible logical channels in system.
 * @lookup_log_chans: Used to map interrupt number to logical channel. Points
 * to log_chans entries.
 * @lookup_phy_chans: Used to map interrupt number to physical channel. Points
 * to phy_chans entries.
 * @plat_data: Pointer to provided platform_data which is the driver
 * configuration.
 * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
 * @phy_res: Vector containing all physical channels.
 * @lcla_pool: lcla pool settings and data.
 * @lcpa_base: The virtual mapped address of LCPA.
 * @phy_lcpa: The physical address of the LCPA.
 * @lcpa_size: The size of the LCPA area.
 * @desc_slab: cache for descriptors.
 * @reg_val_backup: Here the values of some hardware registers are stored
 * before the DMA is powered off. They are restored when the power is back on.
 * @reg_val_backup_v4: Backup of registers that only exist on dma40 v3 and
 * later.
 * @reg_val_backup_chan: Backup data for standard channel parameter registers.
 * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
 * @initialized: true if the dma has been initialized.
 * @gen_dmac: the struct for generic registers values to represent u8500/8540
 * DMA controller
 */
struct d40_base {
	spinlock_t interrupt_lock;
	spinlock_t execmd_lock;
	struct device *dev;
	void __iomem *virtbase;
	u8 rev:4;
	struct clk *clk;
	phys_addr_t phy_start;
	resource_size_t phy_size;
	int irq;
	int num_phy_chans;
	int num_log_chans;
	struct device_dma_parameters dma_parms;
	struct dma_device dma_both;
	struct dma_device dma_slave;
	struct dma_device dma_memcpy;
	struct d40_chan *phy_chans;
	struct d40_chan *log_chans;
	struct d40_chan **lookup_log_chans;
	struct d40_chan **lookup_phy_chans;
	struct stedma40_platform_data *plat_data;
	struct regulator *lcpa_regulator;
	/* Physical half channels */
	struct d40_phy_res *phy_res;
	struct d40_lcla_pool lcla_pool;
	void *lcpa_base;
	dma_addr_t phy_lcpa;
	resource_size_t lcpa_size;
	struct kmem_cache *desc_slab;
	u32 reg_val_backup[BACKUP_REGS_SZ];
	u32 reg_val_backup_v4[BACKUP_REGS_SZ_MAX];
	u32 *reg_val_backup_chan;
	u16 gcc_pwr_off_mask;
	bool initialized;
	struct d40_gen_dmac gen_dmac;
};

static struct device *chan2dev(struct d40_chan *d40c)
{
	return &d40c->chan.dev->device;
}

static bool chan_is_physical(struct d40_chan *chan)
{
	return chan->log_num == D40_PHY_CHAN;
}

static bool chan_is_logical(struct d40_chan *chan)
{
	return !chan_is_physical(chan);
}

static void __iomem *chan_base(struct d40_chan *chan)
{
	return chan->base->virtbase + D40_DREG_PCBASE +
	       chan->phy_chan->num * D40_DREG_PCDELTA;
}

#define d40_err(dev, format, arg...)		\
	dev_err(dev, "[%s] " format, __func__, ## arg)

#define chan_err(d40c, format, arg...)		\
	d40_err(chan2dev(d40c), format, ## arg)

static int d40_pool_lli_alloc(struct d40_chan *d40c, struct d40_desc *d40d,
			      int lli_len)
{
	bool is_log = chan_is_logical(d40c);
	u32 align;
	void *base;

	if (is_log)
		align = sizeof(struct d40_log_lli);
	else
		align = sizeof(struct d40_phy_lli);

	if (lli_len == 1) {
		base = d40d->lli_pool.pre_alloc_lli;
		d40d->lli_pool.size = sizeof(d40d->lli_pool.pre_alloc_lli);
		d40d->lli_pool.base = NULL;
	} else {
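		/* Room for both the src and the dst LLI lists (hence * 2). */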
		d40d->lli_pool.size = lli_len * 2 * align;

		base = kmalloc(d40d->lli_pool.size + align, GFP_NOWAIT);
		d40d->lli_pool.base = base;

		if (d40d->lli_pool.base == NULL)
			return -ENOMEM;
	}

	if (is_log) {
		d40d->lli_log.src = PTR_ALIGN(base, align);
		d40d->lli_log.dst = d40d->lli_log.src + lli_len;

		d40d->lli_pool.dma_addr = 0;
	} else {
		d40d->lli_phy.src = PTR_ALIGN(base, align);
		d40d->lli_phy.dst = d40d->lli_phy.src + lli_len;

		d40d->lli_pool.dma_addr = dma_map_single(d40c->base->dev,
							 d40d->lli_phy.src,
							 d40d->lli_pool.size,
							 DMA_TO_DEVICE);

		if (dma_mapping_error(d40c->base->dev,
				      d40d->lli_pool.dma_addr)) {
			kfree(d40d->lli_pool.base);
			d40d->lli_pool.base = NULL;
			d40d->lli_pool.dma_addr = 0;
			return -ENOMEM;
		}
	}

	return 0;
}

static void d40_pool_lli_free(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (d40d->lli_pool.dma_addr)
		dma_unmap_single(d40c->base->dev, d40d->lli_pool.dma_addr,
				 d40d->lli_pool.size, DMA_TO_DEVICE);

	kfree(d40d->lli_pool.base);
	d40d->lli_pool.base = NULL;
	d40d->lli_pool.size = 0;
	d40d->lli_log.src = NULL;
	d40d->lli_log.dst = NULL;
	d40d->lli_phy.src = NULL;
	d40d->lli_phy.dst = NULL;
}

static int d40_lcla_alloc_one(struct d40_chan *d40c,
			      struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	/*
	 * Allocate both src and dst at the same time; hence only half of the
	 * links are usable, and the scan starts at 1 because entry 0 is
	 * reserved as the end-of-list marker.
	 */
	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (!d40c->base->lcla_pool.alloc_map[idx]) {
			d40c->base->lcla_pool.alloc_map[idx] = d40d;
			d40d->lcla_alloc++;
			ret = i;
			break;
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static int d40_lcla_free_all(struct d40_chan *d40c,
			     struct d40_desc *d40d)
{
	unsigned long flags;
	int i;
	int ret = -EINVAL;

	if (chan_is_physical(d40c))
		return 0;

	spin_lock_irqsave(&d40c->base->lcla_pool.lock, flags);

	for (i = 1 ; i < D40_LCLA_LINK_PER_EVENT_GRP / 2; i++) {
		int idx = d40c->phy_chan->num * D40_LCLA_LINK_PER_EVENT_GRP + i;

		if (d40c->base->lcla_pool.alloc_map[idx] == d40d) {
			d40c->base->lcla_pool.alloc_map[idx] = NULL;
			d40d->lcla_alloc--;
			if (d40d->lcla_alloc == 0) {
				ret = 0;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&d40c->base->lcla_pool.lock, flags);

	return ret;
}

static void d40_desc_remove(struct d40_desc *d40d)
{
	list_del(&d40d->node);
}

static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
{
	struct d40_desc *desc = NULL;

	if (!list_empty(&d40c->client)) {
		struct d40_desc *d;
		struct d40_desc *_d;

		list_for_each_entry_safe(d, _d, &d40c->client, node) {
			if (async_tx_test_ack(&d->txd)) {
				d40_desc_remove(d);
				desc = d;
				memset(desc, 0, sizeof(*desc));
				break;
			}
		}
	}

	if (!desc)
		desc = kmem_cache_zalloc(d40c->base->desc_slab, GFP_NOWAIT);

	if (desc)
		INIT_LIST_HEAD(&desc->node);

	return desc;
}

782{
698e4732 783
b00f938c 784 d40_pool_lli_free(d40c, d40d);
698e4732 785 d40_lcla_free_all(d40c, d40d);
c675b1b4 786 kmem_cache_free(d40c->base->desc_slab, d40d);
8d318a50
LW
787}
788
789static void d40_desc_submit(struct d40_chan *d40c, struct d40_desc *desc)
790{
791 list_add_tail(&desc->node, &d40c->active);
792}
793
1c4b0927
RV
794static void d40_phy_lli_load(struct d40_chan *chan, struct d40_desc *desc)
795{
796 struct d40_phy_lli *lli_dst = desc->lli_phy.dst;
797 struct d40_phy_lli *lli_src = desc->lli_phy.src;
798 void __iomem *base = chan_base(chan);
799
800 writel(lli_src->reg_cfg, base + D40_CHAN_REG_SSCFG);
801 writel(lli_src->reg_elt, base + D40_CHAN_REG_SSELT);
802 writel(lli_src->reg_ptr, base + D40_CHAN_REG_SSPTR);
803 writel(lli_src->reg_lnk, base + D40_CHAN_REG_SSLNK);
804
805 writel(lli_dst->reg_cfg, base + D40_CHAN_REG_SDCFG);
806 writel(lli_dst->reg_elt, base + D40_CHAN_REG_SDELT);
807 writel(lli_dst->reg_ptr, base + D40_CHAN_REG_SDPTR);
808 writel(lli_dst->reg_lnk, base + D40_CHAN_REG_SDLNK);
809}
810
static void d40_desc_done(struct d40_chan *d40c, struct d40_desc *desc)
{
	list_add_tail(&desc->node, &d40c->done);
}

static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
{
	struct d40_lcla_pool *pool = &chan->base->lcla_pool;
	struct d40_log_lli_bidir *lli = &desc->lli_log;
	int lli_current = desc->lli_current;
	int lli_len = desc->lli_len;
	bool cyclic = desc->cyclic;
	int curr_lcla = -EINVAL;
	int first_lcla = 0;
	bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
	bool linkback;

	/*
	 * We may have partially running cyclic transfers, in case we didn't
	 * get enough LCLA entries.
	 */
	linkback = cyclic && lli_current == 0;

	/*
	 * For linkback, we need one LCLA even with only one link, because we
	 * can't link back to the one in LCPA space.
	 */
	if (linkback || (lli_len - lli_current > 1)) {
		/*
		 * If the channel is expected to use only soft_lli don't
		 * allocate a lcla. This is to avoid a HW issue that exists
		 * in some controllers during a peripheral-to-memory transfer
		 * that uses linked lists.
		 */
		if (!(chan->phy_chan->use_soft_lli &&
		      chan->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM))
			curr_lcla = d40_lcla_alloc_one(chan, desc);

		first_lcla = curr_lcla;
	}

	/*
	 * For linkback, we normally load the LCPA in the loop since we need to
	 * link it to the second LCLA and not the first. However, if we
	 * couldn't even get a first LCLA, then we have to run in LCPA and
	 * reload manually.
	 */
	if (!linkback || curr_lcla == -EINVAL) {
		unsigned int flags = 0;

		if (curr_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		d40_log_lli_lcpa_write(chan->lcpa,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       curr_lcla,
				       flags);
		lli_current++;
	}

	if (curr_lcla < 0)
		goto out;

	for (; lli_current < lli_len; lli_current++) {
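		/*
		 * Each physical channel owns 1024 bytes of LCLA space: 64
		 * link slots (half of D40_LCLA_LINK_PER_EVENT_GRP, as src
		 * and dst are allocated together), each slot holding one
		 * 8-byte src and one 8-byte dst d40_log_lli.
		 */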
		unsigned int lcla_offset = chan->phy_chan->num * 1024 +
					   8 * curr_lcla * 2;
		struct d40_log_lli *lcla = pool->base + lcla_offset;
		unsigned int flags = 0;
		int next_lcla;

		if (lli_current + 1 < lli_len)
			next_lcla = d40_lcla_alloc_one(chan, desc);
		else
			next_lcla = linkback ? first_lcla : -EINVAL;

		if (cyclic || next_lcla == -EINVAL)
			flags |= LLI_TERM_INT;

		if (linkback && curr_lcla == first_lcla) {
			/* First link goes in both LCPA and LCLA */
			d40_log_lli_lcpa_write(chan->lcpa,
					       &lli->dst[lli_current],
					       &lli->src[lli_current],
					       next_lcla, flags);
		}

		/*
		 * One unused LCLA in the cyclic case if the very first
		 * next_lcla fails...
		 */
		d40_log_lli_lcla_write(lcla,
				       &lli->dst[lli_current],
				       &lli->src[lli_current],
				       next_lcla, flags);

		/*
		 * Cache maintenance is not needed if lcla is
		 * mapped in esram.
		 */
		if (!use_esram_lcla) {
			dma_sync_single_range_for_device(chan->base->dev,
						pool->dma_addr, lcla_offset,
						2 * sizeof(struct d40_log_lli),
						DMA_TO_DEVICE);
		}
		curr_lcla = next_lcla;

		if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
			lli_current++;
			break;
		}
	}

out:
	desc->lli_current = lli_current;
}

static void d40_desc_load(struct d40_chan *d40c, struct d40_desc *d40d)
{
	if (chan_is_physical(d40c)) {
		d40_phy_lli_load(d40c, d40d);
		d40d->lli_current = d40d->lli_len;
	} else
		d40_log_lli_to_lcxa(d40c, d40d);
}

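/*
 * Descriptor life cycle, one list per state on the channel:
 * prepare_queue -> pending_queue (on submit) -> queue (on issue_pending)
 * -> active (once loaded into HW) -> done, after which the descriptor is
 * freed or parked on the client list until the client acks it.
 */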
static struct d40_desc *d40_first_active_get(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->active))
		return NULL;

	d = list_first_entry(&d40c->active,
			     struct d40_desc,
			     node);
	return d;
}

/* remove desc from current queue and add it to the pending_queue */
static void d40_desc_queue(struct d40_chan *d40c, struct d40_desc *desc)
{
	d40_desc_remove(desc);
	desc->is_in_client_list = false;
	list_add_tail(&desc->node, &d40c->pending_queue);
}

static struct d40_desc *d40_first_pending(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->pending_queue))
		return NULL;

	d = list_first_entry(&d40c->pending_queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_queued(struct d40_chan *d40c)
{
	struct d40_desc *d;

	if (list_empty(&d40c->queue))
		return NULL;

	d = list_first_entry(&d40c->queue,
			     struct d40_desc,
			     node);
	return d;
}

static struct d40_desc *d40_first_done(struct d40_chan *d40c)
{
	if (list_empty(&d40c->done))
		return NULL;

	return list_first_entry(&d40c->done, struct d40_desc, node);
}

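/*
 * The psize configuration encodes the burst length as 2 << psize elements;
 * the *_1 settings are special-cased to a single element.
 */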
static int d40_psize_2_burst_size(bool is_log, int psize)
{
	if (is_log) {
		if (psize == STEDMA40_PSIZE_LOG_1)
			return 1;
	} else {
		if (psize == STEDMA40_PSIZE_PHY_1)
			return 1;
	}

	return 2 << psize;
}

/*
 * The dma only supports transmitting packets up to
 * STEDMA40_MAX_SEG_SIZE << data_width. Calculate the total number of
 * dma elements required to send the entire sg list.
 */
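/*
 * Illustration: with a 32-bit source and an 8-bit destination
 * (data_width1 = 2, data_width2 = 0, both log2 of the byte width),
 * seg_max is the largest multiple of 4 bytes not exceeding
 * STEDMA40_MAX_SEG_SIZE one-byte elements, and a buffer of exactly
 * 3 * seg_max bytes maps to dmalen = 3 links.
 */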
static int d40_size_2_dmalen(int size, u32 data_width1, u32 data_width2)
{
	int dmalen;
	u32 max_w = max(data_width1, data_width2);
	u32 min_w = min(data_width1, data_width2);
	u32 seg_max = ALIGN(STEDMA40_MAX_SEG_SIZE << min_w, 1 << max_w);

	if (seg_max > STEDMA40_MAX_SEG_SIZE)
		seg_max -= (1 << max_w);

	if (!IS_ALIGNED(size, 1 << max_w))
		return -EINVAL;

	if (size <= seg_max)
		dmalen = 1;
	else {
		dmalen = size / seg_max;
		if (dmalen * seg_max < size)
			dmalen++;
	}
	return dmalen;
}

static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
			   u32 data_width1, u32 data_width2)
{
	struct scatterlist *sg;
	int i;
	int len = 0;
	int ret;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = d40_size_2_dmalen(sg_dma_len(sg),
					data_width1, data_width2);
		if (ret < 0)
			return ret;
		len += ret;
	}
	return len;
}

#ifdef CONFIG_PM
static void dma40_backup(void __iomem *baseaddr, u32 *backup,
			 u32 *regaddr, int num, bool save)
{
	int i;

	for (i = 0; i < num; i++) {
		void __iomem *addr = baseaddr + regaddr[i];

		if (save)
			backup[i] = readl_relaxed(addr);
		else
			writel_relaxed(backup[i], addr);
	}
}

static void d40_save_restore_registers(struct d40_base *base, bool save)
{
	int i;

	/* Save/Restore channel specific registers */
	for (i = 0; i < base->num_phy_chans; i++) {
		void __iomem *addr;
		int idx;

		if (base->phy_res[i].reserved)
			continue;

		addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
		idx = i * ARRAY_SIZE(d40_backup_regs_chan);

		dma40_backup(addr, &base->reg_val_backup_chan[idx],
			     d40_backup_regs_chan,
			     ARRAY_SIZE(d40_backup_regs_chan),
			     save);
	}

	/* Save/Restore global registers */
	dma40_backup(base->virtbase, base->reg_val_backup,
		     d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
		     save);

	/* Save/Restore registers only existing on dma40 v3 and later */
	if (base->gen_dmac.backup)
		dma40_backup(base->virtbase, base->reg_val_backup_v4,
			     base->gen_dmac.backup,
			     base->gen_dmac.backup_size,
			     save);
}
#else
static void d40_save_restore_registers(struct d40_base *base, bool save)
{
}
#endif

static int __d40_execute_command_phy(struct d40_chan *d40c,
				     enum d40_command command)
{
	u32 status;
	int i;
	void __iomem *active_reg;
	int ret = 0;
	unsigned long flags;
	u32 wmask;

	if (command == D40_DMA_STOP) {
		ret = __d40_execute_command_phy(d40c, D40_DMA_SUSPEND_REQ);
		if (ret)
			return ret;
	}

	spin_lock_irqsave(&d40c->base->execmd_lock, flags);

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	if (command == D40_DMA_SUSPEND_REQ) {
		status = (readl(active_reg) &
			  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
			D40_CHAN_POS(d40c->phy_chan->num);

		if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
			goto done;
	}

	wmask = 0xffffffff & ~(D40_CHAN_POS_MASK(d40c->phy_chan->num));
	writel(wmask | (command << D40_CHAN_POS(d40c->phy_chan->num)),
	       active_reg);

	if (command == D40_DMA_SUSPEND_REQ) {

		for (i = 0 ; i < D40_SUSPEND_MAX_IT; i++) {
			status = (readl(active_reg) &
				  D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				D40_CHAN_POS(d40c->phy_chan->num);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DMA_STOP ||
			    status == D40_DMA_SUSPENDED)
				break;
		}

		if (i == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to suspend the chl %d (log: %d) status %x\n",
				 d40c->phy_chan->num, d40c->log_num,
				 status);
			dump_stack();
			ret = -EBUSY;
		}

	}
done:
	spin_unlock_irqrestore(&d40c->base->execmd_lock, flags);
	return ret;
}

static void d40_term_all(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	struct d40_desc *_d;

	/* Release completed descriptors */
	while ((d40d = d40_first_done(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release active descriptors */
	while ((d40d = d40_first_active_get(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release queued descriptors waiting for transfer */
	while ((d40d = d40_first_queued(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release pending descriptors */
	while ((d40d = d40_first_pending(d40c))) {
		d40_desc_remove(d40d);
		d40_desc_free(d40c, d40d);
	}

	/* Release client owned descriptors */
	if (!list_empty(&d40c->client))
		list_for_each_entry_safe(d40d, _d, &d40c->client, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	/* Release descriptors in prepare queue */
	if (!list_empty(&d40c->prepare_queue))
		list_for_each_entry_safe(d40d, _d,
					 &d40c->prepare_queue, node) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		}

	d40c->pending_tx = 0;
}

static void __d40_config_set_event(struct d40_chan *d40c,
				   enum d40_events event_type, u32 event,
				   int reg)
{
	void __iomem *addr = chan_base(d40c) + reg;
	int tries;
	u32 status;

	switch (event_type) {

	case D40_DEACTIVATE_EVENTLINE:

		writel((D40_DEACTIVATE_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);
		break;

	case D40_SUSPEND_REQ_EVENTLINE:
		status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
			  D40_EVENTLINE_POS(event);

		if (status == D40_DEACTIVATE_EVENTLINE ||
		    status == D40_SUSPEND_REQ_EVENTLINE)
			break;

		writel((D40_SUSPEND_REQ_EVENTLINE << D40_EVENTLINE_POS(event))
		       | ~D40_EVENTLINE_MASK(event), addr);

		for (tries = 0 ; tries < D40_SUSPEND_MAX_IT; tries++) {

			status = (readl(addr) & D40_EVENTLINE_MASK(event)) >>
				  D40_EVENTLINE_POS(event);

			cpu_relax();
			/*
			 * Reduce the number of bus accesses while
			 * waiting for the DMA to suspend.
			 */
			udelay(3);

			if (status == D40_DEACTIVATE_EVENTLINE)
				break;
		}

		if (tries == D40_SUSPEND_MAX_IT) {
			chan_err(d40c,
				 "unable to stop the event_line chl %d (log: %d)"
				 " status %x\n", d40c->phy_chan->num,
				 d40c->log_num, status);
		}
		break;

	case D40_ACTIVATE_EVENTLINE:
		/*
		 * The hardware sometimes doesn't register the enable when src
		 * and dst event lines are active on the same logical channel.
		 * Retry to ensure it does. Usually only one retry is
		 * sufficient.
		 */
		tries = 100;
		while (--tries) {
			writel((D40_ACTIVATE_EVENTLINE <<
				D40_EVENTLINE_POS(event)) |
			       ~D40_EVENTLINE_MASK(event), addr);

			if (readl(addr) & D40_EVENTLINE_MASK(event))
				break;
		}

		if (tries != 99)
			dev_dbg(chan2dev(d40c),
				"[%s] workaround enable S%cLNK (%d tries)\n",
				__func__, reg == D40_CHAN_REG_SSLNK ? 'S' : 'D',
				100 - tries);

		WARN_ON(!tries);
		break;

	case D40_ROUND_EVENTLINE:
		BUG();
		break;

	}
}

static void d40_config_set_event(struct d40_chan *d40c,
				 enum d40_events event_type)
{
	u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);

	/* Enable event line connected to device (or memcpy) */
	if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
	    (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SSLNK);

	if (d40c->dma_cfg.dir != STEDMA40_PERIPH_TO_MEM)
		__d40_config_set_event(d40c, event_type, event,
				       D40_CHAN_REG_SDLNK);
}

static u32 d40_chan_has_events(struct d40_chan *d40c)
{
	void __iomem *chanbase = chan_base(d40c);
	u32 val;

	val = readl(chanbase + D40_CHAN_REG_SSLNK);
	val |= readl(chanbase + D40_CHAN_REG_SDLNK);

	return val;
}

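/*
 * Logical channels are started and stopped through their event lines; the
 * shared physical channel only gets a STOP command once no event line on
 * it is active any more.
 */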
static int
__d40_execute_command_log(struct d40_chan *d40c, enum d40_command command)
{
	unsigned long flags;
	int ret = 0;
	u32 active_status;
	void __iomem *active_reg;

	if (d40c->phy_chan->num % 2 == 0)
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
	else
		active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;

	spin_lock_irqsave(&d40c->phy_chan->lock, flags);

	switch (command) {
	case D40_DMA_STOP:
	case D40_DMA_SUSPEND_REQ:

		active_status = (readl(active_reg) &
				 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
				 D40_CHAN_POS(d40c->phy_chan->num);

		if (active_status == D40_DMA_RUN)
			d40_config_set_event(d40c, D40_SUSPEND_REQ_EVENTLINE);
		else
			d40_config_set_event(d40c, D40_DEACTIVATE_EVENTLINE);

		if (!d40_chan_has_events(d40c) && (command == D40_DMA_STOP))
			ret = __d40_execute_command_phy(d40c, command);

		break;

	case D40_DMA_RUN:

		d40_config_set_event(d40c, D40_ACTIVATE_EVENTLINE);
		ret = __d40_execute_command_phy(d40c, command);
		break;

	case D40_DMA_SUSPENDED:
		BUG();
		break;
	}

	spin_unlock_irqrestore(&d40c->phy_chan->lock, flags);
	return ret;
}

static int d40_channel_execute_command(struct d40_chan *d40c,
				       enum d40_command command)
{
	if (chan_is_logical(d40c))
		return __d40_execute_command_log(d40c, command);
	else
		return __d40_execute_command_phy(d40c, command);
}

static u32 d40_get_prmo(struct d40_chan *d40c)
{
	static const unsigned int phy_map[] = {
		[STEDMA40_PCHAN_BASIC_MODE]
			= D40_DREG_PRMO_PCHAN_BASIC,
		[STEDMA40_PCHAN_MODULO_MODE]
			= D40_DREG_PRMO_PCHAN_MODULO,
		[STEDMA40_PCHAN_DOUBLE_DST_MODE]
			= D40_DREG_PRMO_PCHAN_DOUBLE_DST,
	};
	static const unsigned int log_map[] = {
		[STEDMA40_LCHAN_SRC_PHY_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_PHY_DST_LOG,
		[STEDMA40_LCHAN_SRC_LOG_DST_PHY]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_PHY,
		[STEDMA40_LCHAN_SRC_LOG_DST_LOG]
			= D40_DREG_PRMO_LCHAN_SRC_LOG_DST_LOG,
	};

	if (chan_is_physical(d40c))
		return phy_map[d40c->dma_cfg.mode_opt];
	else
		return log_map[d40c->dma_cfg.mode_opt];
}

static void d40_config_write(struct d40_chan *d40c)
{
	u32 addr_base;
	u32 var;

	/* Odd addresses are even addresses + 4 */
	addr_base = (d40c->phy_chan->num % 2) * 4;
	/* Setup channel mode to logical or physical */
	var = ((u32)(chan_is_logical(d40c)) + 1) <<
		D40_CHAN_POS(d40c->phy_chan->num);
	writel(var, d40c->base->virtbase + D40_DREG_PRMSE + addr_base);

	/* Setup operational mode option register */
	var = d40_get_prmo(d40c) << D40_CHAN_POS(d40c->phy_chan->num);

	writel(var, d40c->base->virtbase + D40_DREG_PRMOE + addr_base);

	if (chan_is_logical(d40c)) {
		int lidx = (d40c->phy_chan->num << D40_SREG_ELEM_LOG_LIDX_POS)
			   & D40_SREG_ELEM_LOG_LIDX_MASK;
		void __iomem *chanbase = chan_base(d40c);

		/* Set default config for CFG reg */
		writel(d40c->src_def_cfg, chanbase + D40_CHAN_REG_SSCFG);
		writel(d40c->dst_def_cfg, chanbase + D40_CHAN_REG_SDCFG);

		/* Set LIDX for lcla */
		writel(lidx, chanbase + D40_CHAN_REG_SSELT);
		writel(lidx, chanbase + D40_CHAN_REG_SDELT);

		/* Clear LNK which will be used by d40_chan_has_events() */
		writel(0, chanbase + D40_CHAN_REG_SSLNK);
		writel(0, chanbase + D40_CHAN_REG_SDLNK);
	}
}

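/*
 * The residue in bytes is the remaining element count (ECNT) times the
 * element size; data_width holds log2 of the element size in bytes.
 */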
static u32 d40_residue(struct d40_chan *d40c)
{
	u32 num_elt;

	if (chan_is_logical(d40c))
		num_elt = (readl(&d40c->lcpa->lcsp2) & D40_MEM_LCSP2_ECNT_MASK)
			>> D40_MEM_LCSP2_ECNT_POS;
	else {
		u32 val = readl(chan_base(d40c) + D40_CHAN_REG_SDELT);
		num_elt = (val & D40_SREG_ELEM_PHY_ECNT_MASK)
			>> D40_SREG_ELEM_PHY_ECNT_POS;
	}

	return num_elt * (1 << d40c->dma_cfg.dst_info.data_width);
}

static bool d40_tx_is_linked(struct d40_chan *d40c)
{
	bool is_link;

	if (chan_is_logical(d40c))
		is_link = readl(&d40c->lcpa->lcsp3) & D40_MEM_LCSP3_DLOS_MASK;
	else
		is_link = readl(chan_base(d40c) + D40_CHAN_REG_SDLNK)
			  & D40_SREG_LNK_PHYS_LNK_MASK;

	return is_link;
}

static int d40_pause(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	pm_runtime_get_sync(d40c->base->dev);
	spin_lock_irqsave(&d40c->lock, flags);

	res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static int d40_resume(struct d40_chan *d40c)
{
	int res = 0;
	unsigned long flags;

	if (!d40c->busy)
		return 0;

	spin_lock_irqsave(&d40c->lock, flags);
	pm_runtime_get_sync(d40c->base->dev);

	/* If bytes left to transfer or linked tx resume job */
	if (d40_residue(d40c) || d40_tx_is_linked(d40c))
		res = d40_channel_execute_command(d40c, D40_DMA_RUN);

	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);
	spin_unlock_irqrestore(&d40c->lock, flags);
	return res;
}

static dma_cookie_t d40_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct d40_chan *d40c = container_of(tx->chan,
					     struct d40_chan,
					     chan);
	struct d40_desc *d40d = container_of(tx, struct d40_desc, txd);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&d40c->lock, flags);
	cookie = dma_cookie_assign(tx);
	d40_desc_queue(d40c, d40d);
	spin_unlock_irqrestore(&d40c->lock, flags);

	return cookie;
}

static int d40_start(struct d40_chan *d40c)
{
	return d40_channel_execute_command(d40c, D40_DMA_RUN);
}

static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
{
	struct d40_desc *d40d;
	int err;

	/* Start queued jobs, if any */
	d40d = d40_first_queued(d40c);

	if (d40d != NULL) {
		if (!d40c->busy) {
			d40c->busy = true;
			pm_runtime_get_sync(d40c->base->dev);
		}

		/* Remove from queue */
		d40_desc_remove(d40d);

		/* Add to active queue */
		d40_desc_submit(d40c, d40d);

		/* Initiate DMA job */
		d40_desc_load(d40c, d40d);

		/* Start dma job */
		err = d40_start(d40c);

		if (err)
			return NULL;
	}

	return d40d;
}

/* called from interrupt context */
static void dma_tc_handle(struct d40_chan *d40c)
{
	struct d40_desc *d40d;

	/* Get first active entry from list */
	d40d = d40_first_active_get(d40c);

	if (d40d == NULL)
		return;

	if (d40d->cyclic) {
		/*
		 * If this was a partially loaded list, we need to reload it,
		 * and only when the list is completed. We need to check for
		 * done because the interrupt will hit for every link, and
		 * not just the last one.
		 */
		if (d40d->lli_current < d40d->lli_len
		    && !d40_tx_is_linked(d40c)
		    && !d40_residue(d40c)) {
			d40_lcla_free_all(d40c, d40d);
			d40_desc_load(d40c, d40d);
			(void) d40_start(d40c);

			if (d40d->lli_current == d40d->lli_len)
				d40d->lli_current = 0;
		}
	} else {
		d40_lcla_free_all(d40c, d40d);

		if (d40d->lli_current < d40d->lli_len) {
			d40_desc_load(d40c, d40d);
			/* Start dma job */
			(void) d40_start(d40c);
			return;
		}

		if (d40_queue_start(d40c) == NULL)
			d40c->busy = false;
		pm_runtime_mark_last_busy(d40c->base->dev);
		pm_runtime_put_autosuspend(d40c->base->dev);

		d40_desc_remove(d40d);
		d40_desc_done(d40c, d40d);
	}

	d40c->pending_tx++;
	tasklet_schedule(&d40c->tasklet);
}

static void dma_tasklet(unsigned long data)
{
	struct d40_chan *d40c = (struct d40_chan *) data;
	struct d40_desc *d40d;
	unsigned long flags;
	dma_async_tx_callback callback;
	void *callback_param;

	spin_lock_irqsave(&d40c->lock, flags);

	/* Get first entry from the done list */
	d40d = d40_first_done(d40c);
	if (d40d == NULL) {
		/* Check if we have reached here for cyclic job */
		d40d = d40_first_active_get(d40c);
		if (d40d == NULL || !d40d->cyclic)
			goto err;
	}

	if (!d40d->cyclic)
		dma_cookie_complete(&d40d->txd);

	/*
	 * If terminating a channel pending_tx is set to zero.
	 * This prevents any finished active jobs from returning to the
	 * client.
	 */
	if (d40c->pending_tx == 0) {
		spin_unlock_irqrestore(&d40c->lock, flags);
		return;
	}

	/* Callback to client */
	callback = d40d->txd.callback;
	callback_param = d40d->txd.callback_param;

	if (!d40d->cyclic) {
		if (async_tx_test_ack(&d40d->txd)) {
			d40_desc_remove(d40d);
			d40_desc_free(d40c, d40d);
		} else if (!d40d->is_in_client_list) {
			d40_desc_remove(d40d);
			d40_lcla_free_all(d40c, d40d);
			list_add_tail(&d40d->node, &d40c->client);
			d40d->is_in_client_list = true;
		}
	}

	d40c->pending_tx--;

	if (d40c->pending_tx)
		tasklet_schedule(&d40c->tasklet);

	spin_unlock_irqrestore(&d40c->lock, flags);

	if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
		callback(callback_param);

	return;

err:
	/* Rescue manoeuvre if receiving double interrupts */
	if (d40c->pending_tx > 0)
		d40c->pending_tx--;
	spin_unlock_irqrestore(&d40c->lock, flags);
}

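/*
 * The per-block status registers are read into regs[] and then scanned as
 * one contiguous bitmap; row/idx map a set bit back to its lookup-table
 * entry and the channel bit within it.
 */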
static irqreturn_t d40_handle_interrupt(int irq, void *data)
{
	int i;
	u32 idx;
	u32 row;
	long chan = -1;
	struct d40_chan *d40c;
	unsigned long flags;
	struct d40_base *base = data;
	u32 regs[base->gen_dmac.il_size];
	struct d40_interrupt_lookup *il = base->gen_dmac.il;
	u32 il_size = base->gen_dmac.il_size;

	spin_lock_irqsave(&base->interrupt_lock, flags);

	/* Read interrupt status of both logical and physical channels */
	for (i = 0; i < il_size; i++)
		regs[i] = readl(base->virtbase + il[i].src);

	for (;;) {

		chan = find_next_bit((unsigned long *)regs,
				     BITS_PER_LONG * il_size, chan + 1);

		/* No more set bits found? */
		if (chan == BITS_PER_LONG * il_size)
			break;

		row = chan / BITS_PER_LONG;
		idx = chan & (BITS_PER_LONG - 1);

		if (il[row].offset == D40_PHY_CHAN)
			d40c = base->lookup_phy_chans[idx];
		else
			d40c = base->lookup_log_chans[il[row].offset + idx];

		if (!d40c) {
			/*
			 * No error because this can happen if something else
			 * in the system is using the channel.
			 */
			continue;
		}

		/* ACK interrupt */
		writel(1 << idx, base->virtbase + il[row].clr);

		spin_lock(&d40c->lock);

		if (!il[row].is_error)
			dma_tc_handle(d40c);
		else
			d40_err(base->dev, "IRQ chan: %ld offset %d idx %d\n",
				chan, il[row].offset, idx);

		spin_unlock(&d40c->lock);
	}

	spin_unlock_irqrestore(&base->interrupt_lock, flags);

	return IRQ_HANDLED;
}

static int d40_validate_conf(struct d40_chan *d40c,
			     struct stedma40_chan_cfg *conf)
{
	int res = 0;
	bool is_log = conf->mode == STEDMA40_MODE_LOGICAL;

	if (!conf->dir) {
		chan_err(d40c, "Invalid direction.\n");
		res = -EINVAL;
	}

	if ((is_log && conf->dev_type > d40c->base->num_log_chans) ||
	    (!is_log && conf->dev_type > d40c->base->num_phy_chans) ||
	    (conf->dev_type < 0)) {
		chan_err(d40c, "Invalid device type (%d)\n", conf->dev_type);
		res = -EINVAL;
	}

	if (conf->dir == STEDMA40_PERIPH_TO_PERIPH) {
		/*
		 * DMAC HW supports it. Will be added to this driver,
		 * in case any dma client requires it.
		 */
		chan_err(d40c, "periph to periph not supported\n");
		res = -EINVAL;
	}

	if (d40_psize_2_burst_size(is_log, conf->src_info.psize) *
	    (1 << conf->src_info.data_width) !=
	    d40_psize_2_burst_size(is_log, conf->dst_info.psize) *
	    (1 << conf->dst_info.data_width)) {
		/*
		 * The DMAC hardware only supports
		 * src (burst x width) == dst (burst x width)
		 */
6db5a8ba 1795 chan_err(d40c, "src (burst x width) != dst (burst x width)\n");
d49278e3
PF
1796 res = -EINVAL;
1797 }
1798
8d318a50
LW
1799 return res;
1800}
1801
5cd326fd
N
1802static bool d40_alloc_mask_set(struct d40_phy_res *phy,
1803 bool is_src, int log_event_line, bool is_log,
1804 bool *first_user)
8d318a50
LW
1805{
1806 unsigned long flags;
1807 spin_lock_irqsave(&phy->lock, flags);
5cd326fd
N
1808
1809 *first_user = ((phy->allocated_src | phy->allocated_dst)
1810 == D40_ALLOC_FREE);
1811
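	/*
	 * allocated_src/allocated_dst encode ownership: D40_ALLOC_PHY
	 * claims the whole half channel, while logical channels set one
	 * bit per event line so several can share the physical resource.
	 */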
4aed79b2 1812 if (!is_log) {
8d318a50
LW
1813 /* Physical interrupts are masked per physical full channel */
1814 if (phy->allocated_src == D40_ALLOC_FREE &&
1815 phy->allocated_dst == D40_ALLOC_FREE) {
1816 phy->allocated_dst = D40_ALLOC_PHY;
1817 phy->allocated_src = D40_ALLOC_PHY;
1818 goto found;
1819 } else
1820 goto not_found;
1821 }
1822
1823 /* Logical channel */
1824 if (is_src) {
1825 if (phy->allocated_src == D40_ALLOC_PHY)
1826 goto not_found;
1827
1828 if (phy->allocated_src == D40_ALLOC_FREE)
1829 phy->allocated_src = D40_ALLOC_LOG_FREE;
1830
1831 if (!(phy->allocated_src & (1 << log_event_line))) {
1832 phy->allocated_src |= 1 << log_event_line;
1833 goto found;
1834 } else
1835 goto not_found;
1836 } else {
1837 if (phy->allocated_dst == D40_ALLOC_PHY)
1838 goto not_found;
1839
1840 if (phy->allocated_dst == D40_ALLOC_FREE)
1841 phy->allocated_dst = D40_ALLOC_LOG_FREE;
1842
1843 if (!(phy->allocated_dst & (1 << log_event_line))) {
1844 phy->allocated_dst |= 1 << log_event_line;
1845 goto found;
1846 } else
1847 goto not_found;
1848 }
1849
1850not_found:
1851 spin_unlock_irqrestore(&phy->lock, flags);
1852 return false;
1853found:
1854 spin_unlock_irqrestore(&phy->lock, flags);
1855 return true;
1856}
1857
1858static bool d40_alloc_mask_free(struct d40_phy_res *phy, bool is_src,
1859 int log_event_line)
1860{
1861 unsigned long flags;
1862 bool is_free = false;
1863
1864 spin_lock_irqsave(&phy->lock, flags);
1865 if (!log_event_line) {
8d318a50
LW
1866 phy->allocated_dst = D40_ALLOC_FREE;
1867 phy->allocated_src = D40_ALLOC_FREE;
1868 is_free = true;
1869 goto out;
1870 }
1871
1872 /* Logical channel */
1873 if (is_src) {
1874 phy->allocated_src &= ~(1 << log_event_line);
1875 if (phy->allocated_src == D40_ALLOC_LOG_FREE)
1876 phy->allocated_src = D40_ALLOC_FREE;
1877 } else {
1878 phy->allocated_dst &= ~(1 << log_event_line);
1879 if (phy->allocated_dst == D40_ALLOC_LOG_FREE)
1880 phy->allocated_dst = D40_ALLOC_FREE;
1881 }
1882
1883 is_free = ((phy->allocated_src | phy->allocated_dst) ==
1884 D40_ALLOC_FREE);
1885
1886out:
1887 spin_unlock_irqrestore(&phy->lock, flags);
1888
1889 return is_free;
1890}
1891
5cd326fd 1892static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
8d318a50 1893{
26955c07 1894 int dev_type = d40c->dma_cfg.dev_type;
8d318a50
LW
1895 int event_group;
1896 int event_line;
1897 struct d40_phy_res *phys;
1898 int i;
1899 int j;
1900 int log_num;
f000df8c 1901 int num_phy_chans;
8d318a50 1902 bool is_src;
38bdbf02 1903 bool is_log = d40c->dma_cfg.mode == STEDMA40_MODE_LOGICAL;
8d318a50
LW
1904
1905 phys = d40c->base->phy_res;
f000df8c 1906 num_phy_chans = d40c->base->num_phy_chans;
8d318a50
LW
1907
1908 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
8d318a50
LW
1909 log_num = 2 * dev_type;
1910 is_src = true;
1911 } else if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
1912 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1913 /* dst event lines are used for logical memcpy */
8d318a50
LW
1914 log_num = 2 * dev_type + 1;
1915 is_src = false;
1916 } else
1917 return -EINVAL;
1918
1919 event_group = D40_TYPE_TO_GROUP(dev_type);
1920 event_line = D40_TYPE_TO_EVENT(dev_type);
1921
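	/*
	 * Within every bank of eight physical channels, only the pair
	 * selected by event_group * 2 can service this device type, which
	 * is why the searches below step through the banks in strides of
	 * eight.
	 */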
1922 if (!is_log) {
1923 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
1924 /* Find physical half channel */
f000df8c
GB
1925 if (d40c->dma_cfg.use_fixed_channel) {
1926 i = d40c->dma_cfg.phy_channel;
4aed79b2 1927 if (d40_alloc_mask_set(&phys[i], is_src,
5cd326fd
N
1928 0, is_log,
1929 first_phy_user))
8d318a50 1930 goto found_phy;
f000df8c
GB
1931 } else {
1932 for (i = 0; i < num_phy_chans; i++) {
1933 if (d40_alloc_mask_set(&phys[i], is_src,
1934 0, is_log,
1935 first_phy_user))
1936 goto found_phy;
1937 }
8d318a50
LW
1938 }
1939 } else
1940 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1941 int phy_num = j + event_group * 2;
1942 for (i = phy_num; i < phy_num + 2; i++) {
508849ad
LW
1943 if (d40_alloc_mask_set(&phys[i],
1944 is_src,
1945 0,
5cd326fd
N
1946 is_log,
1947 first_phy_user))
8d318a50
LW
1948 goto found_phy;
1949 }
1950 }
1951 return -EINVAL;
1952found_phy:
1953 d40c->phy_chan = &phys[i];
1954 d40c->log_num = D40_PHY_CHAN;
1955 goto out;
1956 }
1957 if (dev_type == -1)
1958 return -EINVAL;
1959
1960 /* Find logical channel */
1961 for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
1962 int phy_num = j + event_group * 2;
5cd326fd
N
1963
1964 if (d40c->dma_cfg.use_fixed_channel) {
1965 i = d40c->dma_cfg.phy_channel;
1966
1967 if ((i != phy_num) && (i != phy_num + 1)) {
1968 dev_err(chan2dev(d40c),
1969 "invalid fixed phy channel %d\n", i);
1970 return -EINVAL;
1971 }
1972
1973 if (d40_alloc_mask_set(&phys[i], is_src, event_line,
1974 is_log, first_phy_user))
1975 goto found_log;
1976
1977 dev_err(chan2dev(d40c),
1978 "could not allocate fixed phy channel %d\n", i);
1979 return -EINVAL;
1980 }
1981
8d318a50
LW
1982 /*
1983 * Spread logical channels across all available physical channels
1984 * rather than packing every logical channel onto the first
1985 * available physical channel.
1986 */
1987 if (is_src) {
1988 for (i = phy_num; i < phy_num + 2; i++) {
1989 if (d40_alloc_mask_set(&phys[i], is_src,
5cd326fd
N
1990 event_line, is_log,
1991 first_phy_user))
8d318a50
LW
1992 goto found_log;
1993 }
1994 } else {
1995 for (i = phy_num + 1; i >= phy_num; i--) {
1996 if (d40_alloc_mask_set(&phys[i], is_src,
5cd326fd
N
1997 event_line, is_log,
1998 first_phy_user))
8d318a50
LW
1999 goto found_log;
2000 }
2001 }
2002 }
2003 return -EINVAL;
2004
2005found_log:
2006 d40c->phy_chan = &phys[i];
2007 d40c->log_num = log_num;
2008out:
2009
2010 if (is_log)
2011 d40c->base->lookup_log_chans[d40c->log_num] = d40c;
2012 else
2013 d40c->base->lookup_phy_chans[d40c->phy_chan->num] = d40c;
2014
2015 return 0;
2016
2017}
2018
8d318a50
LW
2019static int d40_config_memcpy(struct d40_chan *d40c)
2020{
2021 dma_cap_mask_t cap = d40c->chan.device->cap_mask;
2022
2023 if (dma_has_cap(DMA_MEMCPY, cap) && !dma_has_cap(DMA_SLAVE, cap)) {
29027a1e 2024 d40c->dma_cfg = dma40_memcpy_conf_log;
26955c07 2025 d40c->dma_cfg.dev_type = dma40_memcpy_channels[d40c->chan.chan_id];
8d318a50 2026
9b233f9b
LJ
2027 d40_log_cfg(&d40c->dma_cfg,
2028 &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2029
8d318a50
LW
2030 } else if (dma_has_cap(DMA_MEMCPY, cap) &&
2031 dma_has_cap(DMA_SLAVE, cap)) {
29027a1e 2032 d40c->dma_cfg = dma40_memcpy_conf_phy;
57e65ad7
LJ
2033
2034 /* Generate interrupt at end of transfer or relink. */
2035 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_TIM_POS);
2036
2037 /* Generate interrupt on error. */
2038 d40c->src_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2039 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_EIM_POS);
2040
8d318a50 2041 } else {
6db5a8ba 2042 chan_err(d40c, "No memcpy\n");
8d318a50
LW
2043 return -EINVAL;
2044 }
2045
2046 return 0;
2047}
2048
8d318a50
LW
2049static int d40_free_dma(struct d40_chan *d40c)
2050{
2051
2052 int res = 0;
26955c07 2053 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
8d318a50
LW
2054 struct d40_phy_res *phy = d40c->phy_chan;
2055 bool is_src;
2056
2057 /* Terminate all queued and active transfers */
2058 d40_term_all(d40c);
2059
2060 if (phy == NULL) {
6db5a8ba 2061 chan_err(d40c, "phy == null\n");
8d318a50
LW
2062 return -EINVAL;
2063 }
2064
2065 if (phy->allocated_src == D40_ALLOC_FREE &&
2066 phy->allocated_dst == D40_ALLOC_FREE) {
6db5a8ba 2067 chan_err(d40c, "channel already free\n");
8d318a50
LW
2068 return -EINVAL;
2069 }
2070
8d318a50 2071 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
26955c07 2072 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM)
8d318a50 2073 is_src = false;
26955c07 2074 else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
8d318a50 2075 is_src = true;
26955c07 2076 else {
6db5a8ba 2077 chan_err(d40c, "Unknown direction\n");
8d318a50
LW
2078 return -EINVAL;
2079 }
2080
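	/*
	 * Hold a runtime PM reference while stopping the channel; the
	 * matching put happens at "out" below, plus one extra put if the
	 * channel still held its busy reference.
	 */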
7fb3e75e 2081 pm_runtime_get_sync(d40c->base->dev);
1bdae6f4 2082 res = d40_channel_execute_command(d40c, D40_DMA_STOP);
d181b3a8 2083 if (res) {
1bdae6f4 2084 chan_err(d40c, "stop failed\n");
7fb3e75e 2085 goto out;
d181b3a8
JA
2086 }
2087
1bdae6f4 2088 d40_alloc_mask_free(phy, is_src, chan_is_logical(d40c) ? event : 0);
8d318a50 2089
1bdae6f4 2090 if (chan_is_logical(d40c))
8d318a50 2091 d40c->base->lookup_log_chans[d40c->log_num] = NULL;
1bdae6f4
N
2092 else
2093 d40c->base->lookup_phy_chans[phy->num] = NULL;
7fb3e75e
N
2094
2095 if (d40c->busy) {
2096 pm_runtime_mark_last_busy(d40c->base->dev);
2097 pm_runtime_put_autosuspend(d40c->base->dev);
2098 }
2099
2100 d40c->busy = false;
8d318a50 2101 d40c->phy_chan = NULL;
ce2ca125 2102 d40c->configured = false;
7fb3e75e 2103out:
8d318a50 2104
7fb3e75e
N
2105 pm_runtime_mark_last_busy(d40c->base->dev);
2106 pm_runtime_put_autosuspend(d40c->base->dev);
2107 return res;
8d318a50
LW
2108}
2109
a5ebca47
JA
2110static bool d40_is_paused(struct d40_chan *d40c)
2111{
8ca84687 2112 void __iomem *chanbase = chan_base(d40c);
a5ebca47
JA
2113 bool is_paused = false;
2114 unsigned long flags;
2115 void __iomem *active_reg;
2116 u32 status;
26955c07 2117 u32 event = D40_TYPE_TO_EVENT(d40c->dma_cfg.dev_type);
a5ebca47
JA
2118
2119 spin_lock_irqsave(&d40c->lock, flags);
2120
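	/*
	 * Physical channels expose two status bits per channel in the
	 * ACTIVE (even channels) and ACTIVO (odd channels) registers;
	 * logical channels are judged by their event line state instead.
	 */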
724a8577 2121 if (chan_is_physical(d40c)) {
a5ebca47
JA
2122 if (d40c->phy_chan->num % 2 == 0)
2123 active_reg = d40c->base->virtbase + D40_DREG_ACTIVE;
2124 else
2125 active_reg = d40c->base->virtbase + D40_DREG_ACTIVO;
2126
2127 status = (readl(active_reg) &
2128 D40_CHAN_POS_MASK(d40c->phy_chan->num)) >>
2129 D40_CHAN_POS(d40c->phy_chan->num);
2130 if (status == D40_DMA_SUSPENDED || status == D40_DMA_STOP)
2131 is_paused = true;
2132
2133 goto _exit;
2134 }
2135
a5ebca47 2136 if (d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH ||
9dbfbd35 2137 d40c->dma_cfg.dir == STEDMA40_MEM_TO_MEM) {
8ca84687 2138 status = readl(chanbase + D40_CHAN_REG_SDLNK);
9dbfbd35 2139 } else if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) {
8ca84687 2140 status = readl(chanbase + D40_CHAN_REG_SSLNK);
9dbfbd35 2141 } else {
6db5a8ba 2142 chan_err(d40c, "Unknown direction\n");
a5ebca47
JA
2143 goto _exit;
2144 }
9dbfbd35 2145
a5ebca47
JA
2146 status = (status & D40_EVENTLINE_MASK(event)) >>
2147 D40_EVENTLINE_POS(event);
2148
2149 if (status != D40_DMA_RUN)
2150 is_paused = true;
a5ebca47
JA
2151_exit:
2152 spin_unlock_irqrestore(&d40c->lock, flags);
2153 return is_paused;
2154
2155}
2156
8d318a50
LW
2157static u32 stedma40_residue(struct dma_chan *chan)
2158{
2159 struct d40_chan *d40c =
2160 container_of(chan, struct d40_chan, chan);
2161 u32 bytes_left;
2162 unsigned long flags;
2163
2164 spin_lock_irqsave(&d40c->lock, flags);
2165 bytes_left = d40_residue(d40c);
2166 spin_unlock_irqrestore(&d40c->lock, flags);
2167
2168 return bytes_left;
2169}
2170
3e3a0763
RV
2171static int
2172d40_prep_sg_log(struct d40_chan *chan, struct d40_desc *desc,
2173 struct scatterlist *sg_src, struct scatterlist *sg_dst,
822c5676
RV
2174 unsigned int sg_len, dma_addr_t src_dev_addr,
2175 dma_addr_t dst_dev_addr)
3e3a0763
RV
2176{
2177 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2178 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2179 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
5ed04b85 2180 int ret;
3e3a0763 2181
5ed04b85
RV
2182 ret = d40_log_sg_to_lli(sg_src, sg_len,
2183 src_dev_addr,
2184 desc->lli_log.src,
2185 chan->log_def.lcsp1,
2186 src_info->data_width,
2187 dst_info->data_width);
2188
2189 ret = d40_log_sg_to_lli(sg_dst, sg_len,
2190 dst_dev_addr,
2191 desc->lli_log.dst,
2192 chan->log_def.lcsp3,
2193 dst_info->data_width,
2194 src_info->data_width);
2195
2196 return ret < 0 ? ret : 0;
3e3a0763
RV
2197}
2198
2199static int
2200d40_prep_sg_phy(struct d40_chan *chan, struct d40_desc *desc,
2201 struct scatterlist *sg_src, struct scatterlist *sg_dst,
822c5676
RV
2202 unsigned int sg_len, dma_addr_t src_dev_addr,
2203 dma_addr_t dst_dev_addr)
3e3a0763 2204{
3e3a0763
RV
2205 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2206 struct stedma40_half_channel_info *src_info = &cfg->src_info;
2207 struct stedma40_half_channel_info *dst_info = &cfg->dst_info;
0c842b55 2208 unsigned long flags = 0;
3e3a0763
RV
2209 int ret;
2210
0c842b55
RV
2211 if (desc->cyclic)
2212 flags |= LLI_CYCLIC | LLI_TERM_INT;
2213
3e3a0763
RV
2214 ret = d40_phy_sg_to_lli(sg_src, sg_len, src_dev_addr,
2215 desc->lli_phy.src,
2216 virt_to_phys(desc->lli_phy.src),
2217 chan->src_def_cfg,
0c842b55 2218 src_info, dst_info, flags);
3e3a0763
RV
2219
2220 ret = d40_phy_sg_to_lli(sg_dst, sg_len, dst_dev_addr,
2221 desc->lli_phy.dst,
2222 virt_to_phys(desc->lli_phy.dst),
2223 chan->dst_def_cfg,
0c842b55 2224 dst_info, src_info, flags);
3e3a0763
RV
2225
2226 dma_sync_single_for_device(chan->base->dev, desc->lli_pool.dma_addr,
2227 desc->lli_pool.size, DMA_TO_DEVICE);
2228
2229 return ret < 0 ? ret : 0;
2230}
2231
5f81158f
RV
2232static struct d40_desc *
2233d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
2234 unsigned int sg_len, unsigned long dma_flags)
2235{
2236 struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
2237 struct d40_desc *desc;
dbd88788 2238 int ret;
5f81158f
RV
2239
2240 desc = d40_desc_get(chan);
2241 if (!desc)
2242 return NULL;
2243
2244 desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
2245 cfg->dst_info.data_width);
2246 if (desc->lli_len < 0) {
2247 chan_err(chan, "Unaligned size\n");
dbd88788
RV
2248 goto err;
2249 }
5f81158f 2250
dbd88788
RV
2251 ret = d40_pool_lli_alloc(chan, desc, desc->lli_len);
2252 if (ret < 0) {
2253 chan_err(chan, "Could not allocate lli\n");
2254 goto err;
5f81158f
RV
2255 }
2256
2257 desc->lli_current = 0;
2258 desc->txd.flags = dma_flags;
2259 desc->txd.tx_submit = d40_tx_submit;
2260
2261 dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
2262
2263 return desc;
dbd88788
RV
2264
2265err:
2266 d40_desc_free(chan, desc);
2267 return NULL;
5f81158f
RV
2268}
2269
cade1d30
RV
2270static struct dma_async_tx_descriptor *
2271d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
2272 struct scatterlist *sg_dst, unsigned int sg_len,
db8196df 2273 enum dma_transfer_direction direction, unsigned long dma_flags)
cade1d30
RV
2274{
2275 struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
822c5676
RV
2276 dma_addr_t src_dev_addr = 0;
2277 dma_addr_t dst_dev_addr = 0;
cade1d30 2278 struct d40_desc *desc;
2a614340 2279 unsigned long flags;
cade1d30 2280 int ret;
8d318a50 2281
cade1d30
RV
2282 if (!chan->phy_chan) {
2283 chan_err(chan, "Cannot prepare unallocated channel\n");
2284 return NULL;
0d0f6b8b
JA
2285 }
2286
cade1d30 2287 spin_lock_irqsave(&chan->lock, flags);
8d318a50 2288
cade1d30
RV
2289 desc = d40_prep_desc(chan, sg_src, sg_len, dma_flags);
2290 if (desc == NULL)
8d318a50
LW
2291 goto err;
2292
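	/* A scatterlist whose tail chains back to its head (as built by
	 * dma40_prep_dma_cyclic()) marks this descriptor as cyclic. */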
0c842b55
RV
2293 if (sg_next(&sg_src[sg_len - 1]) == sg_src)
2294 desc->cyclic = true;
2295
ef9c89b3
LJ
2296 if (direction == DMA_DEV_TO_MEM)
2297 src_dev_addr = chan->runtime_addr;
2298 else if (direction == DMA_MEM_TO_DEV)
2299 dst_dev_addr = chan->runtime_addr;
cade1d30
RV
2300
2301 if (chan_is_logical(chan))
2302 ret = d40_prep_sg_log(chan, desc, sg_src, sg_dst,
822c5676 2303 sg_len, src_dev_addr, dst_dev_addr);
cade1d30
RV
2304 else
2305 ret = d40_prep_sg_phy(chan, desc, sg_src, sg_dst,
822c5676 2306 sg_len, src_dev_addr, dst_dev_addr);
cade1d30
RV
2307
2308 if (ret) {
2309 chan_err(chan, "Failed to prepare %s sg job: %d\n",
2310 chan_is_logical(chan) ? "log" : "phy", ret);
2311 goto err;
8d318a50
LW
2312 }
2313
82babbb3
PF
2314 /*
2315 * Add the descriptor to the prepare queue so that it can be
2316 * freed later in terminate_all.
2317 */
2318 list_add_tail(&desc->node, &chan->prepare_queue);
2319
cade1d30
RV
2320 spin_unlock_irqrestore(&chan->lock, flags);
2321
2322 return &desc->txd;
8d318a50 2323
8d318a50 2324err:
cade1d30
RV
2325 if (desc)
2326 d40_desc_free(chan, desc);
2327 spin_unlock_irqrestore(&chan->lock, flags);
8d318a50
LW
2328 return NULL;
2329}
8d318a50
LW
2330
2331bool stedma40_filter(struct dma_chan *chan, void *data)
2332{
2333 struct stedma40_chan_cfg *info = data;
2334 struct d40_chan *d40c =
2335 container_of(chan, struct d40_chan, chan);
2336 int err;
2337
2338 if (data) {
2339 err = d40_validate_conf(d40c, info);
2340 if (!err)
2341 d40c->dma_cfg = *info;
2342 } else
2343 err = d40_config_memcpy(d40c);
2344
ce2ca125
RV
2345 if (!err)
2346 d40c->configured = true;
2347
8d318a50
LW
2348 return err == 0;
2349}
2350EXPORT_SYMBOL(stedma40_filter);
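/*
 * Example (a sketch, not part of this driver): a client would typically
 * pass its channel configuration through this filter when requesting a
 * channel; "my_cfg" here is a hypothetical client-provided
 * struct stedma40_chan_cfg:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, stedma40_filter, &my_cfg);
 */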
2351
ac2c0a38
RV
2352static void __d40_set_prio_rt(struct d40_chan *d40c, int dev_type, bool src)
2353{
2354 bool realtime = d40c->dma_cfg.realtime;
2355 bool highprio = d40c->dma_cfg.high_priority;
3cb645dc 2356 u32 rtreg;
ac2c0a38
RV
2357 u32 event = D40_TYPE_TO_EVENT(dev_type);
2358 u32 group = D40_TYPE_TO_GROUP(dev_type);
2359 u32 bit = 1 << event;
ccc3d697 2360 u32 prioreg;
3cb645dc 2361 struct d40_gen_dmac *dmac = &d40c->base->gen_dmac;
ccc3d697 2362
3cb645dc 2363 rtreg = realtime ? dmac->realtime_en : dmac->realtime_clear;
ccc3d697
RV
2364 /*
2365 * Due to a hardware bug, in some cases a logical channel triggered by
2366 * a high priority destination event line can generate extra packet
2367 * transactions.
2368 *
2369 * The workaround is to not set the high priority level for the
2370 * destination event lines that trigger logical channels.
2371 */
2372 if (!src && chan_is_logical(d40c))
2373 highprio = false;
2374
3cb645dc 2375 prioreg = highprio ? dmac->high_prio_en : dmac->high_prio_clear;
ac2c0a38
RV
2376
2377 /* Destination event lines are stored in the upper halfword */
2378 if (!src)
2379 bit <<= 16;
2380
2381 writel(bit, d40c->base->virtbase + prioreg + group * 4);
2382 writel(bit, d40c->base->virtbase + rtreg + group * 4);
2383}
2384
2385static void d40_set_prio_realtime(struct d40_chan *d40c)
2386{
2387 if (d40c->base->rev < 3)
2388 return;
2389
2390 if ((d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM) ||
2391 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
26955c07 2392 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, true);
ac2c0a38
RV
2393
2394 if ((d40c->dma_cfg.dir == STEDMA40_MEM_TO_PERIPH) ||
2395 (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_PERIPH))
26955c07 2396 __d40_set_prio_rt(d40c, d40c->dma_cfg.dev_type, false);
ac2c0a38
RV
2397}
2398
fa332de5
LJ
2399#define D40_DT_FLAGS_MODE(flags) ((flags >> 0) & 0x1)
2400#define D40_DT_FLAGS_DIR(flags) ((flags >> 1) & 0x1)
2401#define D40_DT_FLAGS_BIG_ENDIAN(flags) ((flags >> 2) & 0x1)
2402#define D40_DT_FLAGS_FIXED_CHAN(flags) ((flags >> 3) & 0x1)
2403
2404static struct dma_chan *d40_xlate(struct of_phandle_args *dma_spec,
2405 struct of_dma *ofdma)
2406{
2407 struct stedma40_chan_cfg cfg;
2408 dma_cap_mask_t cap;
2409 u32 flags;
2410
2411 memset(&cfg, 0, sizeof(struct stedma40_chan_cfg));
2412
2413 dma_cap_zero(cap);
2414 dma_cap_set(DMA_SLAVE, cap);
2415
2416 cfg.dev_type = dma_spec->args[0];
2417 flags = dma_spec->args[2];
2418
2419 switch (D40_DT_FLAGS_MODE(flags)) {
2420 case 0: cfg.mode = STEDMA40_MODE_LOGICAL; break;
2421 case 1: cfg.mode = STEDMA40_MODE_PHYSICAL; break;
2422 }
2423
2424 switch (D40_DT_FLAGS_DIR(flags)) {
2425 case 0:
2426 cfg.dir = STEDMA40_MEM_TO_PERIPH;
2427 cfg.dst_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2428 break;
2429 case 1:
2430 cfg.dir = STEDMA40_PERIPH_TO_MEM;
2431 cfg.src_info.big_endian = D40_DT_FLAGS_BIG_ENDIAN(flags);
2432 break;
2433 }
2434
2435 if (D40_DT_FLAGS_FIXED_CHAN(flags)) {
2436 cfg.phy_channel = dma_spec->args[1];
2437 cfg.use_fixed_channel = true;
2438 }
2439
2440 return dma_request_channel(cap, stedma40_filter, &cfg);
2441}
2442
8d318a50
LW
2443/* DMA ENGINE functions */
2444static int d40_alloc_chan_resources(struct dma_chan *chan)
2445{
2446 int err;
2447 unsigned long flags;
2448 struct d40_chan *d40c =
2449 container_of(chan, struct d40_chan, chan);
ef1872ec 2450 bool is_free_phy;
8d318a50
LW
2451 spin_lock_irqsave(&d40c->lock, flags);
2452
d3ee98cd 2453 dma_cookie_init(chan);
8d318a50 2454
ce2ca125
RV
2455 /* If no dma configuration is set use default configuration (memcpy) */
2456 if (!d40c->configured) {
8d318a50 2457 err = d40_config_memcpy(d40c);
ff0b12ba 2458 if (err) {
6db5a8ba 2459 chan_err(d40c, "Failed to configure memcpy channel\n");
ff0b12ba
JA
2460 goto fail;
2461 }
8d318a50
LW
2462 }
2463
5cd326fd 2464 err = d40_allocate_channel(d40c, &is_free_phy);
8d318a50 2465 if (err) {
6db5a8ba 2466 chan_err(d40c, "Failed to allocate channel\n");
7fb3e75e 2467 d40c->configured = false;
ff0b12ba 2468 goto fail;
8d318a50
LW
2469 }
2470
7fb3e75e 2471 pm_runtime_get_sync(d40c->base->dev);
ef1872ec 2472
ac2c0a38
RV
2473 d40_set_prio_realtime(d40c);
2474
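	/*
	 * Each device type owns one LCPA slot of D40_LCPA_CHAN_SIZE bytes;
	 * source parameters sit at the start of the slot and destination
	 * parameters are offset by D40_LCPA_CHAN_DST_DELTA.
	 */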
724a8577 2475 if (chan_is_logical(d40c)) {
ef1872ec
LW
2476 if (d40c->dma_cfg.dir == STEDMA40_PERIPH_TO_MEM)
2477 d40c->lcpa = d40c->base->lcpa_base +
26955c07 2478 d40c->dma_cfg.dev_type * D40_LCPA_CHAN_SIZE;
ef1872ec
LW
2479 else
2480 d40c->lcpa = d40c->base->lcpa_base +
26955c07 2481 d40c->dma_cfg.dev_type *
f26e03ad 2482 D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
9778256b
LJ
2483
2484 /* Unmask the Global Interrupt Mask. */
2485 d40c->src_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
2486 d40c->dst_def_cfg |= BIT(D40_SREG_CFG_LOG_GIM_POS);
ef1872ec
LW
2487 }
2488
5cd326fd
N
2489 dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
2490 chan_is_logical(d40c) ? "logical" : "physical",
2491 d40c->phy_chan->num,
2492 d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
2493
2494
ef1872ec
LW
2495 /*
2496 * Only write channel configuration to the DMA if the physical
2497 * resource is free. In case of multiple logical channels
2498 * on the same physical resource, only the first write is necessary.
2499 */
b55912c6
JA
2500 if (is_free_phy)
2501 d40_config_write(d40c);
ff0b12ba 2502fail:
7fb3e75e
N
2503 pm_runtime_mark_last_busy(d40c->base->dev);
2504 pm_runtime_put_autosuspend(d40c->base->dev);
8d318a50 2505 spin_unlock_irqrestore(&d40c->lock, flags);
ff0b12ba 2506 return err;
8d318a50
LW
2507}
2508
2509static void d40_free_chan_resources(struct dma_chan *chan)
2510{
2511 struct d40_chan *d40c =
2512 container_of(chan, struct d40_chan, chan);
2513 int err;
2514 unsigned long flags;
2515
0d0f6b8b 2516 if (d40c->phy_chan == NULL) {
6db5a8ba 2517 chan_err(d40c, "Cannot free unallocated channel\n");
0d0f6b8b
JA
2518 return;
2519 }
2520
8d318a50
LW
2521 spin_lock_irqsave(&d40c->lock, flags);
2522
2523 err = d40_free_dma(d40c);
2524
2525 if (err)
6db5a8ba 2526 chan_err(d40c, "Failed to free channel\n");
8d318a50
LW
2527 spin_unlock_irqrestore(&d40c->lock, flags);
2528}
2529
2530static struct dma_async_tx_descriptor *d40_prep_memcpy(struct dma_chan *chan,
2531 dma_addr_t dst,
2532 dma_addr_t src,
2533 size_t size,
2a614340 2534 unsigned long dma_flags)
8d318a50 2535{
95944c6e
RV
2536 struct scatterlist dst_sg;
2537 struct scatterlist src_sg;
8d318a50 2538
95944c6e
RV
2539 sg_init_table(&dst_sg, 1);
2540 sg_init_table(&src_sg, 1);
8d318a50 2541
95944c6e
RV
2542 sg_dma_address(&dst_sg) = dst;
2543 sg_dma_address(&src_sg) = src;
8d318a50 2544
95944c6e
RV
2545 sg_dma_len(&dst_sg) = size;
2546 sg_dma_len(&src_sg) = size;
8d318a50 2547
cade1d30 2548 return d40_prep_sg(chan, &src_sg, &dst_sg, 1, DMA_NONE, dma_flags);
8d318a50
LW
2549}
2550
0d688662 2551static struct dma_async_tx_descriptor *
cade1d30
RV
2552d40_prep_memcpy_sg(struct dma_chan *chan,
2553 struct scatterlist *dst_sg, unsigned int dst_nents,
2554 struct scatterlist *src_sg, unsigned int src_nents,
2555 unsigned long dma_flags)
0d688662
IS
2556{
2557 if (dst_nents != src_nents)
2558 return NULL;
2559
cade1d30 2560 return d40_prep_sg(chan, src_sg, dst_sg, src_nents, DMA_NONE, dma_flags);
00ac0341
RV
2561}
2562
f26e03ad
FB
2563static struct dma_async_tx_descriptor *
2564d40_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
2565 unsigned int sg_len, enum dma_transfer_direction direction,
2566 unsigned long dma_flags, void *context)
8d318a50 2567{
a725dcc0 2568 if (!is_slave_direction(direction))
00ac0341
RV
2569 return NULL;
2570
cade1d30 2571 return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
8d318a50
LW
2572}
2573
0c842b55
RV
2574static struct dma_async_tx_descriptor *
2575dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2576 size_t buf_len, size_t period_len,
ec8b5e48
PU
2577 enum dma_transfer_direction direction, unsigned long flags,
2578 void *context)
0c842b55
RV
2579{
2580 unsigned int periods = buf_len / period_len;
2581 struct dma_async_tx_descriptor *txd;
2582 struct scatterlist *sg;
2583 int i;
2584
79ca7ec3 2585 sg = kcalloc(periods + 1, sizeof(struct scatterlist), GFP_NOWAIT);
0c842b55
RV
2586 for (i = 0; i < periods; i++) {
2587 sg_dma_address(&sg[i]) = dma_addr;
2588 sg_dma_len(&sg[i]) = period_len;
2589 dma_addr += period_len;
2590 }
2591
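	/*
	 * Terminate the list with an extra entry that chains back to the
	 * first one: bit 0 of page_link marks a chain pointer and bit 1
	 * (the end-of-list mark) is cleared, so sg_next() on the last
	 * period wraps around to the start.
	 */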
2592 sg[periods].offset = 0;
fdaf9c4b 2593 sg_dma_len(&sg[periods]) = 0;
0c842b55
RV
2594 sg[periods].page_link =
2595 ((unsigned long)sg | 0x01) & ~0x02;
2596
2597 txd = d40_prep_sg(chan, sg, sg, periods, direction,
2598 DMA_PREP_INTERRUPT);
2599
2600 kfree(sg);
2601
2602 return txd;
2603}
2604
8d318a50
LW
2605static enum dma_status d40_tx_status(struct dma_chan *chan,
2606 dma_cookie_t cookie,
2607 struct dma_tx_state *txstate)
2608{
2609 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
96a2af41 2610 enum dma_status ret;
8d318a50 2611
0d0f6b8b 2612 if (d40c->phy_chan == NULL) {
6db5a8ba 2613 chan_err(d40c, "Cannot read status of unallocated channel\n");
0d0f6b8b
JA
2614 return -EINVAL;
2615 }
2616
96a2af41
RKAL
2617 ret = dma_cookie_status(chan, cookie, txstate);
2618 if (ret != DMA_SUCCESS)
2619 dma_set_residue(txstate, stedma40_residue(chan));
8d318a50 2620
a5ebca47
JA
2621 if (d40_is_paused(d40c))
2622 ret = DMA_PAUSED;
8d318a50
LW
2623
2624 return ret;
2625}
2626
2627static void d40_issue_pending(struct dma_chan *chan)
2628{
2629 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2630 unsigned long flags;
2631
0d0f6b8b 2632 if (d40c->phy_chan == NULL) {
6db5a8ba 2633 chan_err(d40c, "Channel is not allocated!\n");
0d0f6b8b
JA
2634 return;
2635 }
2636
8d318a50
LW
2637 spin_lock_irqsave(&d40c->lock, flags);
2638
a8f3067b
PF
2639 list_splice_tail_init(&d40c->pending_queue, &d40c->queue);
2640
2641 /* Busy means that queued jobs are already being processed */
8d318a50
LW
2642 if (!d40c->busy)
2643 (void) d40_queue_start(d40c);
2644
2645 spin_unlock_irqrestore(&d40c->lock, flags);
2646}
2647
1bdae6f4
N
2648static void d40_terminate_all(struct dma_chan *chan)
2649{
2650 unsigned long flags;
2651 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2652 int ret;
2653
2654 spin_lock_irqsave(&d40c->lock, flags);
2655
2656 pm_runtime_get_sync(d40c->base->dev);
2657 ret = d40_channel_execute_command(d40c, D40_DMA_STOP);
2658 if (ret)
2659 chan_err(d40c, "Failed to stop channel\n");
2660
2661 d40_term_all(d40c);
2662 pm_runtime_mark_last_busy(d40c->base->dev);
2663 pm_runtime_put_autosuspend(d40c->base->dev);
2664 if (d40c->busy) {
2665 pm_runtime_mark_last_busy(d40c->base->dev);
2666 pm_runtime_put_autosuspend(d40c->base->dev);
2667 }
2668 d40c->busy = false;
2669
2670 spin_unlock_irqrestore(&d40c->lock, flags);
2671}
2672
98ca5289
RV
2673static int
2674dma40_config_to_halfchannel(struct d40_chan *d40c,
2675 struct stedma40_half_channel_info *info,
2676 enum dma_slave_buswidth width,
2677 u32 maxburst)
2678{
2679 enum stedma40_periph_data_width addr_width;
2680 int psize;
2681
2682 switch (width) {
2683 case DMA_SLAVE_BUSWIDTH_1_BYTE:
2684 addr_width = STEDMA40_BYTE_WIDTH;
2685 break;
2686 case DMA_SLAVE_BUSWIDTH_2_BYTES:
2687 addr_width = STEDMA40_HALFWORD_WIDTH;
2688 break;
2689 case DMA_SLAVE_BUSWIDTH_4_BYTES:
2690 addr_width = STEDMA40_WORD_WIDTH;
2691 break;
2692 case DMA_SLAVE_BUSWIDTH_8_BYTES:
2693 addr_width = STEDMA40_DOUBLEWORD_WIDTH;
2694 break;
2695 default:
2696 dev_err(d40c->base->dev,
2697 "illegal peripheral address width "
2698 "requested (%d)\n",
2699 width);
2700 return -EINVAL;
2701 }
2702
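	/*
	 * Pick the largest burst size the channel type supports without
	 * exceeding the requested maxburst; logical and physical channels
	 * use different PSIZE encodings.
	 */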
2703 if (chan_is_logical(d40c)) {
2704 if (maxburst >= 16)
2705 psize = STEDMA40_PSIZE_LOG_16;
2706 else if (maxburst >= 8)
2707 psize = STEDMA40_PSIZE_LOG_8;
2708 else if (maxburst >= 4)
2709 psize = STEDMA40_PSIZE_LOG_4;
2710 else
2711 psize = STEDMA40_PSIZE_LOG_1;
2712 } else {
2713 if (maxburst >= 16)
2714 psize = STEDMA40_PSIZE_PHY_16;
2715 else if (maxburst >= 8)
2716 psize = STEDMA40_PSIZE_PHY_8;
2717 else if (maxburst >= 4)
2718 psize = STEDMA40_PSIZE_PHY_4;
2719 else
2720 psize = STEDMA40_PSIZE_PHY_1;
2721 }
2722
2723 info->data_width = addr_width;
2724 info->psize = psize;
2725 info->flow_ctrl = STEDMA40_NO_FLOW_CTRL;
2726
2727 return 0;
2728}
2729
95e1400f 2730/* Runtime reconfiguration extension */
98ca5289
RV
2731static int d40_set_runtime_config(struct dma_chan *chan,
2732 struct dma_slave_config *config)
95e1400f
LW
2733{
2734 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2735 struct stedma40_chan_cfg *cfg = &d40c->dma_cfg;
98ca5289 2736 enum dma_slave_buswidth src_addr_width, dst_addr_width;
95e1400f 2737 dma_addr_t config_addr;
98ca5289
RV
2738 u32 src_maxburst, dst_maxburst;
2739 int ret;
2740
2741 src_addr_width = config->src_addr_width;
2742 src_maxburst = config->src_maxburst;
2743 dst_addr_width = config->dst_addr_width;
2744 dst_maxburst = config->dst_maxburst;
95e1400f 2745
db8196df 2746 if (config->direction == DMA_DEV_TO_MEM) {
95e1400f 2747 config_addr = config->src_addr;
ef9c89b3 2748
95e1400f
LW
2749 if (cfg->dir != STEDMA40_PERIPH_TO_MEM)
2750 dev_dbg(d40c->base->dev,
2751 "channel was not configured for peripheral "
2752 "to memory transfer (%d) overriding\n",
2753 cfg->dir);
2754 cfg->dir = STEDMA40_PERIPH_TO_MEM;
2755
98ca5289
RV
2756 /* Configure the memory side */
2757 if (dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2758 dst_addr_width = src_addr_width;
2759 if (dst_maxburst == 0)
2760 dst_maxburst = src_maxburst;
95e1400f 2761
db8196df 2762 } else if (config->direction == DMA_MEM_TO_DEV) {
95e1400f 2763 config_addr = config->dst_addr;
ef9c89b3 2764
95e1400f
LW
2765 if (cfg->dir != STEDMA40_MEM_TO_PERIPH)
2766 dev_dbg(d40c->base->dev,
2767 "channel was not configured for memory "
2768 "to peripheral transfer (%d) overriding\n",
2769 cfg->dir);
2770 cfg->dir = STEDMA40_MEM_TO_PERIPH;
2771
98ca5289
RV
2772 /* Configure the memory side */
2773 if (src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
2774 src_addr_width = dst_addr_width;
2775 if (src_maxburst == 0)
2776 src_maxburst = dst_maxburst;
95e1400f
LW
2777 } else {
2778 dev_err(d40c->base->dev,
2779 "unrecognized channel direction %d\n",
2780 config->direction);
98ca5289 2781 return -EINVAL;
95e1400f
LW
2782 }
2783
ef9c89b3
LJ
2784 if (config_addr <= 0) {
2785 dev_err(d40c->base->dev, "no address supplied\n");
2786 return -EINVAL;
2787 }
2788
98ca5289 2789 if (src_maxburst * src_addr_width != dst_maxburst * dst_addr_width) {
95e1400f 2790 dev_err(d40c->base->dev,
98ca5289
RV
2791 "src/dst width/maxburst mismatch: %d*%d != %d*%d\n",
2792 src_maxburst,
2793 src_addr_width,
2794 dst_maxburst,
2795 dst_addr_width);
2796 return -EINVAL;
95e1400f
LW
2797 }
2798
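	/*
	 * The hardware bursts at most 16 elements; clamp the larger side
	 * and rescale the other so both still move the same number of
	 * bytes per burst.
	 */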
92bb6cdb
PF
2799 if (src_maxburst > 16) {
2800 src_maxburst = 16;
2801 dst_maxburst = src_maxburst * src_addr_width / dst_addr_width;
2802 } else if (dst_maxburst > 16) {
2803 dst_maxburst = 16;
2804 src_maxburst = dst_maxburst * dst_addr_width / src_addr_width;
2805 }
2806
98ca5289
RV
2807 ret = dma40_config_to_halfchannel(d40c, &cfg->src_info,
2808 src_addr_width,
2809 src_maxburst);
2810 if (ret)
2811 return ret;
95e1400f 2812
98ca5289
RV
2813 ret = dma40_config_to_halfchannel(d40c, &cfg->dst_info,
2814 dst_addr_width,
2815 dst_maxburst);
2816 if (ret)
2817 return ret;
95e1400f 2818
a59670a4 2819 /* Fill in register values */
724a8577 2820 if (chan_is_logical(d40c))
a59670a4
PF
2821 d40_log_cfg(cfg, &d40c->log_def.lcsp1, &d40c->log_def.lcsp3);
2822 else
57e65ad7 2823 d40_phy_cfg(cfg, &d40c->src_def_cfg, &d40c->dst_def_cfg);
a59670a4 2824
95e1400f
LW
2825 /* These settings will take precedence later */
2826 d40c->runtime_addr = config_addr;
2827 d40c->runtime_direction = config->direction;
2828 dev_dbg(d40c->base->dev,
98ca5289
RV
2829 "configured channel %s for %s, data width %d/%d, "
2830 "maxburst %d/%d elements, LE, no flow control\n",
95e1400f 2831 dma_chan_name(chan),
db8196df 2832 (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
98ca5289
RV
2833 src_addr_width, dst_addr_width,
2834 src_maxburst, dst_maxburst);
2835
2836 return 0;
95e1400f
LW
2837}
2838
05827630
LW
2839static int d40_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
2840 unsigned long arg)
8d318a50 2841{
8d318a50
LW
2842 struct d40_chan *d40c = container_of(chan, struct d40_chan, chan);
2843
0d0f6b8b 2844 if (d40c->phy_chan == NULL) {
6db5a8ba 2845 chan_err(d40c, "Channel is not allocated!\n");
0d0f6b8b
JA
2846 return -EINVAL;
2847 }
2848
8d318a50
LW
2849 switch (cmd) {
2850 case DMA_TERMINATE_ALL:
1bdae6f4
N
2851 d40_terminate_all(chan);
2852 return 0;
8d318a50 2853 case DMA_PAUSE:
86eb5fb6 2854 return d40_pause(d40c);
8d318a50 2855 case DMA_RESUME:
86eb5fb6 2856 return d40_resume(d40c);
95e1400f 2857 case DMA_SLAVE_CONFIG:
98ca5289 2858 return d40_set_runtime_config(chan,
95e1400f 2859 (struct dma_slave_config *) arg);
95e1400f
LW
2860 default:
2861 break;
8d318a50
LW
2862 }
2863
2864 /* Other commands are unimplemented */
2865 return -ENXIO;
2866}
2867
2868/* Initialization functions */
2869
2870static void __init d40_chan_init(struct d40_base *base, struct dma_device *dma,
2871 struct d40_chan *chans, int offset,
2872 int num_chans)
2873{
2874 int i = 0;
2875 struct d40_chan *d40c;
2876
2877 INIT_LIST_HEAD(&dma->channels);
2878
2879 for (i = offset; i < offset + num_chans; i++) {
2880 d40c = &chans[i];
2881 d40c->base = base;
2882 d40c->chan.device = dma;
2883
8d318a50
LW
2884 spin_lock_init(&d40c->lock);
2885
2886 d40c->log_num = D40_PHY_CHAN;
2887
4226dd86 2888 INIT_LIST_HEAD(&d40c->done);
8d318a50
LW
2889 INIT_LIST_HEAD(&d40c->active);
2890 INIT_LIST_HEAD(&d40c->queue);
a8f3067b 2891 INIT_LIST_HEAD(&d40c->pending_queue);
8d318a50 2892 INIT_LIST_HEAD(&d40c->client);
82babbb3 2893 INIT_LIST_HEAD(&d40c->prepare_queue);
8d318a50 2894
8d318a50
LW
2895 tasklet_init(&d40c->tasklet, dma_tasklet,
2896 (unsigned long) d40c);
2897
2898 list_add_tail(&d40c->chan.device_node,
2899 &dma->channels);
2900 }
2901}
2902
7ad74a7c
RV
2903static void d40_ops_init(struct d40_base *base, struct dma_device *dev)
2904{
2905 if (dma_has_cap(DMA_SLAVE, dev->cap_mask))
2906 dev->device_prep_slave_sg = d40_prep_slave_sg;
2907
2908 if (dma_has_cap(DMA_MEMCPY, dev->cap_mask)) {
2909 dev->device_prep_dma_memcpy = d40_prep_memcpy;
2910
2911 /*
2912 * This controller can only access addresses at even
2913 * 32-bit boundaries, i.e. 2^2 alignment.
2914 */
2915 dev->copy_align = 2;
2916 }
2917
2918 if (dma_has_cap(DMA_SG, dev->cap_mask))
2919 dev->device_prep_dma_sg = d40_prep_memcpy_sg;
2920
0c842b55
RV
2921 if (dma_has_cap(DMA_CYCLIC, dev->cap_mask))
2922 dev->device_prep_dma_cyclic = dma40_prep_dma_cyclic;
2923
7ad74a7c
RV
2924 dev->device_alloc_chan_resources = d40_alloc_chan_resources;
2925 dev->device_free_chan_resources = d40_free_chan_resources;
2926 dev->device_issue_pending = d40_issue_pending;
2927 dev->device_tx_status = d40_tx_status;
2928 dev->device_control = d40_control;
2929 dev->dev = base->dev;
2930}
2931
8d318a50
LW
2932static int __init d40_dmaengine_init(struct d40_base *base,
2933 int num_reserved_chans)
2934{
2935 int err;
2936
2937 d40_chan_init(base, &base->dma_slave, base->log_chans,
2938 0, base->num_log_chans);
2939
2940 dma_cap_zero(base->dma_slave.cap_mask);
2941 dma_cap_set(DMA_SLAVE, base->dma_slave.cap_mask);
0c842b55 2942 dma_cap_set(DMA_CYCLIC, base->dma_slave.cap_mask);
8d318a50 2943
7ad74a7c 2944 d40_ops_init(base, &base->dma_slave);
8d318a50
LW
2945
2946 err = dma_async_device_register(&base->dma_slave);
2947
2948 if (err) {
6db5a8ba 2949 d40_err(base->dev, "Failed to register slave channels\n");
8d318a50
LW
2950 goto failure1;
2951 }
2952
2953 d40_chan_init(base, &base->dma_memcpy, base->log_chans,
664a57ec 2954 base->num_log_chans, ARRAY_SIZE(dma40_memcpy_channels));
8d318a50
LW
2955
2956 dma_cap_zero(base->dma_memcpy.cap_mask);
2957 dma_cap_set(DMA_MEMCPY, base->dma_memcpy.cap_mask);
7ad74a7c
RV
2958 dma_cap_set(DMA_SG, base->dma_memcpy.cap_mask);
2959
2960 d40_ops_init(base, &base->dma_memcpy);
8d318a50
LW
2961
2962 err = dma_async_device_register(&base->dma_memcpy);
2963
2964 if (err) {
6db5a8ba
RV
2965 d40_err(base->dev,
2966 "Failed to regsiter memcpy only channels\n");
8d318a50
LW
2967 goto failure2;
2968 }
2969
2970 d40_chan_init(base, &base->dma_both, base->phy_chans,
2971 0, num_reserved_chans);
2972
2973 dma_cap_zero(base->dma_both.cap_mask);
2974 dma_cap_set(DMA_SLAVE, base->dma_both.cap_mask);
2975 dma_cap_set(DMA_MEMCPY, base->dma_both.cap_mask);
7ad74a7c 2976 dma_cap_set(DMA_SG, base->dma_both.cap_mask);
0c842b55 2977 dma_cap_set(DMA_CYCLIC, base->dma_both.cap_mask);
7ad74a7c
RV
2978
2979 d40_ops_init(base, &base->dma_both);
8d318a50
LW
2980 err = dma_async_device_register(&base->dma_both);
2981
2982 if (err) {
6db5a8ba
RV
2983 d40_err(base->dev,
2984 "Failed to register logical and physical capable channels\n");
8d318a50
LW
2985 goto failure3;
2986 }
2987 return 0;
2988failure3:
2989 dma_async_device_unregister(&base->dma_memcpy);
2990failure2:
2991 dma_async_device_unregister(&base->dma_slave);
2992failure1:
2993 return err;
2994}
2995
7fb3e75e
N
2996/* Suspend resume functionality */
2997#ifdef CONFIG_PM
2998static int dma40_pm_suspend(struct device *dev)
2999{
28c7a19d
N
3000 struct platform_device *pdev = to_platform_device(dev);
3001 struct d40_base *base = platform_get_drvdata(pdev);
3002 int ret = 0;
7fb3e75e 3003
28c7a19d
N
3004 if (base->lcpa_regulator)
3005 ret = regulator_disable(base->lcpa_regulator);
3006 return ret;
7fb3e75e
N
3007}
3008
3009static int dma40_runtime_suspend(struct device *dev)
3010{
3011 struct platform_device *pdev = to_platform_device(dev);
3012 struct d40_base *base = platform_get_drvdata(pdev);
3013
3014 d40_save_restore_registers(base, true);
3015
3016 /* Don't disable/enable clocks for v1 due to HW bugs */
3017 if (base->rev != 1)
3018 writel_relaxed(base->gcc_pwr_off_mask,
3019 base->virtbase + D40_DREG_GCC);
3020
3021 return 0;
3022}
3023
3024static int dma40_runtime_resume(struct device *dev)
3025{
3026 struct platform_device *pdev = to_platform_device(dev);
3027 struct d40_base *base = platform_get_drvdata(pdev);
3028
3029 if (base->initialized)
3030 d40_save_restore_registers(base, false);
3031
3032 writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
3033 base->virtbase + D40_DREG_GCC);
3034 return 0;
3035}
3036
28c7a19d
N
3037static int dma40_resume(struct device *dev)
3038{
3039 struct platform_device *pdev = to_platform_device(dev);
3040 struct d40_base *base = platform_get_drvdata(pdev);
3041 int ret = 0;
3042
3043 if (base->lcpa_regulator)
3044 ret = regulator_enable(base->lcpa_regulator);
3045
3046 return ret;
3047}
7fb3e75e
N
3048
3049static const struct dev_pm_ops dma40_pm_ops = {
3050 .suspend = dma40_pm_suspend,
3051 .runtime_suspend = dma40_runtime_suspend,
3052 .runtime_resume = dma40_runtime_resume,
28c7a19d 3053 .resume = dma40_resume,
7fb3e75e
N
3054};
3055#define DMA40_PM_OPS (&dma40_pm_ops)
3056#else
3057#define DMA40_PM_OPS NULL
3058#endif
3059
8d318a50
LW
3060/* Initialization functions. */
3061
3062static int __init d40_phy_res_init(struct d40_base *base)
3063{
3064 int i;
3065 int num_phy_chans_avail = 0;
3066 u32 val[2];
3067 int odd_even_bit = -2;
7fb3e75e 3068 int gcc = D40_DREG_GCC_ENA;
8d318a50
LW
3069
3070 val[0] = readl(base->virtbase + D40_DREG_PRSME);
3071 val[1] = readl(base->virtbase + D40_DREG_PRSMO);
3072
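	/*
	 * PRSME/PRSMO hold two mode bits per physical channel (even
	 * channels in PRSME, odd ones in PRSMO); a field value of 1 means
	 * the channel is reserved for secure mode and is treated as
	 * occupied below.
	 */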
3073 for (i = 0; i < base->num_phy_chans; i++) {
3074 base->phy_res[i].num = i;
3075 odd_even_bit += 2 * ((i % 2) == 0);
3076 if (((val[i % 2] >> odd_even_bit) & 3) == 1) {
3077 /* Mark security only channels as occupied */
3078 base->phy_res[i].allocated_src = D40_ALLOC_PHY;
3079 base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
7fb3e75e
N
3080 base->phy_res[i].reserved = true;
3081 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3082 D40_DREG_GCC_SRC);
3083 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
3084 D40_DREG_GCC_DST);
3085
3086
8d318a50
LW
3087 } else {
3088 base->phy_res[i].allocated_src = D40_ALLOC_FREE;
3089 base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
7fb3e75e 3090 base->phy_res[i].reserved = false;
8d318a50
LW
3091 num_phy_chans_avail++;
3092 }
3093 spin_lock_init(&base->phy_res[i].lock);
3094 }
6b7acd84
JA
3095
3096 /* Mark disabled channels as occupied */
3097 for (i = 0; base->plat_data->disabled_channels[i] != -1; i++) {
f57b407c
RV
3098 int chan = base->plat_data->disabled_channels[i];
3099
3100 base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
3101 base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
7fb3e75e
N
3102 base->phy_res[chan].reserved = true;
3103 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3104 D40_DREG_GCC_SRC);
3105 gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
3106 D40_DREG_GCC_DST);
f57b407c 3107 num_phy_chans_avail--;
6b7acd84
JA
3108 }
3109
7407048b
FB
3110 /* Mark soft_lli channels */
3111 for (i = 0; i < base->plat_data->num_of_soft_lli_chans; i++) {
3112 int chan = base->plat_data->soft_lli_chans[i];
3113
3114 base->phy_res[chan].use_soft_lli = true;
3115 }
3116
8d318a50
LW
3117 dev_info(base->dev, "%d of %d physical DMA channels available\n",
3118 num_phy_chans_avail, base->num_phy_chans);
3119
3120 /* Verify settings extended vs standard */
3121 val[0] = readl(base->virtbase + D40_DREG_PRTYP);
3122
3123 for (i = 0; i < base->num_phy_chans; i++) {
3124
3125 if (base->phy_res[i].allocated_src == D40_ALLOC_FREE &&
3126 (val[0] & 0x3) != 1)
3127 dev_info(base->dev,
3128 "[%s] INFO: channel %d is misconfigured (%d)\n",
3129 __func__, i, val[0] & 0x3);
3130
3131 val[0] = val[0] >> 2;
3132 }
3133
7fb3e75e
N
3134 /*
3135 * To keep things simple, enable all clocks initially.
3136 * The clocks will get managed later, after channel allocation.
3137 * The clocks for the event lines on which reserved channels exist
3138 * are not managed here.
3139 */
3140 writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
3141 base->gcc_pwr_off_mask = gcc;
3142
8d318a50
LW
3143 return num_phy_chans_avail;
3144}
3145
3146static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
3147{
bb75d93b 3148 struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
8d318a50
LW
3149 struct clk *clk = NULL;
3150 void __iomem *virtbase = NULL;
3151 struct resource *res = NULL;
3152 struct d40_base *base = NULL;
3153 int num_log_chans = 0;
3154 int num_phy_chans;
b707c658 3155 int clk_ret = -EINVAL;
8d318a50 3156 int i;
f4b89764
LW
3157 u32 pid;
3158 u32 cid;
3159 u8 rev;
8d318a50
LW
3160
3161 clk = clk_get(&pdev->dev, NULL);
8d318a50 3162 if (IS_ERR(clk)) {
6db5a8ba 3163 d40_err(&pdev->dev, "No matching clock found\n");
8d318a50
LW
3164 goto failure;
3165 }
3166
b707c658
UH
3167 clk_ret = clk_prepare_enable(clk);
3168 if (clk_ret) {
3169 d40_err(&pdev->dev, "Failed to prepare/enable clock\n");
3170 goto failure;
3171 }
8d318a50
LW
3172
3173 /* Get IO for DMAC base address */
3174 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "base");
3175 if (!res)
3176 goto failure;
3177
3178 if (request_mem_region(res->start, resource_size(res),
3179 D40_NAME " I/O base") == NULL)
3180 goto failure;
3181
3182 virtbase = ioremap(res->start, resource_size(res));
3183 if (!virtbase)
3184 goto failure;
3185
f4b89764
LW
3186 /* This is just a regular AMBA PrimeCell ID actually */
3187 for (pid = 0, i = 0; i < 4; i++)
3188 pid |= (readl(virtbase + resource_size(res) - 0x20 + 4 * i)
3189 & 255) << (i * 8);
3190 for (cid = 0, i = 0; i < 4; i++)
3191 cid |= (readl(virtbase + resource_size(res) - 0x10 + 4 * i)
3192 & 255) << (i * 8);
8d318a50 3193
f4b89764
LW
3194 if (cid != AMBA_CID) {
3195 d40_err(&pdev->dev, "Unknown hardware! No PrimeCell ID\n");
3196 goto failure;
3197 }
3198 if (AMBA_MANF_BITS(pid) != AMBA_VENDOR_ST) {
6db5a8ba 3199 d40_err(&pdev->dev, "Unknown designer! Got %x wanted %x\n",
f4b89764
LW
3200 AMBA_MANF_BITS(pid),
3201 AMBA_VENDOR_ST);
8d318a50
LW
3202 goto failure;
3203 }
f4b89764
LW
3204 /*
3205 * HW revision:
3206 * DB8500ed has revision 0
3207 * ? has revision 1
3208 * DB8500v1 has revision 2
3209 * DB8500v2 has revision 3
47db92f4
GB
3210 * AP9540v1 has revision 4
3211 * DB8540v1 has revision 4
f4b89764
LW
3212 */
3213 rev = AMBA_REV_BITS(pid);
8b2fe9b6
LJ
3214 if (rev < 2) {
3215 d40_err(&pdev->dev, "hardware revision: %d is not supported\n", rev);
3216 goto failure;
3217 }
3ae0267f 3218
8d318a50 3219 /* The number of physical channels on this HW */
47db92f4
GB
3220 if (plat_data->num_of_phy_chans)
3221 num_phy_chans = plat_data->num_of_phy_chans;
3222 else
3223 num_phy_chans = 4 * (readl(virtbase + D40_DREG_ICFG) & 0x7) + 4;
8d318a50 3224
db72da92
LJ
3225 num_log_chans = num_phy_chans * D40_MAX_LOG_CHAN_PER_PHY;
3226
b2abb249
LJ
3227 dev_info(&pdev->dev,
3228 "hardware rev: %d @ 0x%x with %d physical and %d logical channels\n",
3229 rev, res->start, num_phy_chans, num_log_chans);
8d318a50 3230
8d318a50 3231 base = kzalloc(ALIGN(sizeof(struct d40_base), 4) +
664a57ec 3232 (num_phy_chans + num_log_chans + ARRAY_SIZE(dma40_memcpy_channels)) *
8d318a50
LW
3233 sizeof(struct d40_chan), GFP_KERNEL);
3234
3235 if (base == NULL) {
6db5a8ba 3236 d40_err(&pdev->dev, "Out of memory\n");
8d318a50
LW
3237 goto failure;
3238 }
3239
3ae0267f 3240 base->rev = rev;
8d318a50
LW
3241 base->clk = clk;
3242 base->num_phy_chans = num_phy_chans;
3243 base->num_log_chans = num_log_chans;
3244 base->phy_start = res->start;
3245 base->phy_size = resource_size(res);
3246 base->virtbase = virtbase;
3247 base->plat_data = plat_data;
3248 base->dev = &pdev->dev;
3249 base->phy_chans = ((void *)base) + ALIGN(sizeof(struct d40_base), 4);
3250 base->log_chans = &base->phy_chans[num_phy_chans];
3251
3cb645dc
TL
3252 if (base->plat_data->num_of_phy_chans == 14) {
3253 base->gen_dmac.backup = d40_backup_regs_v4b;
3254 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4B;
3255 base->gen_dmac.interrupt_en = D40_DREG_CPCMIS;
3256 base->gen_dmac.interrupt_clear = D40_DREG_CPCICR;
3257 base->gen_dmac.realtime_en = D40_DREG_CRSEG1;
3258 base->gen_dmac.realtime_clear = D40_DREG_CRCEG1;
3259 base->gen_dmac.high_prio_en = D40_DREG_CPSEG1;
3260 base->gen_dmac.high_prio_clear = D40_DREG_CPCEG1;
3261 base->gen_dmac.il = il_v4b;
3262 base->gen_dmac.il_size = ARRAY_SIZE(il_v4b);
3263 base->gen_dmac.init_reg = dma_init_reg_v4b;
3264 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4b);
3265 } else {
3266 if (base->rev >= 3) {
3267 base->gen_dmac.backup = d40_backup_regs_v4a;
3268 base->gen_dmac.backup_size = BACKUP_REGS_SZ_V4A;
3269 }
3270 base->gen_dmac.interrupt_en = D40_DREG_PCMIS;
3271 base->gen_dmac.interrupt_clear = D40_DREG_PCICR;
3272 base->gen_dmac.realtime_en = D40_DREG_RSEG1;
3273 base->gen_dmac.realtime_clear = D40_DREG_RCEG1;
3274 base->gen_dmac.high_prio_en = D40_DREG_PSEG1;
3275 base->gen_dmac.high_prio_clear = D40_DREG_PCEG1;
3276 base->gen_dmac.il = il_v4a;
3277 base->gen_dmac.il_size = ARRAY_SIZE(il_v4a);
3278 base->gen_dmac.init_reg = dma_init_reg_v4a;
3279 base->gen_dmac.init_reg_size = ARRAY_SIZE(dma_init_reg_v4a);
3280 }
3281
8d318a50
LW
3282 base->phy_res = kzalloc(num_phy_chans * sizeof(struct d40_phy_res),
3283 GFP_KERNEL);
3284 if (!base->phy_res)
3285 goto failure;
3286
3287 base->lookup_phy_chans = kzalloc(num_phy_chans *
3288 sizeof(struct d40_chan *),
3289 GFP_KERNEL);
3290 if (!base->lookup_phy_chans)
3291 goto failure;
3292
8a59fed3
LJ
3293 base->lookup_log_chans = kzalloc(num_log_chans *
3294 sizeof(struct d40_chan *),
3295 GFP_KERNEL);
3296 if (!base->lookup_log_chans)
3297 goto failure;
698e4732 3298
7fb3e75e
N
3299 base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
3300 sizeof(d40_backup_regs_chan),
8d318a50 3301 GFP_KERNEL);
7fb3e75e
N
3302 if (!base->reg_val_backup_chan)
3303 goto failure;
3304
3305 base->lcla_pool.alloc_map =
3306 kzalloc(num_phy_chans * sizeof(struct d40_desc *)
3307 * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
8d318a50
LW
3308 if (!base->lcla_pool.alloc_map)
3309 goto failure;
3310
c675b1b4
JA
3311 base->desc_slab = kmem_cache_create(D40_NAME, sizeof(struct d40_desc),
3312 0, SLAB_HWCACHE_ALIGN,
3313 NULL);
3314 if (base->desc_slab == NULL)
3315 goto failure;
3316
8d318a50
LW
3317 return base;
3318
3319failure:
b707c658
UH
3320 if (!clk_ret)
3321 clk_disable_unprepare(clk);
3322 if (!IS_ERR(clk))
8d318a50 3323 clk_put(clk);
8d318a50
LW
3324 if (virtbase)
3325 iounmap(virtbase);
3326 if (res)
3327 release_mem_region(res->start,
3328 resource_size(res));
3331
3332 if (base) {
3333 kfree(base->lcla_pool.alloc_map);
1bdae6f4 3334 kfree(base->reg_val_backup_chan);
8d318a50
LW
3335 kfree(base->lookup_log_chans);
3336 kfree(base->lookup_phy_chans);
3337 kfree(base->phy_res);
3338 kfree(base);
3339 }
3340
3341 return NULL;
3342}
3343
3344static void __init d40_hw_init(struct d40_base *base)
3345{
3346
8d318a50
LW
3347 int i;
3348 u32 prmseo[2] = {0, 0};
3349 u32 activeo[2] = {0xFFFFFFFF, 0xFFFFFFFF};
3350 u32 pcmis = 0;
3351 u32 pcicr = 0;
3cb645dc
TL
3352 struct d40_reg_val *dma_init_reg = base->gen_dmac.init_reg;
3353 u32 reg_size = base->gen_dmac.init_reg_size;
8d318a50 3354
3cb645dc 3355 for (i = 0; i < reg_size; i++)
8d318a50
LW
3356 writel(dma_init_reg[i].val,
3357 base->virtbase + dma_init_reg[i].reg);
3358
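	/*
	 * Build the 2-bit-per-channel mode and activation words by
	 * shifting as the loop walks the channels from the highest number
	 * down; security-only channels are flagged in ACTIVE/ACTIVO and
	 * skipped.
	 */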
3359 /* Configure all our dma channels to default settings */
3360 for (i = 0; i < base->num_phy_chans; i++) {
3361
3362 activeo[i % 2] = activeo[i % 2] << 2;
3363
3364 if (base->phy_res[base->num_phy_chans - i - 1].allocated_src
3365 == D40_ALLOC_PHY) {
3366 activeo[i % 2] |= 3;
3367 continue;
3368 }
3369
3370 /* Enable interrupt # */
3371 pcmis = (pcmis << 1) | 1;
3372
3373 /* Clear interrupt # */
3374 pcicr = (pcicr << 1) | 1;
3375
3376 /* Set channel to physical mode */
3377 prmseo[i % 2] = prmseo[i % 2] << 2;
3378 prmseo[i % 2] |= 1;
3379
3380 }
3381
3382 writel(prmseo[1], base->virtbase + D40_DREG_PRMSE);
3383 writel(prmseo[0], base->virtbase + D40_DREG_PRMSO);
3384 writel(activeo[1], base->virtbase + D40_DREG_ACTIVE);
3385 writel(activeo[0], base->virtbase + D40_DREG_ACTIVO);
3386
3387 /* Write which interrupt to enable */
3cb645dc 3388 writel(pcmis, base->virtbase + base->gen_dmac.interrupt_en);
8d318a50
LW
3389
3390 /* Write which interrupt to clear */
3cb645dc 3391 writel(pcicr, base->virtbase + base->gen_dmac.interrupt_clear);
8d318a50 3392
3cb645dc
TL
3393 /* These are __initdata and cannot be accessed after init */
3394 base->gen_dmac.init_reg = NULL;
3395 base->gen_dmac.init_reg_size = 0;
8d318a50
LW
3396}
3397
508849ad
LW
3398static int __init d40_lcla_allocate(struct d40_base *base)
3399{
026cbc42 3400 struct d40_lcla_pool *pool = &base->lcla_pool;
508849ad
LW
3401 unsigned long *page_list;
3402 int i, j;
3403 int ret = 0;
3404
3405 /*
3406 * This is somewhat ugly. We need 8192 bytes that are 18-bit aligned.
3407 * To fulfil this hardware requirement without wasting 256 KiB,
3408 * we allocate pages until we get an aligned one.
3409 */
3410 page_list = kmalloc(sizeof(unsigned long) * MAX_LCLA_ALLOC_ATTEMPTS,
3411 GFP_KERNEL);
3412
3413 if (!page_list) {
3414 ret = -ENOMEM;
3415 goto failure;
3416 }
3417
3418 /* Calculating how many pages that are required */
3419 base->lcla_pool.pages = SZ_1K * base->num_phy_chans / PAGE_SIZE;
3420
3421 for (i = 0; i < MAX_LCLA_ALLOC_ATTEMPTS; i++) {
3422 page_list[i] = __get_free_pages(GFP_KERNEL,
3423 base->lcla_pool.pages);
3424 if (!page_list[i]) {
3425
6db5a8ba
RV
3426 d40_err(base->dev, "Failed to allocate %d pages.\n",
3427 base->lcla_pool.pages);
508849ad
LW
3428
3429 for (j = 0; j < i; j++)
3430 free_pages(page_list[j], base->lcla_pool.pages);
3431 goto failure;
3432 }
3433
3434 if ((virt_to_phys((void *)page_list[i]) &
3435 (LCLA_ALIGNMENT - 1)) == 0)
3436 break;
3437 }
3438
3439 for (j = 0; j < i; j++)
3440 free_pages(page_list[j], base->lcla_pool.pages);
3441
3442 if (i < MAX_LCLA_ALLOC_ATTEMPTS) {
3443 base->lcla_pool.base = (void *)page_list[i];
3444 } else {
767a9675
JA
3445 /*
3446 * After many attempts without success at finding the correct
3447 * alignment, fall back to allocating a big buffer.
3448 */
508849ad
LW
3449 dev_warn(base->dev,
3450 "[%s] Failed to get %d pages @ 18 bit align.\n",
3451 __func__, base->lcla_pool.pages);
3452 base->lcla_pool.base_unaligned = kmalloc(SZ_1K *
3453 base->num_phy_chans +
3454 LCLA_ALIGNMENT,
3455 GFP_KERNEL);
3456 if (!base->lcla_pool.base_unaligned) {
3457 ret = -ENOMEM;
3458 goto failure;
3459 }
3460
3461 base->lcla_pool.base = PTR_ALIGN(base->lcla_pool.base_unaligned,
3462 LCLA_ALIGNMENT);
3463 }
3464
026cbc42
RV
3465 pool->dma_addr = dma_map_single(base->dev, pool->base,
3466 SZ_1K * base->num_phy_chans,
3467 DMA_TO_DEVICE);
3468 if (dma_mapping_error(base->dev, pool->dma_addr)) {
3469 pool->dma_addr = 0;
3470 ret = -ENOMEM;
3471 goto failure;
3472 }
3473
508849ad
LW
3474 writel(virt_to_phys(base->lcla_pool.base),
3475 base->virtbase + D40_DREG_LCLA);
3476failure:
3477 kfree(page_list);
3478 return ret;
3479}
3480
1814a170
LJ
3481static int __init d40_of_probe(struct platform_device *pdev,
3482 struct device_node *np)
3483{
3484 struct stedma40_platform_data *pdata;
3485
3486 /*
3487 * FIXME: Fill in this routine as more support is added.
3488 * The first platform enabled (u8500) doesn't need any extra
3489 * properties to run, so this is fairly sparse currently.
3490 */
3491
3492 pdata = devm_kzalloc(&pdev->dev,
3493 sizeof(struct stedma40_platform_data),
3494 GFP_KERNEL);
3495 if (!pdata)
3496 return -ENOMEM;
3497
3498 pdev->dev.platform_data = pdata;
3499
3500 return 0;
3501}
3502
8d318a50
LW
3503static int __init d40_probe(struct platform_device *pdev)
3504{
1814a170
LJ
3505 struct stedma40_platform_data *plat_data = pdev->dev.platform_data;
3506 struct device_node *np = pdev->dev.of_node;
8d318a50
LW
3507 int err;
3508 int ret = -ENOENT;
1814a170 3509 struct d40_base *base = NULL;
8d318a50
LW
3510 struct resource *res = NULL;
3511 int num_reserved_chans;
3512 u32 val;
3513
1814a170
LJ
3514 if (!plat_data) {
3515 if (np) {
3516 if (d40_of_probe(pdev, np)) {
3517 ret = -ENOMEM;
3518 goto failure;
3519 }
3520 } else {
3521 d40_err(&pdev->dev, "No pdata or Device Tree provided\n");
3522 goto failure;
3523 }
3524 }
8d318a50 3525
1814a170 3526 base = d40_hw_detect_init(pdev);
8d318a50
LW
3527 if (!base)
3528 goto failure;
3529
3530 num_reserved_chans = d40_phy_res_init(base);
3531
3532 platform_set_drvdata(pdev, base);
3533
3534 spin_lock_init(&base->interrupt_lock);
3535 spin_lock_init(&base->execmd_lock);
3536
3537 /* Get IO for logical channel parameter address */
3538 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "lcpa");
3539 if (!res) {
3540 ret = -ENOENT;
6db5a8ba 3541 d40_err(&pdev->dev, "No \"lcpa\" memory resource\n");
8d318a50
LW
3542 goto failure;
3543 }
3544 base->lcpa_size = resource_size(res);
3545 base->phy_lcpa = res->start;
3546
3547 if (request_mem_region(res->start, resource_size(res),
3548 D40_NAME " I/O lcpa") == NULL) {
3549 ret = -EBUSY;
6db5a8ba
RV
3550 d40_err(&pdev->dev,
3551 "Failed to request LCPA region 0x%x-0x%x\n",
3552 res->start, res->end);
8d318a50
LW
3553 goto failure;
3554 }
3555
3556 /* We make use of ESRAM memory for this. */
3557 val = readl(base->virtbase + D40_DREG_LCPA);
3558 if (res->start != val && val != 0) {
3559 dev_warn(&pdev->dev,
3560 "[%s] Mismatch LCPA dma 0x%x, def 0x%x\n",
3561 __func__, val, res->start);
3562 } else
3563 writel(res->start, base->virtbase + D40_DREG_LCPA);
3564
3565 base->lcpa_base = ioremap(res->start, resource_size(res));
3566 if (!base->lcpa_base) {
3567 ret = -ENOMEM;
6db5a8ba 3568 d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
8d318a50
LW
3569 goto failure;
3570 }
28c7a19d
N
	/* If the LCLA has to be located in ESRAM we don't need to allocate it. */
	if (base->plat_data->use_esram_lcla) {
		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
						   "lcla_esram");
		if (!res) {
			ret = -ENOENT;
			d40_err(&pdev->dev,
				"No \"lcla_esram\" memory resource\n");
			goto failure;
		}
		base->lcla_pool.base = ioremap(res->start,
					       resource_size(res));
		if (!base->lcla_pool.base) {
			ret = -ENOMEM;
			d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
			goto failure;
		}
		writel(res->start, base->virtbase + D40_DREG_LCLA);

	} else {
		ret = d40_lcla_allocate(base);
		if (ret) {
			d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
			goto failure;
		}
	}

	spin_lock_init(&base->lcla_pool.lock);

	base->irq = platform_get_irq(pdev, 0);
	if (base->irq < 0) {
		d40_err(&pdev->dev, "No IRQ defined\n");
		ret = base->irq;
		goto failure;
	}

	ret = request_irq(base->irq, d40_handle_interrupt, 0, D40_NAME, base);
	if (ret) {
		d40_err(&pdev->dev, "Failed to request IRQ\n");
		goto failure;
	}

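	/*
	 * Runtime PM with autosuspend: power the controller down after
	 * DMA40_AUTOSUSPEND_DELAY ms of inactivity.
	 */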
	pm_runtime_irq_safe(base->dev);
	pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(base->dev);
	pm_runtime_enable(base->dev);
	pm_runtime_resume(base->dev);

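	/*
	 * When the LCLA lives in ESRAM, the ESRAM bank must stay powered,
	 * so take and enable its regulator here.
	 */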
	if (base->plat_data->use_esram_lcla) {
		base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
		if (IS_ERR(base->lcpa_regulator)) {
			d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
			ret = PTR_ERR(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}

		ret = regulator_enable(base->lcpa_regulator);
		if (ret) {
			d40_err(&pdev->dev,
				"Failed to enable lcpa_regulator\n");
			regulator_put(base->lcpa_regulator);
			base->lcpa_regulator = NULL;
			goto failure;
		}
	}

	base->initialized = true;
	ret = d40_dmaengine_init(base, num_reserved_chans);
	if (ret)
		goto failure;

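	/* Advertise the largest segment the hardware can handle per transfer. */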
	base->dev->dma_parms = &base->dma_parms;
	ret = dma_set_max_seg_size(base->dev, STEDMA40_MAX_SEG_SIZE);
	if (ret) {
		d40_err(&pdev->dev, "Failed to set dma max seg size\n");
		goto failure;
	}

	d40_hw_init(base);

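	/* Make the controller visible to DT clients through d40_xlate. */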
	if (np) {
		ret = of_dma_controller_register(np, d40_xlate, NULL);
		if (ret && ret != -ENODEV)
			dev_err(&pdev->dev,
				"could not register of_dma_controller\n");
	}

	dev_info(base->dev, "initialized\n");
	return 0;

failure:
	if (base) {
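		/* Unwind everything that was set up, in reverse order. */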
		if (base->desc_slab)
			kmem_cache_destroy(base->desc_slab);
		if (base->virtbase)
			iounmap(base->virtbase);

		if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
			iounmap(base->lcla_pool.base);
			base->lcla_pool.base = NULL;
		}

		if (base->lcla_pool.dma_addr)
			dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
					 SZ_1K * base->num_phy_chans,
					 DMA_TO_DEVICE);

		if (!base->lcla_pool.base_unaligned && base->lcla_pool.base)
			free_pages((unsigned long)base->lcla_pool.base,
				   base->lcla_pool.pages);

		kfree(base->lcla_pool.base_unaligned);

		if (base->phy_lcpa)
			release_mem_region(base->phy_lcpa,
					   base->lcpa_size);
		if (base->phy_start)
			release_mem_region(base->phy_start,
					   base->phy_size);
		if (base->clk) {
			clk_disable_unprepare(base->clk);
			clk_put(base->clk);
		}

		if (base->lcpa_regulator) {
			regulator_disable(base->lcpa_regulator);
			regulator_put(base->lcpa_regulator);
		}

		kfree(base->lcla_pool.alloc_map);
		kfree(base->lookup_log_chans);
		kfree(base->lookup_phy_chans);
		kfree(base->phy_res);
		kfree(base);
	}

	d40_err(&pdev->dev, "probe failed\n");
	return ret;
}

static const struct of_device_id d40_match[] = {
	{ .compatible = "stericsson,dma40", },
	{}
};

static struct platform_driver d40_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name  = D40_NAME,
		.pm = DMA40_PM_OPS,
		.of_match_table = d40_match,
	},
};

static int __init stedma40_init(void)
{
	return platform_driver_probe(&d40_driver, d40_probe);
}
subsys_initcall(stedma40_init);