// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 */

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/scatterlist.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/bug.h>
#include <linux/reset.h>

#include "dmaengine.h"
#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)

enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};

enum pl330_byteswap {
	SWAP_NO,
	SWAP_2,
	SWAP_4,
	SWAP_8,
	SWAP_16,
};

/* Register and Bit field Definitions */
#define DS			0x0
#define DS_ST_STOP		0x0
#define DS_ST_EXEC		0x1
#define DS_ST_CMISS		0x2
#define DS_ST_UPDTPC		0x3
#define DS_ST_WFE		0x4
#define DS_ST_ATBRR		0x5
#define DS_ST_QBUSY		0x6
#define DS_ST_WFP		0x7
#define DS_ST_KILL		0x8
#define DS_ST_CMPLT		0x9
#define DS_ST_FLTCMP		0xe
#define DS_ST_FAULT		0xf

#define DPC			0x4
#define INTEN			0x20
#define ES			0x24
#define INTSTATUS		0x28
#define INTCLR			0x2c
#define FSM			0x30
#define FSC			0x34
#define FTM			0x38

#define _FTC			0x40
#define FTC(n)			(_FTC + (n)*0x4)

#define _CS			0x100
#define CS(n)			(_CS + (n)*0x8)
#define CS_CNS			(1 << 21)

#define _CPC			0x104
#define CPC(n)			(_CPC + (n)*0x8)

#define _SA			0x400
#define SA(n)			(_SA + (n)*0x20)

#define _DA			0x404
#define DA(n)			(_DA + (n)*0x20)

#define _CC			0x408
#define CC(n)			(_CC + (n)*0x20)

#define CC_SRCINC		(1 << 0)
#define CC_DSTINC		(1 << 14)
#define CC_SRCPRI		(1 << 8)
#define CC_DSTPRI		(1 << 22)
#define CC_SRCNS		(1 << 9)
#define CC_DSTNS		(1 << 23)
#define CC_SRCIA		(1 << 10)
#define CC_DSTIA		(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DSTCCTRL_MASK	0x7
#define CC_SWAP_SHFT		28

#define _LC0			0x40c
#define LC0(n)			(_LC0 + (n)*0x20)

#define _LC1			0x410
#define LC1(n)			(_LC1 + (n)*0x20)

#define DBGSTATUS		0xd00
#define DBG_BUSY		(1 << 0)

#define DBGCMD			0xd04
#define DBGINST0		0xd08
#define DBGINST1		0xd0c

#define CR0			0xe00
#define CR1			0xe04
#define CR2			0xe08
#define CR3			0xe0c
#define CR4			0xe10
#define CRD			0xe14

#define PERIPH_ID		0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define PART			0x330
#define DESIGNER		0x41
#define REVISION		0x0
#define INTEG_CFG		0x0
#define PERIPH_ID_VAL		((PART << 0) | (DESIGNER << 12))
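
/*
 * Illustrative (derived from the constants above, not in the original
 * source): PERIPH_ID_VAL evaluates to 0x330 | (0x41 << 12) = 0x41330,
 * which pl330_add() checks against the low 20 bits of the PERIPH_ID
 * register.
 */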
178 | ||
179 | #define PL330_STATE_STOPPED (1 << 0) | |
180 | #define PL330_STATE_EXECUTING (1 << 1) | |
181 | #define PL330_STATE_WFE (1 << 2) | |
182 | #define PL330_STATE_FAULTING (1 << 3) | |
183 | #define PL330_STATE_COMPLETING (1 << 4) | |
184 | #define PL330_STATE_WFP (1 << 5) | |
185 | #define PL330_STATE_KILLING (1 << 6) | |
186 | #define PL330_STATE_FAULT_COMPLETING (1 << 7) | |
187 | #define PL330_STATE_CACHEMISS (1 << 8) | |
188 | #define PL330_STATE_UPDTPC (1 << 9) | |
189 | #define PL330_STATE_ATBARRIER (1 << 10) | |
190 | #define PL330_STATE_QUEUEBUSY (1 << 11) | |
191 | #define PL330_STATE_INVALID (1 << 15) | |
192 | ||
193 | #define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \ | |
194 | | PL330_STATE_WFE | PL330_STATE_FAULTING) | |
195 | ||
196 | #define CMD_DMAADDH 0x54 | |
197 | #define CMD_DMAEND 0x00 | |
198 | #define CMD_DMAFLUSHP 0x35 | |
199 | #define CMD_DMAGO 0xa0 | |
200 | #define CMD_DMALD 0x04 | |
201 | #define CMD_DMALDP 0x25 | |
202 | #define CMD_DMALP 0x20 | |
203 | #define CMD_DMALPEND 0x28 | |
204 | #define CMD_DMAKILL 0x01 | |
205 | #define CMD_DMAMOV 0xbc | |
206 | #define CMD_DMANOP 0x18 | |
207 | #define CMD_DMARMB 0x12 | |
208 | #define CMD_DMASEV 0x34 | |
209 | #define CMD_DMAST 0x08 | |
210 | #define CMD_DMASTP 0x29 | |
211 | #define CMD_DMASTZ 0x0c | |
212 | #define CMD_DMAWFE 0x36 | |
213 | #define CMD_DMAWFP 0x30 | |
214 | #define CMD_DMAWMB 0x13 | |
215 | ||
216 | #define SZ_DMAADDH 3 | |
217 | #define SZ_DMAEND 1 | |
218 | #define SZ_DMAFLUSHP 2 | |
219 | #define SZ_DMALD 1 | |
220 | #define SZ_DMALDP 2 | |
221 | #define SZ_DMALP 2 | |
222 | #define SZ_DMALPEND 2 | |
223 | #define SZ_DMAKILL 1 | |
224 | #define SZ_DMAMOV 6 | |
225 | #define SZ_DMANOP 1 | |
226 | #define SZ_DMARMB 1 | |
227 | #define SZ_DMASEV 2 | |
228 | #define SZ_DMAST 1 | |
229 | #define SZ_DMASTP 2 | |
230 | #define SZ_DMASTZ 1 | |
231 | #define SZ_DMAWFE 2 | |
232 | #define SZ_DMAWFP 2 | |
233 | #define SZ_DMAWMB 1 | |
234 | #define SZ_DMAGO 6 | |
235 | ||
236 | #define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1) | |
237 | #define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7)) | |
238 | ||
239 | #define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr)) | |
240 | #define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) | |
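
/*
 * Worked example (illustrative, not from the original source): for a
 * CCR programmed with brst_size = 2 (2^2 = 4-byte beats) and
 * brst_len = 16, BYTE_TO_BURST(1024, ccr) = 1024 / 4 / 16 = 16 bursts
 * and BURST_TO_BYTE(16, ccr) = 16 * 4 * 16 = 1024 bytes.
 */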
241 | ||
242 | /* | |
243 | * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req | |
244 | * at 1byte/burst for P<->M and M<->M respectively. | |
245 | * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req | |
246 | * should be enough for P<->M and M<->M respectively. | |
247 | */ | |
248 | #define MCODE_BUFF_PER_REQ 256 | |
249 | ||
250 | /* Use this _only_ to wait on transient states */ | |
251 | #define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); | |
252 | ||
253 | #ifdef PL330_DEBUG_MCGEN | |
254 | static unsigned cmd_line; | |
255 | #define PL330_DBGCMD_DUMP(off, x...) do { \ | |
256 | printk("%x:", cmd_line); \ | |
257 | printk(x); \ | |
258 | cmd_line += off; \ | |
259 | } while (0) | |
260 | #define PL330_DBGMC_START(addr) (cmd_line = addr) | |
261 | #else | |
262 | #define PL330_DBGCMD_DUMP(off, x...) do {} while (0) | |
263 | #define PL330_DBGMC_START(addr) do {} while (0) | |
264 | #endif | |
265 | ||
266 | /* The number of default descriptors */ | |
267 | ||
268 | #define NR_DEFAULT_DESC 16 | |
269 | ||
270 | /* Delay for runtime PM autosuspend, ms */ | |
271 | #define PL330_AUTOSUSPEND_DELAY 20 | |
272 | ||
273 | /* Populated by the PL330 core driver for DMA API driver's info */ | |
274 | struct pl330_config { | |
275 | u32 periph_id; | |
276 | #define DMAC_MODE_NS (1 << 0) | |
277 | unsigned int mode; | |
278 | unsigned int data_bus_width:10; /* In number of bits */ | |
279 | unsigned int data_buf_dep:11; | |
280 | unsigned int num_chan:4; | |
281 | unsigned int num_peri:6; | |
282 | u32 peri_ns; | |
283 | unsigned int num_events:6; | |
284 | u32 irq_ns; | |
285 | }; | |
286 | ||
287 | /** | |
288 | * Request Configuration. | |
289 | * The PL330 core does not modify this and uses the last | |
290 | * working configuration if the request doesn't provide any. | |
291 | * | |
292 | * The Client may want to provide this info only for the | |
293 | * first request and a request with new settings. | |
294 | */ | |
295 | struct pl330_reqcfg { | |
296 | /* Address Incrementing */ | |
297 | unsigned dst_inc:1; | |
298 | unsigned src_inc:1; | |
299 | ||
300 | /* | |
301 | * For now, the SRC & DST protection levels | |
302 | * and burst size/length are assumed same. | |
303 | */ | |
304 | bool nonsecure; | |
305 | bool privileged; | |
306 | bool insnaccess; | |
307 | unsigned brst_len:5; | |
308 | unsigned brst_size:3; /* in power of 2 */ | |
309 | ||
310 | enum pl330_cachectrl dcctl; | |
311 | enum pl330_cachectrl scctl; | |
312 | enum pl330_byteswap swap; | |
313 | struct pl330_config *pcfg; | |
314 | }; | |
315 | ||
316 | /* | |
317 | * One cycle of DMAC operation. | |
318 | * There may be more than one xfer in a request. | |
319 | */ | |
320 | struct pl330_xfer { | |
321 | u32 src_addr; | |
322 | u32 dst_addr; | |
323 | /* Size to xfer */ | |
324 | u32 bytes; | |
325 | }; | |
326 | ||
327 | /* The xfer callbacks are made with one of these arguments. */ | |
enum pl330_op_err {
	/* All xfers in the request succeeded. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};
336 | ||
337 | enum dmamov_dst { | |
338 | SAR = 0, | |
339 | CCR, | |
340 | DAR, | |
341 | }; | |
342 | ||
343 | enum pl330_dst { | |
344 | SRC = 0, | |
345 | DST, | |
346 | }; | |
347 | ||
348 | enum pl330_cond { | |
349 | SINGLE, | |
350 | BURST, | |
351 | ALWAYS, | |
352 | }; | |
353 | ||
354 | struct dma_pl330_desc; | |
355 | ||
356 | struct _pl330_req { | |
357 | u32 mc_bus; | |
358 | void *mc_cpu; | |
359 | struct dma_pl330_desc *desc; | |
360 | }; | |
361 | ||
362 | /* ToBeDone for tasklet */ | |
363 | struct _pl330_tbd { | |
364 | bool reset_dmac; | |
365 | bool reset_mngr; | |
366 | u8 reset_chan; | |
367 | }; | |
368 | ||
369 | /* A DMAC Thread */ | |
370 | struct pl330_thread { | |
371 | u8 id; | |
372 | int ev; | |
373 | /* If the channel is not yet acquired by any client */ | |
374 | bool free; | |
375 | /* Parent DMAC */ | |
376 | struct pl330_dmac *dmac; | |
377 | /* Only two at a time */ | |
378 | struct _pl330_req req[2]; | |
379 | /* Index of the last enqueued request */ | |
380 | unsigned lstenq; | |
381 | /* Index of the last submitted request or -1 if the DMA is stopped */ | |
382 | int req_running; | |
383 | }; | |
384 | ||
385 | enum pl330_dmac_state { | |
386 | UNINIT, | |
387 | INIT, | |
388 | DYING, | |
389 | }; | |
390 | ||
391 | enum desc_status { | |
392 | /* In the DMAC pool */ | |
393 | FREE, | |
394 | /* | |
395 | * Allocated to some channel during prep_xxx | |
396 | * Also may be sitting on the work_list. | |
397 | */ | |
398 | PREP, | |
399 | /* | |
400 | * Sitting on the work_list and already submitted | |
401 | * to the PL330 core. Not more than two descriptors | |
402 | * of a channel can be BUSY at any time. | |
403 | */ | |
404 | BUSY, | |
405 | /* | |
406 | * Sitting on the channel work_list but xfer done | |
407 | * by PL330 core | |
408 | */ | |
409 | DONE, | |
410 | }; | |
411 | ||
412 | struct dma_pl330_chan { | |
413 | /* Schedule desc completion */ | |
414 | struct tasklet_struct task; | |
415 | ||
416 | /* DMA-Engine Channel */ | |
417 | struct dma_chan chan; | |
418 | ||
419 | /* List of submitted descriptors */ | |
420 | struct list_head submitted_list; | |
421 | /* List of issued descriptors */ | |
422 | struct list_head work_list; | |
423 | /* List of completed descriptors */ | |
424 | struct list_head completed_list; | |
425 | ||
426 | /* Pointer to the DMAC that manages this channel, | |
427 | * NULL if the channel is available to be acquired. | |
428 | * As the parent, this DMAC also provides descriptors | |
429 | * to the channel. | |
430 | */ | |
431 | struct pl330_dmac *dmac; | |
432 | ||
433 | /* To protect channel manipulation */ | |
434 | spinlock_t lock; | |
435 | ||
436 | /* | |
437 | * Hardware channel thread of PL330 DMAC. NULL if the channel is | |
438 | * available. | |
439 | */ | |
440 | struct pl330_thread *thread; | |
441 | ||
	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	phys_addr_t fifo_addr;
	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
	dma_addr_t fifo_dma;
	enum dma_data_direction dir;
	struct dma_slave_config slave_config;

	/* for cyclic capability */
	bool cyclic;

	/* for runtime pm tracking */
	bool active;
};

struct pl330_dmac {
	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */
	unsigned mcbufsz;
	/* ioremap'ed address of PL330 registers. */
	void __iomem	*base;
	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config	pcfg;

	spinlock_t		lock;
	/* Maximum possible events/irqs */
	int			events[32];
	/* BUS address of MicroCode buffer */
	dma_addr_t		mcode_bus;
	/* CPU address of MicroCode buffer */
	void			*mcode_cpu;
	/* List of all Channel threads */
	struct pl330_thread	*channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread	*manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct	tasks;
	struct _pl330_tbd	dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state	state;
	/* Holds list of reqs with due callbacks */
	struct list_head	req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
	int quirks;

	struct reset_control	*rstc;
	struct reset_control	*rstc_ocp;
};

static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	}
};

struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	int bytes_requested;
	bool last;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */
	unsigned peri:5;
	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;
};

struct _xfer_spec {
	u32 ccr;
	struct dma_pl330_desc *desc;
};

static int pl330_config_write(struct dma_chan *chan,
			struct dma_slave_config *slave_config,
			enum dma_transfer_direction direction);

static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}

static inline u32 _emit_END(unsigned dry_run, u8 buf[])
{
	if (dry_run)
		return SZ_DMAEND;

	buf[0] = CMD_DMAEND;

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

	return SZ_DMAEND;
}

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)
{
	if (dry_run)
		return SZ_DMAFLUSHP;

	buf[0] = CMD_DMAFLUSHP;

	peri &= 0x1f;
	peri <<= 3;
	buf[1] = peri;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

	return SZ_DMAFLUSHP;
}
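
/*
 * Illustrative encoding (not in the original source): _emit_FLUSHP(0,
 * buf, 3) writes buf[0] = 0x35 (DMAFLUSHP) and buf[1] = 3 << 3 = 0x18.
 */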
599 | ||
600 | static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond) | |
601 | { | |
602 | if (dry_run) | |
603 | return SZ_DMALD; | |
604 | ||
605 | buf[0] = CMD_DMALD; | |
606 | ||
607 | if (cond == SINGLE) | |
608 | buf[0] |= (0 << 1) | (1 << 0); | |
609 | else if (cond == BURST) | |
610 | buf[0] |= (1 << 1) | (1 << 0); | |
611 | ||
612 | PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n", | |
613 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | |
614 | ||
615 | return SZ_DMALD; | |
616 | } | |
617 | ||
618 | static inline u32 _emit_LDP(unsigned dry_run, u8 buf[], | |
619 | enum pl330_cond cond, u8 peri) | |
620 | { | |
621 | if (dry_run) | |
622 | return SZ_DMALDP; | |
623 | ||
624 | buf[0] = CMD_DMALDP; | |
625 | ||
626 | if (cond == BURST) | |
627 | buf[0] |= (1 << 1); | |
628 | ||
629 | peri &= 0x1f; | |
630 | peri <<= 3; | |
631 | buf[1] = peri; | |
632 | ||
633 | PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n", | |
634 | cond == SINGLE ? 'S' : 'B', peri >> 3); | |
635 | ||
636 | return SZ_DMALDP; | |
637 | } | |
638 | ||
639 | static inline u32 _emit_LP(unsigned dry_run, u8 buf[], | |
640 | unsigned loop, u8 cnt) | |
641 | { | |
642 | if (dry_run) | |
643 | return SZ_DMALP; | |
644 | ||
645 | buf[0] = CMD_DMALP; | |
646 | ||
647 | if (loop) | |
648 | buf[0] |= (1 << 1); | |
649 | ||
650 | cnt--; /* DMAC increments by 1 internally */ | |
651 | buf[1] = cnt; | |
652 | ||
653 | PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt); | |
654 | ||
655 | return SZ_DMALP; | |
656 | } | |
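
/*
 * Illustrative encoding (not in the original source): _emit_LP(0, buf,
 * 1, 256) writes buf[0] = 0x20 | (1 << 1) = 0x22 and buf[1] = 0xff,
 * i.e. loop counter 1 programmed for 256 iterations.
 */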
657 | ||
658 | struct _arg_LPEND { | |
659 | enum pl330_cond cond; | |
660 | bool forever; | |
661 | unsigned loop; | |
662 | u8 bjump; | |
663 | }; | |
664 | ||
665 | static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[], | |
666 | const struct _arg_LPEND *arg) | |
667 | { | |
668 | enum pl330_cond cond = arg->cond; | |
669 | bool forever = arg->forever; | |
670 | unsigned loop = arg->loop; | |
671 | u8 bjump = arg->bjump; | |
672 | ||
673 | if (dry_run) | |
674 | return SZ_DMALPEND; | |
675 | ||
676 | buf[0] = CMD_DMALPEND; | |
677 | ||
678 | if (loop) | |
679 | buf[0] |= (1 << 2); | |
680 | ||
681 | if (!forever) | |
682 | buf[0] |= (1 << 4); | |
683 | ||
684 | if (cond == SINGLE) | |
685 | buf[0] |= (0 << 1) | (1 << 0); | |
686 | else if (cond == BURST) | |
687 | buf[0] |= (1 << 1) | (1 << 0); | |
688 | ||
689 | buf[1] = bjump; | |
690 | ||
691 | PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n", | |
692 | forever ? "FE" : "END", | |
693 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'), | |
694 | loop ? '1' : '0', | |
695 | bjump); | |
696 | ||
697 | return SZ_DMALPEND; | |
698 | } | |
699 | ||
700 | static inline u32 _emit_KILL(unsigned dry_run, u8 buf[]) | |
701 | { | |
702 | if (dry_run) | |
703 | return SZ_DMAKILL; | |
704 | ||
705 | buf[0] = CMD_DMAKILL; | |
706 | ||
707 | return SZ_DMAKILL; | |
708 | } | |
709 | ||
710 | static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], | |
711 | enum dmamov_dst dst, u32 val) | |
712 | { | |
713 | if (dry_run) | |
714 | return SZ_DMAMOV; | |
715 | ||
716 | buf[0] = CMD_DMAMOV; | |
717 | buf[1] = dst; | |
718 | buf[2] = val; | |
719 | buf[3] = val >> 8; | |
720 | buf[4] = val >> 16; | |
721 | buf[5] = val >> 24; | |
722 | ||
723 | PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", | |
724 | dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val); | |
725 | ||
726 | return SZ_DMAMOV; | |
727 | } | |
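
/*
 * Illustrative encoding (not in the original source): _emit_MOV(0,
 * buf, SAR, 0x20008000) writes { 0xbc, 0x00, 0x00, 0x80, 0x00, 0x20 },
 * i.e. DMAMOV SAR followed by the 32-bit value in little-endian order.
 */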
728 | ||
729 | static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) | |
730 | { | |
731 | if (dry_run) | |
732 | return SZ_DMARMB; | |
733 | ||
734 | buf[0] = CMD_DMARMB; | |
735 | ||
736 | PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n"); | |
737 | ||
738 | return SZ_DMARMB; | |
739 | } | |
740 | ||
741 | static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev) | |
742 | { | |
743 | if (dry_run) | |
744 | return SZ_DMASEV; | |
745 | ||
746 | buf[0] = CMD_DMASEV; | |
747 | ||
748 | ev &= 0x1f; | |
749 | ev <<= 3; | |
750 | buf[1] = ev; | |
751 | ||
752 | PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3); | |
753 | ||
754 | return SZ_DMASEV; | |
755 | } | |
756 | ||
757 | static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond) | |
758 | { | |
759 | if (dry_run) | |
760 | return SZ_DMAST; | |
761 | ||
762 | buf[0] = CMD_DMAST; | |
763 | ||
764 | if (cond == SINGLE) | |
765 | buf[0] |= (0 << 1) | (1 << 0); | |
766 | else if (cond == BURST) | |
767 | buf[0] |= (1 << 1) | (1 << 0); | |
768 | ||
769 | PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n", | |
770 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); | |
771 | ||
772 | return SZ_DMAST; | |
773 | } | |
774 | ||
775 | static inline u32 _emit_STP(unsigned dry_run, u8 buf[], | |
776 | enum pl330_cond cond, u8 peri) | |
777 | { | |
778 | if (dry_run) | |
779 | return SZ_DMASTP; | |
780 | ||
781 | buf[0] = CMD_DMASTP; | |
782 | ||
783 | if (cond == BURST) | |
784 | buf[0] |= (1 << 1); | |
785 | ||
786 | peri &= 0x1f; | |
787 | peri <<= 3; | |
788 | buf[1] = peri; | |
789 | ||
790 | PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n", | |
791 | cond == SINGLE ? 'S' : 'B', peri >> 3); | |
792 | ||
793 | return SZ_DMASTP; | |
794 | } | |
795 | ||
796 | static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], | |
797 | enum pl330_cond cond, u8 peri) | |
798 | { | |
799 | if (dry_run) | |
800 | return SZ_DMAWFP; | |
801 | ||
802 | buf[0] = CMD_DMAWFP; | |
803 | ||
804 | if (cond == SINGLE) | |
805 | buf[0] |= (0 << 1) | (0 << 0); | |
806 | else if (cond == BURST) | |
807 | buf[0] |= (1 << 1) | (0 << 0); | |
808 | else | |
809 | buf[0] |= (0 << 1) | (1 << 0); | |
810 | ||
811 | peri &= 0x1f; | |
812 | peri <<= 3; | |
813 | buf[1] = peri; | |
814 | ||
815 | PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n", | |
816 | cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3); | |
817 | ||
818 | return SZ_DMAWFP; | |
819 | } | |
820 | ||
821 | static inline u32 _emit_WMB(unsigned dry_run, u8 buf[]) | |
822 | { | |
823 | if (dry_run) | |
824 | return SZ_DMAWMB; | |
825 | ||
826 | buf[0] = CMD_DMAWMB; | |
827 | ||
828 | PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n"); | |
829 | ||
830 | return SZ_DMAWMB; | |
831 | } | |
832 | ||
833 | struct _arg_GO { | |
834 | u8 chan; | |
835 | u32 addr; | |
836 | unsigned ns; | |
837 | }; | |
838 | ||
839 | static inline u32 _emit_GO(unsigned dry_run, u8 buf[], | |
840 | const struct _arg_GO *arg) | |
841 | { | |
842 | u8 chan = arg->chan; | |
843 | u32 addr = arg->addr; | |
844 | unsigned ns = arg->ns; | |
845 | ||
846 | if (dry_run) | |
847 | return SZ_DMAGO; | |
848 | ||
849 | buf[0] = CMD_DMAGO; | |
850 | buf[0] |= (ns << 1); | |
851 | buf[1] = chan & 0x7; | |
852 | buf[2] = addr; | |
853 | buf[3] = addr >> 8; | |
854 | buf[4] = addr >> 16; | |
855 | buf[5] = addr >> 24; | |
856 | ||
857 | return SZ_DMAGO; | |
858 | } | |
859 | ||
860 | #define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) | |
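/*
 * loops_per_jiffy * HZ approximates the number of busy-wait spins per
 * second, so dividing by 1000 and scaling by 't' gives the spin budget
 * for a 't' millisecond poll window (a rough bound, not a precise delay).
 */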
861 | ||
862 | /* Returns Time-Out */ | |
863 | static bool _until_dmac_idle(struct pl330_thread *thrd) | |
864 | { | |
865 | void __iomem *regs = thrd->dmac->base; | |
866 | unsigned long loops = msecs_to_loops(5); | |
867 | ||
868 | do { | |
869 | /* Until Manager is Idle */ | |
870 | if (!(readl(regs + DBGSTATUS) & DBG_BUSY)) | |
871 | break; | |
872 | ||
873 | cpu_relax(); | |
874 | } while (--loops); | |
875 | ||
876 | if (!loops) | |
877 | return true; | |
878 | ||
879 | return false; | |
880 | } | |
881 | ||
882 | static inline void _execute_DBGINSN(struct pl330_thread *thrd, | |
883 | u8 insn[], bool as_manager) | |
884 | { | |
885 | void __iomem *regs = thrd->dmac->base; | |
886 | u32 val; | |
887 | ||
888 | val = (insn[0] << 16) | (insn[1] << 24); | |
889 | if (!as_manager) { | |
890 | val |= (1 << 0); | |
891 | val |= (thrd->id << 8); /* Channel Number */ | |
892 | } | |
893 | writel(val, regs + DBGINST0); | |
894 | ||
895 | val = le32_to_cpu(*((__le32 *)&insn[2])); | |
896 | writel(val, regs + DBGINST1); | |
897 | ||
898 | /* If timed out due to halted state-machine */ | |
899 | if (_until_dmac_idle(thrd)) { | |
900 | dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n"); | |
901 | return; | |
902 | } | |
903 | ||
904 | /* Get going */ | |
905 | writel(0, regs + DBGCMD); | |
906 | } | |
907 | ||
908 | static inline u32 _state(struct pl330_thread *thrd) | |
909 | { | |
910 | void __iomem *regs = thrd->dmac->base; | |
911 | u32 val; | |
912 | ||
913 | if (is_manager(thrd)) | |
914 | val = readl(regs + DS) & 0xf; | |
915 | else | |
916 | val = readl(regs + CS(thrd->id)) & 0xf; | |
917 | ||
918 | switch (val) { | |
919 | case DS_ST_STOP: | |
920 | return PL330_STATE_STOPPED; | |
921 | case DS_ST_EXEC: | |
922 | return PL330_STATE_EXECUTING; | |
923 | case DS_ST_CMISS: | |
924 | return PL330_STATE_CACHEMISS; | |
925 | case DS_ST_UPDTPC: | |
926 | return PL330_STATE_UPDTPC; | |
927 | case DS_ST_WFE: | |
928 | return PL330_STATE_WFE; | |
929 | case DS_ST_FAULT: | |
930 | return PL330_STATE_FAULTING; | |
931 | case DS_ST_ATBRR: | |
932 | if (is_manager(thrd)) | |
933 | return PL330_STATE_INVALID; | |
934 | else | |
935 | return PL330_STATE_ATBARRIER; | |
936 | case DS_ST_QBUSY: | |
937 | if (is_manager(thrd)) | |
938 | return PL330_STATE_INVALID; | |
939 | else | |
940 | return PL330_STATE_QUEUEBUSY; | |
941 | case DS_ST_WFP: | |
942 | if (is_manager(thrd)) | |
943 | return PL330_STATE_INVALID; | |
944 | else | |
945 | return PL330_STATE_WFP; | |
946 | case DS_ST_KILL: | |
947 | if (is_manager(thrd)) | |
948 | return PL330_STATE_INVALID; | |
949 | else | |
950 | return PL330_STATE_KILLING; | |
951 | case DS_ST_CMPLT: | |
952 | if (is_manager(thrd)) | |
953 | return PL330_STATE_INVALID; | |
954 | else | |
955 | return PL330_STATE_COMPLETING; | |
956 | case DS_ST_FLTCMP: | |
957 | if (is_manager(thrd)) | |
958 | return PL330_STATE_INVALID; | |
959 | else | |
960 | return PL330_STATE_FAULT_COMPLETING; | |
961 | default: | |
962 | return PL330_STATE_INVALID; | |
963 | } | |
964 | } | |
965 | ||
966 | static void _stop(struct pl330_thread *thrd) | |
967 | { | |
968 | void __iomem *regs = thrd->dmac->base; | |
969 | u8 insn[6] = {0, 0, 0, 0, 0, 0}; | |
970 | u32 inten = readl(regs + INTEN); | |
971 | ||
972 | if (_state(thrd) == PL330_STATE_FAULT_COMPLETING) | |
973 | UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING); | |
974 | ||
975 | /* Return if nothing needs to be done */ | |
976 | if (_state(thrd) == PL330_STATE_COMPLETING | |
977 | || _state(thrd) == PL330_STATE_KILLING | |
978 | || _state(thrd) == PL330_STATE_STOPPED) | |
979 | return; | |
980 | ||
981 | _emit_KILL(0, insn); | |
982 | ||
983 | _execute_DBGINSN(thrd, insn, is_manager(thrd)); | |
984 | ||
985 | /* clear the event */ | |
986 | if (inten & (1 << thrd->ev)) | |
987 | writel(1 << thrd->ev, regs + INTCLR); | |
988 | /* Stop generating interrupts for SEV */ | |
989 | writel(inten & ~(1 << thrd->ev), regs + INTEN); | |
990 | } | |
991 | ||
/* Start the next queued req of thread 'thrd', if any */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	struct _arg_GO go;
	unsigned ns;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	int idx;

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)
		return true;

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];
		else
			req = NULL;
	}

	/* Return if no request */
	if (!req)
		return true;

	/* Return if req is running */
	if (idx == thrd->req_running)
		return true;

	desc = req->desc;

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.chan = thrd->id;
	go.addr = req->mc_bus;
	go.ns = ns;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;

	return true;
}

static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)
		/* fall through */

	case PL330_STATE_FAULTING:
		_stop(thrd);
		/* fall through */

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)
		/* fall through */

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:
		return true;

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
	default:
		return false;
	}
}

static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	int off = 0;
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
		}
	} else {
		while (cyc--) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
		}
	}

	return off;
}

static u32 _emit_load(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
		/* fall through */
	case DMA_MEM_TO_DEV:
		off += _emit_LD(dry_run, &buf[off], cond);
		break;

	case DMA_DEV_TO_MEM:
		if (cond == ALWAYS) {
			off += _emit_LDP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_LDP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_LDP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
		/* fall through */
	case DMA_DEV_TO_MEM:
		off += _emit_ST(dry_run, &buf[off], cond);
		break;

	case DMA_MEM_TO_DEV:
		if (cond == ALWAYS) {
			off += _emit_STP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_STP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_STP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
		WARN_ON(1);
		break;
	}

	return off;
}

static inline int _ldst_peripheral(struct pl330_dmac *pl330,
				 unsigned dry_run, u8 buf[],
				 const struct _xfer_spec *pxs, int cyc,
				 enum pl330_cond cond)
{
	int off = 0;

	if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
		cond = BURST;

	/*
	 * do FLUSHP at beginning to clear any stale dma requests before the
	 * first WFP.
	 */
	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);
	while (cyc--) {
		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
			pxs->desc->peri);
	}

	return off;
}
1205 | ||
1206 | static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], | |
1207 | const struct _xfer_spec *pxs, int cyc) | |
1208 | { | |
1209 | int off = 0; | |
1210 | enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE; | |
1211 | ||
1212 | switch (pxs->desc->rqtype) { | |
1213 | case DMA_MEM_TO_DEV: | |
1214 | /* fall through */ | |
1215 | case DMA_DEV_TO_MEM: | |
1216 | off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc, | |
1217 | cond); | |
1218 | break; | |
1219 | ||
1220 | case DMA_MEM_TO_MEM: | |
1221 | off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); | |
1222 | break; | |
1223 | ||
1224 | default: | |
1225 | /* this code should be unreachable */ | |
1226 | WARN_ON(1); | |
1227 | break; | |
1228 | } | |
1229 | ||
1230 | return off; | |
1231 | } | |
1232 | ||
1233 | /* | |
1234 | * transfer dregs with single transfers to peripheral, or a reduced size burst | |
1235 | * for mem-to-mem. | |
1236 | */ | |
1237 | static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[], | |
1238 | const struct _xfer_spec *pxs, int transfer_length) | |
1239 | { | |
1240 | int off = 0; | |
1241 | int dregs_ccr; | |
1242 | ||
1243 | if (transfer_length == 0) | |
1244 | return off; | |
1245 | ||
1246 | switch (pxs->desc->rqtype) { | |
1247 | case DMA_MEM_TO_DEV: | |
1248 | /* fall through */ | |
1249 | case DMA_DEV_TO_MEM: | |
1250 | off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, | |
1251 | transfer_length, SINGLE); | |
1252 | break; | |
1253 | ||
1254 | case DMA_MEM_TO_MEM: | |
1255 | dregs_ccr = pxs->ccr; | |
1256 | dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) | | |
1257 | (0xf << CC_DSTBRSTLEN_SHFT)); | |
1258 | dregs_ccr |= (((transfer_length - 1) & 0xf) << | |
1259 | CC_SRCBRSTLEN_SHFT); | |
1260 | dregs_ccr |= (((transfer_length - 1) & 0xf) << | |
1261 | CC_DSTBRSTLEN_SHFT); | |
1262 | off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr); | |
1263 | off += _ldst_memtomem(dry_run, &buf[off], pxs, 1); | |
1264 | break; | |
1265 | ||
1266 | default: | |
1267 | /* this code should be unreachable */ | |
1268 | WARN_ON(1); | |
1269 | break; | |
1270 | } | |
1271 | ||
1272 | return off; | |
1273 | } | |
1274 | ||
1275 | /* Returns bytes consumed and updates bursts */ | |
1276 | static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[], | |
1277 | unsigned long *bursts, const struct _xfer_spec *pxs) | |
1278 | { | |
1279 | int cyc, cycmax, szlp, szlpend, szbrst, off; | |
1280 | unsigned lcnt0, lcnt1, ljmp0, ljmp1; | |
1281 | struct _arg_LPEND lpend; | |
1282 | ||
1283 | if (*bursts == 1) | |
1284 | return _bursts(pl330, dry_run, buf, pxs, 1); | |
1285 | ||
1286 | /* Max iterations possible in DMALP is 256 */ | |
1287 | if (*bursts >= 256*256) { | |
1288 | lcnt1 = 256; | |
1289 | lcnt0 = 256; | |
1290 | cyc = *bursts / lcnt1 / lcnt0; | |
1291 | } else if (*bursts > 256) { | |
1292 | lcnt1 = 256; | |
1293 | lcnt0 = *bursts / lcnt1; | |
1294 | cyc = 1; | |
1295 | } else { | |
1296 | lcnt1 = *bursts; | |
1297 | lcnt0 = 0; | |
1298 | cyc = 1; | |
1299 | } | |
1300 | ||
1301 | szlp = _emit_LP(1, buf, 0, 0); | |
1302 | szbrst = _bursts(pl330, 1, buf, pxs, 1); | |
1303 | ||
1304 | lpend.cond = ALWAYS; | |
1305 | lpend.forever = false; | |
1306 | lpend.loop = 0; | |
1307 | lpend.bjump = 0; | |
1308 | szlpend = _emit_LPEND(1, buf, &lpend); | |
1309 | ||
1310 | if (lcnt0) { | |
1311 | szlp *= 2; | |
1312 | szlpend *= 2; | |
1313 | } | |
1314 | ||
1315 | /* | |
1316 | * Max bursts that we can unroll due to limit on the | |
1317 | * size of backward jump that can be encoded in DMALPEND | |
1318 | * which is 8-bits and hence 255 | |
1319 | */ | |
1320 | cycmax = (255 - (szlp + szlpend)) / szbrst; | |
1321 | ||
1322 | cyc = (cycmax < cyc) ? cycmax : cyc; | |
1323 | ||
1324 | off = 0; | |
1325 | ||
1326 | if (lcnt0) { | |
1327 | off += _emit_LP(dry_run, &buf[off], 0, lcnt0); | |
1328 | ljmp0 = off; | |
1329 | } | |
1330 | ||
1331 | off += _emit_LP(dry_run, &buf[off], 1, lcnt1); | |
1332 | ljmp1 = off; | |
1333 | ||
1334 | off += _bursts(pl330, dry_run, &buf[off], pxs, cyc); | |
1335 | ||
1336 | lpend.cond = ALWAYS; | |
1337 | lpend.forever = false; | |
1338 | lpend.loop = 1; | |
1339 | lpend.bjump = off - ljmp1; | |
1340 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | |
1341 | ||
1342 | if (lcnt0) { | |
1343 | lpend.cond = ALWAYS; | |
1344 | lpend.forever = false; | |
1345 | lpend.loop = 0; | |
1346 | lpend.bjump = off - ljmp0; | |
1347 | off += _emit_LPEND(dry_run, &buf[off], &lpend); | |
1348 | } | |
1349 | ||
1350 | *bursts = lcnt1 * cyc; | |
1351 | if (lcnt0) | |
1352 | *bursts *= lcnt0; | |
1353 | ||
1354 | return off; | |
1355 | } | |
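
/*
 * Worked example (illustrative, not from the original source): for
 * *bursts == 1000 the middle branch above picks lcnt1 = 256,
 * lcnt0 = 1000 / 256 = 3 and cyc = 1, so the nested loops emitted here
 * cover 3 * 256 = 768 bursts; *bursts is updated to that count and
 * _setup_loops() loops back in for the remaining 232.
 */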
1356 | ||
1357 | static inline int _setup_loops(struct pl330_dmac *pl330, | |
1358 | unsigned dry_run, u8 buf[], | |
1359 | const struct _xfer_spec *pxs) | |
1360 | { | |
1361 | struct pl330_xfer *x = &pxs->desc->px; | |
1362 | u32 ccr = pxs->ccr; | |
1363 | unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr); | |
1364 | int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) / | |
1365 | BRST_SIZE(ccr); | |
1366 | int off = 0; | |
1367 | ||
1368 | while (bursts) { | |
1369 | c = bursts; | |
1370 | off += _loop(pl330, dry_run, &buf[off], &c, pxs); | |
1371 | bursts -= c; | |
1372 | } | |
1373 | off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs); | |
1374 | ||
1375 | return off; | |
1376 | } | |
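
/*
 * Worked example (illustrative, not from the original source): with
 * 4-byte beats and brst_len = 16 (64 bytes per burst), x->bytes = 1000
 * gives bursts = 15 and num_dregs = (1000 - 960) / 4 = 10 leftover
 * beats, which _dregs() moves with SINGLE requests or one
 * reduced-length burst.
 */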
1377 | ||
1378 | static inline int _setup_xfer(struct pl330_dmac *pl330, | |
1379 | unsigned dry_run, u8 buf[], | |
1380 | const struct _xfer_spec *pxs) | |
1381 | { | |
1382 | struct pl330_xfer *x = &pxs->desc->px; | |
1383 | int off = 0; | |
1384 | ||
1385 | /* DMAMOV SAR, x->src_addr */ | |
1386 | off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr); | |
1387 | /* DMAMOV DAR, x->dst_addr */ | |
1388 | off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr); | |
1389 | ||
1390 | /* Setup Loop(s) */ | |
1391 | off += _setup_loops(pl330, dry_run, &buf[off], pxs); | |
1392 | ||
1393 | return off; | |
1394 | } | |
1395 | ||
1396 | /* | |
1397 | * A req is a sequence of one or more xfer units. | |
1398 | * Returns the number of bytes taken to setup the MC for the req. | |
1399 | */ | |
1400 | static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run, | |
1401 | struct pl330_thread *thrd, unsigned index, | |
1402 | struct _xfer_spec *pxs) | |
1403 | { | |
1404 | struct _pl330_req *req = &thrd->req[index]; | |
1405 | u8 *buf = req->mc_cpu; | |
1406 | int off = 0; | |
1407 | ||
1408 | PL330_DBGMC_START(req->mc_bus); | |
1409 | ||
1410 | /* DMAMOV CCR, ccr */ | |
1411 | off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); | |
1412 | ||
1413 | off += _setup_xfer(pl330, dry_run, &buf[off], pxs); | |
1414 | ||
1415 | /* DMASEV peripheral/event */ | |
1416 | off += _emit_SEV(dry_run, &buf[off], thrd->ev); | |
1417 | /* DMAEND */ | |
1418 | off += _emit_END(dry_run, &buf[off]); | |
1419 | ||
1420 | return off; | |
1421 | } | |
1422 | ||
1423 | static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc) | |
1424 | { | |
1425 | u32 ccr = 0; | |
1426 | ||
1427 | if (rqc->src_inc) | |
1428 | ccr |= CC_SRCINC; | |
1429 | ||
1430 | if (rqc->dst_inc) | |
1431 | ccr |= CC_DSTINC; | |
1432 | ||
1433 | /* We set same protection levels for Src and DST for now */ | |
1434 | if (rqc->privileged) | |
1435 | ccr |= CC_SRCPRI | CC_DSTPRI; | |
1436 | if (rqc->nonsecure) | |
1437 | ccr |= CC_SRCNS | CC_DSTNS; | |
1438 | if (rqc->insnaccess) | |
1439 | ccr |= CC_SRCIA | CC_DSTIA; | |
1440 | ||
1441 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT); | |
1442 | ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT); | |
1443 | ||
1444 | ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); | |
1445 | ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); | |
1446 | ||
1447 | ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT); | |
1448 | ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT); | |
1449 | ||
1450 | ccr |= (rqc->swap << CC_SWAP_SHFT); | |
1451 | ||
1452 | return ccr; | |
1453 | } | |
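
/*
 * Worked example (illustrative, not from the original source): an
 * incrementing, non-secure request with brst_size = 2 (4-byte beats)
 * and brst_len = 16 yields
 *
 *	ccr = CC_SRCINC | CC_DSTINC | CC_SRCNS | CC_DSTNS
 *		| (15 << CC_SRCBRSTLEN_SHFT) | (15 << CC_DSTBRSTLEN_SHFT)
 *		| (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT);
 *
 * with the cache-control and swap fields left at CCTRL0 / SWAP_NO.
 */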
1454 | ||
1455 | /* | |
1456 | * Submit a list of xfers after which the client wants notification. | |
1457 | * Client is not notified after each xfer unit, just once after all | |
1458 | * xfer units are done or some error occurs. | |
1459 | */ | |
1460 | static int pl330_submit_req(struct pl330_thread *thrd, | |
1461 | struct dma_pl330_desc *desc) | |
1462 | { | |
1463 | struct pl330_dmac *pl330 = thrd->dmac; | |
1464 | struct _xfer_spec xs; | |
1465 | unsigned long flags; | |
1466 | unsigned idx; | |
1467 | u32 ccr; | |
1468 | int ret = 0; | |
1469 | ||
1470 | switch (desc->rqtype) { | |
1471 | case DMA_MEM_TO_DEV: | |
1472 | break; | |
1473 | ||
1474 | case DMA_DEV_TO_MEM: | |
1475 | break; | |
1476 | ||
1477 | case DMA_MEM_TO_MEM: | |
1478 | break; | |
1479 | ||
1480 | default: | |
1481 | return -ENOTSUPP; | |
1482 | } | |
1483 | ||
1484 | if (pl330->state == DYING | |
1485 | || pl330->dmac_tbd.reset_chan & (1 << thrd->id)) { | |
1486 | dev_info(thrd->dmac->ddma.dev, "%s:%d\n", | |
1487 | __func__, __LINE__); | |
1488 | return -EAGAIN; | |
1489 | } | |
1490 | ||
1491 | /* If request for non-existing peripheral */ | |
1492 | if (desc->rqtype != DMA_MEM_TO_MEM && | |
1493 | desc->peri >= pl330->pcfg.num_peri) { | |
1494 | dev_info(thrd->dmac->ddma.dev, | |
1495 | "%s:%d Invalid peripheral(%u)!\n", | |
1496 | __func__, __LINE__, desc->peri); | |
1497 | return -EINVAL; | |
1498 | } | |
1499 | ||
1500 | spin_lock_irqsave(&pl330->lock, flags); | |
1501 | ||
1502 | if (_queue_full(thrd)) { | |
1503 | ret = -EAGAIN; | |
1504 | goto xfer_exit; | |
1505 | } | |
1506 | ||
1507 | /* Prefer Secure Channel */ | |
1508 | if (!_manager_ns(thrd)) | |
1509 | desc->rqcfg.nonsecure = 0; | |
1510 | else | |
1511 | desc->rqcfg.nonsecure = 1; | |
1512 | ||
1513 | ccr = _prepare_ccr(&desc->rqcfg); | |
1514 | ||
1515 | idx = thrd->req[0].desc == NULL ? 0 : 1; | |
1516 | ||
1517 | xs.ccr = ccr; | |
1518 | xs.desc = desc; | |
1519 | ||
1520 | /* First dry run to check if req is acceptable */ | |
1521 | ret = _setup_req(pl330, 1, thrd, idx, &xs); | |
1522 | if (ret < 0) | |
1523 | goto xfer_exit; | |
1524 | ||
1525 | if (ret > pl330->mcbufsz / 2) { | |
1526 | dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n", | |
1527 | __func__, __LINE__, ret, pl330->mcbufsz / 2); | |
1528 | ret = -ENOMEM; | |
1529 | goto xfer_exit; | |
1530 | } | |
1531 | ||
1532 | /* Hook the request */ | |
1533 | thrd->lstenq = idx; | |
1534 | thrd->req[idx].desc = desc; | |
1535 | _setup_req(pl330, 0, thrd, idx, &xs); | |
1536 | ||
1537 | ret = 0; | |
1538 | ||
1539 | xfer_exit: | |
1540 | spin_unlock_irqrestore(&pl330->lock, flags); | |
1541 | ||
1542 | return ret; | |
1543 | } | |
1544 | ||
1545 | static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) | |
1546 | { | |
1547 | struct dma_pl330_chan *pch; | |
1548 | unsigned long flags; | |
1549 | ||
1550 | if (!desc) | |
1551 | return; | |
1552 | ||
1553 | pch = desc->pchan; | |
1554 | ||
1555 | /* If desc aborted */ | |
1556 | if (!pch) | |
1557 | return; | |
1558 | ||
1559 | spin_lock_irqsave(&pch->lock, flags); | |
1560 | ||
1561 | desc->status = DONE; | |
1562 | ||
1563 | spin_unlock_irqrestore(&pch->lock, flags); | |
1564 | ||
1565 | tasklet_schedule(&pch->task); | |
1566 | } | |
1567 | ||
1568 | static void pl330_dotask(unsigned long data) | |
1569 | { | |
1570 | struct pl330_dmac *pl330 = (struct pl330_dmac *) data; | |
1571 | unsigned long flags; | |
1572 | int i; | |
1573 | ||
1574 | spin_lock_irqsave(&pl330->lock, flags); | |
1575 | ||
1576 | /* The DMAC itself gone nuts */ | |
1577 | if (pl330->dmac_tbd.reset_dmac) { | |
1578 | pl330->state = DYING; | |
1579 | /* Reset the manager too */ | |
1580 | pl330->dmac_tbd.reset_mngr = true; | |
1581 | /* Clear the reset flag */ | |
1582 | pl330->dmac_tbd.reset_dmac = false; | |
1583 | } | |
1584 | ||
1585 | if (pl330->dmac_tbd.reset_mngr) { | |
1586 | _stop(pl330->manager); | |
1587 | /* Reset all channels */ | |
1588 | pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1; | |
1589 | /* Clear the reset flag */ | |
1590 | pl330->dmac_tbd.reset_mngr = false; | |
1591 | } | |
1592 | ||
1593 | for (i = 0; i < pl330->pcfg.num_chan; i++) { | |
1594 | ||
1595 | if (pl330->dmac_tbd.reset_chan & (1 << i)) { | |
1596 | struct pl330_thread *thrd = &pl330->channels[i]; | |
1597 | void __iomem *regs = pl330->base; | |
1598 | enum pl330_op_err err; | |
1599 | ||
1600 | _stop(thrd); | |
1601 | ||
1602 | if (readl(regs + FSC) & (1 << thrd->id)) | |
1603 | err = PL330_ERR_FAIL; | |
1604 | else | |
1605 | err = PL330_ERR_ABORT; | |
1606 | ||
1607 | spin_unlock_irqrestore(&pl330->lock, flags); | |
1608 | dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); | |
1609 | dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err); | |
1610 | spin_lock_irqsave(&pl330->lock, flags); | |
1611 | ||
1612 | thrd->req[0].desc = NULL; | |
1613 | thrd->req[1].desc = NULL; | |
1614 | thrd->req_running = -1; | |
1615 | ||
1616 | /* Clear the reset flag */ | |
1617 | pl330->dmac_tbd.reset_chan &= ~(1 << i); | |
1618 | } | |
1619 | } | |
1620 | ||
1621 | spin_unlock_irqrestore(&pl330->lock, flags); | |
1622 | ||
1623 | return; | |
1624 | } | |
1625 | ||
1626 | /* Returns 1 if state was updated, 0 otherwise */ | |
1627 | static int pl330_update(struct pl330_dmac *pl330) | |
1628 | { | |
1629 | struct dma_pl330_desc *descdone; | |
1630 | unsigned long flags; | |
1631 | void __iomem *regs; | |
1632 | u32 val; | |
1633 | int id, ev, ret = 0; | |
1634 | ||
1635 | regs = pl330->base; | |
1636 | ||
1637 | spin_lock_irqsave(&pl330->lock, flags); | |
1638 | ||
1639 | val = readl(regs + FSM) & 0x1; | |
1640 | if (val) | |
1641 | pl330->dmac_tbd.reset_mngr = true; | |
1642 | else | |
1643 | pl330->dmac_tbd.reset_mngr = false; | |
1644 | ||
1645 | val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1); | |
1646 | pl330->dmac_tbd.reset_chan |= val; | |
1647 | if (val) { | |
1648 | int i = 0; | |
1649 | while (i < pl330->pcfg.num_chan) { | |
1650 | if (val & (1 << i)) { | |
1651 | dev_info(pl330->ddma.dev, | |
1652 | "Reset Channel-%d\t CS-%x FTC-%x\n", | |
1653 | i, readl(regs + CS(i)), | |
1654 | readl(regs + FTC(i))); | |
1655 | _stop(&pl330->channels[i]); | |
1656 | } | |
1657 | i++; | |
1658 | } | |
1659 | } | |
1660 | ||
1661 | /* Check which event happened i.e, thread notified */ | |
1662 | val = readl(regs + ES); | |
1663 | if (pl330->pcfg.num_events < 32 | |
1664 | && val & ~((1 << pl330->pcfg.num_events) - 1)) { | |
1665 | pl330->dmac_tbd.reset_dmac = true; | |
1666 | dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__, | |
1667 | __LINE__); | |
1668 | ret = 1; | |
1669 | goto updt_exit; | |
1670 | } | |
1671 | ||
1672 | for (ev = 0; ev < pl330->pcfg.num_events; ev++) { | |
1673 | if (val & (1 << ev)) { /* Event occurred */ | |
1674 | struct pl330_thread *thrd; | |
1675 | u32 inten = readl(regs + INTEN); | |
1676 | int active; | |
1677 | ||
1678 | /* Clear the event */ | |
1679 | if (inten & (1 << ev)) | |
1680 | writel(1 << ev, regs + INTCLR); | |
1681 | ||
1682 | ret = 1; | |
1683 | ||
1684 | id = pl330->events[ev]; | |
1685 | ||
1686 | thrd = &pl330->channels[id]; | |
1687 | ||
1688 | active = thrd->req_running; | |
1689 | if (active == -1) /* Aborted */ | |
1690 | continue; | |
1691 | ||
1692 | /* Detach the req */ | |
1693 | descdone = thrd->req[active].desc; | |
1694 | thrd->req[active].desc = NULL; | |
1695 | ||
1696 | thrd->req_running = -1; | |
1697 | ||
1698 | /* Get going again ASAP */ | |
1699 | _start(thrd); | |
1700 | ||
1701 | /* For now, just make a list of callbacks to be done */ | |
1702 | list_add_tail(&descdone->rqd, &pl330->req_done); | |
1703 | } | |
1704 | } | |
1705 | ||
1706 | /* Now that we are in no hurry, do the callbacks */ | |
1707 | while (!list_empty(&pl330->req_done)) { | |
1708 | descdone = list_first_entry(&pl330->req_done, | |
1709 | struct dma_pl330_desc, rqd); | |
1710 | list_del(&descdone->rqd); | |
1711 | spin_unlock_irqrestore(&pl330->lock, flags); | |
1712 | dma_pl330_rqcb(descdone, PL330_ERR_NONE); | |
1713 | spin_lock_irqsave(&pl330->lock, flags); | |
1714 | } | |
1715 | ||
1716 | updt_exit: | |
1717 | spin_unlock_irqrestore(&pl330->lock, flags); | |
1718 | ||
1719 | if (pl330->dmac_tbd.reset_dmac | |
1720 | || pl330->dmac_tbd.reset_mngr | |
1721 | || pl330->dmac_tbd.reset_chan) { | |
1722 | ret = 1; | |
1723 | tasklet_schedule(&pl330->tasks); | |
1724 | } | |
1725 | ||
1726 | return ret; | |
1727 | } | |
1728 | ||
1729 | /* Reserve an event */ | |
1730 | static inline int _alloc_event(struct pl330_thread *thrd) | |
1731 | { | |
1732 | struct pl330_dmac *pl330 = thrd->dmac; | |
1733 | int ev; | |
1734 | ||
1735 | for (ev = 0; ev < pl330->pcfg.num_events; ev++) | |
1736 | if (pl330->events[ev] == -1) { | |
1737 | pl330->events[ev] = thrd->id; | |
1738 | return ev; | |
1739 | } | |
1740 | ||
1741 | return -1; | |
1742 | } | |
1743 | ||
1744 | static bool _chan_ns(const struct pl330_dmac *pl330, int i) | |
1745 | { | |
1746 | return pl330->pcfg.irq_ns & (1 << i); | |
1747 | } | |
1748 | ||
1749 | /* Upon success, returns IdentityToken for the | |
1750 | * allocated channel, NULL otherwise. | |
1751 | */ | |
1752 | static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330) | |
1753 | { | |
1754 | struct pl330_thread *thrd = NULL; | |
1755 | int chans, i; | |
1756 | ||
1757 | if (pl330->state == DYING) | |
1758 | return NULL; | |
1759 | ||
1760 | chans = pl330->pcfg.num_chan; | |
1761 | ||
1762 | for (i = 0; i < chans; i++) { | |
1763 | thrd = &pl330->channels[i]; | |
1764 | if ((thrd->free) && (!_manager_ns(thrd) || | |
1765 | _chan_ns(pl330, i))) { | |
1766 | thrd->ev = _alloc_event(thrd); | |
1767 | if (thrd->ev >= 0) { | |
1768 | thrd->free = false; | |
1769 | thrd->lstenq = 1; | |
1770 | thrd->req[0].desc = NULL; | |
1771 | thrd->req[1].desc = NULL; | |
1772 | thrd->req_running = -1; | |
1773 | break; | |
1774 | } | |
1775 | } | |
1776 | thrd = NULL; | |
1777 | } | |
1778 | ||
1779 | return thrd; | |
1780 | } | |
1781 | ||
1782 | /* Release an event */ | |
1783 | static inline void _free_event(struct pl330_thread *thrd, int ev) | |
1784 | { | |
1785 | struct pl330_dmac *pl330 = thrd->dmac; | |
1786 | ||
1787 | /* If the event is valid and was held by the thread */ | |
1788 | if (ev >= 0 && ev < pl330->pcfg.num_events | |
1789 | && pl330->events[ev] == thrd->id) | |
1790 | pl330->events[ev] = -1; | |
1791 | } | |
1792 | ||
1793 | static void pl330_release_channel(struct pl330_thread *thrd) | |
1794 | { | |
1795 | if (!thrd || thrd->free) | |
1796 | return; | |
1797 | ||
1798 | _stop(thrd); | |
1799 | ||
1800 | dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); | |
1801 | dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); | |
1802 | ||
1803 | _free_event(thrd, thrd->ev); | |
1804 | thrd->free = true; | |
1805 | } | |
1806 | ||
1807 | /* Initialize the structure for PL330 configuration, that can be used | |
1808 | * by the client driver the make best use of the DMAC | |
1809 | */ | |
1810 | static void read_dmac_config(struct pl330_dmac *pl330) | |
1811 | { | |
1812 | void __iomem *regs = pl330->base; | |
1813 | u32 val; | |
1814 | ||
1815 | val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; | |
1816 | val &= CRD_DATA_WIDTH_MASK; | |
1817 | pl330->pcfg.data_bus_width = 8 * (1 << val); | |
1818 | ||
1819 | val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT; | |
1820 | val &= CRD_DATA_BUFF_MASK; | |
1821 | pl330->pcfg.data_buf_dep = val + 1; | |
1822 | ||
1823 | val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; | |
1824 | val &= CR0_NUM_CHANS_MASK; | |
1825 | val += 1; | |
1826 | pl330->pcfg.num_chan = val; | |
1827 | ||
1828 | val = readl(regs + CR0); | |
1829 | if (val & CR0_PERIPH_REQ_SET) { | |
1830 | val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; | |
1831 | val += 1; | |
1832 | pl330->pcfg.num_peri = val; | |
1833 | pl330->pcfg.peri_ns = readl(regs + CR4); | |
1834 | } else { | |
1835 | pl330->pcfg.num_peri = 0; | |
1836 | } | |
1837 | ||
1838 | val = readl(regs + CR0); | |
1839 | if (val & CR0_BOOT_MAN_NS) | |
1840 | pl330->pcfg.mode |= DMAC_MODE_NS; | |
1841 | else | |
1842 | pl330->pcfg.mode &= ~DMAC_MODE_NS; | |
1843 | ||
1844 | val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; | |
1845 | val &= CR0_NUM_EVENTS_MASK; | |
1846 | val += 1; | |
1847 | pl330->pcfg.num_events = val; | |
1848 | ||
1849 | pl330->pcfg.irq_ns = readl(regs + CR3); | |
1850 | } | |
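/*
 * A worked example of the decoding above (the field values are
 * illustrative, not from any particular SoC): if the CRD data-width
 * field reads 0x2, data_bus_width becomes 8 * (1 << 2) = 32 bits; if
 * the CR0 channel-count field reads 0x7, num_chan becomes 7 + 1 = 8.
 * The hardware stores every "number of X" field as (count - 1),
 * hence the "val += 1" adjustments.
 */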
1851 | ||
1852 | static inline void _reset_thread(struct pl330_thread *thrd) | |
1853 | { | |
1854 | struct pl330_dmac *pl330 = thrd->dmac; | |
1855 | ||
1856 | thrd->req[0].mc_cpu = pl330->mcode_cpu | |
1857 | + (thrd->id * pl330->mcbufsz); | |
1858 | thrd->req[0].mc_bus = pl330->mcode_bus | |
1859 | + (thrd->id * pl330->mcbufsz); | |
1860 | thrd->req[0].desc = NULL; | |
1861 | ||
1862 | thrd->req[1].mc_cpu = thrd->req[0].mc_cpu | |
1863 | + pl330->mcbufsz / 2; | |
1864 | thrd->req[1].mc_bus = thrd->req[0].mc_bus | |
1865 | + pl330->mcbufsz / 2; | |
1866 | thrd->req[1].desc = NULL; | |
1867 | ||
1868 | thrd->req_running = -1; | |
1869 | } | |
1870 | ||
1871 | static int dmac_alloc_threads(struct pl330_dmac *pl330) | |
1872 | { | |
1873 | int chans = pl330->pcfg.num_chan; | |
1874 | struct pl330_thread *thrd; | |
1875 | int i; | |
1876 | ||
1877 | /* Allocate 1 Manager and 'chans' Channel threads */ | |
1878 | pl330->channels = kcalloc(1 + chans, sizeof(*thrd), | |
1879 | GFP_KERNEL); | |
1880 | if (!pl330->channels) | |
1881 | return -ENOMEM; | |
1882 | ||
1883 | /* Init Channel threads */ | |
1884 | for (i = 0; i < chans; i++) { | |
1885 | thrd = &pl330->channels[i]; | |
1886 | thrd->id = i; | |
1887 | thrd->dmac = pl330; | |
1888 | _reset_thread(thrd); | |
1889 | thrd->free = true; | |
1890 | } | |
1891 | ||
1892 | /* MANAGER is indexed at the end */ | |
1893 | thrd = &pl330->channels[chans]; | |
1894 | thrd->id = chans; | |
1895 | thrd->dmac = pl330; | |
1896 | thrd->free = false; | |
1897 | pl330->manager = thrd; | |
1898 | ||
1899 | return 0; | |
1900 | } | |
1901 | ||
1902 | static int dmac_alloc_resources(struct pl330_dmac *pl330) | |
1903 | { | |
1904 | int chans = pl330->pcfg.num_chan; | |
1905 | int ret; | |
1906 | ||
1907 | /* | |
1908 | * Alloc MicroCode buffer for 'chans' Channel threads. | |
1909 | * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) | |
1910 | */ | |
1911 | pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev, | |
1912 | chans * pl330->mcbufsz, | |
1913 | &pl330->mcode_bus, GFP_KERNEL, | |
1914 | DMA_ATTR_PRIVILEGED); | |
1915 | if (!pl330->mcode_cpu) { | |
1916 | dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n", | |
1917 | __func__, __LINE__); | |
1918 | return -ENOMEM; | |
1919 | } | |
1920 | ||
1921 | ret = dmac_alloc_threads(pl330); | |
1922 | if (ret) { | |
1923 | dev_err(pl330->ddma.dev, "%s:%d Can't create channels for DMAC!\n", | |
1924 | __func__, __LINE__); | |
1925 | dma_free_attrs(pl330->ddma.dev, | |
1926 | chans * pl330->mcbufsz, | |
1927 | pl330->mcode_cpu, pl330->mcode_bus, | |
1928 | DMA_ATTR_PRIVILEGED); | |
1929 | return ret; | |
1930 | } | |
1931 | ||
1932 | return 0; | |
1933 | } | |
1934 | ||
1935 | static int pl330_add(struct pl330_dmac *pl330) | |
1936 | { | |
1937 | int i, ret; | |
1938 | ||
1939 | /* Check if we can handle this DMAC */ | |
1940 | if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) { | |
1941 | dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n", | |
1942 | pl330->pcfg.periph_id); | |
1943 | return -EINVAL; | |
1944 | } | |
1945 | ||
1946 | /* Read the configuration of the DMAC */ | |
1947 | read_dmac_config(pl330); | |
1948 | ||
1949 | if (pl330->pcfg.num_events == 0) { | |
1950 | dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n", | |
1951 | __func__, __LINE__); | |
1952 | return -EINVAL; | |
1953 | } | |
1954 | ||
1955 | spin_lock_init(&pl330->lock); | |
1956 | ||
1957 | INIT_LIST_HEAD(&pl330->req_done); | |
1958 | ||
1959 | /* Use default MC buffer size if not provided */ | |
1960 | if (!pl330->mcbufsz) | |
1961 | pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2; | |
1962 | ||
1963 | /* Mark all events as free */ | |
1964 | for (i = 0; i < pl330->pcfg.num_events; i++) | |
1965 | pl330->events[i] = -1; | |
1966 | ||
1967 | /* Allocate resources needed by the DMAC */ | |
1968 | ret = dmac_alloc_resources(pl330); | |
1969 | if (ret) { | |
1970 | dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n"); | |
1971 | return ret; | |
1972 | } | |
1973 | ||
1974 | tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330); | |
1975 | ||
1976 | pl330->state = INIT; | |
1977 | ||
1978 | return 0; | |
1979 | } | |
1980 | ||
1981 | static int dmac_free_threads(struct pl330_dmac *pl330) | |
1982 | { | |
1983 | struct pl330_thread *thrd; | |
1984 | int i; | |
1985 | ||
1986 | /* Release Channel threads */ | |
1987 | for (i = 0; i < pl330->pcfg.num_chan; i++) { | |
1988 | thrd = &pl330->channels[i]; | |
1989 | pl330_release_channel(thrd); | |
1990 | } | |
1991 | ||
1992 | /* Free memory */ | |
1993 | kfree(pl330->channels); | |
1994 | ||
1995 | return 0; | |
1996 | } | |
1997 | ||
1998 | static void pl330_del(struct pl330_dmac *pl330) | |
1999 | { | |
2000 | pl330->state = UNINIT; | |
2001 | ||
2002 | tasklet_kill(&pl330->tasks); | |
2003 | ||
2004 | /* Free DMAC resources */ | |
2005 | dmac_free_threads(pl330); | |
2006 | ||
2007 | dma_free_attrs(pl330->ddma.dev, | |
2008 | pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu, | |
2009 | pl330->mcode_bus, DMA_ATTR_PRIVILEGED); | |
2010 | } | |
2011 | ||
2012 | /* forward declaration */ | |
2013 | static struct amba_driver pl330_driver; | |
2014 | ||
2015 | static inline struct dma_pl330_chan * | |
2016 | to_pchan(struct dma_chan *ch) | |
2017 | { | |
2018 | if (!ch) | |
2019 | return NULL; | |
2020 | ||
2021 | return container_of(ch, struct dma_pl330_chan, chan); | |
2022 | } | |
2023 | ||
2024 | static inline struct dma_pl330_desc * | |
2025 | to_desc(struct dma_async_tx_descriptor *tx) | |
2026 | { | |
2027 | return container_of(tx, struct dma_pl330_desc, txd); | |
2028 | } | |
2029 | ||
2030 | static inline void fill_queue(struct dma_pl330_chan *pch) | |
2031 | { | |
2032 | struct dma_pl330_desc *desc; | |
2033 | int ret; | |
2034 | ||
2035 | list_for_each_entry(desc, &pch->work_list, node) { | |
2036 | ||
2037 | /* If already submitted */ | |
2038 | if (desc->status == BUSY) | |
2039 | continue; | |
2040 | ||
2041 | ret = pl330_submit_req(pch->thread, desc); | |
2042 | if (!ret) { | |
2043 | desc->status = BUSY; | |
2044 | } else if (ret == -EAGAIN) { | |
2045 | /* QFull or DMAC Dying */ | |
2046 | break; | |
2047 | } else { | |
2048 | /* Unacceptable request */ | |
2049 | desc->status = DONE; | |
2050 | dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n", | |
2051 | __func__, __LINE__, desc->txd.cookie); | |
2052 | tasklet_schedule(&pch->task); | |
2053 | } | |
2054 | } | |
2055 | } | |
2056 | ||
2057 | static void pl330_tasklet(unsigned long data) | |
2058 | { | |
2059 | struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; | |
2060 | struct dma_pl330_desc *desc, *_dt; | |
2061 | unsigned long flags; | |
2062 | bool power_down = false; | |
2063 | ||
2064 | spin_lock_irqsave(&pch->lock, flags); | |
2065 | ||
2066 | /* Pick up descriptors that have completed ("ripe tomatoes") */ | |
2067 | list_for_each_entry_safe(desc, _dt, &pch->work_list, node) | |
2068 | if (desc->status == DONE) { | |
2069 | if (!pch->cyclic) | |
2070 | dma_cookie_complete(&desc->txd); | |
2071 | list_move_tail(&desc->node, &pch->completed_list); | |
2072 | } | |
2073 | ||
2074 | /* Try to submit a req immediately after the last completed cookie */ | |
2075 | fill_queue(pch); | |
2076 | ||
2077 | if (list_empty(&pch->work_list)) { | |
2078 | spin_lock(&pch->thread->dmac->lock); | |
2079 | _stop(pch->thread); | |
2080 | spin_unlock(&pch->thread->dmac->lock); | |
2081 | power_down = true; | |
2082 | pch->active = false; | |
2083 | } else { | |
2084 | /* Make sure the PL330 Channel thread is active */ | |
2085 | spin_lock(&pch->thread->dmac->lock); | |
2086 | _start(pch->thread); | |
2087 | spin_unlock(&pch->thread->dmac->lock); | |
2088 | } | |
2089 | ||
2090 | while (!list_empty(&pch->completed_list)) { | |
2091 | struct dmaengine_desc_callback cb; | |
2092 | ||
2093 | desc = list_first_entry(&pch->completed_list, | |
2094 | struct dma_pl330_desc, node); | |
2095 | ||
2096 | dmaengine_desc_get_callback(&desc->txd, &cb); | |
2097 | ||
2098 | if (pch->cyclic) { | |
2099 | desc->status = PREP; | |
2100 | list_move_tail(&desc->node, &pch->work_list); | |
2101 | if (power_down) { | |
2102 | pch->active = true; | |
2103 | spin_lock(&pch->thread->dmac->lock); | |
2104 | _start(pch->thread); | |
2105 | spin_unlock(&pch->thread->dmac->lock); | |
2106 | power_down = false; | |
2107 | } | |
2108 | } else { | |
2109 | desc->status = FREE; | |
2110 | list_move_tail(&desc->node, &pch->dmac->desc_pool); | |
2111 | } | |
2112 | ||
2113 | dma_descriptor_unmap(&desc->txd); | |
2114 | ||
2115 | if (dmaengine_desc_callback_valid(&cb)) { | |
2116 | spin_unlock_irqrestore(&pch->lock, flags); | |
2117 | dmaengine_desc_callback_invoke(&cb, NULL); | |
2118 | spin_lock_irqsave(&pch->lock, flags); | |
2119 | } | |
2120 | } | |
2121 | spin_unlock_irqrestore(&pch->lock, flags); | |
2122 | ||
2123 | /* If work list empty, power down */ | |
2124 | if (power_down) { | |
2125 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); | |
2126 | pm_runtime_put_autosuspend(pch->dmac->ddma.dev); | |
2127 | } | |
2128 | } | |
2129 | ||
2130 | static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec, | |
2131 | struct of_dma *ofdma) | |
2132 | { | |
2133 | int count = dma_spec->args_count; | |
2134 | struct pl330_dmac *pl330 = ofdma->of_dma_data; | |
2135 | unsigned int chan_id; | |
2136 | ||
2137 | if (!pl330) | |
2138 | return NULL; | |
2139 | ||
2140 | if (count != 1) | |
2141 | return NULL; | |
2142 | ||
2143 | chan_id = dma_spec->args[0]; | |
2144 | if (chan_id >= pl330->num_peripherals) | |
2145 | return NULL; | |
2146 | ||
2147 | return dma_get_slave_channel(&pl330->peripherals[chan_id].chan); | |
2148 | } | |
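/*
 * Hypothetical device-tree sketch for the xlate above: the controller
 * must use exactly one DMA specifier cell, which selects the
 * peripheral request interface. Node names, addresses and the request
 * number here are placeholders:
 *
 *	pdma0: dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		reg = <0x12680000 0x1000>;
 *		#dma-cells = <1>;
 *	};
 *
 *	client@0 {
 *		dmas = <&pdma0 5>;
 *		dma-names = "tx";
 *	};
 */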
2149 | ||
2150 | static int pl330_alloc_chan_resources(struct dma_chan *chan) | |
2151 | { | |
2152 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2153 | struct pl330_dmac *pl330 = pch->dmac; | |
2154 | unsigned long flags; | |
2155 | ||
2156 | spin_lock_irqsave(&pl330->lock, flags); | |
2157 | ||
2158 | dma_cookie_init(chan); | |
2159 | pch->cyclic = false; | |
2160 | ||
2161 | pch->thread = pl330_request_channel(pl330); | |
2162 | if (!pch->thread) { | |
2163 | spin_unlock_irqrestore(&pl330->lock, flags); | |
2164 | return -ENOMEM; | |
2165 | } | |
2166 | ||
2167 | tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch); | |
2168 | ||
2169 | spin_unlock_irqrestore(&pl330->lock, flags); | |
2170 | ||
2171 | return 1; | |
2172 | } | |
2173 | ||
2174 | /* | |
2175 | * We need the data direction between the DMAC (the dma-mapping "device") and | |
2176 | * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing! | |
2177 | */ | |
2178 | static enum dma_data_direction | |
2179 | pl330_dma_slave_map_dir(enum dma_transfer_direction dir) | |
2180 | { | |
2181 | switch (dir) { | |
2182 | case DMA_MEM_TO_DEV: | |
2183 | return DMA_FROM_DEVICE; | |
2184 | case DMA_DEV_TO_MEM: | |
2185 | return DMA_TO_DEVICE; | |
2186 | case DMA_DEV_TO_DEV: | |
2187 | return DMA_BIDIRECTIONAL; | |
2188 | default: | |
2189 | return DMA_NONE; | |
2190 | } | |
2191 | } | |
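/*
 * For example, DMA_MEM_TO_DEV means the DMAC writes into the FIFO, so
 * from the FIFO's point of view the data arrives from the mapping
 * "device" (the DMAC), and the resource is therefore mapped
 * DMA_FROM_DEVICE.
 */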
2192 | ||
2193 | static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch) | |
2194 | { | |
2195 | if (pch->dir != DMA_NONE) | |
2196 | dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma, | |
2197 | 1 << pch->burst_sz, pch->dir, 0); | |
2198 | pch->dir = DMA_NONE; | |
2199 | } | |
2200 | ||
2202 | static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch, | |
2203 | enum dma_transfer_direction dir) | |
2204 | { | |
2205 | struct device *dev = pch->chan.device->dev; | |
2206 | enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir); | |
2207 | ||
2208 | /* Already mapped for this config? */ | |
2209 | if (pch->dir == dma_dir) | |
2210 | return true; | |
2211 | ||
2212 | pl330_unprep_slave_fifo(pch); | |
2213 | pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr, | |
2214 | 1 << pch->burst_sz, dma_dir, 0); | |
2215 | if (dma_mapping_error(dev, pch->fifo_dma)) | |
2216 | return false; | |
2217 | ||
2218 | pch->dir = dma_dir; | |
2219 | return true; | |
2220 | } | |
2221 | ||
2222 | static int fixup_burst_len(int max_burst_len, int quirks) | |
2223 | { | |
2224 | if (quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) | |
2225 | return 1; | |
2226 | else if (max_burst_len > PL330_MAX_BURST) | |
2227 | return PL330_MAX_BURST; | |
2228 | else if (max_burst_len < 1) | |
2229 | return 1; | |
2230 | else | |
2231 | return max_burst_len; | |
2232 | } | |
2233 | ||
2234 | static int pl330_config_write(struct dma_chan *chan, | |
2235 | struct dma_slave_config *slave_config, | |
2236 | enum dma_transfer_direction direction) | |
2237 | { | |
2238 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2239 | ||
2240 | pl330_unprep_slave_fifo(pch); | |
2241 | if (direction == DMA_MEM_TO_DEV) { | |
2242 | if (slave_config->dst_addr) | |
2243 | pch->fifo_addr = slave_config->dst_addr; | |
2244 | if (slave_config->dst_addr_width) | |
2245 | pch->burst_sz = __ffs(slave_config->dst_addr_width); | |
2246 | pch->burst_len = fixup_burst_len(slave_config->dst_maxburst, | |
2247 | pch->dmac->quirks); | |
2248 | } else if (direction == DMA_DEV_TO_MEM) { | |
2249 | if (slave_config->src_addr) | |
2250 | pch->fifo_addr = slave_config->src_addr; | |
2251 | if (slave_config->src_addr_width) | |
2252 | pch->burst_sz = __ffs(slave_config->src_addr_width); | |
2253 | pch->burst_len = fixup_burst_len(slave_config->src_maxburst, | |
2254 | pch->dmac->quirks); | |
2255 | } | |
2256 | ||
2257 | return 0; | |
2258 | } | |
2259 | ||
2260 | static int pl330_config(struct dma_chan *chan, | |
2261 | struct dma_slave_config *slave_config) | |
2262 | { | |
2263 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2264 | ||
2265 | memcpy(&pch->slave_config, slave_config, sizeof(*slave_config)); | |
2266 | ||
2267 | return 0; | |
2268 | } | |
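/*
 * Client-side sketch of the config path above, via the generic
 * dmaengine helper (the FIFO address and widths are hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,		// peripheral FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * pl330_config() only latches the parameters; they are applied by
 * pl330_config_write() when a transfer is actually prepared for a
 * given direction.
 */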
2269 | ||
2270 | static int pl330_terminate_all(struct dma_chan *chan) | |
2271 | { | |
2272 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2273 | struct dma_pl330_desc *desc; | |
2274 | unsigned long flags; | |
2275 | struct pl330_dmac *pl330 = pch->dmac; | |
2276 | bool power_down = false; | |
2277 | ||
2278 | pm_runtime_get_sync(pl330->ddma.dev); | |
2279 | spin_lock_irqsave(&pch->lock, flags); | |
2280 | ||
2281 | spin_lock(&pl330->lock); | |
2282 | _stop(pch->thread); | |
2283 | pch->thread->req[0].desc = NULL; | |
2284 | pch->thread->req[1].desc = NULL; | |
2285 | pch->thread->req_running = -1; | |
2286 | spin_unlock(&pl330->lock); | |
2287 | ||
2288 | power_down = pch->active; | |
2289 | pch->active = false; | |
2290 | ||
2291 | /* Mark all desc done */ | |
2292 | list_for_each_entry(desc, &pch->submitted_list, node) { | |
2293 | desc->status = FREE; | |
2294 | dma_cookie_complete(&desc->txd); | |
2295 | } | |
2296 | ||
2297 | list_for_each_entry(desc, &pch->work_list , node) { | |
2298 | desc->status = FREE; | |
2299 | dma_cookie_complete(&desc->txd); | |
2300 | } | |
2301 | ||
2302 | list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); | |
2303 | list_splice_tail_init(&pch->work_list, &pl330->desc_pool); | |
2304 | list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); | |
2305 | spin_unlock_irqrestore(&pch->lock, flags); | |
2306 | pm_runtime_mark_last_busy(pl330->ddma.dev); | |
2307 | if (power_down) | |
2308 | pm_runtime_put_autosuspend(pl330->ddma.dev); | |
2309 | pm_runtime_put_autosuspend(pl330->ddma.dev); | |
2310 | ||
2311 | return 0; | |
2312 | } | |
2313 | ||
2314 | /* | |
2315 | * We don't support the DMA_RESUME command because of hardware | |
2316 | * limitations, so after pausing the channel we cannot restore | |
2317 | * it to the active state. We have to terminate the channel and | |
2318 | * set up the DMA transfer again. This pause feature was | |
2319 | * implemented to allow safely reading the residue before termination. | |
2320 | */ | |
2321 | static int pl330_pause(struct dma_chan *chan) | |
2322 | { | |
2323 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2324 | struct pl330_dmac *pl330 = pch->dmac; | |
2325 | unsigned long flags; | |
2326 | ||
2327 | pm_runtime_get_sync(pl330->ddma.dev); | |
2328 | spin_lock_irqsave(&pch->lock, flags); | |
2329 | ||
2330 | spin_lock(&pl330->lock); | |
2331 | _stop(pch->thread); | |
2332 | spin_unlock(&pl330->lock); | |
2333 | ||
2334 | spin_unlock_irqrestore(&pch->lock, flags); | |
2335 | pm_runtime_mark_last_busy(pl330->ddma.dev); | |
2336 | pm_runtime_put_autosuspend(pl330->ddma.dev); | |
2337 | ||
2338 | return 0; | |
2339 | } | |
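/*
 * Since resume is unsupported, a client that wants a stable residue
 * reading would follow a sequence like this sketch (generic dmaengine
 * helpers; error handling omitted):
 *
 *	struct dma_tx_state state;
 *
 *	dmaengine_pause(chan);
 *	dmaengine_tx_status(chan, cookie, &state);	// state.residue is now stable
 *	dmaengine_terminate_sync(chan);
 *	// re-prepare and re-submit to "resume" the transfer
 */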
2340 | ||
2341 | static void pl330_free_chan_resources(struct dma_chan *chan) | |
2342 | { | |
2343 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2344 | struct pl330_dmac *pl330 = pch->dmac; | |
2345 | unsigned long flags; | |
2346 | ||
2347 | tasklet_kill(&pch->task); | |
2348 | ||
2349 | pm_runtime_get_sync(pch->dmac->ddma.dev); | |
2350 | spin_lock_irqsave(&pl330->lock, flags); | |
2351 | ||
2352 | pl330_release_channel(pch->thread); | |
2353 | pch->thread = NULL; | |
2354 | ||
2355 | if (pch->cyclic) | |
2356 | list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); | |
2357 | ||
2358 | spin_unlock_irqrestore(&pl330->lock, flags); | |
2359 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); | |
2360 | pm_runtime_put_autosuspend(pch->dmac->ddma.dev); | |
2361 | pl330_unprep_slave_fifo(pch); | |
2362 | } | |
2363 | ||
2364 | static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch, | |
2365 | struct dma_pl330_desc *desc) | |
2366 | { | |
2367 | struct pl330_thread *thrd = pch->thread; | |
2368 | struct pl330_dmac *pl330 = pch->dmac; | |
2369 | void __iomem *regs = thrd->dmac->base; | |
2370 | u32 val, addr; | |
2371 | ||
2372 | pm_runtime_get_sync(pl330->ddma.dev); | |
2373 | val = addr = 0; | |
2374 | if (desc->rqcfg.src_inc) { | |
2375 | val = readl(regs + SA(thrd->id)); | |
2376 | addr = desc->px.src_addr; | |
2377 | } else { | |
2378 | val = readl(regs + DA(thrd->id)); | |
2379 | addr = desc->px.dst_addr; | |
2380 | } | |
2381 | pm_runtime_mark_last_busy(pch->dmac->ddma.dev); | |
2382 | pm_runtime_put_autosuspend(pl330->ddma.dev); | |
2383 | ||
2384 | /* If DMAMOV hasn't finished yet, SAR/DAR can be zero */ | |
2385 | if (!val) | |
2386 | return 0; | |
2387 | ||
2388 | return val - addr; | |
2389 | } | |
2390 | ||
2391 | static enum dma_status | |
2392 | pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, | |
2393 | struct dma_tx_state *txstate) | |
2394 | { | |
2395 | enum dma_status ret; | |
2396 | unsigned long flags; | |
2397 | struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL; | |
2398 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2399 | unsigned int transferred, residual = 0; | |
2400 | ||
2401 | ret = dma_cookie_status(chan, cookie, txstate); | |
2402 | ||
2403 | if (!txstate) | |
2404 | return ret; | |
2405 | ||
2406 | if (ret == DMA_COMPLETE) | |
2407 | goto out; | |
2408 | ||
2409 | spin_lock_irqsave(&pch->lock, flags); | |
2410 | spin_lock(&pch->thread->dmac->lock); | |
2411 | ||
2412 | if (pch->thread->req_running != -1) | |
2413 | running = pch->thread->req[pch->thread->req_running].desc; | |
2414 | ||
2415 | last_enq = pch->thread->req[pch->thread->lstenq].desc; | |
2416 | ||
2417 | /* Check in pending list */ | |
2418 | list_for_each_entry(desc, &pch->work_list, node) { | |
2419 | if (desc->status == DONE) | |
2420 | transferred = desc->bytes_requested; | |
2421 | else if (running && desc == running) | |
2422 | transferred = | |
2423 | pl330_get_current_xferred_count(pch, desc); | |
2424 | else if (desc->status == BUSY) { | |
2425 | /* | |
2426 | * Busy but not running means either just enqueued, | |
2427 | * or finished and not yet marked done. | |
2428 | */ | |
2429 | if (desc == last_enq) | |
2430 | transferred = 0; | |
2431 | else | |
2432 | transferred = desc->bytes_requested; | |
2433 | } else | |
2434 | transferred = 0; | |
2435 | residual += desc->bytes_requested - transferred; | |
2436 | if (desc->txd.cookie == cookie) { | |
2437 | switch (desc->status) { | |
2438 | case DONE: | |
2439 | ret = DMA_COMPLETE; | |
2440 | break; | |
2441 | case PREP: | |
2442 | case BUSY: | |
2443 | ret = DMA_IN_PROGRESS; | |
2444 | break; | |
2445 | default: | |
2446 | WARN_ON(1); | |
2447 | } | |
2448 | break; | |
2449 | } | |
2450 | if (desc->last) | |
2451 | residual = 0; | |
2452 | } | |
2453 | spin_unlock(&pch->thread->dmac->lock); | |
2454 | spin_unlock_irqrestore(&pch->lock, flags); | |
2455 | ||
2456 | out: | |
2457 | dma_set_residue(txstate, residual); | |
2458 | ||
2459 | return ret; | |
2460 | } | |
2461 | ||
2462 | static void pl330_issue_pending(struct dma_chan *chan) | |
2463 | { | |
2464 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2465 | unsigned long flags; | |
2466 | ||
2467 | spin_lock_irqsave(&pch->lock, flags); | |
2468 | if (list_empty(&pch->work_list)) { | |
2469 | /* | |
2470 | * Warn if nothing is pending: an empty submitted_list may | |
2471 | * break our pm_runtime usage counter, which is updated | |
2472 | * based on whether the work_list is empty. | |
2473 | */ | |
2474 | WARN_ON(list_empty(&pch->submitted_list)); | |
2475 | pch->active = true; | |
2476 | pm_runtime_get_sync(pch->dmac->ddma.dev); | |
2477 | } | |
2478 | list_splice_tail_init(&pch->submitted_list, &pch->work_list); | |
2479 | spin_unlock_irqrestore(&pch->lock, flags); | |
2480 | ||
2481 | pl330_tasklet((unsigned long)pch); | |
2482 | } | |
2483 | ||
2484 | /* | |
2485 | * prep_xxx returned the last descriptor of the circular list, so | |
2486 | * the argument passed to submit corresponds to the last descriptor | |
2487 | * of that list. | |
2488 | */ | |
2489 | static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx) | |
2490 | { | |
2491 | struct dma_pl330_desc *desc, *last = to_desc(tx); | |
2492 | struct dma_pl330_chan *pch = to_pchan(tx->chan); | |
2493 | dma_cookie_t cookie; | |
2494 | unsigned long flags; | |
2495 | ||
2496 | spin_lock_irqsave(&pch->lock, flags); | |
2497 | ||
2498 | /* Assign cookies to all nodes */ | |
2499 | while (!list_empty(&last->node)) { | |
2500 | desc = list_entry(last->node.next, struct dma_pl330_desc, node); | |
2501 | if (pch->cyclic) { | |
2502 | desc->txd.callback = last->txd.callback; | |
2503 | desc->txd.callback_param = last->txd.callback_param; | |
2504 | } | |
2505 | desc->last = false; | |
2506 | ||
2507 | dma_cookie_assign(&desc->txd); | |
2508 | ||
2509 | list_move_tail(&desc->node, &pch->submitted_list); | |
2510 | } | |
2511 | ||
2512 | last->last = true; | |
2513 | cookie = dma_cookie_assign(&last->txd); | |
2514 | list_add_tail(&last->node, &pch->submitted_list); | |
2515 | spin_unlock_irqrestore(&pch->lock, flags); | |
2516 | ||
2517 | return cookie; | |
2518 | } | |
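/*
 * For reference, this submit hook is normally reached through the
 * generic helpers rather than called directly (sketch):
 *
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
 *	cookie = dmaengine_submit(desc);	// lands in pl330_tx_submit()
 *	dma_async_issue_pending(chan);		// splices onto the work_list
 */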
2519 | ||
2520 | static inline void _init_desc(struct dma_pl330_desc *desc) | |
2521 | { | |
2522 | desc->rqcfg.swap = SWAP_NO; | |
2523 | desc->rqcfg.scctl = CCTRL0; | |
2524 | desc->rqcfg.dcctl = CCTRL0; | |
2525 | desc->txd.tx_submit = pl330_tx_submit; | |
2526 | ||
2527 | INIT_LIST_HEAD(&desc->node); | |
2528 | } | |
2529 | ||
2530 | /* Returns the number of descriptors added to the DMAC pool */ | |
2531 | static int add_desc(struct list_head *pool, spinlock_t *lock, | |
2532 | gfp_t flg, int count) | |
2533 | { | |
2534 | struct dma_pl330_desc *desc; | |
2535 | unsigned long flags; | |
2536 | int i; | |
2537 | ||
2538 | desc = kcalloc(count, sizeof(*desc), flg); | |
2539 | if (!desc) | |
2540 | return 0; | |
2541 | ||
2542 | spin_lock_irqsave(lock, flags); | |
2543 | ||
2544 | for (i = 0; i < count; i++) { | |
2545 | _init_desc(&desc[i]); | |
2546 | list_add_tail(&desc[i].node, pool); | |
2547 | } | |
2548 | ||
2549 | spin_unlock_irqrestore(lock, flags); | |
2550 | ||
2551 | return count; | |
2552 | } | |
2553 | ||
2554 | static struct dma_pl330_desc *pluck_desc(struct list_head *pool, | |
2555 | spinlock_t *lock) | |
2556 | { | |
2557 | struct dma_pl330_desc *desc = NULL; | |
2558 | unsigned long flags; | |
2559 | ||
2560 | spin_lock_irqsave(lock, flags); | |
2561 | ||
2562 | if (!list_empty(pool)) { | |
2563 | desc = list_entry(pool->next, | |
2564 | struct dma_pl330_desc, node); | |
2565 | ||
2566 | list_del_init(&desc->node); | |
2567 | ||
2568 | desc->status = PREP; | |
2569 | desc->txd.callback = NULL; | |
2570 | } | |
2571 | ||
2572 | spin_unlock_irqrestore(lock, flags); | |
2573 | ||
2574 | return desc; | |
2575 | } | |
2576 | ||
2577 | static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch) | |
2578 | { | |
2579 | struct pl330_dmac *pl330 = pch->dmac; | |
2580 | u8 *peri_id = pch->chan.private; | |
2581 | struct dma_pl330_desc *desc; | |
2582 | ||
2583 | /* Pluck one desc from the pool of DMAC */ | |
2584 | desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock); | |
2585 | ||
2586 | /* If the DMAC pool is empty, alloc new */ | |
2587 | if (!desc) { | |
2588 | DEFINE_SPINLOCK(lock); | |
2589 | LIST_HEAD(pool); | |
2590 | ||
2591 | if (!add_desc(&pool, &lock, GFP_ATOMIC, 1)) | |
2592 | return NULL; | |
2593 | ||
2594 | desc = pluck_desc(&pool, &lock); | |
2595 | WARN_ON(!desc || !list_empty(&pool)); | |
2596 | } | |
2597 | ||
2598 | /* Initialize the descriptor */ | |
2599 | desc->pchan = pch; | |
2600 | desc->txd.cookie = 0; | |
2601 | async_tx_ack(&desc->txd); | |
2602 | ||
2603 | desc->peri = peri_id ? pch->chan.chan_id : 0; | |
2604 | desc->rqcfg.pcfg = &pch->dmac->pcfg; | |
2605 | ||
2606 | dma_async_tx_descriptor_init(&desc->txd, &pch->chan); | |
2607 | ||
2608 | return desc; | |
2609 | } | |
2610 | ||
2611 | static inline void fill_px(struct pl330_xfer *px, | |
2612 | dma_addr_t dst, dma_addr_t src, size_t len) | |
2613 | { | |
2614 | px->bytes = len; | |
2615 | px->dst_addr = dst; | |
2616 | px->src_addr = src; | |
2617 | } | |
2618 | ||
2619 | static struct dma_pl330_desc * | |
2620 | __pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst, | |
2621 | dma_addr_t src, size_t len) | |
2622 | { | |
2623 | struct dma_pl330_desc *desc = pl330_get_desc(pch); | |
2624 | ||
2625 | if (!desc) { | |
2626 | dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", | |
2627 | __func__, __LINE__); | |
2628 | return NULL; | |
2629 | } | |
2630 | ||
2631 | /* | |
2632 | * Ideally we should look out for reqs bigger than | |
2633 | * those that can be programmed with 256 bytes of | |
2634 | * MC buffer, but considering a req size is seldom | |
2635 | * going to be word-unaligned and more than 200MB, | |
2636 | * we take it easy. | |
2637 | * Also, should the limit be reached we'd rather | |
2638 | * have the platform increase the MC buffer size | |
2639 | * than complicate this driver. | |
2640 | */ | |
2641 | fill_px(&desc->px, dst, src, len); | |
2642 | ||
2643 | return desc; | |
2644 | } | |
2645 | ||
2646 | /* Call after fixing burst size */ | |
2647 | static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) | |
2648 | { | |
2649 | struct dma_pl330_chan *pch = desc->pchan; | |
2650 | struct pl330_dmac *pl330 = pch->dmac; | |
2651 | int burst_len; | |
2652 | ||
2653 | burst_len = pl330->pcfg.data_bus_width / 8; | |
2654 | burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan; | |
2655 | burst_len >>= desc->rqcfg.brst_size; | |
2656 | ||
2657 | /* src/dst_burst_len can't be more than 16 */ | |
2658 | if (burst_len > PL330_MAX_BURST) | |
2659 | burst_len = PL330_MAX_BURST; | |
2660 | ||
2661 | return burst_len; | |
2662 | } | |
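/*
 * Worked example with illustrative hardware values: a 64-bit bus
 * (8 bytes), an MFIFO depth of 16 and 8 channels gives
 * 8 * (16 / 8) = 16 bytes per channel; with brst_size = 1 (16-bit
 * beats) that is 16 >> 1 = 8 beats, safely below the cap of 16.
 */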
2663 | ||
2664 | static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic( | |
2665 | struct dma_chan *chan, dma_addr_t dma_addr, size_t len, | |
2666 | size_t period_len, enum dma_transfer_direction direction, | |
2667 | unsigned long flags) | |
2668 | { | |
2669 | struct dma_pl330_desc *desc = NULL, *first = NULL; | |
2670 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2671 | struct pl330_dmac *pl330 = pch->dmac; | |
2672 | unsigned int i; | |
2673 | dma_addr_t dst; | |
2674 | dma_addr_t src; | |
2675 | ||
2676 | if (len % period_len != 0) | |
2677 | return NULL; | |
2678 | ||
2679 | if (!is_slave_direction(direction)) { | |
2680 | dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n", | |
2681 | __func__, __LINE__); | |
2682 | return NULL; | |
2683 | } | |
2684 | ||
2685 | pl330_config_write(chan, &pch->slave_config, direction); | |
2686 | ||
2687 | if (!pl330_prep_slave_fifo(pch, direction)) | |
2688 | return NULL; | |
2689 | ||
2690 | for (i = 0; i < len / period_len; i++) { | |
2691 | desc = pl330_get_desc(pch); | |
2692 | if (!desc) { | |
2693 | dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", | |
2694 | __func__, __LINE__); | |
2695 | ||
2696 | if (!first) | |
2697 | return NULL; | |
2698 | ||
2699 | spin_lock_irqsave(&pl330->pool_lock, flags); | |
2700 | ||
2701 | while (!list_empty(&first->node)) { | |
2702 | desc = list_entry(first->node.next, | |
2703 | struct dma_pl330_desc, node); | |
2704 | list_move_tail(&desc->node, &pl330->desc_pool); | |
2705 | } | |
2706 | ||
2707 | list_move_tail(&first->node, &pl330->desc_pool); | |
2708 | ||
2709 | spin_unlock_irqrestore(&pl330->pool_lock, flags); | |
2710 | ||
2711 | return NULL; | |
2712 | } | |
2713 | ||
2714 | switch (direction) { | |
2715 | case DMA_MEM_TO_DEV: | |
2716 | desc->rqcfg.src_inc = 1; | |
2717 | desc->rqcfg.dst_inc = 0; | |
2718 | src = dma_addr; | |
2719 | dst = pch->fifo_dma; | |
2720 | break; | |
2721 | case DMA_DEV_TO_MEM: | |
2722 | desc->rqcfg.src_inc = 0; | |
2723 | desc->rqcfg.dst_inc = 1; | |
2724 | src = pch->fifo_dma; | |
2725 | dst = dma_addr; | |
2726 | break; | |
2727 | default: | |
2728 | break; | |
2729 | } | |
2730 | ||
2731 | desc->rqtype = direction; | |
2732 | desc->rqcfg.brst_size = pch->burst_sz; | |
2733 | desc->rqcfg.brst_len = pch->burst_len; | |
2734 | desc->bytes_requested = period_len; | |
2735 | fill_px(&desc->px, dst, src, period_len); | |
2736 | ||
2737 | if (!first) | |
2738 | first = desc; | |
2739 | else | |
2740 | list_add_tail(&desc->node, &first->node); | |
2741 | ||
2742 | dma_addr += period_len; | |
2743 | } | |
2744 | ||
2745 | if (!desc) | |
2746 | return NULL; | |
2747 | ||
2748 | pch->cyclic = true; | |
2749 | desc->txd.flags = flags; | |
2750 | ||
2751 | return &desc->txd; | |
2752 | } | |
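/*
 * Minimal client sketch for a cyclic (e.g. audio ring-buffer)
 * transfer through the prep routine above; the buffer layout is
 * hypothetical:
 *
 *	// 4 periods of 4 KiB each, memory to device
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_dma, 4 * SZ_4K, SZ_4K,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	desc->callback = period_elapsed;	// invoked once per period
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Note that len must be an exact multiple of period_len, or the prep
 * routine returns NULL.
 */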
2753 | ||
2754 | static struct dma_async_tx_descriptor * | |
2755 | pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, | |
2756 | dma_addr_t src, size_t len, unsigned long flags) | |
2757 | { | |
2758 | struct dma_pl330_desc *desc; | |
2759 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2760 | struct pl330_dmac *pl330; | |
2761 | int burst; | |
2762 | ||
2763 | if (unlikely(!pch || !len)) | |
2764 | return NULL; | |
2765 | ||
2766 | pl330 = pch->dmac; | |
2767 | ||
2768 | desc = __pl330_prep_dma_memcpy(pch, dst, src, len); | |
2769 | if (!desc) | |
2770 | return NULL; | |
2771 | ||
2772 | desc->rqcfg.src_inc = 1; | |
2773 | desc->rqcfg.dst_inc = 1; | |
2774 | desc->rqtype = DMA_MEM_TO_MEM; | |
2775 | ||
2776 | /* Select max possible burst size */ | |
2777 | burst = pl330->pcfg.data_bus_width / 8; | |
2778 | ||
2779 | /* | |
2780 | * Make sure we use a burst size that aligns with all the memcpy | |
2781 | * parameters because our DMA programming algorithm doesn't cope with | |
2782 | * transfers which straddle an entry in the DMA device's MFIFO. | |
2783 | */ | |
2784 | while ((src | dst | len) & (burst - 1)) | |
2785 | burst /= 2; | |
2786 | ||
2787 | desc->rqcfg.brst_size = 0; | |
2788 | while (burst != (1 << desc->rqcfg.brst_size)) | |
2789 | desc->rqcfg.brst_size++; | |
2790 | ||
2791 | desc->rqcfg.brst_len = get_burst_len(desc, len); | |
2792 | ||
2793 | /* | |
2794 | * If burst size is smaller than bus width then make sure we only | |
2795 | * transfer one at a time to avoid a burst straddling an MFIFO entry. | |
2796 | */ | |
2797 | if (burst * 8 < pl330->pcfg.data_bus_width) | |
2798 | desc->rqcfg.brst_len = 1; | |
2799 | desc->bytes_requested = len; | |
2800 | ||
2801 | desc->txd.flags = flags; | |
2802 | ||
2803 | return &desc->txd; | |
2804 | } | |
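/*
 * Sketch of a mem-to-mem copy through the prep routine above; the
 * DMA addresses are placeholders obtained from dma_map_single() or a
 * coherent allocation:
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst_dma, src_dma, len,
 *					 DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * The burst-size walk above ensures src, dst and len all share the
 * chosen alignment, so no burst straddles an MFIFO entry.
 */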
2805 | ||
2806 | static void __pl330_giveback_desc(struct pl330_dmac *pl330, | |
2807 | struct dma_pl330_desc *first) | |
2808 | { | |
2809 | unsigned long flags; | |
2810 | struct dma_pl330_desc *desc; | |
2811 | ||
2812 | if (!first) | |
2813 | return; | |
2814 | ||
2815 | spin_lock_irqsave(&pl330->pool_lock, flags); | |
2816 | ||
2817 | while (!list_empty(&first->node)) { | |
2818 | desc = list_entry(first->node.next, | |
2819 | struct dma_pl330_desc, node); | |
2820 | list_move_tail(&desc->node, &pl330->desc_pool); | |
2821 | } | |
2822 | ||
2823 | list_move_tail(&first->node, &pl330->desc_pool); | |
2824 | ||
2825 | spin_unlock_irqrestore(&pl330->pool_lock, flags); | |
2826 | } | |
2827 | ||
2828 | static struct dma_async_tx_descriptor * | |
2829 | pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, | |
2830 | unsigned int sg_len, enum dma_transfer_direction direction, | |
2831 | unsigned long flg, void *context) | |
2832 | { | |
2833 | struct dma_pl330_desc *first, *desc = NULL; | |
2834 | struct dma_pl330_chan *pch = to_pchan(chan); | |
2835 | struct scatterlist *sg; | |
2836 | int i; | |
2837 | ||
2838 | if (unlikely(!pch || !sgl || !sg_len)) | |
2839 | return NULL; | |
2840 | ||
2841 | pl330_config_write(chan, &pch->slave_config, direction); | |
2842 | ||
2843 | if (!pl330_prep_slave_fifo(pch, direction)) | |
2844 | return NULL; | |
2845 | ||
2846 | first = NULL; | |
2847 | ||
2848 | for_each_sg(sgl, sg, sg_len, i) { | |
2849 | ||
2850 | desc = pl330_get_desc(pch); | |
2851 | if (!desc) { | |
2852 | struct pl330_dmac *pl330 = pch->dmac; | |
2853 | ||
2854 | dev_err(pch->dmac->ddma.dev, | |
2855 | "%s:%d Unable to fetch desc\n", | |
2856 | __func__, __LINE__); | |
2857 | __pl330_giveback_desc(pl330, first); | |
2858 | ||
2859 | return NULL; | |
2860 | } | |
2861 | ||
2862 | if (!first) | |
2863 | first = desc; | |
2864 | else | |
2865 | list_add_tail(&desc->node, &first->node); | |
2866 | ||
2867 | if (direction == DMA_MEM_TO_DEV) { | |
2868 | desc->rqcfg.src_inc = 1; | |
2869 | desc->rqcfg.dst_inc = 0; | |
2870 | fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg), | |
2871 | sg_dma_len(sg)); | |
2872 | } else { | |
2873 | desc->rqcfg.src_inc = 0; | |
2874 | desc->rqcfg.dst_inc = 1; | |
2875 | fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma, | |
2876 | sg_dma_len(sg)); | |
2877 | } | |
2878 | ||
2879 | desc->rqcfg.brst_size = pch->burst_sz; | |
2880 | desc->rqcfg.brst_len = pch->burst_len; | |
2881 | desc->rqtype = direction; | |
2882 | desc->bytes_requested = sg_dma_len(sg); | |
2883 | } | |
2884 | ||
2885 | /* Return the last desc in the chain */ | |
2886 | desc->txd.flags = flg; | |
2887 | return &desc->txd; | |
2888 | } | |
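/*
 * Usage sketch for the scatter-gather prep above; the scatterlist
 * must be mapped before the call, and the buffers are hypothetical:
 *
 *	struct scatterlist sgl[2];
 *	int nents;
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *	nents = dma_map_sg(chan->device->dev, sgl, 2, DMA_TO_DEVICE);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 */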
2889 | ||
2890 | static irqreturn_t pl330_irq_handler(int irq, void *data) | |
2891 | { | |
2892 | if (pl330_update(data)) | |
2893 | return IRQ_HANDLED; | |
2894 | else | |
2895 | return IRQ_NONE; | |
2896 | } | |
2897 | ||
2898 | #define PL330_DMA_BUSWIDTHS \ | |
2899 | BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ | |
2900 | BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ | |
2901 | BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ | |
2902 | BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ | |
2903 | BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | |
2904 | ||
2905 | #ifdef CONFIG_DEBUG_FS | |
2906 | static int pl330_debugfs_show(struct seq_file *s, void *data) | |
2907 | { | |
2908 | struct pl330_dmac *pl330 = s->private; | |
2909 | int chans, pchs, ch, pr; | |
2910 | ||
2911 | chans = pl330->pcfg.num_chan; | |
2912 | pchs = pl330->num_peripherals; | |
2913 | ||
2914 | seq_puts(s, "PL330 physical channels:\n"); | |
2915 | seq_puts(s, "THREAD:\t\tCHANNEL:\n"); | |
2916 | seq_puts(s, "--------\t-----\n"); | |
2917 | for (ch = 0; ch < chans; ch++) { | |
2918 | struct pl330_thread *thrd = &pl330->channels[ch]; | |
2919 | int found = -1; | |
2920 | ||
2921 | for (pr = 0; pr < pchs; pr++) { | |
2922 | struct dma_pl330_chan *pch = &pl330->peripherals[pr]; | |
2923 | ||
2924 | if (!pch->thread || thrd->id != pch->thread->id) | |
2925 | continue; | |
2926 | ||
2927 | found = pr; | |
2928 | } | |
2929 | ||
2930 | seq_printf(s, "%d\t\t", thrd->id); | |
2931 | if (found == -1) | |
2932 | seq_puts(s, "--\n"); | |
2933 | else | |
2934 | seq_printf(s, "%d\n", found); | |
2935 | } | |
2936 | ||
2937 | return 0; | |
2938 | } | |
2939 | ||
2940 | DEFINE_SHOW_ATTRIBUTE(pl330_debugfs); | |
2941 | ||
2942 | static inline void init_pl330_debugfs(struct pl330_dmac *pl330) | |
2943 | { | |
2944 | debugfs_create_file(dev_name(pl330->ddma.dev), | |
2945 | S_IFREG | 0444, NULL, pl330, | |
2946 | &pl330_debugfs_fops); | |
2947 | } | |
2948 | #else | |
2949 | static inline void init_pl330_debugfs(struct pl330_dmac *pl330) | |
2950 | { | |
2951 | } | |
2952 | #endif | |
2953 | ||
2954 | /* | |
2955 | * Runtime PM callbacks are provided by the amba/bus.c driver. | |
2956 | * | |
2957 | * It is assumed here that IRQ-safe runtime PM is chosen in probe and that | |
2958 | * the amba bus driver only disables/enables the clock in runtime PM callbacks. | |
2959 | */ | |
2960 | static int __maybe_unused pl330_suspend(struct device *dev) | |
2961 | { | |
2962 | struct amba_device *pcdev = to_amba_device(dev); | |
2963 | ||
2964 | pm_runtime_force_suspend(dev); | |
2965 | amba_pclk_unprepare(pcdev); | |
2966 | ||
2967 | return 0; | |
2968 | } | |
2969 | ||
2970 | static int __maybe_unused pl330_resume(struct device *dev) | |
2971 | { | |
2972 | struct amba_device *pcdev = to_amba_device(dev); | |
2973 | int ret; | |
2974 | ||
2975 | ret = amba_pclk_prepare(pcdev); | |
2976 | if (ret) | |
2977 | return ret; | |
2978 | ||
2979 | pm_runtime_force_resume(dev); | |
2980 | ||
2981 | return ret; | |
2982 | } | |
2983 | ||
2984 | static const struct dev_pm_ops pl330_pm = { | |
2985 | SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume) | |
2986 | }; | |
2987 | ||
2988 | static int | |
2989 | pl330_probe(struct amba_device *adev, const struct amba_id *id) | |
2990 | { | |
2991 | struct pl330_config *pcfg; | |
2992 | struct pl330_dmac *pl330; | |
2993 | struct dma_pl330_chan *pch, *_p; | |
2994 | struct dma_device *pd; | |
2995 | struct resource *res; | |
2996 | int i, ret, irq; | |
2997 | int num_chan; | |
2998 | struct device_node *np = adev->dev.of_node; | |
2999 | ||
3000 | ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32)); | |
3001 | if (ret) | |
3002 | return ret; | |
3003 | ||
3004 | /* Allocate a new DMAC and its Channels */ | |
3005 | pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL); | |
3006 | if (!pl330) | |
3007 | return -ENOMEM; | |
3008 | ||
3009 | pd = &pl330->ddma; | |
3010 | pd->dev = &adev->dev; | |
3011 | ||
3012 | pl330->mcbufsz = 0; | |
3013 | ||
3014 | /* get quirk */ | |
3015 | for (i = 0; i < ARRAY_SIZE(of_quirks); i++) | |
3016 | if (of_property_read_bool(np, of_quirks[i].quirk)) | |
3017 | pl330->quirks |= of_quirks[i].id; | |
3018 | ||
3019 | res = &adev->res; | |
3020 | pl330->base = devm_ioremap_resource(&adev->dev, res); | |
3021 | if (IS_ERR(pl330->base)) | |
3022 | return PTR_ERR(pl330->base); | |
3023 | ||
3024 | amba_set_drvdata(adev, pl330); | |
3025 | ||
3026 | pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma"); | |
3027 | if (IS_ERR(pl330->rstc)) { | |
3028 | if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER) | |
3029 | dev_err(&adev->dev, "Failed to get reset!\n"); | |
3030 | return PTR_ERR(pl330->rstc); | |
3031 | } else { | |
3032 | ret = reset_control_deassert(pl330->rstc); | |
3033 | if (ret) { | |
3034 | dev_err(&adev->dev, "Couldn't deassert the device from reset!\n"); | |
3035 | return ret; | |
3036 | } | |
3037 | } | |
3038 | ||
3039 | pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp"); | |
3040 | if (IS_ERR(pl330->rstc_ocp)) { | |
3041 | if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER) | |
3042 | dev_err(&adev->dev, "Failed to get OCP reset!\n"); | |
3043 | return PTR_ERR(pl330->rstc_ocp); | |
3044 | } else { | |
3045 | ret = reset_control_deassert(pl330->rstc_ocp); | |
3046 | if (ret) { | |
3047 | dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n"); | |
3048 | return ret; | |
3049 | } | |
3050 | } | |
3051 | ||
3052 | for (i = 0; i < AMBA_NR_IRQS; i++) { | |
3053 | irq = adev->irq[i]; | |
3054 | if (irq) { | |
3055 | ret = devm_request_irq(&adev->dev, irq, | |
3056 | pl330_irq_handler, 0, | |
3057 | dev_name(&adev->dev), pl330); | |
3058 | if (ret) | |
3059 | return ret; | |
3060 | } else { | |
3061 | break; | |
3062 | } | |
3063 | } | |
3064 | ||
3065 | pcfg = &pl330->pcfg; | |
3066 | ||
3067 | pcfg->periph_id = adev->periphid; | |
3068 | ret = pl330_add(pl330); | |
3069 | if (ret) | |
3070 | return ret; | |
3071 | ||
3072 | INIT_LIST_HEAD(&pl330->desc_pool); | |
3073 | spin_lock_init(&pl330->pool_lock); | |
3074 | ||
3075 | /* Create a descriptor pool of default size */ | |
3076 | if (!add_desc(&pl330->desc_pool, &pl330->pool_lock, | |
3077 | GFP_KERNEL, NR_DEFAULT_DESC)) | |
3078 | dev_warn(&adev->dev, "unable to allocate desc\n"); | |
3079 | ||
3080 | INIT_LIST_HEAD(&pd->channels); | |
3081 | ||
3082 | /* Initialize channel parameters */ | |
3083 | num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); | |
3084 | ||
3085 | pl330->num_peripherals = num_chan; | |
3086 | ||
3087 | pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL); | |
3088 | if (!pl330->peripherals) { | |
3089 | ret = -ENOMEM; | |
3090 | goto probe_err2; | |
3091 | } | |
3092 | ||
3093 | for (i = 0; i < num_chan; i++) { | |
3094 | pch = &pl330->peripherals[i]; | |
3095 | ||
3096 | pch->chan.private = adev->dev.of_node; | |
3097 | INIT_LIST_HEAD(&pch->submitted_list); | |
3098 | INIT_LIST_HEAD(&pch->work_list); | |
3099 | INIT_LIST_HEAD(&pch->completed_list); | |
3100 | spin_lock_init(&pch->lock); | |
3101 | pch->thread = NULL; | |
3102 | pch->chan.device = pd; | |
3103 | pch->dmac = pl330; | |
3104 | pch->dir = DMA_NONE; | |
3105 | ||
3106 | /* Add the channel to the DMAC list */ | |
3107 | list_add_tail(&pch->chan.device_node, &pd->channels); | |
3108 | } | |
3109 | ||
3110 | dma_cap_set(DMA_MEMCPY, pd->cap_mask); | |
3111 | if (pcfg->num_peri) { | |
3112 | dma_cap_set(DMA_SLAVE, pd->cap_mask); | |
3113 | dma_cap_set(DMA_CYCLIC, pd->cap_mask); | |
3114 | dma_cap_set(DMA_PRIVATE, pd->cap_mask); | |
3115 | } | |
3116 | ||
3117 | pd->device_alloc_chan_resources = pl330_alloc_chan_resources; | |
3118 | pd->device_free_chan_resources = pl330_free_chan_resources; | |
3119 | pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy; | |
3120 | pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic; | |
3121 | pd->device_tx_status = pl330_tx_status; | |
3122 | pd->device_prep_slave_sg = pl330_prep_slave_sg; | |
3123 | pd->device_config = pl330_config; | |
3124 | pd->device_pause = pl330_pause; | |
3125 | pd->device_terminate_all = pl330_terminate_all; | |
3126 | pd->device_issue_pending = pl330_issue_pending; | |
3127 | pd->src_addr_widths = PL330_DMA_BUSWIDTHS; | |
3128 | pd->dst_addr_widths = PL330_DMA_BUSWIDTHS; | |
3129 | pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); | |
3130 | pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; | |
3131 | pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ? | |
3132 | 1 : PL330_MAX_BURST); | |
3133 | ||
3134 | ret = dma_async_device_register(pd); | |
3135 | if (ret) { | |
3136 | dev_err(&adev->dev, "unable to register DMAC\n"); | |
3137 | goto probe_err3; | |
3138 | } | |
3139 | ||
3140 | if (adev->dev.of_node) { | |
3141 | ret = of_dma_controller_register(adev->dev.of_node, | |
3142 | of_dma_pl330_xlate, pl330); | |
3143 | if (ret) { | |
3144 | dev_err(&adev->dev, | |
3145 | "unable to register DMA to the generic DT DMA helpers\n"); | |
3146 | } | |
3147 | } | |
3148 | ||
3149 | adev->dev.dma_parms = &pl330->dma_parms; | |
3150 | ||
3151 | /* | |
3152 | * This is the limit for transfers with a buswidth of 1; larger | |
3153 | * buswidths will have proportionally larger limits. | |
3154 | */ | |
3155 | ret = dma_set_max_seg_size(&adev->dev, 1900800); | |
3156 | if (ret) | |
3157 | dev_err(&adev->dev, "unable to set the seg size\n"); | |
3158 | ||
3160 | init_pl330_debugfs(pl330); | |
3161 | dev_info(&adev->dev, | |
3162 | "Loaded driver for PL330 DMAC-%x\n", adev->periphid); | |
3163 | dev_info(&adev->dev, | |
3164 | "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", | |
3165 | pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, | |
3166 | pcfg->num_peri, pcfg->num_events); | |
3167 | ||
3168 | pm_runtime_irq_safe(&adev->dev); | |
3169 | pm_runtime_use_autosuspend(&adev->dev); | |
3170 | pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY); | |
3171 | pm_runtime_mark_last_busy(&adev->dev); | |
3172 | pm_runtime_put_autosuspend(&adev->dev); | |
3173 | ||
3174 | return 0; | |
3175 | probe_err3: | |
3176 | /* Idle the DMAC */ | |
3177 | list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, | |
3178 | chan.device_node) { | |
3179 | ||
3180 | /* Remove the channel */ | |
3181 | list_del(&pch->chan.device_node); | |
3182 | ||
3183 | /* Flush the channel */ | |
3184 | if (pch->thread) { | |
3185 | pl330_terminate_all(&pch->chan); | |
3186 | pl330_free_chan_resources(&pch->chan); | |
3187 | } | |
3188 | } | |
3189 | probe_err2: | |
3190 | pl330_del(pl330); | |
3191 | ||
3192 | if (pl330->rstc_ocp) | |
3193 | reset_control_assert(pl330->rstc_ocp); | |
3194 | ||
3195 | if (pl330->rstc) | |
3196 | reset_control_assert(pl330->rstc); | |
3197 | return ret; | |
3198 | } | |
3199 | ||
3200 | static int pl330_remove(struct amba_device *adev) | |
3201 | { | |
3202 | struct pl330_dmac *pl330 = amba_get_drvdata(adev); | |
3203 | struct dma_pl330_chan *pch, *_p; | |
3204 | int i, irq; | |
3205 | ||
3206 | pm_runtime_get_noresume(pl330->ddma.dev); | |
3207 | ||
3208 | if (adev->dev.of_node) | |
3209 | of_dma_controller_free(adev->dev.of_node); | |
3210 | ||
3211 | for (i = 0; i < AMBA_NR_IRQS; i++) { | |
3212 | irq = adev->irq[i]; | |
3213 | if (irq) | |
3214 | devm_free_irq(&adev->dev, irq, pl330); | |
3215 | } | |
3216 | ||
3217 | dma_async_device_unregister(&pl330->ddma); | |
3218 | ||
3219 | /* Idle the DMAC */ | |
3220 | list_for_each_entry_safe(pch, _p, &pl330->ddma.channels, | |
3221 | chan.device_node) { | |
3222 | ||
3223 | /* Remove the channel */ | |
3224 | list_del(&pch->chan.device_node); | |
3225 | ||
3226 | /* Flush the channel */ | |
3227 | if (pch->thread) { | |
3228 | pl330_terminate_all(&pch->chan); | |
3229 | pl330_free_chan_resources(&pch->chan); | |
3230 | } | |
3231 | } | |
3232 | ||
3233 | pl330_del(pl330); | |
3234 | ||
3235 | if (pl330->rstc_ocp) | |
3236 | reset_control_assert(pl330->rstc_ocp); | |
3237 | ||
3238 | if (pl330->rstc) | |
3239 | reset_control_assert(pl330->rstc); | |
3240 | return 0; | |
3241 | } | |
3242 | ||
3243 | static const struct amba_id pl330_ids[] = { | |
3244 | { | |
3245 | .id = 0x00041330, | |
3246 | .mask = 0x000fffff, | |
3247 | }, | |
3248 | { 0, 0 }, | |
3249 | }; | |
3250 | ||
3251 | MODULE_DEVICE_TABLE(amba, pl330_ids); | |
3252 | ||
3253 | static struct amba_driver pl330_driver = { | |
3254 | .drv = { | |
3255 | .owner = THIS_MODULE, | |
3256 | .name = "dma-pl330", | |
3257 | .pm = &pl330_pm, | |
3258 | }, | |
3259 | .id_table = pl330_ids, | |
3260 | .probe = pl330_probe, | |
3261 | .remove = pl330_remove, | |
3262 | }; | |
3263 | ||
3264 | module_amba_driver(pl330_driver); | |
3265 | ||
3266 | MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>"); | |
3267 | MODULE_DESCRIPTION("API Driver for PL330 DMAC"); | |
3268 | MODULE_LICENSE("GPL"); |