// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* DPDMA registers */
#define XILINX_DPDMA_ERR_CTRL 0x000
#define XILINX_DPDMA_ISR 0x004
#define XILINX_DPDMA_IMR 0x008
#define XILINX_DPDMA_IEN 0x00c
#define XILINX_DPDMA_IDS 0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n) BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n) BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n) BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n) BIT((n) + 16)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS BIT(26)
#define XILINX_DPDMA_INTR_VSYNC BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK 0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR 0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR 0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL 0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK 0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK 0x0f000000
#define XILINX_DPDMA_INTR_ALL 0x0fffffff
#define XILINX_DPDMA_EISR 0x014
#define XILINX_DPDMA_EIMR 0x018
#define XILINX_DPDMA_EIEN 0x01c
#define XILINX_DPDMA_EIDS 0x020
#define XILINX_DPDMA_EINTR_INV_APB BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n) BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n) BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n) BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n) BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n) BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL BIT(32)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK 0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR 0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR 0x80000001
#define XILINX_DPDMA_EINTR_ALL 0xffffffff
#define XILINX_DPDMA_CNTL 0x100
#define XILINX_DPDMA_GBL 0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n) ((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n) ((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL 0x108
#define XILINX_DPDMA_ALC0_STATUS 0x10c
#define XILINX_DPDMA_ALC0_MAX 0x110
#define XILINX_DPDMA_ALC0_MIN 0x114
#define XILINX_DPDMA_ALC0_ACC 0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN 0x11c
#define XILINX_DPDMA_ALC1_CNTL 0x120
#define XILINX_DPDMA_ALC1_STATUS 0x124
#define XILINX_DPDMA_ALC1_MAX 0x128
#define XILINX_DPDMA_ALC1_MIN 0x12c
#define XILINX_DPDMA_ALC1_ACC 0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN 0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE 0x200
#define XILINX_DPDMA_CH_OFFSET 0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE 0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR 0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE 0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR 0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE 0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR 0x014
#define XILINX_DPDMA_CH_CNTL 0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS 11
#define XILINX_DPDMA_CH_STATUS 0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO 0x020
#define XILINX_DPDMA_CH_PYLD_SZ 0x024
#define XILINX_DPDMA_CH_DESC_ID 0x028

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREEMBLE 0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK GENMASK(31, 16)
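
/*
 * Descriptor and payload addresses are 48 bits wide: the low 32 bits live in
 * next_desc/src_addr and the high 16 bits in the ADDR_EXT fields above. For
 * illustration, a next descriptor at 0x0000123456789000 would be programmed
 * as next_desc = 0x56789000 together with
 * FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK, 0x1234) in addr_ext.
 */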
131 | ||
132 | #define XILINX_DPDMA_ALIGN_BYTES 256 | |
133 | #define XILINX_DPDMA_LINESIZE_ALIGN_BITS 128 | |
134 | ||
135 | #define XILINX_DPDMA_NUM_CHAN 6 | |
136 | ||
137 | struct xilinx_dpdma_chan; | |
138 | ||
139 | /** | |
140 | * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor | |
141 | * @control: control configuration field | |
142 | * @desc_id: descriptor ID | |
143 | * @xfer_size: transfer size | |
144 | * @hsize_stride: horizontal size and stride | |
145 | * @timestamp_lsb: LSB of time stamp | |
146 | * @timestamp_msb: MSB of time stamp | |
147 | * @addr_ext: upper 16 bit of 48 bit address (next_desc and src_addr) | |
148 | * @next_desc: next descriptor 32 bit address | |
149 | * @src_addr: payload source address (1st page, 32 LSB) | |
150 | * @addr_ext_23: payload source address (3nd and 3rd pages, 16 LSBs) | |
151 | * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs) | |
152 | * @src_addr2: payload source address (2nd page, 32 LSB) | |
153 | * @src_addr3: payload source address (3rd page, 32 LSB) | |
154 | * @src_addr4: payload source address (4th page, 32 LSB) | |
155 | * @src_addr5: payload source address (5th page, 32 LSB) | |
156 | * @crc: descriptor CRC | |
157 | */ | |
158 | struct xilinx_dpdma_hw_desc { | |
159 | u32 control; | |
160 | u32 desc_id; | |
161 | u32 xfer_size; | |
162 | u32 hsize_stride; | |
163 | u32 timestamp_lsb; | |
164 | u32 timestamp_msb; | |
165 | u32 addr_ext; | |
166 | u32 next_desc; | |
167 | u32 src_addr; | |
168 | u32 addr_ext_23; | |
169 | u32 addr_ext_45; | |
170 | u32 src_addr2; | |
171 | u32 src_addr3; | |
172 | u32 src_addr4; | |
173 | u32 src_addr5; | |
174 | u32 crc; | |
175 | } __aligned(XILINX_DPDMA_ALIGN_BYTES); | |
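
/*
 * The 16 u32 fields occupy 64 bytes; the __aligned() attribute pads and
 * aligns each descriptor to XILINX_DPDMA_ALIGN_BYTES, so descriptors
 * allocated from the dma_pool below (which uses this type's alignment) are
 * naturally 256-byte aligned, matching the alignment the driver also enforces
 * on frame buffers.
 */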
176 | ||
177 | /** | |
178 | * struct xilinx_dpdma_sw_desc - DPDMA software descriptor | |
179 | * @hw: DPDMA hardware descriptor | |
180 | * @node: list node for software descriptors | |
181 | * @dma_addr: DMA address of the software descriptor | |
182 | */ | |
183 | struct xilinx_dpdma_sw_desc { | |
184 | struct xilinx_dpdma_hw_desc hw; | |
185 | struct list_head node; | |
186 | dma_addr_t dma_addr; | |
187 | }; | |
188 | ||
189 | /** | |
190 | * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor | |
191 | * @vdesc: virtual DMA descriptor | |
192 | * @chan: DMA channel | |
193 | * @descriptors: list of software descriptors | |
194 | * @error: an error has been detected with this descriptor | |
195 | */ | |
196 | struct xilinx_dpdma_tx_desc { | |
197 | struct virt_dma_desc vdesc; | |
198 | struct xilinx_dpdma_chan *chan; | |
199 | struct list_head descriptors; | |
200 | bool error; | |
201 | }; | |
202 | ||
203 | #define to_dpdma_tx_desc(_desc) \ | |
204 | container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc) | |
205 | ||
206 | /** | |
207 | * struct xilinx_dpdma_chan - DPDMA channel | |
208 | * @vchan: virtual DMA channel | |
209 | * @reg: register base address | |
210 | * @id: channel ID | |
211 | * @wait_to_stop: queue to wait for outstanding transacitons before stopping | |
212 | * @running: true if the channel is running | |
213 | * @first_frame: flag for the first frame of stream | |
214 | * @video_group: flag if multi-channel operation is needed for video channels | |
215 | * @lock: lock to access struct xilinx_dpdma_chan | |
216 | * @desc_pool: descriptor allocation pool | |
217 | * @err_task: error IRQ bottom half handler | |
bc227385 | 218 | * @desc: References to descriptors being processed |
7cbb0c63 HK |
219 | * @desc.pending: Descriptor schedule to the hardware, pending execution |
220 | * @desc.active: Descriptor being executed by the hardware | |
221 | * @xdev: DPDMA device | |
222 | */ | |
223 | struct xilinx_dpdma_chan { | |
224 | struct virt_dma_chan vchan; | |
225 | void __iomem *reg; | |
226 | unsigned int id; | |
227 | ||
228 | wait_queue_head_t wait_to_stop; | |
229 | bool running; | |
230 | bool first_frame; | |
231 | bool video_group; | |
232 | ||
233 | spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */ | |
234 | struct dma_pool *desc_pool; | |
235 | struct tasklet_struct err_task; | |
236 | ||
237 | struct { | |
238 | struct xilinx_dpdma_tx_desc *pending; | |
239 | struct xilinx_dpdma_tx_desc *active; | |
240 | } desc; | |
241 | ||
242 | struct xilinx_dpdma_device *xdev; | |
243 | }; | |
244 | ||
245 | #define to_xilinx_chan(_chan) \ | |
246 | container_of(_chan, struct xilinx_dpdma_chan, vchan.chan) | |
247 | ||
248 | /** | |
249 | * struct xilinx_dpdma_device - DPDMA device | |
250 | * @common: generic dma device structure | |
251 | * @reg: register base address | |
252 | * @dev: generic device structure | |
253 | * @irq: the interrupt number | |
254 | * @axi_clk: axi clock | |
255 | * @chan: DPDMA channels | |
256 | * @ext_addr: flag for 64 bit system (48 bit addressing) | |
257 | */ | |
258 | struct xilinx_dpdma_device { | |
259 | struct dma_device common; | |
260 | void __iomem *reg; | |
261 | struct device *dev; | |
262 | int irq; | |
263 | ||
264 | struct clk *axi_clk; | |
265 | struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN]; | |
266 | ||
267 | bool ext_addr; | |
268 | }; | |
269 | ||
1d220435 LP |
270 | /* ----------------------------------------------------------------------------- |
271 | * DebugFS | |
272 | */ | |
273 | ||
274 | #ifdef CONFIG_DEBUG_FS | |
275 | ||
276 | #define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE 32 | |
277 | #define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR "65535" | |
278 | ||
279 | /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */ | |
280 | enum xilinx_dpdma_testcases { | |
281 | DPDMA_TC_INTR_DONE, | |
282 | DPDMA_TC_NONE | |
283 | }; | |
284 | ||
285 | struct xilinx_dpdma_debugfs { | |
286 | enum xilinx_dpdma_testcases testcase; | |
287 | u16 xilinx_dpdma_irq_done_count; | |
288 | unsigned int chan_id; | |
289 | }; | |
290 | ||
291 | static struct xilinx_dpdma_debugfs dpdma_debugfs; | |
292 | struct xilinx_dpdma_debugfs_request { | |
293 | const char *name; | |
294 | enum xilinx_dpdma_testcases tc; | |
295 | ssize_t (*read)(char *buf); | |
296 | int (*write)(char *args); | |
297 | }; | |
298 | ||
299 | static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan) | |
300 | { | |
301 | if (chan->id == dpdma_debugfs.chan_id) | |
302 | dpdma_debugfs.xilinx_dpdma_irq_done_count++; | |
303 | } | |
304 | ||
305 | static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf) | |
306 | { | |
307 | size_t out_str_len; | |
308 | ||
309 | dpdma_debugfs.testcase = DPDMA_TC_NONE; | |
310 | ||
311 | out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR); | |
312 | out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, | |
313 | out_str_len); | |
314 | snprintf(buf, out_str_len, "%d", | |
315 | dpdma_debugfs.xilinx_dpdma_irq_done_count); | |
316 | ||
317 | return 0; | |
318 | } | |
319 | ||
320 | static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args) | |
321 | { | |
322 | char *arg; | |
323 | int ret; | |
324 | u32 id; | |
325 | ||
326 | arg = strsep(&args, " "); | |
327 | if (!arg || strncasecmp(arg, "start", 5)) | |
328 | return -EINVAL; | |
329 | ||
330 | arg = strsep(&args, " "); | |
331 | if (!arg) | |
332 | return -EINVAL; | |
333 | ||
334 | ret = kstrtou32(arg, 0, &id); | |
335 | if (ret < 0) | |
336 | return ret; | |
337 | ||
338 | if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1) | |
339 | return -EINVAL; | |
340 | ||
341 | dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE; | |
342 | dpdma_debugfs.xilinx_dpdma_irq_done_count = 0; | |
343 | dpdma_debugfs.chan_id = id; | |
344 | ||
345 | return 0; | |
346 | } | |
347 | ||
348 | /* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */ | |
144ceb27 | 349 | static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = { |
1d220435 LP |
350 | { |
351 | .name = "DESCRIPTOR_DONE_INTR", | |
352 | .tc = DPDMA_TC_INTR_DONE, | |
353 | .read = xilinx_dpdma_debugfs_desc_done_irq_read, | |
354 | .write = xilinx_dpdma_debugfs_desc_done_irq_write, | |
355 | }, | |
356 | }; | |
357 | ||
358 | static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf, | |
359 | size_t size, loff_t *pos) | |
360 | { | |
361 | enum xilinx_dpdma_testcases testcase; | |
362 | char *kern_buff; | |
363 | int ret = 0; | |
364 | ||
365 | if (*pos != 0 || size <= 0) | |
366 | return -EINVAL; | |
367 | ||
368 | kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL); | |
369 | if (!kern_buff) { | |
370 | dpdma_debugfs.testcase = DPDMA_TC_NONE; | |
371 | return -ENOMEM; | |
372 | } | |
373 | ||
374 | testcase = READ_ONCE(dpdma_debugfs.testcase); | |
375 | if (testcase != DPDMA_TC_NONE) { | |
376 | ret = dpdma_debugfs_reqs[testcase].read(kern_buff); | |
377 | if (ret < 0) | |
378 | goto done; | |
379 | } else { | |
380 | strlcpy(kern_buff, "No testcase executed", | |
381 | XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE); | |
382 | } | |
383 | ||
384 | size = min(size, strlen(kern_buff)); | |
385 | if (copy_to_user(buf, kern_buff, size)) | |
386 | ret = -EFAULT; | |
387 | ||
388 | done: | |
389 | kfree(kern_buff); | |
390 | if (ret) | |
391 | return ret; | |
392 | ||
393 | *pos = size + 1; | |
394 | return size; | |
395 | } | |
396 | ||
397 | static ssize_t xilinx_dpdma_debugfs_write(struct file *f, | |
398 | const char __user *buf, size_t size, | |
399 | loff_t *pos) | |
400 | { | |
401 | char *kern_buff, *kern_buff_start; | |
402 | char *testcase; | |
403 | unsigned int i; | |
404 | int ret; | |
405 | ||
406 | if (*pos != 0 || size <= 0) | |
407 | return -EINVAL; | |
408 | ||
409 | /* Supporting single instance of test as of now. */ | |
410 | if (dpdma_debugfs.testcase != DPDMA_TC_NONE) | |
411 | return -EBUSY; | |
412 | ||
413 | kern_buff = kzalloc(size, GFP_KERNEL); | |
414 | if (!kern_buff) | |
415 | return -ENOMEM; | |
416 | kern_buff_start = kern_buff; | |
417 | ||
418 | ret = strncpy_from_user(kern_buff, buf, size); | |
419 | if (ret < 0) | |
420 | goto done; | |
421 | ||
422 | /* Read the testcase name from a user request. */ | |
423 | testcase = strsep(&kern_buff, " "); | |
424 | ||
425 | for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) { | |
426 | if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name)) | |
427 | break; | |
428 | } | |
429 | ||
430 | if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) { | |
431 | ret = -EINVAL; | |
432 | goto done; | |
433 | } | |
434 | ||
435 | ret = dpdma_debugfs_reqs[i].write(kern_buff); | |
436 | if (ret < 0) | |
437 | goto done; | |
438 | ||
439 | ret = size; | |
440 | ||
441 | done: | |
442 | kfree(kern_buff_start); | |
443 | return ret; | |
444 | } | |
445 | ||
446 | static const struct file_operations fops_xilinx_dpdma_dbgfs = { | |
447 | .owner = THIS_MODULE, | |
448 | .read = xilinx_dpdma_debugfs_read, | |
449 | .write = xilinx_dpdma_debugfs_write, | |
450 | }; | |
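
/*
 * Usage sketch (the exact location of the "testcase" file depends on the DMA
 * device's debugfs root):
 *
 *	echo "DESCRIPTOR_DONE_INTR start 0" > testcase
 *	cat testcase
 *
 * The write arms the DONE-interrupt counter for channel 0; the read reports
 * the number of descriptor-done interrupts received since the write and
 * disarms the test.
 */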
451 | ||
452 | static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev) | |
453 | { | |
454 | struct dentry *dent; | |
455 | ||
456 | dpdma_debugfs.testcase = DPDMA_TC_NONE; | |
457 | ||
458 | dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root, | |
459 | NULL, &fops_xilinx_dpdma_dbgfs); | |
460 | if (IS_ERR(dent)) | |
461 | dev_err(xdev->dev, "Failed to create debugfs testcase file\n"); | |
462 | } | |
463 | ||
464 | #else | |
465 | static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev) | |
466 | { | |
467 | } | |
468 | ||
469 | static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan) | |
470 | { | |
471 | } | |
472 | #endif /* CONFIG_DEBUG_FS */ | |
473 | ||
7cbb0c63 HK |
474 | /* ----------------------------------------------------------------------------- |
475 | * I/O Accessors | |
476 | */ | |
477 | ||
478 | static inline u32 dpdma_read(void __iomem *base, u32 offset) | |
479 | { | |
480 | return ioread32(base + offset); | |
481 | } | |
482 | ||
483 | static inline void dpdma_write(void __iomem *base, u32 offset, u32 val) | |
484 | { | |
485 | iowrite32(val, base + offset); | |
486 | } | |
487 | ||
488 | static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr) | |
489 | { | |
490 | dpdma_write(base, offset, dpdma_read(base, offset) & ~clr); | |
491 | } | |
492 | ||
493 | static inline void dpdma_set(void __iomem *base, u32 offset, u32 set) | |
494 | { | |
495 | dpdma_write(base, offset, dpdma_read(base, offset) | set); | |
496 | } | |
497 | ||
498 | /* ----------------------------------------------------------------------------- | |
499 | * Descriptor Operations | |
500 | */ | |
501 | ||
502 | /** | |
503 | * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor | |
ea55b6a3 | 504 | * @xdev: DPDMA device |
7cbb0c63 HK |
505 | * @sw_desc: The software descriptor in which to set DMA addresses |
506 | * @prev: The previous descriptor | |
507 | * @dma_addr: array of dma addresses | |
508 | * @num_src_addr: number of addresses in @dma_addr | |
509 | * | |
510 | * Set all the DMA addresses in the hardware descriptor corresponding to @dev | |
511 | * from @dma_addr. If a previous descriptor is specified in @prev, its next | |
512 | * descriptor DMA address is set to the DMA address of @sw_desc. @prev may be | |
513 | * identical to @sw_desc for cyclic transfers. | |
514 | */ | |
515 | static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev, | |
516 | struct xilinx_dpdma_sw_desc *sw_desc, | |
517 | struct xilinx_dpdma_sw_desc *prev, | |
518 | dma_addr_t dma_addr[], | |
519 | unsigned int num_src_addr) | |
520 | { | |
521 | struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw; | |
522 | unsigned int i; | |
523 | ||
524 | hw_desc->src_addr = lower_32_bits(dma_addr[0]); | |
525 | if (xdev->ext_addr) | |
526 | hw_desc->addr_ext |= | |
527 | FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK, | |
528 | upper_32_bits(dma_addr[0])); | |
529 | ||
530 | for (i = 1; i < num_src_addr; i++) { | |
531 | u32 *addr = &hw_desc->src_addr2; | |
532 | ||
		addr[i - 1] = lower_32_bits(dma_addr[i]);

		if (xdev->ext_addr) {
			u32 *addr_ext = &hw_desc->addr_ext_23;
			u32 addr_msb;

			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
			addr_msb <<= 16 * ((i - 1) % 2);
			addr_ext[(i - 1) / 2] |= addr_msb;
		}
	}

	if (!prev)
		return;

	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (xdev->ext_addr)
		prev->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));
}

/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	dma_addr_t dma_addr;

	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
	if (!sw_desc)
		return NULL;

	sw_desc->dma_addr = dma_addr;

	return sw_desc;
}

/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
{
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}

/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump contents of a tx descriptor
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	unsigned int i = 0;

	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
	}

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
}

/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *tx_desc;

	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
	if (!tx_desc)
		return NULL;

	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->chan = chan;
	tx_desc->error = false;

	return tx_desc;
}

/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	struct xilinx_dpdma_tx_desc *desc;

	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);

	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
	}

	kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 * descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor including internal software/hardware descriptors
 * based on @xt.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
				       struct dma_interleaved_template *xt)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;

	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
			XILINX_DPDMA_ALIGN_BYTES);
		return NULL;
	}

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	if (!sw_desc) {
		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
		return NULL;
	}

	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
					   &xt->src_start, 1);

	hw_desc = &sw_desc->hw;
	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
	hw_desc->xfer_size = hsize * xt->numf;
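	/*
	 * hsize is programmed in bytes (aligned to the 128-bit line size),
	 * while the hardware expects the stride in 16-byte units.
	 */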
	hw_desc->hsize_stride =
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
			   stride / 16);
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREEMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	list_add_tail(&sw_desc->node, &tx_desc->descriptors);

	return tx_desc;
}
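
/*
 * For illustration, a client such as the ZynqMP DisplayPort driver would
 * describe one frame roughly as follows (a sketch with hypothetical width,
 * height, bpp and stride values; error handling omitted):
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = dma_addr;		// 256-byte aligned buffer
 *	xt->numf = height;			// number of lines
 *	xt->frame_size = 1;
 *	xt->sgl[0].size = width * bpp;		// line size in bytes
 *	xt->sgl[0].icg = stride - xt->sgl[0].size;
 *	tx = dmaengine_prep_interleaved_dma(dchan, xt,
 *					    DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
 */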
739 | ||
740 | /* ----------------------------------------------------------------------------- | |
741 | * DPDMA Channel Operations | |
742 | */ | |
743 | ||
744 | /** | |
745 | * xilinx_dpdma_chan_enable - Enable the channel | |
746 | * @chan: DPDMA channel | |
747 | * | |
748 | * Enable the channel and its interrupts. Set the QoS values for video class. | |
749 | */ | |
750 | static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan) | |
751 | { | |
752 | u32 reg; | |
753 | ||
754 | reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id) | |
755 | | XILINX_DPDMA_INTR_GLOBAL_MASK; | |
756 | dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg); | |
757 | reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id) | |
758 | | XILINX_DPDMA_INTR_GLOBAL_ERR; | |
759 | dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg); | |
760 | ||
761 | reg = XILINX_DPDMA_CH_CNTL_ENABLE | |
762 | | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK, | |
763 | XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS) | |
764 | | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK, | |
765 | XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS) | |
766 | | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK, | |
767 | XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS); | |
768 | dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg); | |
769 | } | |
770 | ||
771 | /** | |
772 | * xilinx_dpdma_chan_disable - Disable the channel | |
773 | * @chan: DPDMA channel | |
774 | * | |
775 | * Disable the channel and its interrupts. | |
776 | */ | |
777 | static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan) | |
778 | { | |
779 | u32 reg; | |
780 | ||
781 | reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id; | |
782 | dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg); | |
783 | reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id; | |
784 | dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg); | |
785 | ||
786 | dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE); | |
787 | } | |
788 | ||
789 | /** | |
790 | * xilinx_dpdma_chan_pause - Pause the channel | |
791 | * @chan: DPDMA channel | |
792 | * | |
793 | * Pause the channel. | |
794 | */ | |
795 | static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan) | |
796 | { | |
797 | dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE); | |
798 | } | |
799 | ||
800 | /** | |
801 | * xilinx_dpdma_chan_unpause - Unpause the channel | |
802 | * @chan: DPDMA channel | |
803 | * | |
804 | * Unpause the channel. | |
805 | */ | |
806 | static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan) | |
807 | { | |
808 | dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE); | |
809 | } | |
810 | ||
static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	u32 channels = 0;
	unsigned int i;

	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
			return 0;

		if (xdev->chan[i]->video_group)
			channels |= BIT(i);
	}

	return channels;
}

/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_tx_desc *desc;
	struct virt_dma_desc *vdesc;
	u32 reg, channels;

	lockdep_assert_held(&chan->lock);

	if (chan->desc.pending)
		return;

	if (!chan->running) {
		xilinx_dpdma_chan_unpause(chan);
		xilinx_dpdma_chan_enable(chan);
		chan->first_frame = true;
		chan->running = true;
	}

	if (chan->video_group)
		channels = xilinx_dpdma_chan_video_group_ready(chan);
	else
		channels = BIT(chan->id);

	if (!channels)
		return;

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);
	chan->desc.pending = desc;
	list_del(&desc->vdesc.node);

	/*
	 * Assign the cookie to descriptors in this transaction. Only the
	 * lower 16 bits will be used, but it should be enough.
	 */
	list_for_each_entry(sw_desc, &desc->descriptors, node)
		sw_desc->hw.desc_id = desc->vdesc.tx.cookie;

	sw_desc = list_first_entry(&desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    lower_32_bits(sw_desc->dma_addr));
	if (xdev->ext_addr)
		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
				       upper_32_bits(sw_desc->dma_addr)));

	if (chan->first_frame)
		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
	else
		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

	chan->first_frame = false;

	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from the register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}

/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters for no outstanding event, so waiters can stop the channel
 * safely. This function is supposed to be called when the 'no outstanding'
 * interrupt is generated. The 'no outstanding' interrupt is disabled and
 * should be re-enabled when this event is handled. If the channel status
 * register still shows some number of outstanding transactions, the interrupt
 * remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transaction(s).
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt;

	cnt = xilinx_dpdma_chan_ostand(chan);
	if (cnt) {
		dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
		return -EWOULDBLOCK;
	}

	/* Disable 'no outstanding' interrupt */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
	wake_up(&chan->wait_to_stop);

	return 0;
}

/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the no outstanding transaction interrupt. This function can sleep
 * for 50 ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT for time out, or the error
 * code from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
	int ret;

	/* Wait for the 'no outstanding transaction' interrupt for up to 50 ms. */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	if (ret > 0) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	if (ret == 0)
		return -ETIMEDOUT;

	return ret;
}

/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in the interrupt context
 * or where atomicity is required. The calling thread may wait more than 50 ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt, loop = 50000;

	/* Poll at least for 50ms (20 fps). */
	do {
		cnt = xilinx_dpdma_chan_ostand(chan);
		udelay(1);
	} while (loop-- > 0 && cnt);

	if (loop) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	return -ETIMEDOUT;
}
1007 | ||
1008 | /** | |
1009 | * xilinx_dpdma_chan_stop - Stop the channel | |
1010 | * @chan: DPDMA channel | |
1011 | * | |
1012 | * Stop a previously paused channel by first waiting for completion of all | |
1013 | * outstanding transaction and then disabling the channel. | |
1014 | * | |
1015 | * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop. | |
1016 | */ | |
1017 | static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan) | |
1018 | { | |
1019 | unsigned long flags; | |
1020 | int ret; | |
1021 | ||
1022 | ret = xilinx_dpdma_chan_wait_no_ostand(chan); | |
1023 | if (ret) | |
1024 | return ret; | |
1025 | ||
1026 | spin_lock_irqsave(&chan->lock, flags); | |
1027 | xilinx_dpdma_chan_disable(chan); | |
1028 | chan->running = false; | |
1029 | spin_unlock_irqrestore(&chan->lock, flags); | |
1030 | ||
1031 | return 0; | |
1032 | } | |
1033 | ||
1034 | /** | |
1035 | * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion | |
1036 | * @chan: DPDMA channel | |
1037 | * | |
1038 | * Handle completion of the currently active descriptor (@chan->desc.active). As | |
1039 | * we currently support cyclic transfers only, this just invokes the cyclic | |
1040 | * callback. The descriptor will be completed at the VSYNC interrupt when a new | |
1041 | * descriptor replaces it. | |
1042 | */ | |
1043 | static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) | |
1044 | { | |
1045 | struct xilinx_dpdma_tx_desc *active = chan->desc.active; | |
1046 | unsigned long flags; | |
1047 | ||
1048 | spin_lock_irqsave(&chan->lock, flags); | |
1049 | ||
1d220435 LP |
1050 | xilinx_dpdma_debugfs_desc_done_irq(chan); |
1051 | ||
7cbb0c63 HK |
1052 | if (active) |
1053 | vchan_cyclic_callback(&active->vdesc); | |
1054 | else | |
1055 | dev_warn(chan->xdev->dev, | |
1056 | "DONE IRQ with no active descriptor!\n"); | |
1057 | ||
1058 | spin_unlock_irqrestore(&chan->lock, flags); | |
1059 | } | |
1060 | ||
1061 | /** | |
1062 | * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling | |
1063 | * @chan: DPDMA channel | |
1064 | * | |
1065 | * At VSYNC the active descriptor may have been replaced by the pending | |
1066 | * descriptor. Detect this through the DESC_ID and perform appropriate | |
1067 | * bookkeeping. | |
1068 | */ | |
1069 | static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan) | |
1070 | { | |
1071 | struct xilinx_dpdma_tx_desc *pending; | |
1072 | struct xilinx_dpdma_sw_desc *sw_desc; | |
1073 | unsigned long flags; | |
1074 | u32 desc_id; | |
1075 | ||
1076 | spin_lock_irqsave(&chan->lock, flags); | |
1077 | ||
1078 | pending = chan->desc.pending; | |
1079 | if (!chan->running || !pending) | |
1080 | goto out; | |
1081 | ||
1082 | desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID); | |
1083 | ||
1084 | /* If the retrigger raced with vsync, retry at the next frame. */ | |
1085 | sw_desc = list_first_entry(&pending->descriptors, | |
1086 | struct xilinx_dpdma_sw_desc, node); | |
1087 | if (sw_desc->hw.desc_id != desc_id) | |
1088 | goto out; | |
1089 | ||
1090 | /* | |
1091 | * Complete the active descriptor, if any, promote the pending | |
1092 | * descriptor to active, and queue the next transfer, if any. | |
1093 | */ | |
1094 | if (chan->desc.active) | |
1095 | vchan_cookie_complete(&chan->desc.active->vdesc); | |
1096 | chan->desc.active = pending; | |
1097 | chan->desc.pending = NULL; | |
1098 | ||
1099 | xilinx_dpdma_chan_queue_transfer(chan); | |
1100 | ||
1101 | out: | |
1102 | spin_unlock_irqrestore(&chan->lock, flags); | |
1103 | } | |
1104 | ||
1105 | /** | |
1106 | * xilinx_dpdma_chan_err - Detect any channel error | |
1107 | * @chan: DPDMA channel | |
1108 | * @isr: masked Interrupt Status Register | |
1109 | * @eisr: Error Interrupt Status Register | |
1110 | * | |
1111 | * Return: true if any channel error occurs, or false otherwise. | |
1112 | */ | |
1113 | static bool | |
1114 | xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr) | |
1115 | { | |
1116 | if (!chan) | |
1117 | return false; | |
1118 | ||
1119 | if (chan->running && | |
1120 | ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) || | |
1121 | (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)))) | |
1122 | return true; | |
1123 | ||
1124 | return false; | |
1125 | } | |
1126 | ||
1127 | /** | |
1128 | * xilinx_dpdma_chan_handle_err - DPDMA channel error handling | |
1129 | * @chan: DPDMA channel | |
1130 | * | |
1131 | * This function is called when any channel error or any global error occurs. | |
1132 | * The function disables the paused channel by errors and determines | |
1133 | * if the current active descriptor can be rescheduled depending on | |
1134 | * the descriptor status. | |
1135 | */ | |
1136 | static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan) | |
1137 | { | |
1138 | struct xilinx_dpdma_device *xdev = chan->xdev; | |
1139 | struct xilinx_dpdma_tx_desc *active; | |
1140 | unsigned long flags; | |
1141 | ||
1142 | spin_lock_irqsave(&chan->lock, flags); | |
1143 | ||
1144 | dev_dbg(xdev->dev, "cur desc addr = 0x%04x%08x\n", | |
1145 | dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE), | |
1146 | dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR)); | |
1147 | dev_dbg(xdev->dev, "cur payload addr = 0x%04x%08x\n", | |
1148 | dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE), | |
1149 | dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR)); | |
1150 | ||
1151 | xilinx_dpdma_chan_disable(chan); | |
1152 | chan->running = false; | |
1153 | ||
1154 | if (!chan->desc.active) | |
1155 | goto out_unlock; | |
1156 | ||
1157 | active = chan->desc.active; | |
1158 | chan->desc.active = NULL; | |
1159 | ||
1160 | xilinx_dpdma_chan_dump_tx_desc(chan, active); | |
1161 | ||
1162 | if (active->error) | |
1163 | dev_dbg(xdev->dev, "repeated error on desc\n"); | |
1164 | ||
1165 | /* Reschedule if there's no new descriptor */ | |
1166 | if (!chan->desc.pending && | |
1167 | list_empty(&chan->vchan.desc_issued)) { | |
1168 | active->error = true; | |
1169 | list_add_tail(&active->vdesc.node, | |
1170 | &chan->vchan.desc_issued); | |
1171 | } else { | |
1172 | xilinx_dpdma_chan_free_tx_desc(&active->vdesc); | |
1173 | } | |
1174 | ||
1175 | out_unlock: | |
1176 | spin_unlock_irqrestore(&chan->lock, flags); | |
1177 | } | |
1178 | ||
1179 | /* ----------------------------------------------------------------------------- | |
1180 | * DMA Engine Operations | |
1181 | */ | |
1182 | ||
1183 | static struct dma_async_tx_descriptor * | |
1184 | xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan, | |
1185 | struct dma_interleaved_template *xt, | |
1186 | unsigned long flags) | |
1187 | { | |
1188 | struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); | |
1189 | struct xilinx_dpdma_tx_desc *desc; | |
1190 | ||
1191 | if (xt->dir != DMA_MEM_TO_DEV) | |
1192 | return NULL; | |
1193 | ||
1194 | if (!xt->numf || !xt->sgl[0].size) | |
1195 | return NULL; | |
1196 | ||
1197 | if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT)) | |
1198 | return NULL; | |
1199 | ||
1200 | desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt); | |
1201 | if (!desc) | |
1202 | return NULL; | |
1203 | ||
1204 | vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK); | |
1205 | ||
1206 | return &desc->vdesc.tx; | |
1207 | } | |
1208 | ||
1209 | /** | |
1210 | * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel | |
1211 | * @dchan: DMA channel | |
1212 | * | |
1213 | * Allocate a descriptor pool for the channel. | |
1214 | * | |
1215 | * Return: 0 on success, or -ENOMEM if failed to allocate a pool. | |
1216 | */ | |
1217 | static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan) | |
1218 | { | |
1219 | struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); | |
1220 | size_t align = __alignof__(struct xilinx_dpdma_sw_desc); | |
1221 | ||
1222 | chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev), | |
1223 | chan->xdev->dev, | |
1224 | sizeof(struct xilinx_dpdma_sw_desc), | |
1225 | align, 0); | |
1226 | if (!chan->desc_pool) { | |
1227 | dev_err(chan->xdev->dev, | |
1228 | "failed to allocate a descriptor pool\n"); | |
1229 | return -ENOMEM; | |
1230 | } | |
1231 | ||
1232 | return 0; | |
1233 | } | |
1234 | ||
1235 | /** | |
1236 | * xilinx_dpdma_free_chan_resources - Free all resources for the channel | |
1237 | * @dchan: DMA channel | |
1238 | * | |
1239 | * Free resources associated with the virtual DMA channel, and destroy the | |
1240 | * descriptor pool. | |
1241 | */ | |
1242 | static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan) | |
1243 | { | |
1244 | struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); | |
1245 | ||
1246 | vchan_free_chan_resources(&chan->vchan); | |
1247 | ||
1248 | dma_pool_destroy(chan->desc_pool); | |
1249 | chan->desc_pool = NULL; | |
1250 | } | |
1251 | ||
1252 | static void xilinx_dpdma_issue_pending(struct dma_chan *dchan) | |
1253 | { | |
1254 | struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); | |
1255 | unsigned long flags; | |
1256 | ||
1257 | spin_lock_irqsave(&chan->vchan.lock, flags); | |
1258 | if (vchan_issue_pending(&chan->vchan)) | |
1259 | xilinx_dpdma_chan_queue_transfer(chan); | |
1260 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | |
1261 | } | |
1262 | ||
1263 | static int xilinx_dpdma_config(struct dma_chan *dchan, | |
1264 | struct dma_slave_config *config) | |
1265 | { | |
1266 | struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); | |
1267 | unsigned long flags; | |
1268 | ||
1269 | /* | |
1270 | * The destination address doesn't need to be specified as the DPDMA is | |
1271 | * hardwired to the destination (the DP controller). The transfer | |
1272 | * width, burst size and port window size are thus meaningless, they're | |
1273 | * fixed both on the DPDMA side and on the DP controller side. | |
1274 | */ | |
1275 | ||
1276 | spin_lock_irqsave(&chan->lock, flags); | |
1277 | ||
1278 | /* | |
1279 | * Abuse the slave_id to indicate that the channel is part of a video | |
1280 | * group. | |
1281 | */ | |
1c1df908 | 1282 | if (chan->id <= ZYNQMP_DPDMA_VIDEO2) |
7cbb0c63 HK |
1283 | chan->video_group = config->slave_id != 0; |
1284 | ||
1285 | spin_unlock_irqrestore(&chan->lock, flags); | |
1286 | ||
1287 | return 0; | |
1288 | } | |
1289 | ||
1290 | static int xilinx_dpdma_pause(struct dma_chan *dchan) | |
1291 | { | |
1292 | xilinx_dpdma_chan_pause(to_xilinx_chan(dchan)); | |
1293 | ||
1294 | return 0; | |
1295 | } | |
1296 | ||
1297 | static int xilinx_dpdma_resume(struct dma_chan *dchan) | |
1298 | { | |
1299 | xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan)); | |
1300 | ||
1301 | return 0; | |
1302 | } | |
1303 | ||
1304 | /** | |
1305 | * xilinx_dpdma_terminate_all - Terminate the channel and descriptors | |
1306 | * @dchan: DMA channel | |
1307 | * | |
1308 | * Pause the channel without waiting for ongoing transfers to complete. Waiting | |
1309 | * for completion is performed by xilinx_dpdma_synchronize() that will disable | |
1310 | * the channel to complete the stop. | |
1311 | * | |
1312 | * All the descriptors associated with the channel that are guaranteed not to | |
1313 | * be touched by the hardware. The pending and active descriptor are not | |
1314 | * touched, and will be freed either upon completion, or by | |
1315 | * xilinx_dpdma_synchronize(). | |
1316 | * | |
1317 | * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop. | |
1318 | */ | |
1319 | static int xilinx_dpdma_terminate_all(struct dma_chan *dchan) | |
1320 | { | |
1321 | struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); | |
1322 | struct xilinx_dpdma_device *xdev = chan->xdev; | |
1323 | LIST_HEAD(descriptors); | |
1324 | unsigned long flags; | |
1325 | unsigned int i; | |
1326 | ||
1327 | /* Pause the channel (including the whole video group if applicable). */ | |
1328 | if (chan->video_group) { | |
1329 | for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) { | |
1330 | if (xdev->chan[i]->video_group && | |
1331 | xdev->chan[i]->running) { | |
1332 | xilinx_dpdma_chan_pause(xdev->chan[i]); | |
1333 | xdev->chan[i]->video_group = false; | |
1334 | } | |
1335 | } | |
1336 | } else { | |
1337 | xilinx_dpdma_chan_pause(chan); | |
1338 | } | |
1339 | ||
1340 | /* Gather all the descriptors we can free and free them. */ | |
1341 | spin_lock_irqsave(&chan->vchan.lock, flags); | |
1342 | vchan_get_all_descriptors(&chan->vchan, &descriptors); | |
1343 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | |
1344 | ||
1345 | vchan_dma_desc_free_list(&chan->vchan, &descriptors); | |
1346 | ||
1347 | return 0; | |
1348 | } | |
1349 | ||
1350 | /** | |
1351 | * xilinx_dpdma_synchronize - Synchronize callback execution | |
1352 | * @dchan: DMA channel | |
1353 | * | |
1354 | * Synchronizing callback execution ensures that all previously issued | |
1355 | * transfers have completed and all associated callbacks have been called and | |
1356 | * have returned. | |
1357 | * | |
1358 | * This function waits for the DMA channel to stop. It assumes it has been | |
1359 | * paused by a previous call to dmaengine_terminate_async(), and that no new | |
1360 | * pending descriptors have been issued with dma_async_issue_pending(). The | |
1361 | * behaviour is undefined otherwise. | |
1362 | */ | |
1363 | static void xilinx_dpdma_synchronize(struct dma_chan *dchan) | |
1364 | { | |
1365 | struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan); | |
1366 | unsigned long flags; | |
1367 | ||
1368 | xilinx_dpdma_chan_stop(chan); | |
1369 | ||
1370 | spin_lock_irqsave(&chan->vchan.lock, flags); | |
1371 | if (chan->desc.pending) { | |
1372 | vchan_terminate_vdesc(&chan->desc.pending->vdesc); | |
1373 | chan->desc.pending = NULL; | |
1374 | } | |
1375 | if (chan->desc.active) { | |
1376 | vchan_terminate_vdesc(&chan->desc.active->vdesc); | |
1377 | chan->desc.active = NULL; | |
1378 | } | |
1379 | spin_unlock_irqrestore(&chan->vchan.lock, flags); | |
1380 | ||
1381 | vchan_synchronize(&chan->vchan); | |
1382 | } | |
1383 | ||
/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
		return true;

	return false;
}

/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle if any error occurs based on @isr and @eisr. This function disables
 * corresponding error interrupts, and those should be re-enabled once handling
 * is done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
					u32 isr, u32 eisr)
{
	bool err = xilinx_dpdma_err(isr, eisr);
	unsigned int i;

	dev_dbg_ratelimited(xdev->dev,
			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
			    isr, eisr);

	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
}

/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ERR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @data: tasklet data to be cast to DPDMA channel structure
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transaction to complete and triggers error handling. After error handling,
 * re-enable channel error interrupts, and restart the channel if needed.
 */
static void xilinx_dpdma_chan_err_task(unsigned long data)
{
	struct xilinx_dpdma_chan *chan = (struct xilinx_dpdma_chan *)data;
	struct xilinx_dpdma_device *xdev = chan->xdev;
	unsigned long flags;

	/* Proceed error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);

	xilinx_dpdma_chan_handle_err(chan);

	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}

1488 | static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data) | |
1489 | { | |
1490 | struct xilinx_dpdma_device *xdev = data; | |
1491 | unsigned long mask; | |
1492 | unsigned int i; | |
1493 | u32 status; | |
1494 | u32 error; | |
1495 | ||
1496 | status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR); | |
1497 | error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR); | |
1498 | if (!status && !error) | |
1499 | return IRQ_NONE; | |
1500 | ||
1501 | dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status); | |
1502 | dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error); | |
1503 | ||
1504 | if (status & XILINX_DPDMA_INTR_VSYNC) { | |
1505 | /* | |
1506 | * There's a single VSYNC interrupt that needs to be processed | |
1507 | * by each running channel to update the active descriptor. | |
1508 | */ | |
1509 | for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) { | |
1510 | struct xilinx_dpdma_chan *chan = xdev->chan[i]; | |
1511 | ||
1512 | if (chan) | |
1513 | xilinx_dpdma_chan_vsync_irq(chan); | |
1514 | } | |
1515 | } | |
1516 | ||
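	/*
	 * FIELD_GET() shifts the per-channel DONE and NO_OSTAND bits down so
	 * that bit i of the resulting mask corresponds to channel i.
	 */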
1517 | mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status); | |
1518 | if (mask) { | |
1519 | for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan)) | |
1520 | xilinx_dpdma_chan_done_irq(xdev->chan[i]); | |
1521 | } | |
1522 | ||
1523 | mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status); | |
1524 | if (mask) { | |
1525 | for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan)) | |
1526 | xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]); | |
1527 | } | |
1528 | ||
1529 | mask = status & XILINX_DPDMA_INTR_ERR_ALL; | |
1530 | if (mask || error) | |
1531 | xilinx_dpdma_handle_err_irq(xdev, mask, error); | |
1532 | ||
1533 | return IRQ_HANDLED; | |
1534 | } | |
1535 | ||
1536 | /* ----------------------------------------------------------------------------- | |
1537 | * Initialization & Cleanup | |
1538 | */ | |
1539 | ||
1540 | static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev, | |
1541 | unsigned int chan_id) | |
1542 | { | |
1543 | struct xilinx_dpdma_chan *chan; | |
1544 | ||
1545 | chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); | |
1546 | if (!chan) | |
1547 | return -ENOMEM; | |
1548 | ||
1549 | chan->id = chan_id; | |
1550 | chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE | |
1551 | + XILINX_DPDMA_CH_OFFSET * chan->id; | |
1552 | chan->running = false; | |
1553 | chan->xdev = xdev; | |
1554 | ||
1555 | spin_lock_init(&chan->lock); | |
1556 | init_waitqueue_head(&chan->wait_to_stop); | |
1557 | ||
1558 | tasklet_init(&chan->err_task, xilinx_dpdma_chan_err_task, | |
1559 | (unsigned long)chan); | |
1560 | ||
1561 | chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc; | |
1562 | vchan_init(&chan->vchan, &xdev->common); | |
1563 | ||
1564 | xdev->chan[chan->id] = chan; | |
1565 | ||
1566 | return 0; | |
1567 | } | |
1568 | ||
1569 | static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan) | |
1570 | { | |
1571 | if (!chan) | |
1572 | return; | |
1573 | ||
1574 | tasklet_kill(&chan->err_task); | |
1575 | list_del(&chan->vchan.chan.device_node); | |
1576 | } | |
1577 | ||
1578 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, | |
1579 | struct of_dma *ofdma) | |
1580 | { | |
1581 | struct xilinx_dpdma_device *xdev = ofdma->of_dma_data; | |
1582 | u32 chan_id = dma_spec->args[0]; | |
1583 | ||
1584 | if (chan_id >= ARRAY_SIZE(xdev->chan)) | |
1585 | return NULL; | |
1586 | ||
1587 | if (!xdev->chan[chan_id]) | |
1588 | return NULL; | |
1589 | ||
1590 | return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan); | |
1591 | } | |
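/*
 * Usage sketch (illustrative, not taken from this file): a consumer node
 * references a channel with a single cell holding one of the channel IDs
 * from dt-bindings/dma/xlnx-zynqmp-dpdma.h, e.g.
 *
 *	dmas = <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO0>;
 *	dma-names = "vid0";
 *
 * and then obtains it at runtime with dma_request_chan(dev, "vid0").
 * The node label and channel name are assumptions for the example.
 */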
1592 | ||
1593 | static int xilinx_dpdma_probe(struct platform_device *pdev) | |
1594 | { | |
1595 | struct xilinx_dpdma_device *xdev; | |
1596 | struct dma_device *ddev; | |
1597 | unsigned int i; | |
1598 | int ret; | |
1599 | ||
1600 | xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); | |
1601 | if (!xdev) | |
1602 | return -ENOMEM; | |
1603 | ||
1604 | xdev->dev = &pdev->dev; | |
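	/* Use the extended descriptor address fields when dma_addr_t is wider than 32 bits. */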
1605 | xdev->ext_addr = sizeof(dma_addr_t) > 4; | |
1606 | ||
1607 | INIT_LIST_HEAD(&xdev->common.channels); | |
1608 | ||
1609 | platform_set_drvdata(pdev, xdev); | |
1610 | ||
1611 | xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk"); | |
1612 | if (IS_ERR(xdev->axi_clk)) | |
1613 | return PTR_ERR(xdev->axi_clk); | |
1614 | ||
1615 | xdev->reg = devm_platform_ioremap_resource(pdev, 0); | |
1616 | if (IS_ERR(xdev->reg)) | |
1617 | return PTR_ERR(xdev->reg); | |
1618 | ||
1619 | xdev->irq = platform_get_irq(pdev, 0); | |
1620 | if (xdev->irq < 0) { | |
1621 | dev_err(xdev->dev, "failed to get platform irq\n"); | |
1622 | return xdev->irq; | |
1623 | } | |
1624 | ||
1625 | ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED, | |
1626 | dev_name(xdev->dev), xdev); | |
1627 | if (ret) { | |
1628 | dev_err(xdev->dev, "failed to request IRQ\n"); | |
1629 | return ret; | |
1630 | } | |
1631 | ||
1632 | ddev = &xdev->common; | |
1633 | ddev->dev = &pdev->dev; | |
1634 | ||
1635 | dma_cap_set(DMA_SLAVE, ddev->cap_mask); | |
1636 | dma_cap_set(DMA_PRIVATE, ddev->cap_mask); | |
1637 | dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask); | |
1638 | dma_cap_set(DMA_REPEAT, ddev->cap_mask); | |
1639 | dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask); | |
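	/* copy_align is a log2 value; fls(n - 1) encodes the power-of-two byte alignment. */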
1640 | ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1); | |
1641 | ||
1642 | ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources; | |
1643 | ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources; | |
1644 | ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma; | |
1645 | /* TODO: Can we achieve better granularity? */ | |
1646 | ddev->device_tx_status = dma_cookie_status; | |
1647 | ddev->device_issue_pending = xilinx_dpdma_issue_pending; | |
1648 | ddev->device_config = xilinx_dpdma_config; | |
1649 | ddev->device_pause = xilinx_dpdma_pause; | |
1650 | ddev->device_resume = xilinx_dpdma_resume; | |
1651 | ddev->device_terminate_all = xilinx_dpdma_terminate_all; | |
1652 | ddev->device_synchronize = xilinx_dpdma_synchronize; | |
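	/* DPDMA only reads from memory toward the display, so no slave bus width constraint applies. */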
1653 | ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED); | |
1654 | ddev->directions = BIT(DMA_MEM_TO_DEV); | |
1655 | ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR; | |
1656 | ||
1657 | for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) { | |
1658 | ret = xilinx_dpdma_chan_init(xdev, i); | |
1659 | if (ret < 0) { | |
1660 | dev_err(xdev->dev, "failed to initialize channel %u\n", | |
1661 | i); | |
1662 | goto error; | |
1663 | } | |
1664 | } | |
1665 | ||
1666 | ret = clk_prepare_enable(xdev->axi_clk); | |
1667 | if (ret) { | |
1668 | dev_err(xdev->dev, "failed to enable the axi clock\n"); | |
1669 | goto error; | |
1670 | } | |
1671 | ||
1672 | ret = dma_async_device_register(ddev); | |
1673 | if (ret) { | |
1674 | dev_err(xdev->dev, "failed to register the dma device\n"); | |
1675 | goto error_dma_async; | |
1676 | } | |
1677 | ||
1678 | ret = of_dma_controller_register(xdev->dev->of_node, | |
1679 | of_dma_xilinx_xlate, ddev); | |
1680 | if (ret) { | |
1681 | dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n"); | |
1682 | goto error_of_dma; | |
1683 | } | |
1684 | ||
1685 | xilinx_dpdma_enable_irq(xdev); | |
1686 | ||
1687 | xilinx_dpdma_debugfs_init(xdev); | |
1688 | ||
1689 | dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n"); | |
1690 | ||
1691 | return 0; | |
1692 | ||
1693 | error_of_dma: | |
1694 | dma_async_device_unregister(ddev); | |
1695 | error_dma_async: | |
1696 | clk_disable_unprepare(xdev->axi_clk); | |
1697 | error: | |
1698 | for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) | |
1699 | xilinx_dpdma_chan_remove(xdev->chan[i]); | |
1700 | ||
1701 | free_irq(xdev->irq, xdev); | |
1702 | ||
1703 | return ret; | |
1704 | } | |
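/*
 * Consumer-side sketch (assumed names, not part of this driver): once
 * probe has registered the DMA device, a display driver would typically
 * program framebuffer scan-out as a repeated interleaved transfer:
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt->dir = DMA_MEM_TO_DEV;
 *	tx = dmaengine_prep_interleaved_dma(chan, xt,
 *					    DMA_CTRL_ACK | DMA_PREP_REPEAT |
 *					    DMA_PREP_LOAD_EOT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * DMA_PREP_REPEAT and DMA_PREP_LOAD_EOT correspond to the DMA_REPEAT and
 * DMA_LOAD_EOT capabilities set in this probe function.
 */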
1705 | ||
1706 | static int xilinx_dpdma_remove(struct platform_device *pdev) | |
1707 | { | |
1708 | struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev); | |
1709 | unsigned int i; | |
1710 | ||
1711 | /* Start by disabling the IRQ to avoid races during cleanup. */ | |
1712 | free_irq(xdev->irq, xdev); | |
1713 | ||
1714 | xilinx_dpdma_disable_irq(xdev); | |
1715 | of_dma_controller_free(pdev->dev.of_node); | |
1716 | dma_async_device_unregister(&xdev->common); | |
1717 | clk_disable_unprepare(xdev->axi_clk); | |
1718 | ||
1719 | for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) | |
1720 | xilinx_dpdma_chan_remove(xdev->chan[i]); | |
1721 | ||
1722 | return 0; | |
1723 | } | |
1724 | ||
1725 | static const struct of_device_id xilinx_dpdma_of_match[] = { | |
1726 | { .compatible = "xlnx,zynqmp-dpdma", }, | |
1727 | { /* end of table */ }, | |
1728 | }; | |
1729 | MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match); | |
1730 | ||
1731 | static struct platform_driver xilinx_dpdma_driver = { | |
1732 | .probe = xilinx_dpdma_probe, | |
1733 | .remove = xilinx_dpdma_remove, | |
1734 | .driver = { | |
1735 | .name = "xilinx-zynqmp-dpdma", | |
1736 | .of_match_table = xilinx_dpdma_of_match, | |
1737 | }, | |
1738 | }; | |
1739 | ||
1740 | module_platform_driver(xilinx_dpdma_driver); | |
1741 | ||
1742 | MODULE_AUTHOR("Xilinx, Inc."); | |
1743 | MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver"); | |
1744 | MODULE_LICENSE("GPL v2"); |