]>
Commit | Line | Data |
---|---|---|
a6e9be05 JB |
1 | // SPDX-License-Identifier: GPL-2.0 |
2 | // | |
3 | // Copyright (C) 2019 Linaro Ltd. | |
4 | // Copyright (C) 2019 Socionext Inc. | |
5 | ||
6 | #include <linux/bits.h> | |
7 | #include <linux/dma-mapping.h> | |
8 | #include <linux/dmaengine.h> | |
9 | #include <linux/interrupt.h> | |
10 | #include <linux/iopoll.h> | |
11 | #include <linux/list.h> | |
12 | #include <linux/module.h> | |
13 | #include <linux/of_dma.h> | |
14 | #include <linux/platform_device.h> | |
15 | #include <linux/slab.h> | |
16 | #include <linux/types.h> | |
17 | #include <linux/bitfield.h> | |
18 | ||
19 | #include "virt-dma.h" | |
20 | ||
/* global register */
#define M10V_XDACS 0x00		/* global control; only the XE enable bit is used here */

/* channel local register (each channel occupies a 0x30-byte window) */
#define M10V_XDTBC 0x10		/* transfer byte count, programmed as len - 1 */
#define M10V_XDSSA 0x14		/* source address */
#define M10V_XDDSA 0x18		/* destination address */
#define M10V_XDSAC 0x1C		/* source access config (burst size/length fields) */
#define M10V_XDDAC 0x20		/* destination access config (burst size/length fields) */
#define M10V_XDDCC 0x24		/* unused by this driver — purpose per datasheet */
#define M10V_XDDES 0x28		/* channel enable/start control */
#define M10V_XDDPC 0x2C		/* unused by this driver — purpose per datasheet */
#define M10V_XDDSD 0x30		/* channel status; IS field is acked in the IRQ handler */

#define M10V_XDACS_XE BIT(28)	/* global transfer enable */

/* default burst size / burst length programmed into XDSAC/XDDAC */
#define M10V_DEFBS 0x3
#define M10V_DEFBL 0xf

#define M10V_XDSAC_SBS	GENMASK(17, 16)	/* source burst size */
#define M10V_XDSAC_SBL	GENMASK(11, 8)	/* source burst length */

#define M10V_XDDAC_DBS	GENMASK(17, 16)	/* destination burst size */
#define M10V_XDDAC_DBL	GENMASK(11, 8)	/* destination burst length */

#define M10V_XDDES_CE	BIT(28)		/* channel enable */
#define M10V_XDDES_SE	BIT(24)		/* set on start; presumably software start — confirm with datasheet */
#define M10V_XDDES_SA	BIT(15)		/* defined but unused in this file */
#define M10V_XDDES_TF	GENMASK(23, 20)
#define M10V_XDDES_EI	BIT(1)		/* set on start; presumably error interrupt enable — confirm */
#define M10V_XDDES_TI	BIT(0)		/* set on start; presumably transfer-end interrupt enable — confirm */

#define M10V_XDDSD_IS_MASK	GENMASK(3, 0)	/* interrupt status field, cleared to ack */
#define M10V_XDDSD_IS_NORMAL	0x8		/* defined but unused in this file */

/* bus widths advertised to the dmaengine core */
#define MLB_XDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
60 | ||
/*
 * One memcpy transaction. src/dst/len are programmed verbatim into the
 * channel registers (XDSSA/XDDSA/XDTBC) by milbeaut_chan_start().
 */
struct milbeaut_xdmac_desc {
	struct virt_dma_desc vd;	/* virt-dma bookkeeping; container_of anchor */
	size_t len;			/* transfer length in bytes */
	dma_addr_t src;			/* DMA source address */
	dma_addr_t dst;			/* DMA destination address */
};
67 | ||
/*
 * Per-channel state. All fields are protected by vc.lock (see the
 * "must be held by caller" annotations below).
 */
struct milbeaut_xdmac_chan {
	struct virt_dma_chan vc;		/* virt-dma channel; container_of anchor */
	struct milbeaut_xdmac_desc *md;		/* descriptor currently in flight, or NULL */
	void __iomem *reg_ch_base;		/* this channel's 0x30-byte register window */
};
73 | ||
74 | struct milbeaut_xdmac_device { | |
75 | struct dma_device ddev; | |
76 | void __iomem *reg_base; | |
77 | struct milbeaut_xdmac_chan channels[0]; | |
78 | }; | |
79 | ||
/* Convert an embedded virt_dma_chan back to its milbeaut channel. */
static struct milbeaut_xdmac_chan *
to_milbeaut_xdmac_chan(struct virt_dma_chan *vc)
{
	return container_of(vc, struct milbeaut_xdmac_chan, vc);
}
85 | ||
/* Convert an embedded virt_dma_desc back to its milbeaut descriptor. */
static struct milbeaut_xdmac_desc *
to_milbeaut_xdmac_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct milbeaut_xdmac_desc, vd);
}
91 | ||
92 | /* mc->vc.lock must be held by caller */ | |
93 | static struct milbeaut_xdmac_desc * | |
94 | milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc) | |
95 | { | |
96 | struct virt_dma_desc *vd; | |
97 | ||
98 | vd = vchan_next_desc(&mc->vc); | |
99 | if (!vd) { | |
100 | mc->md = NULL; | |
101 | return NULL; | |
102 | } | |
103 | ||
104 | list_del(&vd->node); | |
105 | ||
106 | mc->md = to_milbeaut_xdmac_desc(vd); | |
107 | ||
108 | return mc->md; | |
109 | } | |
110 | ||
/*
 * Program a descriptor into the channel registers and kick the transfer.
 * mc->vc.lock must be held by caller.
 */
static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc,
				struct milbeaut_xdmac_desc *md)
{
	u32 val;

	/* Setup the channel */
	val = md->len - 1;	/* XDTBC takes length minus one — TODO confirm against datasheet */
	writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC);

	val = md->src;
	writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA);

	val = md->dst;
	writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA);

	/* Source side: apply the driver-default burst size/length. */
	val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC);
	val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL);
	val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) |
	       FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL);
	writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC);

	/* Destination side: same defaults as the source side. */
	val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC);
	val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL);
	val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) |
	       FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL);
	writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC);

	/* Start the channel: set enable/start/interrupt bits in one write. */
	val = readl_relaxed(mc->reg_ch_base + M10V_XDDES);
	val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF |
		 M10V_XDDES_EI | M10V_XDDES_TI);
	val |= FIELD_PREP(M10V_XDDES_CE, 1) | FIELD_PREP(M10V_XDDES_SE, 1) |
	       FIELD_PREP(M10V_XDDES_TF, 1) | FIELD_PREP(M10V_XDDES_EI, 1) |
	       FIELD_PREP(M10V_XDDES_TI, 1);
	writel_relaxed(val, mc->reg_ch_base + M10V_XDDES);
}
148 | ||
/*
 * Dequeue the next descriptor (if any) and start it on the hardware.
 * mc->vc.lock must be held by caller.
 */
static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
{
	struct milbeaut_xdmac_desc *md = milbeaut_xdmac_next_desc(mc);

	if (md)
		milbeaut_chan_start(mc, md);
}
158 | ||
159 | static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id) | |
160 | { | |
161 | struct milbeaut_xdmac_chan *mc = dev_id; | |
162 | struct milbeaut_xdmac_desc *md; | |
163 | unsigned long flags; | |
164 | u32 val; | |
165 | ||
166 | spin_lock_irqsave(&mc->vc.lock, flags); | |
167 | ||
168 | /* Ack and Stop */ | |
169 | val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0); | |
170 | writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD); | |
171 | ||
172 | md = mc->md; | |
173 | if (!md) | |
174 | goto out; | |
175 | ||
176 | vchan_cookie_complete(&md->vd); | |
177 | ||
178 | milbeaut_xdmac_start(mc); | |
179 | out: | |
180 | spin_unlock_irqrestore(&mc->vc.lock, flags); | |
181 | return IRQ_HANDLED; | |
182 | } | |
183 | ||
/* dmaengine ->device_free_chan_resources: drop all virt-dma descriptors. */
static void milbeaut_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);

	vchan_free_chan_resources(vc);
}
188 | ||
189 | static struct dma_async_tx_descriptor * | |
190 | milbeaut_xdmac_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, | |
191 | dma_addr_t src, size_t len, unsigned long flags) | |
192 | { | |
193 | struct virt_dma_chan *vc = to_virt_chan(chan); | |
194 | struct milbeaut_xdmac_desc *md; | |
195 | ||
196 | md = kzalloc(sizeof(*md), GFP_NOWAIT); | |
197 | if (!md) | |
198 | return NULL; | |
199 | ||
200 | md->len = len; | |
201 | md->src = src; | |
202 | md->dst = dst; | |
203 | ||
204 | return vchan_tx_prep(vc, &md->vd, flags); | |
205 | } | |
206 | ||
207 | static int milbeaut_xdmac_terminate_all(struct dma_chan *chan) | |
208 | { | |
209 | struct virt_dma_chan *vc = to_virt_chan(chan); | |
210 | struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc); | |
211 | unsigned long flags; | |
212 | u32 val; | |
213 | ||
214 | LIST_HEAD(head); | |
215 | ||
216 | spin_lock_irqsave(&vc->lock, flags); | |
217 | ||
218 | /* Halt the channel */ | |
219 | val = readl(mc->reg_ch_base + M10V_XDDES); | |
220 | val &= ~M10V_XDDES_CE; | |
221 | val |= FIELD_PREP(M10V_XDDES_CE, 0); | |
222 | writel(val, mc->reg_ch_base + M10V_XDDES); | |
223 | ||
224 | if (mc->md) { | |
225 | vchan_terminate_vdesc(&mc->md->vd); | |
226 | mc->md = NULL; | |
227 | } | |
228 | ||
229 | vchan_get_all_descriptors(vc, &head); | |
230 | ||
231 | spin_unlock_irqrestore(&vc->lock, flags); | |
232 | ||
233 | vchan_dma_desc_free_list(vc, &head); | |
234 | ||
235 | return 0; | |
236 | } | |
237 | ||
/* dmaengine ->device_synchronize: wait out a prior terminate_all. */
static void milbeaut_xdmac_synchronize(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);

	vchan_synchronize(vc);
}
242 | ||
243 | static void milbeaut_xdmac_issue_pending(struct dma_chan *chan) | |
244 | { | |
245 | struct virt_dma_chan *vc = to_virt_chan(chan); | |
246 | struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc); | |
247 | unsigned long flags; | |
248 | ||
249 | spin_lock_irqsave(&vc->lock, flags); | |
250 | ||
251 | if (vchan_issue_pending(vc) && !mc->md) | |
252 | milbeaut_xdmac_start(mc); | |
253 | ||
254 | spin_unlock_irqrestore(&vc->lock, flags); | |
255 | } | |
256 | ||
/* virt-dma desc_free callback: release a completed/terminated descriptor. */
static void milbeaut_xdmac_desc_free(struct virt_dma_desc *vd)
{
	struct milbeaut_xdmac_desc *md = to_milbeaut_xdmac_desc(vd);

	kfree(md);
}
261 | ||
/*
 * Initialize one DMA channel: claim its interrupt (one IRQ per channel,
 * indexed by chan_id), map its register window, and register it with
 * the virt-dma framework. Returns 0 or a negative errno. All resources
 * are devm-managed, so no explicit teardown is needed on failure.
 */
static int milbeaut_xdmac_chan_init(struct platform_device *pdev,
				    struct milbeaut_xdmac_device *mdev,
				    int chan_id)
{
	struct device *dev = &pdev->dev;
	struct milbeaut_xdmac_chan *mc = &mdev->channels[chan_id];
	char *irq_name;
	int irq, ret;

	irq = platform_get_irq(pdev, chan_id);
	if (irq < 0)
		return irq;

	irq_name = devm_kasprintf(dev, GFP_KERNEL, "milbeaut-xdmac-%d",
				  chan_id);
	if (!irq_name)
		return -ENOMEM;

	ret = devm_request_irq(dev, irq, milbeaut_xdmac_interrupt,
			       IRQF_SHARED, irq_name, mc);
	if (ret)
		return ret;

	/* Each channel owns a 0x30-byte slice of the register space. */
	mc->reg_ch_base = mdev->reg_base + chan_id * 0x30;

	mc->vc.desc_free = milbeaut_xdmac_desc_free;
	vchan_init(&mc->vc, &mdev->ddev);

	return 0;
}
292 | ||
293 | static void enable_xdmac(struct milbeaut_xdmac_device *mdev) | |
294 | { | |
295 | unsigned int val; | |
296 | ||
297 | val = readl(mdev->reg_base + M10V_XDACS); | |
298 | val |= M10V_XDACS_XE; | |
299 | writel(val, mdev->reg_base + M10V_XDACS); | |
300 | } | |
301 | ||
302 | static void disable_xdmac(struct milbeaut_xdmac_device *mdev) | |
303 | { | |
304 | unsigned int val; | |
305 | ||
306 | val = readl(mdev->reg_base + M10V_XDACS); | |
307 | val &= ~M10V_XDACS_XE; | |
308 | writel(val, mdev->reg_base + M10V_XDACS); | |
309 | } | |
310 | ||
311 | static int milbeaut_xdmac_probe(struct platform_device *pdev) | |
312 | { | |
313 | struct device *dev = &pdev->dev; | |
314 | struct milbeaut_xdmac_device *mdev; | |
315 | struct dma_device *ddev; | |
316 | int nr_chans, ret, i; | |
317 | ||
318 | nr_chans = platform_irq_count(pdev); | |
319 | if (nr_chans < 0) | |
320 | return nr_chans; | |
321 | ||
322 | mdev = devm_kzalloc(dev, struct_size(mdev, channels, nr_chans), | |
323 | GFP_KERNEL); | |
324 | if (!mdev) | |
325 | return -ENOMEM; | |
326 | ||
327 | mdev->reg_base = devm_platform_ioremap_resource(pdev, 0); | |
328 | if (IS_ERR(mdev->reg_base)) | |
329 | return PTR_ERR(mdev->reg_base); | |
330 | ||
331 | ddev = &mdev->ddev; | |
332 | ddev->dev = dev; | |
333 | dma_cap_set(DMA_MEMCPY, ddev->cap_mask); | |
334 | ddev->src_addr_widths = MLB_XDMAC_BUSWIDTHS; | |
335 | ddev->dst_addr_widths = MLB_XDMAC_BUSWIDTHS; | |
336 | ddev->device_free_chan_resources = milbeaut_xdmac_free_chan_resources; | |
337 | ddev->device_prep_dma_memcpy = milbeaut_xdmac_prep_memcpy; | |
338 | ddev->device_terminate_all = milbeaut_xdmac_terminate_all; | |
339 | ddev->device_synchronize = milbeaut_xdmac_synchronize; | |
340 | ddev->device_tx_status = dma_cookie_status; | |
341 | ddev->device_issue_pending = milbeaut_xdmac_issue_pending; | |
342 | INIT_LIST_HEAD(&ddev->channels); | |
343 | ||
344 | for (i = 0; i < nr_chans; i++) { | |
345 | ret = milbeaut_xdmac_chan_init(pdev, mdev, i); | |
346 | if (ret) | |
347 | return ret; | |
348 | } | |
349 | ||
350 | enable_xdmac(mdev); | |
351 | ||
352 | ret = dma_async_device_register(ddev); | |
353 | if (ret) | |
354 | return ret; | |
355 | ||
356 | ret = of_dma_controller_register(dev->of_node, | |
357 | of_dma_simple_xlate, mdev); | |
358 | if (ret) | |
359 | goto unregister_dmac; | |
360 | ||
361 | platform_set_drvdata(pdev, mdev); | |
362 | ||
363 | return 0; | |
364 | ||
365 | unregister_dmac: | |
366 | dma_async_device_unregister(ddev); | |
367 | return ret; | |
368 | } | |
369 | ||
/*
 * Remove: terminate any in-flight transfer on each channel, then
 * unregister from OF and the dmaengine core, and disable the hardware.
 * Returns 0, or a negative errno if a channel refuses to terminate
 * (in which case teardown is aborted part-way).
 */
static int milbeaut_xdmac_remove(struct platform_device *pdev)
{
	struct milbeaut_xdmac_device *mdev = platform_get_drvdata(pdev);
	struct dma_chan *chan;
	int ret;

	/*
	 * Before reaching here, almost all descriptors have been freed by the
	 * ->device_free_chan_resources() hook. However, each channel might
	 * be still holding one descriptor that was on-flight at that moment.
	 * Terminate it to make sure this hardware is no longer running. Then,
	 * free the channel resources once again to avoid memory leak.
	 */
	list_for_each_entry(chan, &mdev->ddev.channels, device_node) {
		ret = dmaengine_terminate_sync(chan);
		if (ret)
			return ret;
		milbeaut_xdmac_free_chan_resources(chan);
	}

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->ddev);

	disable_xdmac(mdev);

	return 0;
}
397 | ||
/* Device-tree match table; also exported for module autoloading. */
static const struct of_device_id milbeaut_xdmac_match[] = {
	{ .compatible = "socionext,milbeaut-m10v-xdmac" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, milbeaut_xdmac_match);
403 | ||
/* Platform driver glue: probe/remove plus OF matching. */
static struct platform_driver milbeaut_xdmac_driver = {
	.probe = milbeaut_xdmac_probe,
	.remove = milbeaut_xdmac_remove,
	.driver = {
		.name = "milbeaut-m10v-xdmac",
		.of_match_table = milbeaut_xdmac_match,
	},
};
module_platform_driver(milbeaut_xdmac_driver);

MODULE_DESCRIPTION("Milbeaut XDMAC DmaEngine driver");
MODULE_LICENSE("GPL v2");