]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/dma/ti/dma-crossbar.c
treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 500
[mirror_ubuntu-hirsute-kernel.git] / drivers / dma / ti / dma-crossbar.c
CommitLineData
d2912cb1 1// SPDX-License-Identifier: GPL-2.0-only
a074ae38
PU
2/*
3 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
a074ae38
PU
5 */
6#include <linux/slab.h>
7#include <linux/err.h>
8#include <linux/init.h>
9#include <linux/list.h>
10#include <linux/io.h>
a074ae38
PU
11#include <linux/of_address.h>
12#include <linux/of_device.h>
13#include <linux/of_dma.h>
14
42dbdcc6
PU
15#define TI_XBAR_DRA7 0
16#define TI_XBAR_AM335X 1
5f9367a8
PU
17static const u32 ti_xbar_type[] = {
18 [TI_XBAR_DRA7] = TI_XBAR_DRA7,
19 [TI_XBAR_AM335X] = TI_XBAR_AM335X,
20};
42dbdcc6
PU
21
22static const struct of_device_id ti_dma_xbar_match[] = {
23 {
24 .compatible = "ti,dra7-dma-crossbar",
5f9367a8 25 .data = &ti_xbar_type[TI_XBAR_DRA7],
42dbdcc6
PU
26 },
27 {
28 .compatible = "ti,am335x-edma-crossbar",
5f9367a8 29 .data = &ti_xbar_type[TI_XBAR_AM335X],
42dbdcc6
PU
30 },
31 {},
32};
33
34/* Crossbar on AM335x/AM437x family */
35#define TI_AM335X_XBAR_LINES 64
36
37struct ti_am335x_xbar_data {
38 void __iomem *iomem;
39
40 struct dma_router dmarouter;
41
42 u32 xbar_events; /* maximum number of events to select in xbar */
43 u32 dma_requests; /* number of DMA requests on eDMA */
44};
45
46struct ti_am335x_xbar_map {
47 u16 dma_line;
288e7560 48 u8 mux_val;
42dbdcc6
PU
49};
50
288e7560 51static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u8 val)
42dbdcc6 52{
d087f157
V
53 /*
54 * TPCC_EVT_MUX_60_63 register layout is different than the
55 * rest, in the sense, that event 63 is mapped to lowest byte
56 * and event 60 is mapped to highest, handle it separately.
57 */
58 if (event >= 60 && event <= 63)
59 writeb_relaxed(val, iomem + (63 - event % 4));
60 else
61 writeb_relaxed(val, iomem + event);
42dbdcc6
PU
62}
63
64static void ti_am335x_xbar_free(struct device *dev, void *route_data)
65{
66 struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
67 struct ti_am335x_xbar_map *map = route_data;
68
69 dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
70 map->mux_val, map->dma_line);
71
72 ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
73 kfree(map);
74}
75
76static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
77 struct of_dma *ofdma)
78{
79 struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
80 struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
81 struct ti_am335x_xbar_map *map;
82
83 if (dma_spec->args_count != 3)
84 return ERR_PTR(-EINVAL);
85
86 if (dma_spec->args[2] >= xbar->xbar_events) {
87 dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
88 dma_spec->args[2]);
89 return ERR_PTR(-EINVAL);
90 }
91
92 if (dma_spec->args[0] >= xbar->dma_requests) {
93 dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
94 dma_spec->args[0]);
95 return ERR_PTR(-EINVAL);
96 }
97
98 /* The of_node_put() will be done in the core for the node */
99 dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
100 if (!dma_spec->np) {
101 dev_err(&pdev->dev, "Can't get DMA master\n");
102 return ERR_PTR(-EINVAL);
103 }
104
105 map = kzalloc(sizeof(*map), GFP_KERNEL);
106 if (!map) {
107 of_node_put(dma_spec->np);
108 return ERR_PTR(-ENOMEM);
109 }
110
111 map->dma_line = (u16)dma_spec->args[0];
288e7560 112 map->mux_val = (u8)dma_spec->args[2];
42dbdcc6
PU
113
114 dma_spec->args[2] = 0;
115 dma_spec->args_count = 2;
116
117 dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
118 map->mux_val, map->dma_line);
119
120 ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
121
122 return map;
123}
124
125static const struct of_device_id ti_am335x_master_match[] = {
126 { .compatible = "ti,edma3-tpcc", },
127 {},
128};
129
130static int ti_am335x_xbar_probe(struct platform_device *pdev)
131{
132 struct device_node *node = pdev->dev.of_node;
133 const struct of_device_id *match;
134 struct device_node *dma_node;
135 struct ti_am335x_xbar_data *xbar;
136 struct resource *res;
137 void __iomem *iomem;
138 int i, ret;
139
140 if (!node)
141 return -ENODEV;
142
143 xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
144 if (!xbar)
145 return -ENOMEM;
146
147 dma_node = of_parse_phandle(node, "dma-masters", 0);
148 if (!dma_node) {
149 dev_err(&pdev->dev, "Can't get DMA master node\n");
150 return -ENODEV;
151 }
152
153 match = of_match_node(ti_am335x_master_match, dma_node);
154 if (!match) {
155 dev_err(&pdev->dev, "DMA master is not supported\n");
75bdc7f3 156 of_node_put(dma_node);
42dbdcc6
PU
157 return -EINVAL;
158 }
159
160 if (of_property_read_u32(dma_node, "dma-requests",
161 &xbar->dma_requests)) {
162 dev_info(&pdev->dev,
163 "Missing XBAR output information, using %u.\n",
164 TI_AM335X_XBAR_LINES);
165 xbar->dma_requests = TI_AM335X_XBAR_LINES;
166 }
167 of_node_put(dma_node);
168
169 if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
170 dev_info(&pdev->dev,
171 "Missing XBAR input information, using %u.\n",
172 TI_AM335X_XBAR_LINES);
173 xbar->xbar_events = TI_AM335X_XBAR_LINES;
174 }
175
176 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
177 iomem = devm_ioremap_resource(&pdev->dev, res);
178 if (IS_ERR(iomem))
179 return PTR_ERR(iomem);
180
181 xbar->iomem = iomem;
182
183 xbar->dmarouter.dev = &pdev->dev;
184 xbar->dmarouter.route_free = ti_am335x_xbar_free;
185
186 platform_set_drvdata(pdev, xbar);
187
188 /* Reset the crossbar */
189 for (i = 0; i < xbar->dma_requests; i++)
190 ti_am335x_xbar_write(xbar->iomem, i, 0);
191
192 ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
193 &xbar->dmarouter);
194
195 return ret;
196}
197
198/* Crossbar on DRA7xx family */
199#define TI_DRA7_XBAR_OUTPUTS 127
200#define TI_DRA7_XBAR_INPUTS 256
a074ae38 201
42dbdcc6 202struct ti_dra7_xbar_data {
a074ae38
PU
203 void __iomem *iomem;
204
205 struct dma_router dmarouter;
ec9bfa1e
PU
206 struct mutex mutex;
207 unsigned long *dma_inuse;
a074ae38
PU
208
209 u16 safe_val; /* Value to rest the crossbar lines */
210 u32 xbar_requests; /* number of DMA requests connected to XBAR */
211 u32 dma_requests; /* number of DMA requests forwarded to DMA */
1eb995bb 212 u32 dma_offset;
a074ae38
PU
213};
214
42dbdcc6 215struct ti_dra7_xbar_map {
a074ae38
PU
216 u16 xbar_in;
217 int xbar_out;
218};
219
42dbdcc6 220static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
a074ae38
PU
221{
222 writew_relaxed(val, iomem + (xbar * 2));
223}
224
42dbdcc6 225static void ti_dra7_xbar_free(struct device *dev, void *route_data)
a074ae38 226{
42dbdcc6
PU
227 struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
228 struct ti_dra7_xbar_map *map = route_data;
a074ae38
PU
229
230 dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
231 map->xbar_in, map->xbar_out);
232
42dbdcc6 233 ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
ec9bfa1e
PU
234 mutex_lock(&xbar->mutex);
235 clear_bit(map->xbar_out, xbar->dma_inuse);
236 mutex_unlock(&xbar->mutex);
a074ae38
PU
237 kfree(map);
238}
239
42dbdcc6
PU
240static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
241 struct of_dma *ofdma)
a074ae38
PU
242{
243 struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
42dbdcc6
PU
244 struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
245 struct ti_dra7_xbar_map *map;
a074ae38
PU
246
247 if (dma_spec->args[0] >= xbar->xbar_requests) {
248 dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
249 dma_spec->args[0]);
250 return ERR_PTR(-EINVAL);
251 }
252
253 /* The of_node_put() will be done in the core for the node */
254 dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
255 if (!dma_spec->np) {
256 dev_err(&pdev->dev, "Can't get DMA master\n");
257 return ERR_PTR(-EINVAL);
258 }
259
260 map = kzalloc(sizeof(*map), GFP_KERNEL);
261 if (!map) {
262 of_node_put(dma_spec->np);
263 return ERR_PTR(-ENOMEM);
264 }
265
ec9bfa1e
PU
266 mutex_lock(&xbar->mutex);
267 map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
268 xbar->dma_requests);
ec9bfa1e 269 if (map->xbar_out == xbar->dma_requests) {
2ccb4837 270 mutex_unlock(&xbar->mutex);
ec9bfa1e
PU
271 dev_err(&pdev->dev, "Run out of free DMA requests\n");
272 kfree(map);
273 return ERR_PTR(-ENOMEM);
274 }
275 set_bit(map->xbar_out, xbar->dma_inuse);
2ccb4837 276 mutex_unlock(&xbar->mutex);
ec9bfa1e 277
a074ae38
PU
278 map->xbar_in = (u16)dma_spec->args[0];
279
1eb995bb 280 dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
a074ae38
PU
281
282 dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
283 map->xbar_in, map->xbar_out);
284
42dbdcc6 285 ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
a074ae38
PU
286
287 return map;
288}
289
5f9367a8
PU
290#define TI_XBAR_EDMA_OFFSET 0
291#define TI_XBAR_SDMA_OFFSET 1
292static const u32 ti_dma_offset[] = {
293 [TI_XBAR_EDMA_OFFSET] = 0,
294 [TI_XBAR_SDMA_OFFSET] = 1,
295};
296
42dbdcc6 297static const struct of_device_id ti_dra7_master_match[] = {
1eb995bb
PU
298 {
299 .compatible = "ti,omap4430-sdma",
5f9367a8 300 .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
1eb995bb
PU
301 },
302 {
303 .compatible = "ti,edma3",
5f9367a8 304 .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
1eb995bb 305 },
2adb2743
PU
306 {
307 .compatible = "ti,edma3-tpcc",
5f9367a8 308 .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
2adb2743 309 },
1eb995bb
PU
310 {},
311};
/* Mark bits [offset, offset + len) of bitmap @p as in use */
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	int i;

	for (i = 0; i < len; i++)
		set_bit(offset + i, p);
}
318
42dbdcc6 319static int ti_dra7_xbar_probe(struct platform_device *pdev)
a074ae38
PU
320{
321 struct device_node *node = pdev->dev.of_node;
1eb995bb 322 const struct of_device_id *match;
a074ae38 323 struct device_node *dma_node;
42dbdcc6 324 struct ti_dra7_xbar_data *xbar;
0f73f3e8 325 struct property *prop;
a074ae38
PU
326 struct resource *res;
327 u32 safe_val;
e7282b66 328 int sz;
a074ae38
PU
329 void __iomem *iomem;
330 int i, ret;
331
332 if (!node)
333 return -ENODEV;
334
335 xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
336 if (!xbar)
337 return -ENOMEM;
338
339 dma_node = of_parse_phandle(node, "dma-masters", 0);
340 if (!dma_node) {
341 dev_err(&pdev->dev, "Can't get DMA master node\n");
342 return -ENODEV;
343 }
344
42dbdcc6 345 match = of_match_node(ti_dra7_master_match, dma_node);
1eb995bb
PU
346 if (!match) {
347 dev_err(&pdev->dev, "DMA master is not supported\n");
75bdc7f3 348 of_node_put(dma_node);
1eb995bb
PU
349 return -EINVAL;
350 }
351
a074ae38
PU
352 if (of_property_read_u32(dma_node, "dma-requests",
353 &xbar->dma_requests)) {
354 dev_info(&pdev->dev,
355 "Missing XBAR output information, using %u.\n",
42dbdcc6
PU
356 TI_DRA7_XBAR_OUTPUTS);
357 xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
a074ae38
PU
358 }
359 of_node_put(dma_node);
360
ec9bfa1e
PU
361 xbar->dma_inuse = devm_kcalloc(&pdev->dev,
362 BITS_TO_LONGS(xbar->dma_requests),
363 sizeof(unsigned long), GFP_KERNEL);
364 if (!xbar->dma_inuse)
365 return -ENOMEM;
366
a074ae38
PU
367 if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
368 dev_info(&pdev->dev,
369 "Missing XBAR input information, using %u.\n",
42dbdcc6
PU
370 TI_DRA7_XBAR_INPUTS);
371 xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
a074ae38
PU
372 }
373
374 if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
375 xbar->safe_val = (u16)safe_val;
376
0f73f3e8
PU
377
378 prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
379 if (prop) {
380 const char pname[] = "ti,reserved-dma-request-ranges";
381 u32 (*rsv_events)[2];
382 size_t nelm = sz / sizeof(*rsv_events);
383 int i;
384
385 if (!nelm)
386 return -EINVAL;
387
388 rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
389 if (!rsv_events)
390 return -ENOMEM;
391
392 ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
393 nelm * 2);
394 if (ret)
395 return ret;
396
397 for (i = 0; i < nelm; i++) {
398 ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
399 xbar->dma_inuse);
400 }
401 kfree(rsv_events);
402 }
403
a074ae38 404 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
a074ae38 405 iomem = devm_ioremap_resource(&pdev->dev, res);
28eb232f
AL
406 if (IS_ERR(iomem))
407 return PTR_ERR(iomem);
a074ae38
PU
408
409 xbar->iomem = iomem;
410
411 xbar->dmarouter.dev = &pdev->dev;
42dbdcc6 412 xbar->dmarouter.route_free = ti_dra7_xbar_free;
5f9367a8 413 xbar->dma_offset = *(u32 *)match->data;
a074ae38 414
ec9bfa1e 415 mutex_init(&xbar->mutex);
a074ae38
PU
416 platform_set_drvdata(pdev, xbar);
417
418 /* Reset the crossbar */
0f73f3e8
PU
419 for (i = 0; i < xbar->dma_requests; i++) {
420 if (!test_bit(i, xbar->dma_inuse))
421 ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
422 }
a074ae38 423
42dbdcc6 424 ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
a074ae38
PU
425 &xbar->dmarouter);
426 if (ret) {
427 /* Restore the defaults for the crossbar */
0f73f3e8
PU
428 for (i = 0; i < xbar->dma_requests; i++) {
429 if (!test_bit(i, xbar->dma_inuse))
430 ti_dra7_xbar_write(xbar->iomem, i, i);
431 }
a074ae38
PU
432 }
433
434 return ret;
435}
436
42dbdcc6
PU
437static int ti_dma_xbar_probe(struct platform_device *pdev)
438{
439 const struct of_device_id *match;
440 int ret;
441
442 match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
443 if (unlikely(!match))
444 return -EINVAL;
445
5f9367a8 446 switch (*(u32 *)match->data) {
42dbdcc6
PU
447 case TI_XBAR_DRA7:
448 ret = ti_dra7_xbar_probe(pdev);
449 break;
450 case TI_XBAR_AM335X:
451 ret = ti_am335x_xbar_probe(pdev);
452 break;
453 default:
454 dev_err(&pdev->dev, "Unsupported crossbar\n");
455 ret = -ENODEV;
456 break;
457 }
458
459 return ret;
460}
a074ae38
PU
461
462static struct platform_driver ti_dma_xbar_driver = {
463 .driver = {
464 .name = "ti-dma-crossbar",
465 .of_match_table = of_match_ptr(ti_dma_xbar_match),
466 },
467 .probe = ti_dma_xbar_probe,
468};
469
d646162b 470static int omap_dmaxbar_init(void)
a074ae38
PU
471{
472 return platform_driver_register(&ti_dma_xbar_driver);
473}
474arch_initcall(omap_dmaxbar_init);