]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/dma/ti-dma-crossbar.c
dmaengine: ti-dma-crossbar: DMA event/request router for TI DRA7xx and AM335x/AM437x SoCs
[mirror_ubuntu-bionic-kernel.git] / drivers / dma / ti-dma-crossbar.c
CommitLineData
a074ae38
PU
1/*
2 * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com
3 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 */
10#include <linux/slab.h>
11#include <linux/err.h>
12#include <linux/init.h>
13#include <linux/list.h>
14#include <linux/io.h>
a074ae38
PU
15#include <linux/of_address.h>
16#include <linux/of_device.h>
17#include <linux/of_dma.h>
18
42dbdcc6
PU
19#define TI_XBAR_DRA7 0
20#define TI_XBAR_AM335X 1
5f9367a8
PU
21static const u32 ti_xbar_type[] = {
22 [TI_XBAR_DRA7] = TI_XBAR_DRA7,
23 [TI_XBAR_AM335X] = TI_XBAR_AM335X,
24};
42dbdcc6
PU
25
26static const struct of_device_id ti_dma_xbar_match[] = {
27 {
28 .compatible = "ti,dra7-dma-crossbar",
5f9367a8 29 .data = &ti_xbar_type[TI_XBAR_DRA7],
42dbdcc6
PU
30 },
31 {
32 .compatible = "ti,am335x-edma-crossbar",
5f9367a8 33 .data = &ti_xbar_type[TI_XBAR_AM335X],
42dbdcc6
PU
34 },
35 {},
36};
37
38/* Crossbar on AM335x/AM437x family */
39#define TI_AM335X_XBAR_LINES 64
40
41struct ti_am335x_xbar_data {
42 void __iomem *iomem;
43
44 struct dma_router dmarouter;
45
46 u32 xbar_events; /* maximum number of events to select in xbar */
47 u32 dma_requests; /* number of DMA requests on eDMA */
48};
49
50struct ti_am335x_xbar_map {
51 u16 dma_line;
52 u16 mux_val;
53};
54
55static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val)
56{
57 writeb_relaxed(val & 0x1f, iomem + event);
58}
59
60static void ti_am335x_xbar_free(struct device *dev, void *route_data)
61{
62 struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev);
63 struct ti_am335x_xbar_map *map = route_data;
64
65 dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n",
66 map->mux_val, map->dma_line);
67
68 ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0);
69 kfree(map);
70}
71
72static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec,
73 struct of_dma *ofdma)
74{
75 struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
76 struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev);
77 struct ti_am335x_xbar_map *map;
78
79 if (dma_spec->args_count != 3)
80 return ERR_PTR(-EINVAL);
81
82 if (dma_spec->args[2] >= xbar->xbar_events) {
83 dev_err(&pdev->dev, "Invalid XBAR event number: %d\n",
84 dma_spec->args[2]);
85 return ERR_PTR(-EINVAL);
86 }
87
88 if (dma_spec->args[0] >= xbar->dma_requests) {
89 dev_err(&pdev->dev, "Invalid DMA request line number: %d\n",
90 dma_spec->args[0]);
91 return ERR_PTR(-EINVAL);
92 }
93
94 /* The of_node_put() will be done in the core for the node */
95 dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
96 if (!dma_spec->np) {
97 dev_err(&pdev->dev, "Can't get DMA master\n");
98 return ERR_PTR(-EINVAL);
99 }
100
101 map = kzalloc(sizeof(*map), GFP_KERNEL);
102 if (!map) {
103 of_node_put(dma_spec->np);
104 return ERR_PTR(-ENOMEM);
105 }
106
107 map->dma_line = (u16)dma_spec->args[0];
108 map->mux_val = (u16)dma_spec->args[2];
109
110 dma_spec->args[2] = 0;
111 dma_spec->args_count = 2;
112
113 dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n",
114 map->mux_val, map->dma_line);
115
116 ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val);
117
118 return map;
119}
120
121static const struct of_device_id ti_am335x_master_match[] = {
122 { .compatible = "ti,edma3-tpcc", },
123 {},
124};
125
126static int ti_am335x_xbar_probe(struct platform_device *pdev)
127{
128 struct device_node *node = pdev->dev.of_node;
129 const struct of_device_id *match;
130 struct device_node *dma_node;
131 struct ti_am335x_xbar_data *xbar;
132 struct resource *res;
133 void __iomem *iomem;
134 int i, ret;
135
136 if (!node)
137 return -ENODEV;
138
139 xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
140 if (!xbar)
141 return -ENOMEM;
142
143 dma_node = of_parse_phandle(node, "dma-masters", 0);
144 if (!dma_node) {
145 dev_err(&pdev->dev, "Can't get DMA master node\n");
146 return -ENODEV;
147 }
148
149 match = of_match_node(ti_am335x_master_match, dma_node);
150 if (!match) {
151 dev_err(&pdev->dev, "DMA master is not supported\n");
152 return -EINVAL;
153 }
154
155 if (of_property_read_u32(dma_node, "dma-requests",
156 &xbar->dma_requests)) {
157 dev_info(&pdev->dev,
158 "Missing XBAR output information, using %u.\n",
159 TI_AM335X_XBAR_LINES);
160 xbar->dma_requests = TI_AM335X_XBAR_LINES;
161 }
162 of_node_put(dma_node);
163
164 if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) {
165 dev_info(&pdev->dev,
166 "Missing XBAR input information, using %u.\n",
167 TI_AM335X_XBAR_LINES);
168 xbar->xbar_events = TI_AM335X_XBAR_LINES;
169 }
170
171 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
172 iomem = devm_ioremap_resource(&pdev->dev, res);
173 if (IS_ERR(iomem))
174 return PTR_ERR(iomem);
175
176 xbar->iomem = iomem;
177
178 xbar->dmarouter.dev = &pdev->dev;
179 xbar->dmarouter.route_free = ti_am335x_xbar_free;
180
181 platform_set_drvdata(pdev, xbar);
182
183 /* Reset the crossbar */
184 for (i = 0; i < xbar->dma_requests; i++)
185 ti_am335x_xbar_write(xbar->iomem, i, 0);
186
187 ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate,
188 &xbar->dmarouter);
189
190 return ret;
191}
192
193/* Crossbar on DRA7xx family */
194#define TI_DRA7_XBAR_OUTPUTS 127
195#define TI_DRA7_XBAR_INPUTS 256
a074ae38 196
42dbdcc6 197struct ti_dra7_xbar_data {
a074ae38
PU
198 void __iomem *iomem;
199
200 struct dma_router dmarouter;
ec9bfa1e
PU
201 struct mutex mutex;
202 unsigned long *dma_inuse;
a074ae38
PU
203
204 u16 safe_val; /* Value to rest the crossbar lines */
205 u32 xbar_requests; /* number of DMA requests connected to XBAR */
206 u32 dma_requests; /* number of DMA requests forwarded to DMA */
1eb995bb 207 u32 dma_offset;
a074ae38
PU
208};
209
42dbdcc6 210struct ti_dra7_xbar_map {
a074ae38
PU
211 u16 xbar_in;
212 int xbar_out;
213};
214
42dbdcc6 215static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val)
a074ae38
PU
216{
217 writew_relaxed(val, iomem + (xbar * 2));
218}
219
42dbdcc6 220static void ti_dra7_xbar_free(struct device *dev, void *route_data)
a074ae38 221{
42dbdcc6
PU
222 struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev);
223 struct ti_dra7_xbar_map *map = route_data;
a074ae38
PU
224
225 dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n",
226 map->xbar_in, map->xbar_out);
227
42dbdcc6 228 ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val);
ec9bfa1e
PU
229 mutex_lock(&xbar->mutex);
230 clear_bit(map->xbar_out, xbar->dma_inuse);
231 mutex_unlock(&xbar->mutex);
a074ae38
PU
232 kfree(map);
233}
234
42dbdcc6
PU
235static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec,
236 struct of_dma *ofdma)
a074ae38
PU
237{
238 struct platform_device *pdev = of_find_device_by_node(ofdma->of_node);
42dbdcc6
PU
239 struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev);
240 struct ti_dra7_xbar_map *map;
a074ae38
PU
241
242 if (dma_spec->args[0] >= xbar->xbar_requests) {
243 dev_err(&pdev->dev, "Invalid XBAR request number: %d\n",
244 dma_spec->args[0]);
245 return ERR_PTR(-EINVAL);
246 }
247
248 /* The of_node_put() will be done in the core for the node */
249 dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0);
250 if (!dma_spec->np) {
251 dev_err(&pdev->dev, "Can't get DMA master\n");
252 return ERR_PTR(-EINVAL);
253 }
254
255 map = kzalloc(sizeof(*map), GFP_KERNEL);
256 if (!map) {
257 of_node_put(dma_spec->np);
258 return ERR_PTR(-ENOMEM);
259 }
260
ec9bfa1e
PU
261 mutex_lock(&xbar->mutex);
262 map->xbar_out = find_first_zero_bit(xbar->dma_inuse,
263 xbar->dma_requests);
264 mutex_unlock(&xbar->mutex);
265 if (map->xbar_out == xbar->dma_requests) {
266 dev_err(&pdev->dev, "Run out of free DMA requests\n");
267 kfree(map);
268 return ERR_PTR(-ENOMEM);
269 }
270 set_bit(map->xbar_out, xbar->dma_inuse);
271
a074ae38
PU
272 map->xbar_in = (u16)dma_spec->args[0];
273
1eb995bb 274 dma_spec->args[0] = map->xbar_out + xbar->dma_offset;
a074ae38
PU
275
276 dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n",
277 map->xbar_in, map->xbar_out);
278
42dbdcc6 279 ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in);
a074ae38
PU
280
281 return map;
282}
283
5f9367a8
PU
284#define TI_XBAR_EDMA_OFFSET 0
285#define TI_XBAR_SDMA_OFFSET 1
286static const u32 ti_dma_offset[] = {
287 [TI_XBAR_EDMA_OFFSET] = 0,
288 [TI_XBAR_SDMA_OFFSET] = 1,
289};
290
42dbdcc6 291static const struct of_device_id ti_dra7_master_match[] = {
1eb995bb
PU
292 {
293 .compatible = "ti,omap4430-sdma",
5f9367a8 294 .data = &ti_dma_offset[TI_XBAR_SDMA_OFFSET],
1eb995bb
PU
295 },
296 {
297 .compatible = "ti,edma3",
5f9367a8 298 .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
1eb995bb 299 },
2adb2743
PU
300 {
301 .compatible = "ti,edma3-tpcc",
5f9367a8 302 .data = &ti_dma_offset[TI_XBAR_EDMA_OFFSET],
2adb2743 303 },
1eb995bb
PU
304 {},
305};
306
0f73f3e8
PU
/*
 * Mark the @len DMA request lines starting at @offset as unavailable in
 * the in-use bitmap @p, so neither the reset loops nor route allocation
 * touch them.
 *
 * Fix: the bits must be SET. The original called clear_bit(), which is a
 * no-op on the freshly zeroed bitmap and left "reserved" lines allocatable.
 */
static inline void ti_dra7_xbar_reserve(int offset, int len, unsigned long *p)
{
	for (; len > 0; len--)
		set_bit(offset + (len - 1), p);
}
312
42dbdcc6 313static int ti_dra7_xbar_probe(struct platform_device *pdev)
a074ae38
PU
314{
315 struct device_node *node = pdev->dev.of_node;
1eb995bb 316 const struct of_device_id *match;
a074ae38 317 struct device_node *dma_node;
42dbdcc6 318 struct ti_dra7_xbar_data *xbar;
0f73f3e8 319 struct property *prop;
a074ae38
PU
320 struct resource *res;
321 u32 safe_val;
e7282b66 322 int sz;
a074ae38
PU
323 void __iomem *iomem;
324 int i, ret;
325
326 if (!node)
327 return -ENODEV;
328
329 xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL);
330 if (!xbar)
331 return -ENOMEM;
332
333 dma_node = of_parse_phandle(node, "dma-masters", 0);
334 if (!dma_node) {
335 dev_err(&pdev->dev, "Can't get DMA master node\n");
336 return -ENODEV;
337 }
338
42dbdcc6 339 match = of_match_node(ti_dra7_master_match, dma_node);
1eb995bb
PU
340 if (!match) {
341 dev_err(&pdev->dev, "DMA master is not supported\n");
342 return -EINVAL;
343 }
344
a074ae38
PU
345 if (of_property_read_u32(dma_node, "dma-requests",
346 &xbar->dma_requests)) {
347 dev_info(&pdev->dev,
348 "Missing XBAR output information, using %u.\n",
42dbdcc6
PU
349 TI_DRA7_XBAR_OUTPUTS);
350 xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS;
a074ae38
PU
351 }
352 of_node_put(dma_node);
353
ec9bfa1e
PU
354 xbar->dma_inuse = devm_kcalloc(&pdev->dev,
355 BITS_TO_LONGS(xbar->dma_requests),
356 sizeof(unsigned long), GFP_KERNEL);
357 if (!xbar->dma_inuse)
358 return -ENOMEM;
359
a074ae38
PU
360 if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) {
361 dev_info(&pdev->dev,
362 "Missing XBAR input information, using %u.\n",
42dbdcc6
PU
363 TI_DRA7_XBAR_INPUTS);
364 xbar->xbar_requests = TI_DRA7_XBAR_INPUTS;
a074ae38
PU
365 }
366
367 if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val))
368 xbar->safe_val = (u16)safe_val;
369
0f73f3e8
PU
370
371 prop = of_find_property(node, "ti,reserved-dma-request-ranges", &sz);
372 if (prop) {
373 const char pname[] = "ti,reserved-dma-request-ranges";
374 u32 (*rsv_events)[2];
375 size_t nelm = sz / sizeof(*rsv_events);
376 int i;
377
378 if (!nelm)
379 return -EINVAL;
380
381 rsv_events = kcalloc(nelm, sizeof(*rsv_events), GFP_KERNEL);
382 if (!rsv_events)
383 return -ENOMEM;
384
385 ret = of_property_read_u32_array(node, pname, (u32 *)rsv_events,
386 nelm * 2);
387 if (ret)
388 return ret;
389
390 for (i = 0; i < nelm; i++) {
391 ti_dra7_xbar_reserve(rsv_events[i][0], rsv_events[i][1],
392 xbar->dma_inuse);
393 }
394 kfree(rsv_events);
395 }
396
a074ae38 397 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
a074ae38 398 iomem = devm_ioremap_resource(&pdev->dev, res);
28eb232f
AL
399 if (IS_ERR(iomem))
400 return PTR_ERR(iomem);
a074ae38
PU
401
402 xbar->iomem = iomem;
403
404 xbar->dmarouter.dev = &pdev->dev;
42dbdcc6 405 xbar->dmarouter.route_free = ti_dra7_xbar_free;
5f9367a8 406 xbar->dma_offset = *(u32 *)match->data;
a074ae38 407
ec9bfa1e 408 mutex_init(&xbar->mutex);
a074ae38
PU
409 platform_set_drvdata(pdev, xbar);
410
411 /* Reset the crossbar */
0f73f3e8
PU
412 for (i = 0; i < xbar->dma_requests; i++) {
413 if (!test_bit(i, xbar->dma_inuse))
414 ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val);
415 }
a074ae38 416
42dbdcc6 417 ret = of_dma_router_register(node, ti_dra7_xbar_route_allocate,
a074ae38
PU
418 &xbar->dmarouter);
419 if (ret) {
420 /* Restore the defaults for the crossbar */
0f73f3e8
PU
421 for (i = 0; i < xbar->dma_requests; i++) {
422 if (!test_bit(i, xbar->dma_inuse))
423 ti_dra7_xbar_write(xbar->iomem, i, i);
424 }
a074ae38
PU
425 }
426
427 return ret;
428}
429
42dbdcc6
PU
430static int ti_dma_xbar_probe(struct platform_device *pdev)
431{
432 const struct of_device_id *match;
433 int ret;
434
435 match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node);
436 if (unlikely(!match))
437 return -EINVAL;
438
5f9367a8 439 switch (*(u32 *)match->data) {
42dbdcc6
PU
440 case TI_XBAR_DRA7:
441 ret = ti_dra7_xbar_probe(pdev);
442 break;
443 case TI_XBAR_AM335X:
444 ret = ti_am335x_xbar_probe(pdev);
445 break;
446 default:
447 dev_err(&pdev->dev, "Unsupported crossbar\n");
448 ret = -ENODEV;
449 break;
450 }
451
452 return ret;
453}
a074ae38
PU
454
455static struct platform_driver ti_dma_xbar_driver = {
456 .driver = {
457 .name = "ti-dma-crossbar",
458 .of_match_table = of_match_ptr(ti_dma_xbar_match),
459 },
460 .probe = ti_dma_xbar_probe,
461};
462
d646162b 463static int omap_dmaxbar_init(void)
a074ae38
PU
464{
465 return platform_driver_register(&ti_dma_xbar_driver);
466}
467arch_initcall(omap_dmaxbar_init);