/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry(mdesc, &list, node) {
		enum dma_status llstat;

		desc = &mdesc->desc;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		if (desc->callback && (llstat == DMA_COMPLETE))
			desc->callback(desc->callback_param);

		dma_run_dependencies(desc);
	}

	/* Free descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&list, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

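/*
 * Allocate a channel, initialize its descriptor lists and register it
 * with the dmaengine core.
 */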
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

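/*
 * Deferred start: used when hidma_issue_pending() could not power up
 * the device synchronously.
 */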
static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

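/* Start executing the descriptors that have been queued on this channel. */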
static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

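/*
 * Report the transaction status; a transaction on a paused channel is
 * reported as DMA_PAUSED only while it is the currently running descriptor.
 */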
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

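/*
 * Preallocate nr_descriptors descriptors for this channel and reserve
 * the matching transfer ring entries in the lower-level driver.
 */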
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

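/*
 * Prepare a memcpy descriptor: take one descriptor off the free list,
 * program the transfer parameters and park it on the prepared list
 * until it is submitted via tx_submit.
 */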
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

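/*
 * Stop the channel, flush all queued work back to the client and
 * re-enable the hardware so the channel can be reused.
 */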
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;
		dma_async_tx_callback callback = mdesc->desc.callback;
		void *param = mdesc->desc.callback_param;

		dma_descriptor_unmap(txd);

		if (callback)
			callback(param);

		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

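/*
 * Terminate all transactions and reinitialize the hardware to a clean
 * state.
 */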
static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

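/*
 * Undo hidma_alloc_chan_resources: terminate what is still running and
 * release every descriptor owned by this channel.
 */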
static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = false;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

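/* Pause the channel; an in-flight transfer is suspended, not aborted. */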
static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

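/* Resume a previously paused channel. */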
static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel\n");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

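/*
 * sysfs support: expose the hardware channel id ("chid") of this DMA
 * instance under the platform device.
 */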
static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static int hidma_create_sysfs_entry(struct hidma_dev *dev, char *name,
				    int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return -ENOMEM;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return device_create_file(dev->ddev.dev, attrs);
}

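/*
 * Map the TRCA and EVCA register regions, set up the channel interrupt,
 * initialize the lower-level driver and register one memcpy-capable
 * channel with the dmaengine core.
 */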
static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = PTR_ERR(trca);
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = PTR_ERR(evca);
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
		dmadev->nr_descriptors = nr_desc_prm;

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

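	/* read the hardware channel id from the TRCA register space */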
	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
			      "qcom-hidma", dmadev->lldev);
	if (rc)
		goto uninit;

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_create_sysfs_entry(dmadev, "chid", S_IRUGO);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	platform_set_drvdata(pdev, dmadev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{},
};
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");