/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
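/*
 * For example (sketch only; the instance suffix <xy> varies by platform,
 * value is in milliseconds):
 *	echo 5000 > /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 */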
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

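/*
 * Descriptor lifecycle (the per-channel lists are set up in
 * hidma_chan_init()): descriptors start on the free list, move to
 * prepared in hidma_prep_dma_memcpy(), to active in hidma_tx_submit(),
 * to completed from hidma_callback(), and back to free in
 * hidma_process_completed(), which then invokes the client callback.
 */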
/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);

		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

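/*
 * Note: hidma_probe() calls hidma_chan_init() exactly once (with dma_sig 0),
 * so each HIDMA device instance exposes a single DMA channel.
 */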
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

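/*
 * If the asynchronous pm_runtime_get() below fails (the device is not
 * powered yet), starting the channel is deferred to hidma_issue_task(),
 * which resumes the device synchronously before calling hidma_ll_start().
 * The PM reference itself is dropped from hidma_callback().
 */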
static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

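/*
 * The cookie counter eventually wraps around, so "did this cookie complete
 * successfully" cannot be a plain comparison. Cookies in the half-open
 * window (last_success, last_used] have not (yet) completed successfully;
 * everything else is treated as success. The two branches below handle the
 * wrapped and non-wrapped orderings of last_success and last_used.
 */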
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

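/*
 * Note that an aborted descriptor still has its cookie completed (see
 * hidma_process_completed()), so dma_cookie_status() alone cannot tell
 * success from failure: a "complete" cookie outside the last_success
 * window is reported as DMA_ERROR, and the currently running descriptor
 * is reported as DMA_PAUSED while the channel is paused.
 */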
static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to active */
	list_move_tail(&mdesc->node, &mchan->active);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

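/*
 * For reference, a typical dmaengine memcpy client drives this channel
 * roughly as follows (sketch only, not part of this driver; variable
 * names are illustrative):
 *
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);	// hidma_prep_dma_memcpy()
 *	cookie = dmaengine_submit(tx);				// hidma_tx_submit()
 *	dma_async_issue_pending(chan);				// hidma_issue_pending()
 *	status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); // hidma_tx_status()
 */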
static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = 0;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

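/*
 * Only MSI vector 0 is programmed into the event channel (EVCA) MSI
 * address/data registers below; hidma_request_msi() records vector 0's
 * Linux IRQ number as msi_virqbase, and hidma_chirq_handler_msi() derives
 * the event bit for each vector from (chirq - msi_virqbase), i.e. the
 * HIDMA_MSI_INTS vectors are assumed to have consecutive IRQ numbers.
 */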
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}

	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

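/*
 * Only the newer HIDMA hardware supports MSI: ACPI "QCOM8062" and DT
 * "qcom,hidma-1.1" devices are treated as MSI capable, while "QCOM8061" /
 * "qcom,hidma-1.0" devices fall back to the wired channel interrupt.
 */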
static bool hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *of_compat;
	int ret = -EINVAL;

	if (!adev || acpi_disabled) {
		ret = device_property_read_string(dev, "compatible",
						  &of_compat);
		if (ret)
			return false;

		ret = strcmp(of_compat, "qcom,hidma-1.1");
	} else {
#ifdef CONFIG_ACPI
		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
	}
	return ret == 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_msi_capable(&pdev->dev);

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (!dmadev->nr_descriptors && nr_desc_prm)
		dmadev->nr_descriptors = nr_desc_prm;

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");