/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by writing to
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * at run time.
 */
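/*
 * Example (illustrative device instance name): the idle timeout can be
 * raised to three seconds at run time with
 *   echo 3000 > /sys/bus/platform/devices/QCOM8061:00/power/autosuspend_delay_ms
 */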
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
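
/*
 * Example (illustrative values): the descriptor count can be overridden at
 * module load time with "nr_desc_prm=32" (or via the corresponding
 * "<module name>.nr_desc_prm=32" kernel command line option when built in);
 * a non-zero value takes precedence over the DT/ACPI "desc-count" property
 * read in hidma_probe().
 */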

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		spin_lock_irqsave(&mchan->lock, irqflags);
		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);

		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else
			result.result = DMA_TRANS_ABORTED;

		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

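/*
 * Worked example (hypothetical cookie values) for the check below: in the
 * common, non-wrapped case last_success <= last_used. With last_success = 100
 * and last_used = 105, a completed cookie of 99 lies inside the success
 * window, while 103 completed after the last known success and is therefore
 * reported as an error. Once the cookie counter wraps around, last_success
 * can be numerically larger than last_used and the second branch applies.
 */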
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

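/*
 * Illustrative client-side flow (standard dmaengine API, not part of this
 * driver): a memcpy user would typically grab a DMA_MEMCPY-capable channel
 * with dma_request_chan_by_mask(), have the core call the prep hook above,
 * submit the returned descriptor with dmaengine_submit(), start it with
 * dma_async_issue_pending() and poll it with dma_async_is_tx_complete().
 */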
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = 0;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

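/*
 * Example (illustrative sysfs path): once hidma_sysfs_init() below has
 * created the "chid" attribute, user space can read the hardware channel
 * index with something like
 *   cat /sys/bus/platform/devices/QCOM8061:00/chid
 */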
static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

static bool hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *of_compat;
	int ret = -EINVAL;

	if (!adev || acpi_disabled) {
		ret = device_property_read_string(dev, "compatible",
						  &of_compat);
		if (ret)
			return false;

		ret = strcmp(of_compat, "qcom,hidma-1.1");
	} else {
#ifdef CONFIG_ACPI
		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
	}
	return ret == 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_msi_capable(&pdev->dev);

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		   .name = "hidma",
		   .of_match_table = hidma_match,
		   .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
		   },
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");