/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * during kernel boot.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

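/*
 * Tasklet scheduled from hidma_issue_pending() when the runtime PM
 * reference could not be taken synchronously; it resumes the device
 * and then starts the channel.
 */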
static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

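/*
 * Return true if the transaction described by @cookie completed
 * successfully, i.e. it lies within the window bounded by the last
 * successfully completed cookie and the last cookie assigned to the
 * channel. The two branches handle wraparound of the cookie counter.
 */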
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

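/*
 * Allocate nr_descriptors software descriptors for this channel and
 * reserve a low-level transfer channel entry (TRE) for each; on any
 * failure, everything allocated so far is released again.
 */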
static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     value, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

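/*
 * Pause the channel, fail every outstanding descriptor back to its
 * callback, move all descriptors to the free list, and then re-enable
 * the channel.
 */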
static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = 0;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

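/*
 * MSI variant of the channel interrupt handler: each MSI vector maps to
 * one event-channel cause bit, derived from the vector's offset from the
 * first allocated virq.
 */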
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

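/*
 * Program the MSI doorbell address and payload into the event channel
 * registers; only the first MSI descriptor (msi_index 0) is written to
 * the hardware.
 */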
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts above */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

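/*
 * Only the newer hardware revisions advertise MSI support: ACPI ID
 * "QCOM8062" or the "qcom,hidma-1.1" device tree compatible.
 */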
static bool hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *of_compat;
	int ret = -EINVAL;

	if (!adev || acpi_disabled) {
		ret = device_property_read_string(dev, "compatible",
						  &of_compat);
		if (ret)
			return false;

		ret = strcmp(of_compat, "qcom,hidma-1.1");
	} else {
#ifdef CONFIG_ACPI
		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
	}
	return ret == 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_msi_capable(&pdev->dev);

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		.name = "hidma",
		.of_match_table = hidma_match,
		.acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");