/*
 * Copyright (c) 2006 ARM Ltd.
 * Copyright (c) 2010 ST-Ericsson SA
 *
 * Author: Peter Pearse <peter.pearse@arm.com>
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is in this distribution in the file
 * called COPYING.
 *
 * Documentation: ARM DDI 0196G == PL080
 * Documentation: ARM DDI 0218E == PL081
 *
 * PL080 & PL081 both have 16 sets of DMA signals that can be routed to any
 * channel.
 *
 * The PL080 has 8 channels available for simultaneous use, and the PL081
 * has only two channels. So on these DMA controllers the number of channels
 * and the number of incoming DMA signals are two totally different things.
 * It is usually not possible to theoretically handle all physical signals,
 * so a multiplexing scheme with possible denial of use is necessary.
 *
 * The PL080 has a dual bus master, the PL081 has a single master.
 *
 * Memory to peripheral transfer may be visualized as
 *	Get data from memory to DMAC
 *	Until no data left
 *		On burst request from peripheral
 *			Destination burst from DMAC to peripheral
 *			Clear burst request
 *	Raise terminal count interrupt
 *
 * For peripherals with a FIFO:
 * Source      burst size == half the depth of the peripheral FIFO
 * Destination burst size == the depth of the peripheral FIFO
 *
 * (Bursts are irrelevant for mem to mem transfers - there are no burst
 * signals, the DMA controller will simply facilitate its AHB master.)
 *
 * ASSUMES default (little) endianness for DMA transfers
 *
 * The PL08x has two flow control settings:
 *  - DMAC flow control: the transfer size defines the number of transfers
 *    which occur for the current LLI entry, and the DMAC raises TC at the
 *    end of every LLI entry.  Observed behaviour shows the DMAC listening
 *    to both the BREQ and SREQ signals (contrary to documented),
 *    transferring data if either is active.  The LBREQ and LSREQ signals
 *    are ignored.
 *
 *  - Peripheral flow control: the transfer size is ignored (and should be
 *    zero).  The data is transferred from the current LLI entry, until
 *    after the final transfer signalled by LBREQ or LSREQ.  The DMAC
 *    will then move to the next LLI entry.
 *
 * Global TODO:
 * - Break out common code from arch/arm/mach-s3c64xx and share
 */
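
/*
 * Worked example of the FIFO rule above (illustrative only, not taken from
 * the TRM): for a peripheral with a 16-word FIFO you would program a
 * destination burst size of 16 (the full FIFO depth) and a source burst
 * size of 8 (half the depth), so the DMAC refills the FIFO well before the
 * peripheral can drain it.
 */
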
#include <linux/amba/bus.h>
#include <linux/amba/pl08x.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <asm/hardware/pl080.h>

#include "dmaengine.h"

#define DRIVER_NAME	"pl08xdmac"

static struct amba_driver pl08x_amba_driver;
struct pl08x_driver_data;

/**
 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
 * @channels: the number of channels available in this variant
 * @dualmaster: whether this version supports dual AHB masters or not.
 * @nomadik: whether the channels have Nomadik security extension bits
 *	that need to be checked for permission before use and some registers are
 *	missing
 */
struct vendor_data {
	u8 channels;
	bool dualmaster;
	bool nomadik;
};

/*
 * PL08X private data structures
 * An LLI struct - see PL08x TRM.  Note that next uses bit[0] as a bus bit,
 * start & end do not - their bus bit info is in cctl.  Also note that these
 * are fixed 32-bit quantities.
 */
struct pl08x_lli {
	u32 src;
	u32 dst;
	u32 lli;
	u32 cctl;
};

/**
 * struct pl08x_bus_data - information of source or destination
 * busses for a transfer
 * @addr: current address
 * @maxwidth: the maximum width of a transfer on this bus
 * @buswidth: the width of this bus in bytes: 1, 2 or 4
 */
struct pl08x_bus_data {
	dma_addr_t addr;
	u8 maxwidth;
	u8 buswidth;
};

/**
 * struct pl08x_phy_chan - holder for the physical channels
 * @id: physical index to this channel
 * @lock: a lock to use when altering an instance of this struct
 * @serving: the virtual channel currently being served by this physical
 * channel
 * @locked: channel unavailable for the system, e.g. dedicated to secure
 * world
 */
struct pl08x_phy_chan {
	unsigned int id;
	void __iomem *base;
	spinlock_t lock;
	struct pl08x_dma_chan *serving;
	bool locked;
};

/**
 * struct pl08x_sg - structure containing data per sg
 * @src_addr: src address of sg
 * @dst_addr: dst address of sg
 * @len: transfer len in bytes
 * @node: node for txd's dsg_list
 */
struct pl08x_sg {
	dma_addr_t src_addr;
	dma_addr_t dst_addr;
	size_t len;
	struct list_head node;
};

/**
 * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
 * @tx: async tx descriptor
 * @node: node for txd list for channels
 * @dsg_list: list of children sg's
 * @direction: direction of transfer
 * @llis_bus: DMA memory address (physical) start for the LLIs
 * @llis_va: virtual memory address start for the LLIs
 * @cctl: control reg values for current txd
 * @ccfg: config reg values for current txd
 */
struct pl08x_txd {
	struct dma_async_tx_descriptor tx;
	struct list_head node;
	struct list_head dsg_list;
	enum dma_transfer_direction direction;
	dma_addr_t llis_bus;
	struct pl08x_lli *llis_va;
	/* Default cctl value for LLIs */
	u32 cctl;
	/*
	 * Settings to be put into the physical channel when we
	 * trigger this txd.  Other registers are in llis_va[0].
	 */
	u32 ccfg;
};

/**
 * enum pl08x_dma_chan_state - holds the PL08x specific virtual channel
 * states
 * @PL08X_CHAN_IDLE: the channel is idle
 * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
 * channel and is running a transfer on it
 * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
 * channel, but the transfer is currently paused
 * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
 * channel to become available (only pertains to memcpy channels)
 */
enum pl08x_dma_chan_state {
	PL08X_CHAN_IDLE,
	PL08X_CHAN_RUNNING,
	PL08X_CHAN_PAUSED,
	PL08X_CHAN_WAITING,
};

/**
 * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
 * @chan: wrapped abstract channel
 * @phychan: the physical channel utilized by this channel, if there is one
 * @phychan_hold: if non-zero, hold on to the physical channel even if we
 * have no pending entries
 * @tasklet: tasklet scheduled by the IRQ to handle actual work etc
 * @name: name of channel
 * @cd: channel platform data
 * @runtime_addr: address for RX/TX according to the runtime config
 * @pend_list: queued transactions pending on this channel
 * @done_list: list of completed transactions
 * @at: active transaction on this channel
 * @lock: a lock for this channel data
 * @host: a pointer to the host (internal use)
 * @state: whether the channel is idle, paused, running etc
 * @slave: whether this channel is a device (slave) or for memcpy
 * @waiting: a TX descriptor on this channel which is waiting for a physical
 * channel to become available
 * @signal: the physical DMA request signal which this channel is using
 * @mux_use: count of descriptors using this DMA request signal setting
 */
struct pl08x_dma_chan {
	struct dma_chan chan;
	struct pl08x_phy_chan *phychan;
	int phychan_hold;
	struct tasklet_struct tasklet;
	const char *name;
	const struct pl08x_channel_data *cd;
	struct dma_slave_config cfg;
	struct list_head pend_list;
	struct list_head done_list;
	struct pl08x_txd *at;
	spinlock_t lock;
	struct pl08x_driver_data *host;
	enum pl08x_dma_chan_state state;
	bool slave;
	struct pl08x_txd *waiting;
	int signal;
	unsigned mux_use;
};

/**
 * struct pl08x_driver_data - the local state holder for the PL08x
 * @slave: slave engine for this instance
 * @memcpy: memcpy engine for this instance
 * @base: virtual memory base (remapped) for the PL08x
 * @adev: the corresponding AMBA (PrimeCell) bus entry
 * @vd: vendor data for this PL08x variant
 * @pd: platform data passed in from the platform/machine
 * @phy_chans: array of data for the physical channels
 * @pool: a pool for the LLI descriptors
 * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
 * @lock: a spinlock for this struct
 */
struct pl08x_driver_data {
	struct dma_device slave;
	struct dma_device memcpy;
	void __iomem *base;
	struct amba_device *adev;
	const struct vendor_data *vd;
	struct pl08x_platform_data *pd;
	struct pl08x_phy_chan *phy_chans;
	struct dma_pool *pool;
	int pool_ctr;
	u8 lli_buses;
	u8 mem_buses;
};

/*
 * PL08X specific defines
 */

/* Size (bytes) of each LLI buffer allocated for one transfer */
# define PL08X_LLI_TSFR_SIZE	0x2000

/* Maximum times we call dma_pool_alloc on this pool without freeing */
#define MAX_NUM_TSFR_LLIS	(PL08X_LLI_TSFR_SIZE/sizeof(struct pl08x_lli))
#define PL08X_ALIGN		8

static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pl08x_dma_chan, chan);
}

static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct pl08x_txd, tx);
}

/*
 * Mux handling.
 *
 * This gives us the DMA request input to the PL08x primecell which the
 * peripheral described by the channel data will be routed to, possibly
 * via a board/SoC specific external MUX.  One important point to note
 * here is that this does not depend on the physical channel.
 */
static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;
	int ret;

	if (plchan->mux_use++ == 0 && pd->get_signal) {
		ret = pd->get_signal(plchan->cd);
		if (ret < 0) {
			plchan->mux_use = 0;
			return ret;
		}

		plchan->signal = ret;
	}
	return 0;
}

static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
{
	const struct pl08x_platform_data *pd = plchan->host->pd;

	if (plchan->signal >= 0) {
		WARN_ON(plchan->mux_use == 0);

		if (--plchan->mux_use == 0 && pd->put_signal) {
			pd->put_signal(plchan->cd, plchan->signal);
			plchan->signal = -1;
		}
	}
}

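/*
 * Illustrative call pattern for the mux refcounting above (a sketch, not a
 * path taken verbatim in this driver): each user of a slave channel's DMA
 * request signal takes a mux reference and later drops it, so the
 * board-specific signal is only handed back when the last user is done.
 *
 *	pl08x_request_mux(plchan);	// first caller: pd->get_signal()
 *	pl08x_request_mux(plchan);	// nested caller: just bumps mux_use
 *	pl08x_release_mux(plchan);	// drops mux_use to 1, signal kept
 *	pl08x_release_mux(plchan);	// last caller: pd->put_signal()
 */
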
/*
 * Physical channel handling
 */

/* Whether a certain channel is busy or not */
static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
{
	unsigned int val;

	val = readl(ch->base + PL080_CH_CONFIG);
	return val & PL080_CONFIG_ACTIVE;
}

/*
 * Set the initial DMA register values, i.e. those for the first LLI.
 * The next LLI pointer and the configuration interrupt bit have
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
	struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *phychan = plchan->phychan;
	struct pl08x_lli *lli = &txd->llis_va[0];
	u32 val;

	plchan->at = txd;

	/* Wait for channel inactive */
	while (pl08x_phy_channel_busy(phychan))
		cpu_relax();

	dev_vdbg(&pl08x->adev->dev,
		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
		phychan->id, lli->src, lli->dst, lli->lli, lli->cctl,
		txd->ccfg);

	writel(lli->src, phychan->base + PL080_CH_SRC_ADDR);
	writel(lli->dst, phychan->base + PL080_CH_DST_ADDR);
	writel(lli->lli, phychan->base + PL080_CH_LLI);
	writel(lli->cctl, phychan->base + PL080_CH_CONTROL);
	writel(txd->ccfg, phychan->base + PL080_CH_CONFIG);

	/* Enable the DMA channel */
	/* Do not access config register until channel shows as disabled */
	while (readl(pl08x->base + PL080_EN_CHAN) & (1 << phychan->id))
		cpu_relax();

	/* Do not access config register until channel shows as inactive */
	val = readl(phychan->base + PL080_CH_CONFIG);
	while ((val & PL080_CONFIG_ACTIVE) || (val & PL080_CONFIG_ENABLE))
		val = readl(phychan->base + PL080_CH_CONFIG);

	writel(val | PL080_CONFIG_ENABLE, phychan->base + PL080_CH_CONFIG);
}

/*
 * Pause the channel by setting the HALT bit.
 *
 * For M->P transfers, pause the DMAC first and then stop the peripheral -
 * the FIFO can only drain if the peripheral is still requesting data.
 * (note: this can still timeout if the DMAC FIFO never drains of data.)
 *
 * For P->M transfers, disable the peripheral first to stop it filling
 * the DMAC FIFO, and then pause the DMAC.
 */
static void pl08x_pause_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;
	int timeout;

	/* Set the HALT bit and wait for the FIFO to drain */
	val = readl(ch->base + PL080_CH_CONFIG);
	val |= PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);

	/* Wait for channel inactive */
	for (timeout = 1000; timeout; timeout--) {
		if (!pl08x_phy_channel_busy(ch))
			break;
		udelay(1);
	}
	if (pl08x_phy_channel_busy(ch))
		pr_err("pl08x: channel%u timeout waiting for pause\n", ch->id);
}

static void pl08x_resume_phy_chan(struct pl08x_phy_chan *ch)
{
	u32 val;

	/* Clear the HALT bit */
	val = readl(ch->base + PL080_CH_CONFIG);
	val &= ~PL080_CONFIG_HALT;
	writel(val, ch->base + PL080_CH_CONFIG);
}

/*
 * pl08x_terminate_phy_chan() stops the channel, clears the FIFO and
 * clears any pending interrupt status.  This should not be used for
 * an on-going transfer, but as a method of shutting down a channel
 * (eg, when it's no longer used) or terminating a transfer.
 */
static void pl08x_terminate_phy_chan(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	u32 val = readl(ch->base + PL080_CH_CONFIG);

	val &= ~(PL080_CONFIG_ENABLE | PL080_CONFIG_ERR_IRQ_MASK |
		 PL080_CONFIG_TC_IRQ_MASK);

	writel(val, ch->base + PL080_CH_CONFIG);

	writel(1 << ch->id, pl08x->base + PL080_ERR_CLEAR);
	writel(1 << ch->id, pl08x->base + PL080_TC_CLEAR);
}

static inline u32 get_bytes_in_cctl(u32 cctl)
{
	/* The source width defines the number of bytes */
	u32 bytes = cctl & PL080_CONTROL_TRANSFER_SIZE_MASK;

	switch (cctl >> PL080_CONTROL_SWIDTH_SHIFT) {
	case PL080_WIDTH_8BIT:
		break;
	case PL080_WIDTH_16BIT:
		bytes *= 2;
		break;
	case PL080_WIDTH_32BIT:
		bytes *= 4;
		break;
	}
	return bytes;
}

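/*
 * Example of the decoding above, with made-up register contents: if the
 * transfer size field of cctl holds 0x100 and the source width field
 * decodes to PL080_WIDTH_32BIT, get_bytes_in_cctl() returns
 * 0x100 * 4 = 1024 bytes - the transfer size counts source-width
 * transfers, not bytes.
 */
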
/* The channel should be paused when calling this */
static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
{
	struct pl08x_phy_chan *ch;
	struct pl08x_txd *txd;
	unsigned long flags;
	size_t bytes = 0;

	spin_lock_irqsave(&plchan->lock, flags);
	ch = plchan->phychan;
	txd = plchan->at;

	/*
	 * Follow the LLIs to get the number of remaining
	 * bytes in the currently active transaction.
	 */
	if (ch && txd) {
		u32 clli = readl(ch->base + PL080_CH_LLI) & ~PL080_LLI_LM_AHB2;

		/* First get the remaining bytes in the active transfer */
		bytes = get_bytes_in_cctl(readl(ch->base + PL080_CH_CONTROL));

		if (clli) {
			struct pl08x_lli *llis_va = txd->llis_va;
			dma_addr_t llis_bus = txd->llis_bus;
			int index;

			BUG_ON(clli < llis_bus || clli >= llis_bus +
				sizeof(struct pl08x_lli) * MAX_NUM_TSFR_LLIS);

			/*
			 * Locate the next LLI - as this is an array,
			 * it's simple maths to find.
			 */
			index = (clli - llis_bus) / sizeof(struct pl08x_lli);

			for (; index < MAX_NUM_TSFR_LLIS; index++) {
				bytes += get_bytes_in_cctl(llis_va[index].cctl);

				/*
				 * A LLI pointer of 0 terminates the LLI list
				 */
				if (!llis_va[index].lli)
					break;
			}
		}
	}

	/* Sum up all queued transactions */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *txdi;
		list_for_each_entry(txdi, &plchan->pend_list, node) {
			struct pl08x_sg *dsg;
			list_for_each_entry(dsg, &txdi->dsg_list, node)
				bytes += dsg->len;
		}
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return bytes;
}

/*
 * Allocate a physical channel for a virtual channel
 *
 * Try to locate a physical channel to be used for this transfer.  If all
 * are taken return NULL and the requester will have to cope by using
 * some fallback PIO mode or retrying later.
 */
static struct pl08x_phy_chan *
pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
		      struct pl08x_dma_chan *virt_chan)
{
	struct pl08x_phy_chan *ch = NULL;
	unsigned long flags;
	int i;

	for (i = 0; i < pl08x->vd->channels; i++) {
		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);

		if (!ch->locked && !ch->serving) {
			ch->serving = virt_chan;
			spin_unlock_irqrestore(&ch->lock, flags);
			break;
		}

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	if (i == pl08x->vd->channels) {
		/* No physical channel available, cope with it */
		return NULL;
	}

	return ch;
}

static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
	struct pl08x_phy_chan *ch)
{
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);

	/* Stop the channel and clear its interrupts */
	pl08x_terminate_phy_chan(pl08x, ch);

	/* Mark it as free */
	ch->serving = NULL;
	spin_unlock_irqrestore(&ch->lock, flags);
}

/*
 * LLI handling
 */

static inline unsigned int pl08x_get_bytes_for_cctl(unsigned int coded)
{
	switch (coded) {
	case PL080_WIDTH_8BIT:
		return 1;
	case PL080_WIDTH_16BIT:
		return 2;
	case PL080_WIDTH_32BIT:
		return 4;
	default:
		break;
	}
	BUG();
	return 0;
}

static inline u32 pl08x_cctl_bits(u32 cctl, u8 srcwidth, u8 dstwidth,
				  size_t tsize)
{
	u32 retbits = cctl;

	/* Remove all src, dst and transfer size bits */
	retbits &= ~PL080_CONTROL_DWIDTH_MASK;
	retbits &= ~PL080_CONTROL_SWIDTH_MASK;
	retbits &= ~PL080_CONTROL_TRANSFER_SIZE_MASK;

	/* Then set the bits according to the parameters */
	switch (srcwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_SWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	switch (dstwidth) {
	case 1:
		retbits |= PL080_WIDTH_8BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 2:
		retbits |= PL080_WIDTH_16BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	case 4:
		retbits |= PL080_WIDTH_32BIT << PL080_CONTROL_DWIDTH_SHIFT;
		break;
	default:
		BUG();
		break;
	}

	retbits |= tsize << PL080_CONTROL_TRANSFER_SIZE_SHIFT;
	return retbits;
}

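/*
 * A sketch of how the helper above is used: re-encoding a cctl template
 * for a chunk with a 4-byte source width, a 1-byte destination width and
 * 64 source-width transfers (the widths and tsize here are made-up values
 * for illustration).
 *
 *	u32 cctl = pl08x_cctl_bits(txd->cctl, 4, 1, 64);
 *
 * The returned value keeps all other control bits from txd->cctl and only
 * rewrites the SWIDTH, DWIDTH and transfer size fields.
 */
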
struct pl08x_lli_build_data {
	struct pl08x_txd *txd;
	struct pl08x_bus_data srcbus;
	struct pl08x_bus_data dstbus;
	size_t remainder;
	u32 lli_bus;
};

/*
 * Autoselect a master bus to use for the transfer.  The slave bus will be
 * chosen as the victim in case src & dest are not similarly aligned, i.e.
 * if after aligning the master's address with the width requirements of
 * the transfer (by sending a few bytes byte by byte) the slave is still
 * not aligned, then its width will be reduced to BYTE.
 * - prefers the destination bus if both available
 * - prefers bus with fixed address (i.e. peripheral)
 */
static void pl08x_choose_master_bus(struct pl08x_lli_build_data *bd,
	struct pl08x_bus_data **mbus, struct pl08x_bus_data **sbus, u32 cctl)
{
	if (!(cctl & PL080_CONTROL_DST_INCR)) {
		*mbus = &bd->dstbus;
		*sbus = &bd->srcbus;
	} else if (!(cctl & PL080_CONTROL_SRC_INCR)) {
		*mbus = &bd->srcbus;
		*sbus = &bd->dstbus;
	} else {
		if (bd->dstbus.buswidth >= bd->srcbus.buswidth) {
			*mbus = &bd->dstbus;
			*sbus = &bd->srcbus;
		} else {
			*mbus = &bd->srcbus;
			*sbus = &bd->dstbus;
		}
	}
}

/*
 * Fills in one LLI for a certain transfer descriptor and advances the counter
 */
static void pl08x_fill_lli_for_desc(struct pl08x_lli_build_data *bd,
	int num_llis, int len, u32 cctl)
{
	struct pl08x_lli *llis_va = bd->txd->llis_va;
	dma_addr_t llis_bus = bd->txd->llis_bus;

	BUG_ON(num_llis >= MAX_NUM_TSFR_LLIS);

	llis_va[num_llis].cctl = cctl;
	llis_va[num_llis].src = bd->srcbus.addr;
	llis_va[num_llis].dst = bd->dstbus.addr;
	llis_va[num_llis].lli = llis_bus + (num_llis + 1) *
		sizeof(struct pl08x_lli);
	llis_va[num_llis].lli |= bd->lli_bus;

	if (cctl & PL080_CONTROL_SRC_INCR)
		bd->srcbus.addr += len;
	if (cctl & PL080_CONTROL_DST_INCR)
		bd->dstbus.addr += len;

	BUG_ON(bd->remainder < len);

	bd->remainder -= len;
}

static inline void prep_byte_width_lli(struct pl08x_lli_build_data *bd,
	u32 *cctl, u32 len, int num_llis, size_t *total_bytes)
{
	*cctl = pl08x_cctl_bits(*cctl, 1, 1, len);
	pl08x_fill_lli_for_desc(bd, num_llis, len, *cctl);
	(*total_bytes) += len;
}

/*
 * This fills in the table of LLIs for the transfer descriptor
 * Note that we assume we never have to change the burst sizes
 * Return 0 for error
 */
static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
			      struct pl08x_txd *txd)
{
	struct pl08x_bus_data *mbus, *sbus;
	struct pl08x_lli_build_data bd;
	int num_llis = 0;
	u32 cctl, early_bytes = 0;
	size_t max_bytes_per_lli, total_bytes;
	struct pl08x_lli *llis_va;
	struct pl08x_sg *dsg;

	txd->llis_va = dma_pool_alloc(pl08x->pool, GFP_NOWAIT, &txd->llis_bus);
	if (!txd->llis_va) {
		dev_err(&pl08x->adev->dev, "%s no memory for llis\n", __func__);
		return 0;
	}

	pl08x->pool_ctr++;

	bd.txd = txd;
	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
	cctl = txd->cctl;

	/* Find maximum width of the source bus */
	bd.srcbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_SWIDTH_MASK) >>
				       PL080_CONTROL_SWIDTH_SHIFT);

	/* Find maximum width of the destination bus */
	bd.dstbus.maxwidth =
		pl08x_get_bytes_for_cctl((cctl & PL080_CONTROL_DWIDTH_MASK) >>
				       PL080_CONTROL_DWIDTH_SHIFT);

	list_for_each_entry(dsg, &txd->dsg_list, node) {
		total_bytes = 0;
		cctl = txd->cctl;

		bd.srcbus.addr = dsg->src_addr;
		bd.dstbus.addr = dsg->dst_addr;
		bd.remainder = dsg->len;
		bd.srcbus.buswidth = bd.srcbus.maxwidth;
		bd.dstbus.buswidth = bd.dstbus.maxwidth;

		pl08x_choose_master_bus(&bd, &mbus, &sbus, cctl);

		dev_vdbg(&pl08x->adev->dev, "src=0x%08x%s/%u dst=0x%08x%s/%u len=%zu\n",
			bd.srcbus.addr, cctl & PL080_CONTROL_SRC_INCR ? "+" : "",
			bd.srcbus.buswidth,
			bd.dstbus.addr, cctl & PL080_CONTROL_DST_INCR ? "+" : "",
			bd.dstbus.buswidth,
			bd.remainder);
		dev_vdbg(&pl08x->adev->dev, "mbus=%s sbus=%s\n",
			mbus == &bd.srcbus ? "src" : "dst",
			sbus == &bd.srcbus ? "src" : "dst");

		/*
		 * Zero length is only allowed if all these requirements are
		 * met:
		 * - flow controller is peripheral.
		 * - src.addr is aligned to src.width
		 * - dst.addr is aligned to dst.width
		 *
		 * sg_len == 1 should be true, as there can be two cases here:
		 *
		 * - Memory addresses are contiguous and are not scattered.
		 *   Here, only one sg will be passed by the user driver, with
		 *   memory address and zero length. We pass this to controller
		 *   and after the transfer it will receive the last burst
		 *   request from peripheral and so transfer finishes.
		 *
		 * - Memory addresses are scattered and are not contiguous.
		 *   Here, obviously, as the DMA controller doesn't know when a
		 *   lli's transfer gets over, it can't load the next lli. So in
		 *   this case, there has to be an assumption that only one lli
		 *   is supported. Thus, we can't have scattered addresses.
		 */
		if (!bd.remainder) {
			u32 fc = (txd->ccfg & PL080_CONFIG_FLOW_CONTROL_MASK) >>
				PL080_CONFIG_FLOW_CONTROL_SHIFT;
			if (!((fc >= PL080_FLOW_SRC2DST_DST) &&
					(fc <= PL080_FLOW_SRC2DST_SRC))) {
				dev_err(&pl08x->adev->dev, "%s sg len can't be zero",
					__func__);
				return 0;
			}

			if ((bd.srcbus.addr % bd.srcbus.buswidth) ||
					(bd.dstbus.addr % bd.dstbus.buswidth)) {
				dev_err(&pl08x->adev->dev,
					"%s src & dst address must be aligned to src"
					" & dst width if peripheral is flow controller",
					__func__);
				return 0;
			}

			cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, 0);
			pl08x_fill_lli_for_desc(&bd, num_llis++, 0, cctl);
			break;
		}

		/*
		 * Send byte by byte for following cases
		 * - Less than a bus width available
		 * - until master bus is aligned
		 */
		if (bd.remainder < mbus->buswidth)
			early_bytes = bd.remainder;
		else if ((mbus->addr) % (mbus->buswidth)) {
			early_bytes = mbus->buswidth - (mbus->addr) %
				(mbus->buswidth);
			if ((bd.remainder - early_bytes) < mbus->buswidth)
				early_bytes = bd.remainder;
		}

		if (early_bytes) {
			dev_vdbg(&pl08x->adev->dev,
				"%s byte width LLIs (remain 0x%08x)\n",
				__func__, bd.remainder);
			prep_byte_width_lli(&bd, &cctl, early_bytes, num_llis++,
				&total_bytes);
		}

		if (bd.remainder) {
			/*
			 * Master now aligned
			 * - if slave is not then we must set its width down
			 */
			if (sbus->addr % sbus->buswidth) {
				dev_dbg(&pl08x->adev->dev,
					"%s set down bus width to one byte\n",
					__func__);

				sbus->buswidth = 1;
			}

			/*
			 * Bytes transferred = tsize * src width, not
			 * MIN(buswidths)
			 */
			max_bytes_per_lli = bd.srcbus.buswidth *
				PL080_CONTROL_TRANSFER_SIZE_MASK;
			dev_vdbg(&pl08x->adev->dev,
				"%s max bytes per lli = %zu\n",
				__func__, max_bytes_per_lli);

			/*
			 * Make largest possible LLIs until less than one bus
			 * width left
			 */
			while (bd.remainder > (mbus->buswidth - 1)) {
				size_t lli_len, tsize, width;

				/*
				 * If enough left try to send max possible,
				 * otherwise try to send the remainder
				 */
				lli_len = min(bd.remainder, max_bytes_per_lli);

				/*
				 * Check against maximum bus alignment:
				 * Calculate actual transfer size in relation to
				 * bus width and get a maximum remainder of the
				 * highest bus width - 1
				 */
				width = max(mbus->buswidth, sbus->buswidth);
				lli_len = (lli_len / width) * width;
				tsize = lli_len / bd.srcbus.buswidth;

				dev_vdbg(&pl08x->adev->dev,
					"%s fill lli with single lli chunk of "
					"size 0x%08zx (remainder 0x%08zx)\n",
					__func__, lli_len, bd.remainder);

				cctl = pl08x_cctl_bits(cctl, bd.srcbus.buswidth,
					bd.dstbus.buswidth, tsize);
				pl08x_fill_lli_for_desc(&bd, num_llis++,
						lli_len, cctl);
				total_bytes += lli_len;
			}

			/*
			 * Send any odd bytes
			 */
			if (bd.remainder) {
				dev_vdbg(&pl08x->adev->dev,
					"%s align with boundary, send odd bytes (remain %zu)\n",
					__func__, bd.remainder);
				prep_byte_width_lli(&bd, &cctl, bd.remainder,
						num_llis++, &total_bytes);
			}
		}

		if (total_bytes != dsg->len) {
			dev_err(&pl08x->adev->dev,
				"%s size of encoded lli:s don't match total txd, transferred 0x%08zx from size 0x%08zx\n",
				__func__, total_bytes, dsg->len);
			return 0;
		}

		if (num_llis >= MAX_NUM_TSFR_LLIS) {
			dev_err(&pl08x->adev->dev,
				"%s need to increase MAX_NUM_TSFR_LLIS from 0x%08x\n",
				__func__, (u32) MAX_NUM_TSFR_LLIS);
			return 0;
		}
	}

	llis_va = txd->llis_va;
	/* The final LLI terminates the LLI list. */
	llis_va[num_llis - 1].lli = 0;
	/* The final LLI element shall also fire an interrupt. */
	llis_va[num_llis - 1].cctl |= PL080_CONTROL_TC_IRQ_EN;

#ifdef VERBOSE_DEBUG
	{
		int i;

		dev_vdbg(&pl08x->adev->dev,
			 "%-3s %-9s %-10s %-10s %-10s %s\n",
			 "lli", "", "csrc", "cdst", "clli", "cctl");
		for (i = 0; i < num_llis; i++) {
			dev_vdbg(&pl08x->adev->dev,
				 "%3d @%p: 0x%08x 0x%08x 0x%08x 0x%08x\n",
				 i, &llis_va[i], llis_va[i].src,
				 llis_va[i].dst, llis_va[i].lli, llis_va[i].cctl
				);
		}
	}
#endif

	return num_llis;
}

/* You should call this with the struct pl08x lock held */
static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
			   struct pl08x_txd *txd)
{
	struct pl08x_sg *dsg, *_dsg;

	/* Free the LLI */
	if (txd->llis_va)
		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);

	pl08x->pool_ctr--;

	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
		list_del(&dsg->node);
		kfree(dsg);
	}

	kfree(txd);
}

static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
				struct pl08x_dma_chan *plchan)
{
	struct pl08x_txd *txdi = NULL;
	struct pl08x_txd *next;

	if (!list_empty(&plchan->pend_list)) {
		list_for_each_entry_safe(txdi,
					 next, &plchan->pend_list, node) {
			list_del(&txdi->node);
			pl08x_free_txd(pl08x, txdi);
		}
	}
}

/*
 * The DMA ENGINE API
 */
static int pl08x_alloc_chan_resources(struct dma_chan *chan)
{
	return 0;
}

static void pl08x_free_chan_resources(struct dma_chan *chan)
{
}

/*
 * This should be called with the channel plchan->lock held
 */
static int prep_phy_channel(struct pl08x_dma_chan *plchan,
			    struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_phy_chan *ch;
	int ret;

	/* Check if we already have a channel */
	if (plchan->phychan) {
		ch = plchan->phychan;
		goto got_channel;
	}

	ch = pl08x_get_phy_channel(pl08x, plchan);
	if (!ch) {
		/* No physical channel available, cope with it */
		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
		return -EBUSY;
	}

	/*
	 * OK we have a physical channel: for memcpy() this is all we
	 * need, but for slaves the physical signals may be muxed!
	 * Can the platform allow us to use this channel?
	 */
	if (plchan->slave) {
		ret = pl08x_request_mux(plchan);
		if (ret < 0) {
			dev_dbg(&pl08x->adev->dev,
				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
				ch->id, plchan->name);
			/* Release physical channel & return */
			pl08x_put_phy_channel(pl08x, ch);
			return -EBUSY;
		}
	}

	plchan->phychan = ch;
	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
		 ch->id,
		 plchan->signal,
		 plchan->name);

got_channel:
	/* Assign the flow control signal to this channel */
	if (txd->direction == DMA_MEM_TO_DEV)
		txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT;
	else if (txd->direction == DMA_DEV_TO_MEM)
		txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT;

	plchan->phychan_hold++;

	return 0;
}

static void release_phy_channel(struct pl08x_dma_chan *plchan)
{
	struct pl08x_driver_data *pl08x = plchan->host;

	pl08x_release_mux(plchan);
	pl08x_put_phy_channel(pl08x, plchan->phychan);
	plchan->phychan = NULL;
}

static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
	struct pl08x_txd *txd = to_pl08x_txd(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&plchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Put this onto the pending list */
	list_add_tail(&txd->node, &plchan->pend_list);

	/*
	 * If there was no physical channel available for this memcpy,
	 * stack the request up and indicate that the channel is waiting
	 * for a free physical channel.
	 */
	if (!plchan->slave && !plchan->phychan) {
		/* Do this memcpy whenever there is a channel ready */
		plchan->state = PL08X_CHAN_WAITING;
		plchan->waiting = txd;
	} else {
		plchan->phychan_hold--;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return cookie;
}

static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
		struct dma_chan *chan, unsigned long flags)
{
	struct dma_async_tx_descriptor *retval = NULL;

	return retval;
}

/*
 * Code accessing dma_async_is_complete() in a tight loop may give problems.
 * If slaves are relying on interrupts to signal completion this function
 * must not be called with interrupts disabled.
 */
static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_SUCCESS)
		return ret;

	/*
	 * This cookie is not complete yet.
	 * Get number of bytes left in the active transactions and queue
	 */
	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));

	if (plchan->state == PL08X_CHAN_PAUSED)
		return DMA_PAUSED;

	/* Whether waiting or running, we're in progress */
	return DMA_IN_PROGRESS;
}

/* PrimeCell DMA extension */
struct burst_table {
	u32 burstwords;
	u32 reg;
};

static const struct burst_table burst_sizes[] = {
	{
		.burstwords = 256,
		.reg = PL080_BSIZE_256,
	},
	{
		.burstwords = 128,
		.reg = PL080_BSIZE_128,
	},
	{
		.burstwords = 64,
		.reg = PL080_BSIZE_64,
	},
	{
		.burstwords = 32,
		.reg = PL080_BSIZE_32,
	},
	{
		.burstwords = 16,
		.reg = PL080_BSIZE_16,
	},
	{
		.burstwords = 8,
		.reg = PL080_BSIZE_8,
	},
	{
		.burstwords = 4,
		.reg = PL080_BSIZE_4,
	},
	{
		.burstwords = 0,
		.reg = PL080_BSIZE_1,
	},
};

/*
 * Given the source and destination available bus masks, select which
 * will be routed to each port.  We try to have source and destination
 * on separate ports, but always respect the allowable settings.
 */
static u32 pl08x_select_bus(u8 src, u8 dst)
{
	u32 cctl = 0;

	if (!(dst & PL08X_AHB1) || ((dst & PL08X_AHB2) && (src & PL08X_AHB1)))
		cctl |= PL080_CONTROL_DST_AHB2;
	if (!(src & PL08X_AHB1) || ((src & PL08X_AHB2) && !(dst & PL08X_AHB2)))
		cctl |= PL080_CONTROL_SRC_AHB2;

	return cctl;
}

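/*
 * Example of the selection above, with a made-up mask combination: for
 * src = PL08X_AHB1 | PL08X_AHB2 and dst = PL08X_AHB2, the destination is
 * forced onto AHB2 and the source stays on AHB1, so the function returns
 * PL080_CONTROL_DST_AHB2 - source and destination end up on separate
 * masters wherever the masks allow it.
 */
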
static u32 pl08x_cctl(u32 cctl)
{
	cctl &= ~(PL080_CONTROL_SRC_AHB2 | PL080_CONTROL_DST_AHB2 |
		  PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR |
		  PL080_CONTROL_PROT_MASK);

	/* Access the cell in privileged mode, non-bufferable, non-cacheable */
	return cctl | PL080_CONTROL_PROT_SYS;
}

static u32 pl08x_width(enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return PL080_WIDTH_8BIT;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return PL080_WIDTH_16BIT;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return PL080_WIDTH_32BIT;
	default:
		return ~0;
	}
}

static u32 pl08x_burst(u32 maxburst)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(burst_sizes); i++)
		if (burst_sizes[i].burstwords <= maxburst)
			break;

	return burst_sizes[i].reg;
}

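/*
 * Example of the lookup above: maxburst = 20 falls between table entries,
 * so the loop stops at .burstwords = 16 and pl08x_burst() returns
 * PL080_BSIZE_16 - the largest burst size that does not exceed what the
 * slave configuration asked for.
 */
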
static u32 pl08x_get_cctl(struct pl08x_dma_chan *plchan,
	enum dma_slave_buswidth addr_width, u32 maxburst)
{
	u32 width, burst, cctl = 0;

	width = pl08x_width(addr_width);
	if (width == ~0)
		return ~0;

	cctl |= width << PL080_CONTROL_SWIDTH_SHIFT;
	cctl |= width << PL080_CONTROL_DWIDTH_SHIFT;

	/*
	 * If this channel will only request single transfers, set this
	 * down to ONE element.  Also select one element if no maxburst
	 * is specified.
	 */
	if (plchan->cd->single)
		maxburst = 1;

	burst = pl08x_burst(maxburst);
	cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
	cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;

	return pl08x_cctl(cctl);
}

static int dma_set_runtime_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);

	if (!plchan->slave)
		return -EINVAL;

	/* Reject definitely invalid configurations */
	if (config->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
	    config->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
		return -EINVAL;

	plchan->cfg = *config;

	return 0;
}

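/*
 * A minimal client-side sketch of how this runtime configuration is
 * reached (the peripheral address and burst values below are hypothetical,
 * not part of this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = 0x80101000,		// peripheral FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 16,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 *
 * dmaengine_slave_config() ends up in pl08x_control() with
 * DMA_SLAVE_CONFIG, which stores the config in plchan->cfg for later use
 * by pl08x_prep_slave_sg().
 */
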
/*
 * Slave transactions callback to the slave device to allow
 * synchronization of slave DMA signals with the DMAC enable
 */
static void pl08x_issue_pending(struct dma_chan *chan)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&plchan->lock, flags);
	/* Something is already active, or we're waiting for a channel... */
	if (plchan->at || plchan->state == PL08X_CHAN_WAITING) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return;
	}

	/* Take the first element in the queue and execute it */
	if (!list_empty(&plchan->pend_list)) {
		struct pl08x_txd *next;

		next = list_first_entry(&plchan->pend_list,
					struct pl08x_txd,
					node);
		list_del(&next->node);
		plchan->state = PL08X_CHAN_RUNNING;

		pl08x_start_txd(plchan, next);
	}

	spin_unlock_irqrestore(&plchan->lock, flags);
}

static int pl08x_prep_channel_resources(struct pl08x_dma_chan *plchan,
					struct pl08x_txd *txd)
{
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int num_llis, ret;

	num_llis = pl08x_fill_llis_for_desc(pl08x, txd);
	if (!num_llis) {
		spin_lock_irqsave(&plchan->lock, flags);
		pl08x_free_txd(pl08x, txd);
		spin_unlock_irqrestore(&plchan->lock, flags);
		return -EINVAL;
	}

	spin_lock_irqsave(&plchan->lock, flags);

	/*
	 * See if we already have a physical channel allocated,
	 * else this is the time to try to get one.
	 */
	ret = prep_phy_channel(plchan, txd);
	if (ret) {
		/*
		 * No physical channel was available.
		 *
		 * memcpy transfers can be sorted out at submission time.
		 *
		 * Slave transfers may have been denied due to platform
		 * channel muxing restrictions.  Since there is no guarantee
		 * that this will ever be resolved, and the signal must be
		 * acquired AFTER acquiring the physical channel, we will let
		 * them be NACK:ed with -EBUSY here.  The drivers can retry
		 * the prep() call if they are eager on doing this using DMA.
		 */
		if (plchan->slave) {
			pl08x_free_txd_list(pl08x, plchan);
			pl08x_free_txd(pl08x, txd);
			spin_unlock_irqrestore(&plchan->lock, flags);
			return -EBUSY;
		}
	} else
		/*
		 * Else we're all set, paused and ready to roll, status
		 * will switch to PL08X_CHAN_RUNNING when we call
		 * issue_pending().  If there is something running on the
		 * channel already we don't change its state.
		 */
		if (plchan->state == PL08X_CHAN_IDLE)
			plchan->state = PL08X_CHAN_PAUSED;

	spin_unlock_irqrestore(&plchan->lock, flags);

	return 0;
}

static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan,
	unsigned long flags)
{
	struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT);

	if (txd) {
		dma_async_tx_descriptor_init(&txd->tx, &plchan->chan);
		txd->tx.flags = flags;
		txd->tx.tx_submit = pl08x_tx_submit;
		INIT_LIST_HEAD(&txd->node);
		INIT_LIST_HEAD(&txd->dsg_list);

		/* Always enable error and terminal interrupts */
		txd->ccfg = PL080_CONFIG_ERR_IRQ_MASK |
			    PL080_CONFIG_TC_IRQ_MASK;
	}
	return txd;
}

/*
 * Initialize a descriptor to be used by memcpy submit
 */
static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	int ret;

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev,
			"%s no memory for descriptor\n", __func__);
		return NULL;
	}

	dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
	if (!dsg) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev, "%s no memory for pl080 sg\n",
				__func__);
		return NULL;
	}
	list_add_tail(&dsg->node, &txd->dsg_list);

	txd->direction = DMA_MEM_TO_MEM;
	dsg->src_addr = src;
	dsg->dst_addr = dest;
	dsg->len = len;

	/* Set platform data for m2m */
	txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT;
	txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy &
			~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2);

	/* Both to be incremented or the code will break */
	txd->cctl |= PL080_CONTROL_SRC_INCR | PL080_CONTROL_DST_INCR;

	if (pl08x->vd->dualmaster)
		txd->cctl |= pl08x_select_bus(pl08x->mem_buses,
					      pl08x->mem_buses);

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	struct pl08x_txd *txd;
	struct pl08x_sg *dsg;
	struct scatterlist *sg;
	enum dma_slave_buswidth addr_width;
	dma_addr_t slave_addr;
	int ret, tmp;
	u8 src_buses, dst_buses;
	u32 maxburst, cctl;

	dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
			__func__, sg_dma_len(sgl), plchan->name);

	txd = pl08x_get_txd(plchan, flags);
	if (!txd) {
		dev_err(&pl08x->adev->dev, "%s no txd\n", __func__);
		return NULL;
	}

	/*
	 * Set up addresses, the PrimeCell configured address
	 * will take precedence since this may configure the
	 * channel target address dynamically at runtime.
	 */
	txd->direction = direction;

	if (direction == DMA_MEM_TO_DEV) {
		cctl = PL080_CONTROL_SRC_INCR;
		slave_addr = plchan->cfg.dst_addr;
		addr_width = plchan->cfg.dst_addr_width;
		maxburst = plchan->cfg.dst_maxburst;
		src_buses = pl08x->mem_buses;
		dst_buses = plchan->cd->periph_buses;
	} else if (direction == DMA_DEV_TO_MEM) {
		cctl = PL080_CONTROL_DST_INCR;
		slave_addr = plchan->cfg.src_addr;
		addr_width = plchan->cfg.src_addr_width;
		maxburst = plchan->cfg.src_maxburst;
		src_buses = plchan->cd->periph_buses;
		dst_buses = pl08x->mem_buses;
	} else {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"%s direction unsupported\n", __func__);
		return NULL;
	}

	cctl |= pl08x_get_cctl(plchan, addr_width, maxburst);
	if (cctl == ~0) {
		pl08x_free_txd(pl08x, txd);
		dev_err(&pl08x->adev->dev,
			"DMA slave configuration botched?\n");
		return NULL;
	}

	txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses);

	if (plchan->cfg.device_fc)
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
			PL080_FLOW_PER2MEM_PER;
	else
		tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
			PL080_FLOW_PER2MEM;

	txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;

	for_each_sg(sgl, sg, sg_len, tmp) {
		dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT);
		if (!dsg) {
			pl08x_free_txd(pl08x, txd);
			dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n",
					__func__);
			return NULL;
		}
		list_add_tail(&dsg->node, &txd->dsg_list);

		dsg->len = sg_dma_len(sg);
		if (direction == DMA_MEM_TO_DEV) {
			dsg->src_addr = sg_dma_address(sg);
			dsg->dst_addr = slave_addr;
		} else {
			dsg->src_addr = slave_addr;
			dsg->dst_addr = sg_dma_address(sg);
		}
	}

	ret = pl08x_prep_channel_resources(plchan, txd);
	if (ret)
		return NULL;

	return &txd->tx;
}

static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			 unsigned long arg)
{
	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
	struct pl08x_driver_data *pl08x = plchan->host;
	unsigned long flags;
	int ret = 0;

	/* Controls applicable to inactive channels */
	if (cmd == DMA_SLAVE_CONFIG) {
		return dma_set_runtime_config(chan,
					      (struct dma_slave_config *)arg);
	}

	/*
	 * Anything succeeds on channels with no physical allocation and
	 * no queued transfers.
	 */
	spin_lock_irqsave(&plchan->lock, flags);
	if (!plchan->phychan && !plchan->at) {
		spin_unlock_irqrestore(&plchan->lock, flags);
		return 0;
	}

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		plchan->state = PL08X_CHAN_IDLE;

		if (plchan->phychan) {
			pl08x_terminate_phy_chan(pl08x, plchan->phychan);

			/*
			 * Mark physical channel as free and free any slave
			 * signal
			 */
			release_phy_channel(plchan);
			plchan->phychan_hold = 0;
		}
		/* Dequeue jobs and free LLIs */
		if (plchan->at) {
			pl08x_free_txd(pl08x, plchan->at);
			plchan->at = NULL;
		}
		/* Dequeue jobs not yet fired as well */
		pl08x_free_txd_list(pl08x, plchan);
		break;
	case DMA_PAUSE:
		pl08x_pause_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_PAUSED;
		break;
	case DMA_RESUME:
		pl08x_resume_phy_chan(plchan->phychan);
		plchan->state = PL08X_CHAN_RUNNING;
		break;
	default:
		/* Unknown command */
		ret = -ENXIO;
		break;
	}

	spin_unlock_irqrestore(&plchan->lock, flags);

	return ret;
}

bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
{
	struct pl08x_dma_chan *plchan;
	char *name = chan_id;

	/* Reject channels for devices not bound to this driver */
	if (chan->device->dev->driver != &pl08x_amba_driver.drv)
		return false;

	plchan = to_pl08x_chan(chan);

	/* Check that the channel is not taken! */
	if (!strcmp(plchan->name, name))
		return true;

	return false;
}

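/*
 * A sketch of how a client uses this filter to grab a named channel (the
 * channel name "uart0_tx" is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pl08x_filter_id, "uart0_tx");
 *
 * The filter rejects channels belonging to other DMA controllers and then
 * matches on the platform-assigned channel name.
 */
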
/*
 * Just check that the device is there and active
 * TODO: turn this bit on/off depending on the number of physical channels
 * actually used, if it is zero... well shut it off.  That will save some
 * power.  Cut the clock at the same time.
 */
static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
{
	/* The Nomadik variant does not have the config register */
	if (pl08x->vd->nomadik)
		return;
	writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
}

static void pl08x_unmap_buffers(struct pl08x_txd *txd)
{
	struct device *dev = txd->tx.chan->device->dev;
	struct pl08x_sg *dsg;

	if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		else {
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->src_addr, dsg->len,
						DMA_TO_DEVICE);
		}
	}
	if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
		else
			list_for_each_entry(dsg, &txd->dsg_list, node)
				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
						DMA_FROM_DEVICE);
	}
}

e8689e63
LW
1674static void pl08x_tasklet(unsigned long data)
1675{
1676 struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data;
e8689e63 1677 struct pl08x_driver_data *pl08x = plchan->host;
bf072af4 1678 unsigned long flags;
a936e793 1679 LIST_HEAD(head);
e8689e63 1680
bf072af4 1681 spin_lock_irqsave(&plchan->lock, flags);
a936e793 1682 list_splice_tail_init(&plchan->done_list, &head);
8087aacd 1683
94ae8522 1684 /* If a new descriptor is queued, set it up plchan->at is NULL here */
15c17232 1685 if (!list_empty(&plchan->pend_list)) {
e8689e63
LW
1686 struct pl08x_txd *next;
1687
15c17232 1688 next = list_first_entry(&plchan->pend_list,
e8689e63
LW
1689 struct pl08x_txd,
1690 node);
1691 list_del(&next->node);
c885bee4
RKAL
1692
1693 pl08x_start_txd(plchan, next);
8087aacd
RKAL
1694 } else if (plchan->phychan_hold) {
1695 /*
1696 * This channel is still in use - we have a new txd being
1697 * prepared and will soon be queued. Don't give up the
1698 * physical channel.
1699 */
e8689e63
LW
1700 } else {
1701 struct pl08x_dma_chan *waiting = NULL;
1702
1703 /*
1704 * No more jobs, so free up the physical channel
1705 * Free any allocated signal on slave transfers too
1706 */
8c8cc2b1 1707 release_phy_channel(plchan);
e8689e63
LW
1708 plchan->state = PL08X_CHAN_IDLE;
1709
1710 /*
94ae8522
RKAL
1711 * And NOW before anyone else can grab that free:d up
1712 * physical channel, see if there is some memcpy pending
1713 * that seriously needs to start because of being stacked
1714 * up while we were choking the physical channels with data.
e8689e63
LW
1715 */
1716 list_for_each_entry(waiting, &pl08x->memcpy.channels,
1717 chan.device_node) {
3e27ee84
VK
1718 if (waiting->state == PL08X_CHAN_WAITING &&
1719 waiting->waiting != NULL) {
e8689e63
LW
1720 int ret;
1721
1722 /* This should REALLY not fail now */
1723 ret = prep_phy_channel(waiting,
1724 waiting->waiting);
1725 BUG_ON(ret);
8087aacd 1726 waiting->phychan_hold--;
e8689e63
LW
1727 waiting->state = PL08X_CHAN_RUNNING;
1728 waiting->waiting = NULL;
1729 pl08x_issue_pending(&waiting->chan);
1730 break;
1731 }
1732 }
1733 }
1734
bf072af4 1735 spin_unlock_irqrestore(&plchan->lock, flags);
858c21c0 1736
a936e793
RK
1737 while (!list_empty(&head)) {
1738 struct pl08x_txd *txd = list_first_entry(&head,
1739 struct pl08x_txd, node);
3d992e1a
RKAL
1740 dma_async_tx_callback callback = txd->tx.callback;
1741 void *callback_param = txd->tx.callback_param;
1742
a936e793
RK
1743 list_del(&txd->node);
1744
3d992e1a
RKAL
1745 /* Don't try to unmap buffers on slave channels */
1746 if (!plchan->slave)
1747 pl08x_unmap_buffers(txd);
1748
1749 /* Free the descriptor */
1750 spin_lock_irqsave(&plchan->lock, flags);
1751 pl08x_free_txd(pl08x, txd);
1752 spin_unlock_irqrestore(&plchan->lock, flags);
1753
1754 /* Callback to signal completion */
1755 if (callback)
1756 callback(callback_param);
1757 }
e8689e63
LW
1758}

static irqreturn_t pl08x_irq(int irq, void *dev)
{
	struct pl08x_driver_data *pl08x = dev;
	u32 mask = 0, err, tc, i;

	/* Check & clear the ERR & TC interrupts */
	err = readl(pl08x->base + PL080_ERR_STATUS);
	if (err) {
		dev_err(&pl08x->adev->dev, "%s error interrupt, register value 0x%08x\n",
			__func__, err);
		writel(err, pl08x->base + PL080_ERR_CLEAR);
	}
	tc = readl(pl08x->base + PL080_TC_STATUS);
	if (tc)
		writel(tc, pl08x->base + PL080_TC_CLEAR);

	if (!err && !tc)
		return IRQ_NONE;

	for (i = 0; i < pl08x->vd->channels; i++) {
		if (((1 << i) & err) || ((1 << i) & tc)) {
			/* Locate physical channel */
			struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i];
			struct pl08x_dma_chan *plchan = phychan->serving;
			struct pl08x_txd *tx;

			if (!plchan) {
				dev_err(&pl08x->adev->dev,
					"%s error or TC interrupt on unused channel: 0x%08x\n",
					__func__, i);
				continue;
			}

			spin_lock(&plchan->lock);
			tx = plchan->at;
			if (tx) {
				plchan->at = NULL;
				dma_cookie_complete(&tx->tx);
				list_add_tail(&tx->node, &plchan->done_list);
			}
			spin_unlock(&plchan->lock);

			/* Schedule tasklet on this channel */
			tasklet_schedule(&plchan->tasklet);
			mask |= (1 << i);
		}
	}

	return mask ? IRQ_HANDLED : IRQ_NONE;
}

static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan)
{
	chan->slave = true;
	chan->name = chan->cd->bus_id;
	chan->cfg.src_addr = chan->cd->addr;
	chan->cfg.dst_addr = chan->cd->addr;
}

/*
 * Initialise the DMAC memcpy/slave channels.
 * Make a local wrapper to hold required data
 */
static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x,
		struct dma_device *dmadev, unsigned int channels, bool slave)
{
	struct pl08x_dma_chan *chan;
	int i;

	INIT_LIST_HEAD(&dmadev->channels);

	/*
	 * Register as many memcpy channels as we have physical channels;
	 * we won't always be able to use them all, but the code has to
	 * cope with that situation.
	 */
	for (i = 0; i < channels; i++) {
		chan = kzalloc(sizeof(*chan), GFP_KERNEL);
		if (!chan) {
			dev_err(&pl08x->adev->dev,
				"%s no memory for channel\n", __func__);
			return -ENOMEM;
		}

		chan->host = pl08x;
		chan->state = PL08X_CHAN_IDLE;
		chan->signal = -1;

		if (slave) {
			chan->cd = &pl08x->pd->slave_channels[i];
			pl08x_dma_slave_init(chan);
		} else {
			chan->cd = &pl08x->pd->memcpy_channel;
			chan->name = kasprintf(GFP_KERNEL, "memcpy%d", i);
			if (!chan->name) {
				kfree(chan);
				return -ENOMEM;
			}
		}
		dev_dbg(&pl08x->adev->dev,
			"initialize virtual channel \"%s\"\n",
			chan->name);

		chan->chan.device = dmadev;
		dma_cookie_init(&chan->chan);

		spin_lock_init(&chan->lock);
		INIT_LIST_HEAD(&chan->pend_list);
		INIT_LIST_HEAD(&chan->done_list);
		tasklet_init(&chan->tasklet, pl08x_tasklet,
			     (unsigned long) chan);

		list_add_tail(&chan->chan.device_node, &dmadev->channels);
	}
	dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n",
		 i, slave ? "slave" : "memcpy");
	return i;
}

static void pl08x_free_virtual_channels(struct dma_device *dmadev)
{
	struct pl08x_dma_chan *chan = NULL;
	struct pl08x_dma_chan *next;

	list_for_each_entry_safe(chan,
			next, &dmadev->channels, chan.device_node) {
		list_del(&chan->chan.device_node);
		kfree(chan);
	}
}

#ifdef CONFIG_DEBUG_FS
static const char *pl08x_state_str(enum pl08x_dma_chan_state state)
{
	switch (state) {
	case PL08X_CHAN_IDLE:
		return "idle";
	case PL08X_CHAN_RUNNING:
		return "running";
	case PL08X_CHAN_PAUSED:
		return "paused";
	case PL08X_CHAN_WAITING:
		return "waiting";
	default:
		break;
	}
	return "UNKNOWN STATE";
}

static int pl08x_debugfs_show(struct seq_file *s, void *data)
{
	struct pl08x_driver_data *pl08x = s->private;
	struct pl08x_dma_chan *chan;
	struct pl08x_phy_chan *ch;
	unsigned long flags;
	int i;

	seq_printf(s, "PL08x physical channels:\n");
	seq_printf(s, "CHANNEL:\tUSER:\n");
	seq_printf(s, "--------\t-----\n");
	for (i = 0; i < pl08x->vd->channels; i++) {
		struct pl08x_dma_chan *virt_chan;

		ch = &pl08x->phy_chans[i];

		spin_lock_irqsave(&ch->lock, flags);
		virt_chan = ch->serving;

		seq_printf(s, "%d\t\t%s%s\n",
			   ch->id,
			   virt_chan ? virt_chan->name : "(none)",
			   ch->locked ? " LOCKED" : "");

		spin_unlock_irqrestore(&ch->lock, flags);
	}

	seq_printf(s, "\nPL08x virtual memcpy channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	seq_printf(s, "\nPL08x virtual slave channels:\n");
	seq_printf(s, "CHANNEL:\tSTATE:\n");
	seq_printf(s, "--------\t------\n");
	list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) {
		seq_printf(s, "%s\t\t%s\n", chan->name,
			   pl08x_state_str(chan->state));
	}

	return 0;
}

static int pl08x_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, pl08x_debugfs_show, inode->i_private);
}

static const struct file_operations pl08x_debugfs_operations = {
	.open		= pl08x_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
	/* Expose a simple debugfs interface to view all channels */
	(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
			S_IFREG | S_IRUGO, NULL, pl08x,
			&pl08x_debugfs_operations);
}
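
/*
 * Usage sketch: with debugfs mounted at /sys/kernel/debug, reading the
 * file created above dumps the tables printed by pl08x_debugfs_show(),
 * e.g. (device name and channel owner below are illustrative only):
 *
 *   # cat /sys/kernel/debug/<amba device name>
 *   PL08x physical channels:
 *   CHANNEL:	USER:
 *   --------	-----
 *   0		memcpy0
 */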

#else
static inline void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
}
#endif

static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl08x_driver_data *pl08x;
	const struct vendor_data *vd = id->data;
	int ret = 0;
	int i;

	ret = amba_request_regions(adev, NULL);
	if (ret)
		return ret;

	/* Create the driver state holder */
	pl08x = kzalloc(sizeof(*pl08x), GFP_KERNEL);
	if (!pl08x) {
		ret = -ENOMEM;
		goto out_no_pl08x;
	}

	/* Initialize memcpy engine */
	dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask);
	pl08x->memcpy.dev = &adev->dev;
	pl08x->memcpy.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->memcpy.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->memcpy.device_prep_dma_memcpy = pl08x_prep_dma_memcpy;
	pl08x->memcpy.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->memcpy.device_tx_status = pl08x_dma_tx_status;
	pl08x->memcpy.device_issue_pending = pl08x_issue_pending;
	pl08x->memcpy.device_control = pl08x_control;

	/* Initialize slave engine */
	dma_cap_set(DMA_SLAVE, pl08x->slave.cap_mask);
	pl08x->slave.dev = &adev->dev;
	pl08x->slave.device_alloc_chan_resources = pl08x_alloc_chan_resources;
	pl08x->slave.device_free_chan_resources = pl08x_free_chan_resources;
	pl08x->slave.device_prep_dma_interrupt = pl08x_prep_dma_interrupt;
	pl08x->slave.device_tx_status = pl08x_dma_tx_status;
	pl08x->slave.device_issue_pending = pl08x_issue_pending;
	pl08x->slave.device_prep_slave_sg = pl08x_prep_slave_sg;
	pl08x->slave.device_control = pl08x_control;

	/* Get the platform data */
	pl08x->pd = dev_get_platdata(&adev->dev);
	if (!pl08x->pd) {
		dev_err(&adev->dev, "no platform data supplied\n");
		ret = -EINVAL;
		goto out_no_platdata;
	}

	/* Assign useful pointers to the driver state */
	pl08x->adev = adev;
	pl08x->vd = vd;

	/* By default, AHB1 only. If dualmaster, from platform */
	pl08x->lli_buses = PL08X_AHB1;
	pl08x->mem_buses = PL08X_AHB1;
	if (pl08x->vd->dualmaster) {
		pl08x->lli_buses = pl08x->pd->lli_buses;
		pl08x->mem_buses = pl08x->pd->mem_buses;
	}

	/* A DMA memory pool for LLIs, align on 1-byte boundary */
	pl08x->pool = dma_pool_create(DRIVER_NAME, &pl08x->adev->dev,
			PL08X_LLI_TSFR_SIZE, PL08X_ALIGN, 0);
	if (!pl08x->pool) {
		ret = -ENOMEM;
		goto out_no_lli_pool;
	}

	pl08x->base = ioremap(adev->res.start, resource_size(&adev->res));
	if (!pl08x->base) {
		ret = -ENOMEM;
		goto out_no_ioremap;
	}

	/* Turn on the PL08x */
	pl08x_ensure_on(pl08x);

	/* Clear any pending interrupts, then attach the handler */
	writel(0x000000FF, pl08x->base + PL080_ERR_CLEAR);
	writel(0x000000FF, pl08x->base + PL080_TC_CLEAR);

	ret = request_irq(adev->irq[0], pl08x_irq, IRQF_DISABLED,
			  DRIVER_NAME, pl08x);
	if (ret) {
		dev_err(&adev->dev, "%s failed to request interrupt %d\n",
			__func__, adev->irq[0]);
		goto out_no_irq;
	}

	/* Initialize physical channels */
	pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
			GFP_KERNEL);
	if (!pl08x->phy_chans) {
		dev_err(&adev->dev, "%s failed to allocate physical channel holders\n",
			__func__);
		ret = -ENOMEM;
		goto out_no_phychans;
	}

	for (i = 0; i < vd->channels; i++) {
		struct pl08x_phy_chan *ch = &pl08x->phy_chans[i];

		ch->id = i;
		ch->base = pl08x->base + PL080_Cx_BASE(i);
		spin_lock_init(&ch->lock);

		/*
		 * Nomadik variants can have channels that are locked
		 * down for the secure world only. Lock up these channels
		 * by perpetually serving a dummy virtual channel.
		 */
		if (vd->nomadik) {
			u32 val;

			val = readl(ch->base + PL080_CH_CONFIG);
			if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
				dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
				ch->locked = true;
			}
		}

		dev_dbg(&adev->dev, "physical channel %d is %s\n",
			i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
	}

	/* Register as many memcpy channels as there are physical channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->memcpy,
					      pl08x->vd->channels, false);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate memcpy channels - %d\n",
			 __func__, ret);
		goto out_no_memcpy;
	}
	pl08x->memcpy.chancnt = ret;

	/* Register slave channels */
	ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
					pl08x->pd->num_slave_channels, true);
	if (ret <= 0) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to enumerate slave channels - %d\n",
			 __func__, ret);
		goto out_no_slave;
	}
	pl08x->slave.chancnt = ret;

	ret = dma_async_device_register(&pl08x->memcpy);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register memcpy as an async device - %d\n",
			 __func__, ret);
		goto out_no_memcpy_reg;
	}

	ret = dma_async_device_register(&pl08x->slave);
	if (ret) {
		dev_warn(&pl08x->adev->dev,
			 "%s failed to register slave as an async device - %d\n",
			 __func__, ret);
		goto out_no_slave_reg;
	}

	amba_set_drvdata(adev, pl08x);
	init_pl08x_debugfs(pl08x);
	dev_info(&pl08x->adev->dev, "DMA: PL%03x rev%u at 0x%08llx irq %d\n",
		 amba_part(adev), amba_rev(adev),
		 (unsigned long long)adev->res.start, adev->irq[0]);

	return 0;

out_no_slave_reg:
	dma_async_device_unregister(&pl08x->memcpy);
out_no_memcpy_reg:
	pl08x_free_virtual_channels(&pl08x->slave);
out_no_slave:
	pl08x_free_virtual_channels(&pl08x->memcpy);
out_no_memcpy:
	kfree(pl08x->phy_chans);
out_no_phychans:
	free_irq(adev->irq[0], pl08x);
out_no_irq:
	iounmap(pl08x->base);
out_no_ioremap:
	dma_pool_destroy(pl08x->pool);
out_no_lli_pool:
out_no_platdata:
	kfree(pl08x);
out_no_pl08x:
	amba_release_regions(adev);
	return ret;
}

/* PL080 has 8 channels and the PL081 has just 2 */
static struct vendor_data vendor_pl080 = {
	.channels = 8,
	.dualmaster = true,
};

static struct vendor_data vendor_nomadik = {
	.channels = 8,
	.dualmaster = true,
	.nomadik = true,
};

static struct vendor_data vendor_pl081 = {
	.channels = 2,
	.dualmaster = false,
};

static struct amba_id pl08x_ids[] = {
	/* PL080 */
	{
		.id	= 0x00041080,
		.mask	= 0x000fffff,
		.data	= &vendor_pl080,
	},
	/* PL081 */
	{
		.id	= 0x00041081,
		.mask	= 0x000fffff,
		.data	= &vendor_pl081,
	},
	/* Nomadik 8815 PL080 variant */
	{
		.id	= 0x00280080,
		.mask	= 0x00ffffff,
		.data	= &vendor_nomadik,
	},
	{ 0, 0 },
};
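
/*
 * Matching note: the AMBA bus core binds a device when
 * (periphid & mask) == id, so the 0x000fffff masks above match any
 * revision of the ARM PL080/PL081 (the revision field lives in the
 * masked-off upper bits), while the Nomadik entry uses a wider mask to
 * key exactly on ST's variant-specific ID.
 */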

MODULE_DEVICE_TABLE(amba, pl08x_ids);

static struct amba_driver pl08x_amba_driver = {
	.drv.name	= DRIVER_NAME,
	.id_table	= pl08x_ids,
	.probe		= pl08x_probe,
};

static int __init pl08x_init(void)
{
	int retval;
	retval = amba_driver_register(&pl08x_amba_driver);
	if (retval)
		printk(KERN_WARNING DRIVER_NAME
		       ": failed to register as an AMBA device (%d)\n",
		       retval);
	return retval;
}
subsys_initcall(pl08x_init);